# Dataset dump; columns: code (string), repo_name (string), path (string), language (string), license (string), size (int64).
"""
pygments.formatters.groff
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for groff output.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import math
from pip._vendor.pygments.formatter import Formatter
from pip._vendor.pygments.util import get_bool_opt, get_int_opt
__all__ = ['GroffFormatter']
class GroffFormatter(Formatter):
"""
Format tokens with groff escapes to change their color and font style.
.. versionadded:: 2.11
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`monospaced`
If set to true, monospace font will be used (default: ``true``).
`linenos`
If set to true, print the line numbers (default: ``false``).
`wrap`
Wrap lines to the specified number of characters. Disabled if set to 0
(default: ``0``).
"""
name = 'groff'
aliases = ['groff','troff','roff']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.monospaced = get_bool_opt(options, 'monospaced', True)
self.linenos = get_bool_opt(options, 'linenos', False)
self._lineno = 0
self.wrap = get_int_opt(options, 'wrap', 0)
self._linelen = 0
self.styles = {}
self._make_styles()
def _make_styles(self):
regular = '\\f[CR]' if self.monospaced else '\\f[R]'
bold = '\\f[CB]' if self.monospaced else '\\f[B]'
italic = '\\f[CI]' if self.monospaced else '\\f[I]'
for ttype, ndef in self.style:
start = end = ''
if ndef['color']:
start += '\\m[%s]' % ndef['color']
end = '\\m[]' + end
if ndef['bold']:
start += bold
end = regular + end
if ndef['italic']:
start += italic
end = regular + end
if ndef['bgcolor']:
start += '\\M[%s]' % ndef['bgcolor']
end = '\\M[]' + end
self.styles[ttype] = start, end
def _define_colors(self, outfile):
colors = set()
for _, ndef in self.style:
if ndef['color'] is not None:
colors.add(ndef['color'])
for color in colors:
outfile.write('.defcolor ' + color + ' rgb #' + color + '\n')
def _write_lineno(self, outfile):
self._lineno += 1
outfile.write("%s% 4d " % (self._lineno != 1 and '\n' or '', self._lineno))
def _wrap_line(self, line):
length = len(line.rstrip('\n'))
space = ' ' if self.linenos else ''
newline = ''
if length > self.wrap:
for i in range(0, math.floor(length / self.wrap)):
chunk = line[i*self.wrap:i*self.wrap+self.wrap]
newline += (chunk + '\n' + space)
remainder = length % self.wrap
if remainder > 0:
newline += line[-remainder-1:]
self._linelen = remainder
elif self._linelen + length > self.wrap:
newline = ('\n' + space) + line
self._linelen = length
else:
newline = line
self._linelen += length
return newline
def _escape_chars(self, text):
text = text.replace('\\', '\\[u005C]'). \
replace('.', '\\[char46]'). \
replace('\'', '\\[u0027]'). \
replace('`', '\\[u0060]'). \
replace('~', '\\[u007E]')
copy = text
for char in copy:
if len(char) != len(char.encode()):
uni = char.encode('unicode_escape') \
.decode()[1:] \
.replace('x', 'u00') \
.upper()
text = text.replace(char, '\\[u' + uni[1:] + ']')
return text
def format_unencoded(self, tokensource, outfile):
self._define_colors(outfile)
outfile.write('.nf\n\\f[CR]\n')
if self.linenos:
self._write_lineno(outfile)
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
start, end = self.styles[ttype]
for line in value.splitlines(True):
if self.wrap > 0:
line = self._wrap_line(line)
if start and end:
text = self._escape_chars(line.rstrip('\n'))
if text != '':
outfile.write(''.join((start, text, end)))
else:
outfile.write(self._escape_chars(line.rstrip('\n')))
if line.endswith('\n'):
if self.linenos:
self._write_lineno(outfile)
self._linelen = 0
else:
outfile.write('\n')
self._linelen = 0
outfile.write('\n.fi')
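# A minimal usage sketch, assuming this vendored pygments copy is importable;
# the snippet, lexer, and option values are illustrative.
if __name__ == '__main__':
    from pip._vendor.pygments import highlight
    from pip._vendor.pygments.lexers import PythonLexer

    # Emits .nf/.fi-wrapped groff source with \m[]/\M[] color escapes.
    print(highlight('print("hello")\n', PythonLexer(),
                    GroffFormatter(linenos=True, wrap=72)))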
# castiel248/Convert | Lib/site-packages/pip/_vendor/pygments/formatters/groff.py | Python | mit | 5,086 bytes
"""
pygments.formatters.html
~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for HTML output.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import functools
import os
import sys
import os.path
from io import StringIO
from pip._vendor.pygments.formatter import Formatter
from pip._vendor.pygments.token import Token, Text, STANDARD_TYPES
from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt
try:
import ctags
except ImportError:
ctags = None
__all__ = ['HtmlFormatter']
_escape_html_table = {
ord('&'): '&amp;',
ord('<'): '&lt;',
ord('>'): '&gt;',
ord('"'): '&quot;',
ord("'"): '&#39;',
}
def escape_html(text, table=_escape_html_table):
"""Escape &, <, > as well as single and double quotes for HTML."""
return text.translate(table)
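# For example, escape_html('a < b & "c"') returns 'a &lt; b &amp; &quot;c&quot;'.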
def webify(color):
if color.startswith('calc') or color.startswith('var'):
return color
else:
return '#' + color
def _get_ttype_class(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = '-' + ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
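# For example, _get_ttype_class(Token.Name.Function) returns 'nf'; a
# hypothetical non-standard subtype Token.Name.Function.Foo falls back through
# its parents and yields 'nf-Foo'.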
CSSFILE_TEMPLATE = '''\
/*
generated by Pygments <https://pygments.org/>
Copyright 2006-2022 by the Pygments team.
Licensed under the BSD license, see LICENSE for details.
*/
%(styledefs)s
'''
DOC_HEADER = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<!--
generated by Pygments <https://pygments.org/>
Copyright 2006-2022 by the Pygments team.
Licensed under the BSD license, see LICENSE for details.
-->
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<style type="text/css">
''' + CSSFILE_TEMPLATE + '''
</style>
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_HEADER_EXTERNALCSS = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<link rel="stylesheet" href="%(cssfile)s" type="text/css">
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_FOOTER = '''\
</body>
</html>
'''
class HtmlFormatter(Formatter):
r"""
Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
option.
If the `linenos` option is set to ``"table"``, the ``<pre>`` is
additionally wrapped inside a ``<table>`` which has one row and two
cells: one containing the line numbers and one containing the code.
Example:
.. sourcecode:: html
<div class="highlight" >
<table><tr>
<td class="linenos" title="click to toggle"
onclick="with (this.firstChild.style)
{ display = (display == '') ? 'none' : '' }">
<pre>1
2</pre>
</td>
<td class="code">
<pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
<span class="Ke">pass</span>
</pre>
</td>
</tr></table></div>
(whitespace added to improve clarity).
Wrapping can be disabled using the `nowrap` option.
A list of lines can be specified using the `hl_lines` option to make these
lines highlighted (as of Pygments 0.11).
With the `full` option, a complete HTML 4 document is output, including
the style definitions inside a ``<style>`` tag, or in a separate file if
the `cssfile` option is given.
When `tagsfile` is set to the path of a ctags index file, it is used to
generate hyperlinks from names to their definition. You must enable
`lineanchors` and run ctags with the `-n` option for this to work. The
`python-ctags` module from PyPI must be installed to use this feature;
otherwise a `RuntimeError` will be raised.
The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
containing CSS rules for the CSS classes used by the formatter. The
argument `arg` can be used to specify additional CSS selectors that
are prepended to the classes. A call `fmter.get_style_defs('td .code')`
would result in the following CSS classes:
.. sourcecode:: css
td .code .kw { font-weight: bold; color: #00FF00 }
td .code .cm { color: #999999 }
...
If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
`get_style_defs()` method to request multiple prefixes for the tokens:
.. sourcecode:: python
formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
The output would then look like this:
.. sourcecode:: css
div.syntax pre .kw,
pre.syntax .kw { font-weight: bold; color: #00FF00 }
div.syntax pre .cm,
pre.syntax .cm { color: #999999 }
...
Additional options accepted:
`nowrap`
If set to ``True``, don't wrap the tokens at all, not even inside a ``<pre>``
tag. This disables most other options (default: ``False``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``). This option has no effect if the `cssfile`
and `noclobber_cssfile` option are given and the file specified in
`cssfile` exists.
`noclasses`
If set to true, token ``<span>`` tags (as well as line number elements)
will not use CSS classes, but inline styles. This is not recommended
for larger pieces of code since it increases output size by quite a bit
(default: ``False``).
`classprefix`
Since the token types use relatively short class names, they may clash
with some of your own class names. In this case you can use the
`classprefix` option to give a string to prepend to all Pygments-generated
CSS class names for token types.
Note that this option also affects the output of `get_style_defs()`.
`cssclass`
CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
If you set this option, the default selector for `get_style_defs()`
will be this class.
.. versionadded:: 0.9
If you select the ``'table'`` line numbers, the wrapping table will
have a CSS class of this string plus ``'table'``, the default is
accordingly ``'highlighttable'``.
`cssstyles`
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
`prestyles`
Inline CSS styles for the ``<pre>`` tag (default: ``''``).
.. versionadded:: 0.11
`cssfile`
If the `full` option is true and this option is given, it must be the
name of an external file. If the filename does not include an absolute
path, the file's path will be assumed to be relative to the main output
file's path, if the latter can be found. The stylesheet is then written
to this file instead of the HTML file.
.. versionadded:: 0.6
`noclobber_cssfile`
If `cssfile` is given and the specified file exists, the css file will
not be overwritten. This allows the use of the `full` option in
combination with a user specified css file. Default is ``False``.
.. versionadded:: 1.1
`linenos`
If set to ``'table'``, output line numbers as a table with two cells,
one containing the line numbers, the other the whole code. This is
copy-and-paste-friendly, but may cause alignment problems with some
browsers or fonts. If set to ``'inline'``, the line numbers will be
integrated in the ``<pre>`` tag that contains the code (that setting
is *new in Pygments 0.8*).
For compatibility with Pygments 0.7 and earlier, every true value
except ``'inline'`` means the same as ``'table'`` (in particular, that
means also ``True``).
The default value is ``False``, which means no line numbers at all.
**Note:** with the default ("table") line number mechanism, the line
numbers and code can have different line heights in Internet Explorer
unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
CSS property (you get the default line spacing with ``line-height:
125%``).
`hl_lines`
Specify a list of lines to be highlighted. The line numbers are always
relative to the input (i.e. the first line is line 1) and are
independent of `linenostart`.
.. versionadded:: 0.11
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`linenospecial`
If set to a number n > 0, every nth line number is given the CSS
class ``"special"`` (default: ``0``).
`nobackground`
If set to ``True``, the formatter won't output the background color
for the wrapping element (this automatically defaults to ``False``
when there is no wrapping element [eg: no argument for the
`get_syntax_defs` method given]) (default: ``False``).
.. versionadded:: 0.6
`lineseparator`
This string is output between lines of code. It defaults to ``"\n"``,
which is enough to break a line inside ``<pre>`` tags, but you can
e.g. set it to ``"<br>"`` to get HTML line breaks.
.. versionadded:: 0.7
`lineanchors`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in an anchor tag with an ``id`` (and `name`) of ``foo-linenumber``.
This allows easy linking to certain lines.
.. versionadded:: 0.9
`linespans`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in a span tag with an ``id`` of ``foo-linenumber``.
This allows easy access to lines via javascript.
.. versionadded:: 1.6
`anchorlinenos`
If set to `True`, will wrap line numbers in <a> tags. Used in
combination with `linenos` and `lineanchors`.
`tagsfile`
If set to the path of a ctags file, wrap names in anchor tags that
link to their definitions. `lineanchors` should be used, and the
tags file should specify line numbers (see the `-n` option to ctags).
.. versionadded:: 1.6
`tagurlformat`
A string formatting pattern used to generate links to ctags definitions.
Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
Defaults to an empty string, resulting in just `#prefix-number` links.
.. versionadded:: 1.6
`filename`
A string used to generate a filename when rendering ``<pre>`` blocks,
for example if displaying source code. If `linenos` is set to
``'table'`` then the filename will be rendered in an initial row
containing a single `<th>` which spans both columns.
.. versionadded:: 2.1
`wrapcode`
Wrap the code inside ``<pre>`` blocks using ``<code>``, as recommended
by the HTML5 specification.
.. versionadded:: 2.4
`debug_token_types`
Add ``title`` attributes to all token ``<span>`` tags that show the
name of the token.
.. versionadded:: 2.10
**Subclassing the HTML formatter**
.. versionadded:: 0.7
The HTML formatter is now built in a way that allows easy subclassing, thus
customizing the output HTML code. The `format()` method calls
`self._format_lines()` which returns a generator that yields tuples of ``(1,
line)``, where the ``1`` indicates that the ``line`` is a line of the
formatted source code.
If the `nowrap` option is set, the generator is iterated over and the
resulting HTML is output.
Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
other generators. These may add some HTML code to the one generated by
`_format_lines()`, either by modifying the lines generated by the latter,
then yielding them again with ``(1, line)``, and/or by yielding other HTML
code before or after the lines, with ``(0, html)``. The distinction between
source lines and other code makes it possible to wrap the generator multiple
times.
The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.
A custom `HtmlFormatter` subclass could look like this:
.. sourcecode:: python
class CodeHtmlFormatter(HtmlFormatter):
def wrap(self, source, *, include_div):
return self._wrap_code(source)
def _wrap_code(self, source):
yield 0, '<code>'
for i, t in source:
if i == 1:
# it's a line of formatted code
t += '<br>'
yield i, t
yield 0, '</code>'
This results in wrapping the formatted lines with a ``<code>`` tag, where the
source lines are broken using ``<br>`` tags.
After calling `wrap()`, the `format()` method also adds the "line numbers"
and/or "full document" wrappers if the respective options are set. Then, all
HTML yielded by the wrapped generator is output.
"""
name = 'HTML'
aliases = ['html']
filenames = ['*.html', '*.htm']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.title = self._decodeifneeded(self.title)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.noclasses = get_bool_opt(options, 'noclasses', False)
self.classprefix = options.get('classprefix', '')
self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))
self.filename = self._decodeifneeded(options.get('filename', ''))
self.wrapcode = get_bool_opt(options, 'wrapcode', False)
self.span_element_openers = {}
self.debug_token_types = get_bool_opt(options, 'debug_token_types', False)
if self.tagsfile:
if not ctags:
raise RuntimeError('The "ctags" package must to be installed '
'to be able to use the "tagsfile" feature.')
self._ctags = ctags.CTags(self.tagsfile)
linenos = options.get('linenos', False)
if linenos == 'inline':
self.linenos = 2
elif linenos:
# compatibility with <= 0.7
self.linenos = 1
else:
self.linenos = 0
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.lineseparator = options.get('lineseparator', '\n')
self.lineanchors = options.get('lineanchors', '')
self.linespans = options.get('linespans', '')
self.anchorlinenos = get_bool_opt(options, 'anchorlinenos', False)
self.hl_lines = set()
for lineno in get_list_opt(options, 'hl_lines', []):
try:
self.hl_lines.add(int(lineno))
except ValueError:
pass
self._create_stylesheet()
def _get_css_class(self, ttype):
"""Return the css class of this token type prefixed with
the classprefix option."""
ttypeclass = _get_ttype_class(ttype)
if ttypeclass:
return self.classprefix + ttypeclass
return ''
def _get_css_classes(self, ttype):
"""Return the CSS classes of this token type prefixed with the classprefix option."""
cls = self._get_css_class(ttype)
while ttype not in STANDARD_TYPES:
ttype = ttype.parent
cls = self._get_css_class(ttype) + ' ' + cls
return cls or ''
def _get_css_inline_styles(self, ttype):
"""Return the inline CSS styles for this token type."""
cclass = self.ttype2class.get(ttype)
while cclass is None:
ttype = ttype.parent
cclass = self.ttype2class.get(ttype)
return cclass or ''
def _create_stylesheet(self):
t2c = self.ttype2class = {Token: ''}
c2s = self.class2style = {}
for ttype, ndef in self.style:
name = self._get_css_class(ttype)
style = ''
if ndef['color']:
style += 'color: %s; ' % webify(ndef['color'])
if ndef['bold']:
style += 'font-weight: bold; '
if ndef['italic']:
style += 'font-style: italic; '
if ndef['underline']:
style += 'text-decoration: underline; '
if ndef['bgcolor']:
style += 'background-color: %s; ' % webify(ndef['bgcolor'])
if ndef['border']:
style += 'border: 1px solid %s; ' % webify(ndef['border'])
if style:
t2c[ttype] = name
# save len(ttype) to enable ordering the styles by
# hierarchy (necessary for CSS cascading rules!)
c2s[name] = (style[:-2], ttype, len(ttype))
def get_style_defs(self, arg=None):
"""
Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes.
"""
style_lines = []
style_lines.extend(self.get_linenos_style_defs())
style_lines.extend(self.get_background_style_defs(arg))
style_lines.extend(self.get_token_style_defs(arg))
return '\n'.join(style_lines)
def get_token_style_defs(self, arg=None):
prefix = self.get_css_prefix(arg)
styles = [
(level, ttype, cls, style)
for cls, (style, ttype, level) in self.class2style.items()
if cls and style
]
styles.sort()
lines = [
'%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
for (level, ttype, cls, style) in styles
]
return lines
def get_background_style_defs(self, arg=None):
prefix = self.get_css_prefix(arg)
bg_color = self.style.background_color
hl_color = self.style.highlight_color
lines = []
if arg and not self.nobackground and bg_color is not None:
text_style = ''
if Text in self.ttype2class:
text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
lines.insert(
0, '%s{ background: %s;%s }' % (
prefix(''), bg_color, text_style
)
)
if hl_color is not None:
lines.insert(
0, '%s { background-color: %s }' % (prefix('hll'), hl_color)
)
return lines
def get_linenos_style_defs(self):
lines = [
'pre { %s }' % self._pre_style,
'td.linenos .normal { %s }' % self._linenos_style,
'span.linenos { %s }' % self._linenos_style,
'td.linenos .special { %s }' % self._linenos_special_style,
'span.linenos.special { %s }' % self._linenos_special_style,
]
return lines
def get_css_prefix(self, arg):
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
if isinstance(arg, str):
args = [arg]
else:
args = list(arg)
def prefix(cls):
if cls:
cls = '.' + cls
tmp = []
for arg in args:
tmp.append((arg and arg + ' ' or '') + cls)
return ', '.join(tmp)
return prefix
@property
def _pre_style(self):
return 'line-height: 125%;'
@property
def _linenos_style(self):
return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % (
self.style.line_number_color,
self.style.line_number_background_color
)
@property
def _linenos_special_style(self):
return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % (
self.style.line_number_special_color,
self.style.line_number_special_background_color
)
def _decodeifneeded(self, value):
if isinstance(value, bytes):
if self.encoding:
return value.decode(self.encoding)
return value.decode()
return value
def _wrap_full(self, inner, outfile):
if self.cssfile:
if os.path.isabs(self.cssfile):
# it's an absolute filename
cssfilename = self.cssfile
else:
try:
filename = outfile.name
if not filename or filename[0] == '<':
# pseudo files, e.g. name == '<fdopen>'
raise AttributeError
cssfilename = os.path.join(os.path.dirname(filename),
self.cssfile)
except AttributeError:
print('Note: Cannot determine output file name, '
'using current directory as base for the CSS file name',
file=sys.stderr)
cssfilename = self.cssfile
# write CSS file only if noclobber_cssfile isn't given as an option.
try:
if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
with open(cssfilename, "w") as cf:
cf.write(CSSFILE_TEMPLATE %
{'styledefs': self.get_style_defs('body')})
except OSError as err:
err.strerror = 'Error writing CSS file: ' + err.strerror
raise
yield 0, (DOC_HEADER_EXTERNALCSS %
dict(title=self.title,
cssfile=self.cssfile,
encoding=self.encoding))
else:
yield 0, (DOC_HEADER %
dict(title=self.title,
styledefs=self.get_style_defs('body'),
encoding=self.encoding))
yield from inner
yield 0, DOC_FOOTER
def _wrap_tablelinenos(self, inner):
dummyoutfile = StringIO()
lncount = 0
for t, line in inner:
if t:
lncount += 1
dummyoutfile.write(line)
fl = self.linenostart
mw = len(str(lncount + fl - 1))
sp = self.linenospecial
st = self.linenostep
anchor_name = self.lineanchors or self.linespans
aln = self.anchorlinenos
nocls = self.noclasses
lines = []
for i in range(fl, fl+lncount):
print_line = i % st == 0
special_line = sp and i % sp == 0
if print_line:
line = '%*d' % (mw, i)
if aln:
line = '<a href="#%s-%d">%s</a>' % (anchor_name, i, line)
else:
line = ' ' * mw
if nocls:
if special_line:
style = ' style="%s"' % self._linenos_special_style
else:
style = ' style="%s"' % self._linenos_style
else:
if special_line:
style = ' class="special"'
else:
style = ' class="normal"'
if style:
line = '<span%s>%s</span>' % (style, line)
lines.append(line)
ls = '\n'.join(lines)
# If a filename was specified, we can't put it into the code table as it
# would misalign the line numbers. Hence we emit a separate row for it.
filename_tr = ""
if self.filename:
filename_tr = (
'<tr><th colspan="2" class="filename">'
'<span class="filename">' + self.filename + '</span>'
'</th></tr>')
# in case you wonder about the seemingly redundant <div> here: since the
# content in the other cell also is wrapped in a div, some browsers in
# some configurations seem to mess up the formatting...
yield 0, (f'<table class="{self.cssclass}table">' + filename_tr +
'<tr><td class="linenos"><div class="linenodiv"><pre>' +
ls + '</pre></div></td><td class="code">')
yield 0, '<div>'
yield 0, dummyoutfile.getvalue()
yield 0, '</div>'
yield 0, '</td></tr></table>'
def _wrap_inlinelinenos(self, inner):
# need a list of lines since we need the width of a single number :(
inner_lines = list(inner)
sp = self.linenospecial
st = self.linenostep
num = self.linenostart
mw = len(str(len(inner_lines) + num - 1))
anchor_name = self.lineanchors or self.linespans
aln = self.anchorlinenos
nocls = self.noclasses
for _, inner_line in inner_lines:
print_line = num % st == 0
special_line = sp and num % sp == 0
if print_line:
line = '%*d' % (mw, num)
else:
line = ' ' * mw
if nocls:
if special_line:
style = ' style="%s"' % self._linenos_special_style
else:
style = ' style="%s"' % self._linenos_style
else:
if special_line:
style = ' class="linenos special"'
else:
style = ' class="linenos"'
if style:
linenos = '<span%s>%s</span>' % (style, line)
else:
linenos = line
if aln:
yield 1, ('<a href="#%s-%d">%s</a>' % (anchor_name, num, linenos) +
inner_line)
else:
yield 1, linenos + inner_line
num += 1
def _wrap_lineanchors(self, inner):
s = self.lineanchors
# subtract 1 since we have to increment i *before* yielding
i = self.linenostart - 1
for t, line in inner:
if t:
i += 1
href = "" if self.linenos else ' href="#%s-%d"' % (s, i)
yield 1, '<a id="%s-%d" name="%s-%d"%s></a>' % (s, i, s, i, href) + line
else:
yield 0, line
def _wrap_linespans(self, inner):
s = self.linespans
i = self.linenostart - 1
for t, line in inner:
if t:
i += 1
yield 1, '<span id="%s-%d">%s</span>' % (s, i, line)
else:
yield 0, line
def _wrap_div(self, inner):
style = []
if (self.noclasses and not self.nobackground and
self.style.background_color is not None):
style.append('background: %s' % (self.style.background_color,))
if self.cssstyles:
style.append(self.cssstyles)
style = '; '.join(style)
yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass) +
(style and (' style="%s"' % style)) + '>')
yield from inner
yield 0, '</div>\n'
def _wrap_pre(self, inner):
style = []
if self.prestyles:
style.append(self.prestyles)
if self.noclasses:
style.append(self._pre_style)
style = '; '.join(style)
if self.filename and self.linenos != 1:
yield 0, ('<span class="filename">' + self.filename + '</span>')
# the empty span here is to keep leading empty lines from being
# ignored by HTML parsers
yield 0, ('<pre' + (style and ' style="%s"' % style) + '><span></span>')
yield from inner
yield 0, '</pre>'
def _wrap_code(self, inner):
yield 0, '<code>'
yield from inner
yield 0, '</code>'
@functools.lru_cache(maxsize=100)
def _translate_parts(self, value):
"""HTML-escape a value and split it by newlines."""
return value.translate(_escape_html_table).split('\n')
def _format_lines(self, tokensource):
"""
Just format the tokens, without any wrapping tags.
Yield individual lines.
"""
nocls = self.noclasses
lsep = self.lineseparator
tagsfile = self.tagsfile
lspan = ''
line = []
for ttype, value in tokensource:
try:
cspan = self.span_element_openers[ttype]
except KeyError:
title = ' title="%s"' % '.'.join(ttype) if self.debug_token_types else ''
if nocls:
css_style = self._get_css_inline_styles(ttype)
if css_style:
css_style = self.class2style[css_style][0]
cspan = '<span style="%s"%s>' % (css_style, title)
else:
cspan = ''
else:
css_class = self._get_css_classes(ttype)
if css_class:
cspan = '<span class="%s"%s>' % (css_class, title)
else:
cspan = ''
self.span_element_openers[ttype] = cspan
parts = self._translate_parts(value)
if tagsfile and ttype in Token.Name:
filename, linenumber = self._lookup_ctag(value)
if linenumber:
base, filename = os.path.split(filename)
if base:
base += '/'
filename, extension = os.path.splitext(filename)
url = self.tagurlformat % {'path': base, 'fname': filename,
'fext': extension}
parts[0] = "<a href=\"%s#%s-%d\">%s" % \
(url, self.lineanchors, linenumber, parts[0])
parts[-1] = parts[-1] + "</a>"
# for all but the last line
for part in parts[:-1]:
if line:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, part,
(cspan and '</span>'), lsep))
else: # both are the same
line.extend((part, (lspan and '</span>'), lsep))
yield 1, ''.join(line)
line = []
elif part:
yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep))
else:
yield 1, lsep
# for the last line
if line and parts[-1]:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, parts[-1]))
lspan = cspan
else:
line.append(parts[-1])
elif parts[-1]:
line = [cspan, parts[-1]]
lspan = cspan
# else we neither have to open a new span nor set lspan
if line:
line.extend(((lspan and '</span>'), lsep))
yield 1, ''.join(line)
def _lookup_ctag(self, token):
entry = ctags.TagEntry()
if self._ctags.find(entry, token.encode(), 0):
return entry['file'], entry['lineNumber']
else:
return None, None
def _highlight_lines(self, tokensource):
"""
Highlight the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
"""
hls = self.hl_lines
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
style = (' style="background-color: %s"' %
(self.style.highlight_color,))
yield 1, '<span%s>%s</span>' % (style, value)
else:
yield 1, '<span class="hll">%s</span>' % value
else:
yield 1, value
def wrap(self, source):
"""
Wrap the ``source``, which is a generator yielding
individual lines, in custom generators. See docstring
for `format`. Can be overridden.
"""
output = source
if self.wrapcode:
output = self._wrap_code(output)
output = self._wrap_pre(output)
return output
def format_unencoded(self, tokensource, outfile):
"""
The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
is part of the original tokensource being highlighted, if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators.
"""
source = self._format_lines(tokensource)
# As a special case, we wrap line numbers before line highlighting
# so the line numbers get wrapped in the highlighting tag.
if not self.nowrap and self.linenos == 2:
source = self._wrap_inlinelinenos(source)
if self.hl_lines:
source = self._highlight_lines(source)
if not self.nowrap:
if self.lineanchors:
source = self._wrap_lineanchors(source)
if self.linespans:
source = self._wrap_linespans(source)
source = self.wrap(source)
if self.linenos == 1:
source = self._wrap_tablelinenos(source)
source = self._wrap_div(source)
if self.full:
source = self._wrap_full(source, outfile)
for t, piece in source:
outfile.write(piece)
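# A minimal usage sketch, assuming this vendored pygments copy is importable;
# the snippet and option values are illustrative.
if __name__ == '__main__':
    from pip._vendor.pygments import highlight
    from pip._vendor.pygments.lexers import PythonLexer

    fmt = HtmlFormatter(linenos='table', cssclass='highlight', wrapcode=True)
    print(fmt.get_style_defs('.highlight'))  # CSS rules for the chosen style
    print(highlight('def foo(bar):\n    pass\n', PythonLexer(), fmt))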
# castiel248/Convert | Lib/site-packages/pip/_vendor/pygments/formatters/html.py | Python | mit | 35,441 bytes
"""
pygments.formatters.img
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for Pixmap output.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
from pip._vendor.pygments.formatter import Formatter
from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
get_choice_opt
import subprocess
# Import this carefully
try:
from PIL import Image, ImageDraw, ImageFont
pil_available = True
except ImportError:
pil_available = False
try:
import _winreg
except ImportError:
try:
import winreg as _winreg
except ImportError:
_winreg = None
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
'BmpImageFormatter']
# For some unknown reason every font calls it something different
STYLES = {
'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
'ITALIC': ['Oblique', 'Italic'],
'BOLD': ['Bold'],
'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}
# A sane default for modern systems
DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
DEFAULT_FONT_NAME_MAC = 'Menlo'
class PilNotAvailable(ImportError):
"""When Python imaging library is not available"""
class FontNotFound(Exception):
"""When there are no usable fonts specified"""
class FontManager:
"""
Manages a set of fonts: normal, italic, bold, etc...
"""
def __init__(self, font_name, font_size=14):
self.font_name = font_name
self.font_size = font_size
self.fonts = {}
self.encoding = None
if sys.platform.startswith('win'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_WIN
self._create_win()
elif sys.platform.startswith('darwin'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_MAC
self._create_mac()
else:
if not font_name:
self.font_name = DEFAULT_FONT_NAME_NIX
self._create_nix()
def _get_nix_font_path(self, name, style):
proc = subprocess.Popen(['fc-list', "%s:style=%s" % (name, style), 'file'],
stdout=subprocess.PIPE, stderr=None)
stdout, _ = proc.communicate()
if proc.returncode == 0:
lines = stdout.splitlines()
for line in lines:
if line.startswith(b'Fontconfig warning:'):
continue
path = line.decode().strip().strip(':')
if path:
return path
return None
def _create_nix(self):
for name in STYLES['NORMAL']:
path = self._get_nix_font_path(self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_nix_font_path(self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _get_mac_font_path(self, font_map, name, style):
return font_map.get((name + ' ' + style).strip().lower())
def _create_mac(self):
font_map = {}
for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'),
'/Library/Fonts/', '/System/Library/Fonts/'):
font_map.update(
(os.path.splitext(f)[0].lower(), os.path.join(font_dir, f))
for f in os.listdir(font_dir)
if f.lower().endswith(('ttf', 'ttc')))
for name in STYLES['NORMAL']:
path = self._get_mac_font_path(font_map, self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_mac_font_path(font_map, self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _lookup_win(self, key, basename, styles, fail=False):
for suffix in ('', ' (TrueType)'):
for style in styles:
try:
valname = '%s%s%s' % (basename, style and ' '+style, suffix)
val, _ = _winreg.QueryValueEx(key, valname)
return val
except OSError:
continue
else:
if fail:
raise FontNotFound('Font %s (%s) not found in registry' %
(basename, styles[0]))
return None
def _create_win(self):
lookuperror = None
keynames = [ (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
(_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Fonts'),
(_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
(_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows\CurrentVersion\Fonts') ]
for keyname in keynames:
try:
key = _winreg.OpenKey(*keyname)
try:
path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
path = self._lookup_win(key, self.font_name, STYLES[style])
if path:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
return
except FontNotFound as err:
lookuperror = err
finally:
_winreg.CloseKey(key)
except OSError:
pass
else:
# If we get here, we checked all registry keys and had no luck
# We can be in one of two situations now:
# * All key lookups failed. In this case lookuperror is None and we
# will raise a generic error
# * At least one lookup failed with a FontNotFound error. In this
# case, we will raise that as a more specific error
if lookuperror:
raise lookuperror
raise FontNotFound('Can\'t open Windows font registry key')
def get_char_size(self):
"""
Get the character size.
"""
return self.get_text_size('M')
def get_text_size(self, text):
"""
Get the text size (width, height).
"""
font = self.fonts['NORMAL']
if hasattr(font, 'getbbox'): # Pillow >= 9.2.0
return font.getbbox(text)[2:4]
else:
return font.getsize(text)
def get_font(self, bold, oblique):
"""
Get the font based on bold and italic flags.
"""
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL']
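# A minimal FontManager usage sketch (the font name is illustrative; a
# FontNotFound error is raised if no matching system font can be located):
#
#     fm = FontManager('DejaVu Sans Mono', font_size=14)
#     char_width, char_height = fm.get_char_size()
#     bold_italic = fm.get_font(bold=True, oblique=True)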
class ImageFormatter(Formatter):
"""
Create a PNG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 0.10
Additional options accepted:
`image_format`
An image format to output that is recognised by PIL; these include:
* "PNG" (default)
* "JPEG"
* "BMP"
* "GIF"
`line_pad`
The extra spacing (in pixels) between each line of text.
Default: 2
`font_name`
The font name to be used as the base font from which others, such as
bold and italic fonts, will be generated. This really should be a
monospace font to look sane.
Default: "Courier New" on Windows, "Menlo" on Mac OS, and
"DejaVu Sans Mono" on \\*nix
`font_size`
The font size in points to be used.
Default: 14
`image_pad`
The padding, in pixels to be used at each edge of the resulting image.
Default: 10
`line_numbers`
Whether line numbers should be shown: True/False
Default: True
`line_number_start`
The line number of the first line.
Default: 1
`line_number_step`
The step used when printing line numbers.
Default: 1
`line_number_bg`
The background colour (in "#123456" format) of the line number bar, or
None to use the style background color.
Default: "#eed"
`line_number_fg`
The text color of the line numbers (in "#123456"-like format).
Default: "#886"
`line_number_chars`
The number of columns of line numbers allowable in the line number
margin.
Default: 2
`line_number_bold`
Whether line numbers will be bold: True/False
Default: False
`line_number_italic`
Whether line numbers will be italicized: True/False
Default: False
`line_number_separator`
Whether a line will be drawn between the line number area and the
source code area: True/False
Default: True
`line_number_pad`
The horizontal padding (in pixels) between the line number margin, and
the source code area.
Default: 6
`hl_lines`
Specify a list of lines to be highlighted.
.. versionadded:: 1.2
Default: empty list
`hl_color`
Specify the color for highlighting lines.
.. versionadded:: 1.2
Default: highlight color of the selected style
"""
# Required by the pygments mapper
name = 'img'
aliases = ['img', 'IMG', 'png']
filenames = ['*.png']
unicodeoutput = False
default_image_format = 'png'
def __init__(self, **options):
"""
See the class docstring for explanation of options.
"""
if not pil_available:
raise PilNotAvailable(
'Python Imaging Library is required for this formatter')
Formatter.__init__(self, **options)
self.encoding = 'latin1' # let pygments.format() do the right thing
# Read the style
self.styles = dict(self.style)
if self.style.background_color is None:
self.background_color = '#fff'
else:
self.background_color = self.style.background_color
# Image options
self.image_format = get_choice_opt(
options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
self.default_image_format, normcase=True)
self.image_pad = get_int_opt(options, 'image_pad', 10)
self.line_pad = get_int_opt(options, 'line_pad', 2)
# The fonts
fontsize = get_int_opt(options, 'font_size', 14)
self.fonts = FontManager(options.get('font_name', ''), fontsize)
self.fontw, self.fonth = self.fonts.get_char_size()
# Line number options
self.line_number_fg = options.get('line_number_fg', '#886')
self.line_number_bg = options.get('line_number_bg', '#eed')
self.line_number_chars = get_int_opt(options,
'line_number_chars', 2)
self.line_number_bold = get_bool_opt(options,
'line_number_bold', False)
self.line_number_italic = get_bool_opt(options,
'line_number_italic', False)
self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
self.line_numbers = get_bool_opt(options, 'line_numbers', True)
self.line_number_separator = get_bool_opt(options,
'line_number_separator', True)
self.line_number_step = get_int_opt(options, 'line_number_step', 1)
self.line_number_start = get_int_opt(options, 'line_number_start', 1)
if self.line_numbers:
self.line_number_width = (self.fontw * self.line_number_chars +
self.line_number_pad * 2)
else:
self.line_number_width = 0
self.hl_lines = []
hl_lines_str = get_list_opt(options, 'hl_lines', [])
for line in hl_lines_str:
try:
self.hl_lines.append(int(line))
except ValueError:
pass
self.hl_color = options.get('hl_color',
self.style.highlight_color) or '#f90'
self.drawables = []
def get_style_defs(self, arg=''):
raise NotImplementedError('The -S option is meaningless for the image '
'formatter. Use -O style=<stylename> instead.')
def _get_line_height(self):
"""
Get the height of a line.
"""
return self.fonth + self.line_pad
def _get_line_y(self, lineno):
"""
Get the Y coordinate of a line number.
"""
return lineno * self._get_line_height() + self.image_pad
def _get_char_width(self):
"""
Get the width of a character.
"""
return self.fontw
def _get_char_x(self, linelength):
"""
Get the X coordinate of a character position.
"""
return linelength + self.image_pad + self.line_number_width
def _get_text_pos(self, linelength, lineno):
"""
Get the actual position for a character and line position.
"""
return self._get_char_x(linelength), self._get_line_y(lineno)
def _get_linenumber_pos(self, lineno):
"""
Get the actual position for the start of a line number.
"""
return (self.image_pad, self._get_line_y(lineno))
def _get_text_color(self, style):
"""
Get the correct color for the token from the style.
"""
if style['color'] is not None:
fill = '#' + style['color']
else:
fill = '#000'
return fill
def _get_text_bg_color(self, style):
"""
Get the correct background color for the token from the style.
"""
if style['bgcolor'] is not None:
bg_color = '#' + style['bgcolor']
else:
bg_color = None
return bg_color
def _get_style_font(self, style):
"""
Get the correct font for the style.
"""
return self.fonts.get_font(style['bold'], style['italic'])
def _get_image_size(self, maxlinelength, maxlineno):
"""
Get the required image size.
"""
return (self._get_char_x(maxlinelength) + self.image_pad,
self._get_line_y(maxlineno + 0) + self.image_pad)
def _draw_linenumber(self, posno, lineno):
"""
Remember a line number drawable to paint later.
"""
self._draw_text(
self._get_linenumber_pos(posno),
str(lineno).rjust(self.line_number_chars),
font=self.fonts.get_font(self.line_number_bold,
self.line_number_italic),
text_fg=self.line_number_fg,
text_bg=None,
)
def _draw_text(self, pos, text, font, text_fg, text_bg):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, text_fg, text_bg))
def _create_drawables(self, tokensource):
"""
Create drawables for the token content.
"""
lineno = charno = maxcharno = 0
maxlinelength = linelength = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
# print lines
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(linelength, lineno),
temp,
font = self._get_style_font(style),
text_fg = self._get_text_color(style),
text_bg = self._get_text_bg_color(style),
)
temp_width, _ = self.fonts.get_text_size(temp)
linelength += temp_width
maxlinelength = max(maxlinelength, linelength)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
linelength = 0
charno = 0
lineno += 1
self.maxlinelength = maxlinelength
self.maxcharno = maxcharno
self.maxlineno = lineno
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in range(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0), (rectw, recth)],
fill=self.line_number_bg)
if self.line_number_separator:
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items.
"""
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxlinelength, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
# Highlight
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, text_fg, text_bg in self.drawables:
if text_bg:
text_size = draw.textsize(text=value, font=font)
draw.rectangle([pos[0], pos[1], pos[0] + text_size[0], pos[1] + text_size[1]], fill=text_bg)
draw.text(pos, value, font=font, fill=text_fg)
im.save(outfile, self.image_format.upper())
# Add one formatter per format, so that the "-f gif" option gives the correct result
# when used in pygmentize.
class GifImageFormatter(ImageFormatter):
"""
Create a GIF image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_gif'
aliases = ['gif']
filenames = ['*.gif']
default_image_format = 'gif'
class JpgImageFormatter(ImageFormatter):
"""
Create a JPEG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_jpg'
aliases = ['jpg', 'jpeg']
filenames = ['*.jpg']
default_image_format = 'jpeg'
class BmpImageFormatter(ImageFormatter):
"""
Create a bitmap image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_bmp'
aliases = ['bmp', 'bitmap']
filenames = ['*.bmp']
default_image_format = 'bmp'
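# A minimal usage sketch, assuming Pillow and a default monospace font are
# installed; the snippet and the output filename are illustrative.
if __name__ == '__main__':
    from pip._vendor.pygments import highlight
    from pip._vendor.pygments.lexers import PythonLexer

    # highlight() returns bytes here because ImageFormatter sets an encoding.
    png_bytes = highlight('print(1 + 1)\n', PythonLexer(),
                          ImageFormatter(line_numbers=True, font_size=14))
    with open('example.png', 'wb') as fp:
        fp.write(png_bytes)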
# castiel248/Convert | Lib/site-packages/pip/_vendor/pygments/formatters/img.py | Python | mit | 21,938 bytes
"""
pygments.formatters.irc
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for IRC output.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pip._vendor.pygments.formatter import Formatter
from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
from pip._vendor.pygments.util import get_choice_opt
__all__ = ['IRCFormatter']
#: Map token types to a tuple of color values for light and dark
#: backgrounds.
IRC_COLORS = {
Token: ('', ''),
Whitespace: ('gray', 'brightblack'),
Comment: ('gray', 'brightblack'),
Comment.Preproc: ('cyan', 'brightcyan'),
Keyword: ('blue', 'brightblue'),
Keyword.Type: ('cyan', 'brightcyan'),
Operator.Word: ('magenta', 'brightcyan'),
Name.Builtin: ('cyan', 'brightcyan'),
Name.Function: ('green', 'brightgreen'),
Name.Namespace: ('_cyan_', '_brightcyan_'),
Name.Class: ('_green_', '_brightgreen_'),
Name.Exception: ('cyan', 'brightcyan'),
Name.Decorator: ('brightblack', 'gray'),
Name.Variable: ('red', 'brightred'),
Name.Constant: ('red', 'brightred'),
Name.Attribute: ('cyan', 'brightcyan'),
Name.Tag: ('brightblue', 'brightblue'),
String: ('yellow', 'yellow'),
Number: ('blue', 'brightblue'),
Generic.Deleted: ('brightred', 'brightred'),
Generic.Inserted: ('green', 'brightgreen'),
Generic.Heading: ('**', '**'),
Generic.Subheading: ('*magenta*', '*brightmagenta*'),
Generic.Error: ('brightred', 'brightred'),
Error: ('_brightred_', '_brightred_'),
}
IRC_COLOR_MAP = {
'white': 0,
'black': 1,
'blue': 2,
'brightgreen': 3,
'brightred': 4,
'yellow': 5,
'magenta': 6,
'orange': 7,
'green': 7, #compat w/ ansi
'brightyellow': 8,
'lightgreen': 9,
'brightcyan': 9, # compat w/ ansi
'cyan': 10,
'lightblue': 11,
'red': 11, # compat w/ ansi
'brightblue': 12,
'brightmagenta': 13,
'brightblack': 14,
'gray': 15,
}
def ircformat(color, text):
if len(color) < 1:
return text
add = sub = ''
if '_' in color: # italic
add += '\x1D'
sub = '\x1D' + sub
color = color.strip('_')
if '*' in color: # bold
add += '\x02'
sub = '\x02' + sub
color = color.strip('*')
# underline (\x1F) not supported
# backgrounds (\x03FF,BB) not supported
if len(color) > 0: # actual color - may have issues with ircformat("red", "blah")+"10" type stuff
add += '\x03' + str(IRC_COLOR_MAP[color]).zfill(2)
sub = '\x03' + sub
return add + text + sub
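# For example, ircformat('_green_', 'ok') returns '\x1D\x0307ok\x03\x1D':
# italic on, color 07 (IRC_COLOR_MAP['green']), the text, color off, italic off.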
class IRCFormatter(Formatter):
r"""
Format tokens with IRC color sequences
The `get_style_defs()` method doesn't do anything special since there is
no support for common styles.
Options accepted:
`bg`
Set to ``"light"`` or ``"dark"`` depending on the terminal's background
(default: ``"light"``).
`colorscheme`
A dictionary mapping token types to (lightbg, darkbg) color names or
``None`` (default: ``None`` = use builtin colorscheme).
`linenos`
Set to ``True`` to have line numbers in the output as well
(default: ``False`` = no line numbers).
"""
name = 'IRC'
aliases = ['irc', 'IRC']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.darkbg = get_choice_opt(options, 'bg',
['light', 'dark'], 'light') == 'dark'
self.colorscheme = options.get('colorscheme', None) or IRC_COLORS
self.linenos = options.get('linenos', False)
self._lineno = 0
def _write_lineno(self, outfile):
self._lineno += 1
outfile.write("\n%04d: " % self._lineno)
def _format_unencoded_with_lineno(self, tokensource, outfile):
self._write_lineno(outfile)
for ttype, value in tokensource:
if value.endswith("\n"):
self._write_lineno(outfile)
value = value[:-1]
color = self.colorscheme.get(ttype)
while color is None:
ttype = ttype.parent
color = self.colorscheme.get(ttype)
if color:
color = color[self.darkbg]
spl = value.split('\n')
for line in spl[:-1]:
self._write_lineno(outfile)
if line:
outfile.write(ircformat(color, line))
if spl[-1]:
outfile.write(ircformat(color, spl[-1]))
else:
outfile.write(value)
outfile.write("\n")
def format_unencoded(self, tokensource, outfile):
if self.linenos:
self._format_unencoded_with_lineno(tokensource, outfile)
return
for ttype, value in tokensource:
color = self.colorscheme.get(ttype)
while color is None:
ttype = ttype[:-1]
color = self.colorscheme.get(ttype)
if color:
color = color[self.darkbg]
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write(ircformat(color, line))
outfile.write('\n')
if spl[-1]:
outfile.write(ircformat(color, spl[-1]))
else:
outfile.write(value)
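# A minimal usage sketch, assuming this vendored pygments copy is importable;
# the snippet and options are illustrative.
if __name__ == '__main__':
    from pip._vendor.pygments import highlight
    from pip._vendor.pygments.lexers import PythonLexer

    # Produces text interleaved with mIRC control codes
    # (\x03<nn> color, \x02 bold, \x1D italic).
    print(highlight('x = 42\n', PythonLexer(),
                    IRCFormatter(bg='dark', linenos=True)))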
# castiel248/Convert | Lib/site-packages/pip/_vendor/pygments/formatters/irc.py | Python | mit | 5,871 bytes
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from io import StringIO
from pip._vendor.pygments.formatter import Formatter
from pip._vendor.pygments.lexer import Lexer, do_insertions
from pip._vendor.pygments.token import Token, STANDARD_TYPES
from pip._vendor.pygments.util import get_bool_opt, get_int_opt
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
return text.replace('\\', '\x00'). \
replace('{', '\x01'). \
replace('}', '\x02'). \
replace('\x00', r'\%sZbs{}' % commandprefix). \
replace('\x01', r'\%sZob{}' % commandprefix). \
replace('\x02', r'\%sZcb{}' % commandprefix). \
replace('^', r'\%sZca{}' % commandprefix). \
replace('_', r'\%sZus{}' % commandprefix). \
replace('&', r'\%sZam{}' % commandprefix). \
replace('<', r'\%sZlt{}' % commandprefix). \
replace('>', r'\%sZgt{}' % commandprefix). \
replace('#', r'\%sZsh{}' % commandprefix). \
replace('%', r'\%sZpc{}' % commandprefix). \
replace('$', r'\%sZdl{}' % commandprefix). \
replace('-', r'\%sZhy{}' % commandprefix). \
replace("'", r'\%sZsq{}' % commandprefix). \
replace('"', r'\%sZdq{}' % commandprefix). \
replace('~', r'\%sZti{}' % commandprefix)
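# For example, escape_tex('50% _done_', 'PY') returns
# '50\PYZpc{} \PYZus{}done\PYZus{}'.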
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
class LatexFormatter(Formatter):
r"""
Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
standard packages.
Without the `full` option, code is formatted as one ``Verbatim``
environment, like this:
.. sourcecode:: latex
\begin{Verbatim}[commandchars=\\\{\}]
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
\PY{k}{pass}
\end{Verbatim}
Wrapping can be disabled using the `nowrap` option.
The special command used here (``\PY``) and all the other macros it needs
are output by the `get_style_defs` method.
With the `full` option, a complete LaTeX document is output, including
the command definitions in the preamble.
The `get_style_defs()` method of a `LatexFormatter` returns a string
containing ``\def`` commands defining the macros needed inside the
``Verbatim`` environments.
Additional options accepted:
`nowrap`
If set to ``True``, don't wrap the tokens at all, not even inside a
``\begin{Verbatim}`` environment. This disables most other options
(default: ``False``).
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`docclass`
If the `full` option is enabled, this is the document class to use
(default: ``'article'``).
`preamble`
If the `full` option is enabled, this can be further preamble commands,
e.g. ``\usepackage`` (default: ``''``).
`linenos`
If set to ``True``, output line numbers (default: ``False``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`verboptions`
Additional options given to the Verbatim environment (see the *fancyvrb*
docs for possible values) (default: ``''``).
`commandprefix`
The LaTeX commands used to produce colored output are constructed
using this prefix and some letters (default: ``'PY'``).
.. versionadded:: 0.7
.. versionchanged:: 0.10
The default is now ``'PY'`` instead of ``'C'``.
`texcomments`
        If set to ``True``, enables LaTeX comment lines. That is, LaTeX markup
in comment tokens is not escaped so that LaTeX can render it (default:
``False``).
.. versionadded:: 1.2
`mathescape`
If set to ``True``, enables LaTeX math mode escape in comments. That
is, ``'$...$'`` inside a comment will trigger math mode (default:
``False``).
.. versionadded:: 1.2
`escapeinside`
If set to a string of length 2, enables escaping to LaTeX. Text
delimited by these 2 characters is read as LaTeX code and
typeset accordingly. It has no effect in string literals. It has
no effect in comments if `texcomments` or `mathescape` is
set. (default: ``''``).
.. versionadded:: 2.0
`envname`
Allows you to pick an alternative environment name replacing Verbatim.
The alternate environment still has to support Verbatim's option syntax.
(default: ``'Verbatim'``).
.. versionadded:: 2.0
"""
name = 'LaTeX'
aliases = ['latex', 'tex']
filenames = ['*.tex']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.docclass = options.get('docclass', 'article')
self.preamble = options.get('preamble', '')
self.linenos = get_bool_opt(options, 'linenos', False)
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.verboptions = options.get('verboptions', '')
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.commandprefix = options.get('commandprefix', 'PY')
self.texcomments = get_bool_opt(options, 'texcomments', False)
self.mathescape = get_bool_opt(options, 'mathescape', False)
self.escapeinside = options.get('escapeinside', '')
if len(self.escapeinside) == 2:
self.left = self.escapeinside[0]
self.right = self.escapeinside[1]
else:
self.escapeinside = ''
self.envname = options.get('envname', 'Verbatim')
self._create_stylesheet()
def _create_stylesheet(self):
t2n = self.ttype2name = {Token: ''}
c2d = self.cmd2def = {}
cp = self.commandprefix
def rgbcolor(col):
if col:
return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
for i in (0, 2, 4)])
else:
return '1,1,1'
for ttype, ndef in self.style:
name = _get_ttype_name(ttype)
cmndef = ''
if ndef['bold']:
cmndef += r'\let\$$@bf=\textbf'
if ndef['italic']:
cmndef += r'\let\$$@it=\textit'
if ndef['underline']:
cmndef += r'\let\$$@ul=\underline'
if ndef['roman']:
cmndef += r'\let\$$@ff=\textrm'
if ndef['sans']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['mono']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['color']:
cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
rgbcolor(ndef['color']))
if ndef['border']:
cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{\string -\fboxrule}'
r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}}' %
(rgbcolor(ndef['border']),
rgbcolor(ndef['bgcolor'])))
elif ndef['bgcolor']:
cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{0pt}'
r'\colorbox[rgb]{%s}{\strut ##1}}}' %
rgbcolor(ndef['bgcolor']))
if cmndef == '':
continue
cmndef = cmndef.replace('$$', cp)
t2n[ttype] = name
c2d[name] = cmndef
def get_style_defs(self, arg=''):
"""
Return the command sequences needed to define the commands
used to format text in the verbatim environment. ``arg`` is ignored.
"""
cp = self.commandprefix
styles = []
for name, definition in self.cmd2def.items():
styles.append(r'\@namedef{%s@tok@%s}{%s}' % (cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
'styles': '\n'.join(styles)}
def format_unencoded(self, tokensource, outfile):
# TODO: add support for background colors
t2n = self.ttype2name
cp = self.commandprefix
if self.full:
realoutfile = outfile
outfile = StringIO()
if not self.nowrap:
outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}')
if self.linenos:
start, step = self.linenostart, self.linenostep
outfile.write(',numbers=left' +
(start and ',firstnumber=%d' % start or '') +
(step and ',stepnumber=%d' % step or ''))
if self.mathescape or self.texcomments or self.escapeinside:
outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7'
'\\catcode`\\_=8\\relax}')
if self.verboptions:
outfile.write(',' + self.verboptions)
outfile.write(']\n')
for ttype, value in tokensource:
if ttype in Token.Comment:
if self.texcomments:
# Try to guess comment starting lexeme and escape it ...
start = value[0:1]
for i in range(1, len(value)):
if start[0] != value[i]:
break
start += value[i]
value = value[len(start):]
start = escape_tex(start, cp)
# ... but do not escape inside comment.
value = start + value
elif self.mathescape:
# Only escape parts not inside a math environment.
parts = value.split('$')
in_math = False
for i, part in enumerate(parts):
if not in_math:
parts[i] = escape_tex(part, cp)
in_math = not in_math
value = '$'.join(parts)
elif self.escapeinside:
text = value
value = ''
while text:
a, sep1, text = text.partition(self.left)
if sep1:
b, sep2, text = text.partition(self.right)
if sep2:
value += escape_tex(a, cp) + b
else:
value += escape_tex(a + sep1 + b, cp)
else:
value += escape_tex(a, cp)
else:
value = escape_tex(value, cp)
elif ttype not in Token.Escape:
value = escape_tex(value, cp)
styles = []
while ttype is not Token:
try:
styles.append(t2n[ttype])
except KeyError:
# not in current style
styles.append(_get_ttype_name(ttype))
ttype = ttype.parent
styleval = '+'.join(reversed(styles))
if styleval:
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
outfile.write('\n')
if spl[-1]:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
else:
outfile.write(value)
if not self.nowrap:
outfile.write('\\end{' + self.envname + '}\n')
if self.full:
encoding = self.encoding or 'utf8'
            # map known existing encodings from the LaTeX distribution
encoding = {
'utf_8': 'utf8',
'latin_1': 'latin1',
'iso_8859_1': 'latin1',
}.get(encoding.replace('-', '_'), encoding)
realoutfile.write(DOC_TEMPLATE %
dict(docclass = self.docclass,
preamble = self.preamble,
title = self.title,
encoding = encoding,
styledefs = self.get_style_defs(),
code = outfile.getvalue()))
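# Example usage (an illustrative sketch; ``highlight`` and ``PythonLexer``
# come from the vendored pygments package, nothing here is pip-specific).
# With ``full=True`` this prints a complete, compilable LaTeX document:
# >>> from pip._vendor.pygments import highlight
# >>> from pip._vendor.pygments.lexers import PythonLexer
# >>> print(highlight('x = 1', PythonLexer(), LatexFormatter(full=True)))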
class LatexEmbeddedLexer(Lexer):
"""
This lexer takes one lexer as argument, the lexer for the language
being formatted, and the left and right delimiters for escaped text.
First everything is scanned using the language lexer to obtain
strings and comments. All other consecutive tokens are merged and
the resulting text is scanned for escaped segments, which are given
the Token.Escape type. Finally text that is not escaped is scanned
again with the language lexer.
"""
def __init__(self, left, right, lang, **options):
self.left = left
self.right = right
self.lang = lang
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
# find and remove all the escape tokens (replace with an empty string)
# this is very similar to DelegatingLexer.get_tokens_unprocessed.
buffered = ''
insertions = []
insertion_buf = []
for i, t, v in self._find_safe_escape_tokens(text):
if t is None:
if insertion_buf:
insertions.append((len(buffered), insertion_buf))
insertion_buf = []
buffered += v
else:
insertion_buf.append((i, t, v))
if insertion_buf:
insertions.append((len(buffered), insertion_buf))
return do_insertions(insertions,
self.lang.get_tokens_unprocessed(buffered))
def _find_safe_escape_tokens(self, text):
""" find escape tokens that are not in strings or comments """
for i, t, v in self._filter_to(
self.lang.get_tokens_unprocessed(text),
lambda t: t in Token.Comment or t in Token.String
):
if t is None:
for i2, t2, v2 in self._find_escape_tokens(v):
yield i + i2, t2, v2
else:
yield i, None, v
def _filter_to(self, it, pred):
""" Keep only the tokens that match `pred`, merge the others together """
buf = ''
idx = 0
for i, t, v in it:
if pred(t):
if buf:
yield idx, None, buf
buf = ''
yield i, t, v
else:
if not buf:
idx = i
buf += v
if buf:
yield idx, None, buf
def _find_escape_tokens(self, text):
""" Find escape tokens within text, give token=None otherwise """
index = 0
while text:
a, sep1, text = text.partition(self.left)
if a:
yield index, None, a
index += len(a)
if sep1:
b, sep2, text = text.partition(self.right)
if sep2:
yield index + len(sep1), Token.Escape, b
index += len(sep1) + len(b) + len(sep2)
else:
yield index, Token.Error, sep1
index += len(sep1)
text = b
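# Sketch of the escape mechanism described in the class docstring above
# (illustrative; the '|' delimiters are an arbitrary choice and must match
# the formatter's ``escapeinside`` option):
# >>> from pip._vendor.pygments import highlight
# >>> from pip._vendor.pygments.lexers import PythonLexer
# >>> lexer = LatexEmbeddedLexer('|', '|', PythonLexer())
# >>> formatter = LatexFormatter(escapeinside='||')
# >>> print(highlight('x = 1  # |\\emph{escaped}|', lexer, formatter))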
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/formatters/latex.py
|
Python
|
mit
| 19,351 |
"""
pygments.formatters.other
~~~~~~~~~~~~~~~~~~~~~~~~~
Other formatters: NullFormatter, RawTokenFormatter.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pip._vendor.pygments.formatter import Formatter
from pip._vendor.pygments.util import get_choice_opt
from pip._vendor.pygments.token import Token
from pip._vendor.pygments.console import colorize
__all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
class NullFormatter(Formatter):
"""
Output the text unchanged without any formatting.
"""
name = 'Text only'
aliases = ['text', 'null']
filenames = ['*.txt']
def format(self, tokensource, outfile):
enc = self.encoding
for ttype, value in tokensource:
if enc:
outfile.write(value.encode(enc))
else:
outfile.write(value)
class RawTokenFormatter(Formatter):
r"""
Format tokens as a raw representation for storing token streams.
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
be converted to a token stream with the `RawTokenLexer`, described in the
:doc:`lexer list <lexers>`.
Only two options are accepted:
`compress`
If set to ``'gz'`` or ``'bz2'``, compress the output with the given
compression algorithm after encoding (default: ``''``).
`error_color`
If set to a color name, highlight error tokens using that color. If
set but with no value, defaults to ``'red'``.
.. versionadded:: 0.11
"""
name = 'Raw tokens'
aliases = ['raw', 'tokens']
filenames = ['*.raw']
unicodeoutput = False
def __init__(self, **options):
Formatter.__init__(self, **options)
# We ignore self.encoding if it is set, since it gets set for lexer
# and formatter if given with -Oencoding on the command line.
# The RawTokenFormatter outputs only ASCII. Override here.
self.encoding = 'ascii' # let pygments.format() do the right thing
self.compress = get_choice_opt(options, 'compress',
['', 'none', 'gz', 'bz2'], '')
self.error_color = options.get('error_color', None)
if self.error_color is True:
self.error_color = 'red'
if self.error_color is not None:
try:
colorize(self.error_color, '')
except KeyError:
raise ValueError("Invalid color %r specified" %
self.error_color)
def format(self, tokensource, outfile):
try:
outfile.write(b'')
except TypeError:
raise TypeError('The raw tokens formatter needs a binary '
'output file')
if self.compress == 'gz':
import gzip
outfile = gzip.GzipFile('', 'wb', 9, outfile)
write = outfile.write
flush = outfile.close
elif self.compress == 'bz2':
import bz2
compressor = bz2.BZ2Compressor(9)
def write(text):
outfile.write(compressor.compress(text))
def flush():
outfile.write(compressor.flush())
outfile.flush()
else:
write = outfile.write
flush = outfile.flush
if self.error_color:
for ttype, value in tokensource:
line = b"%r\t%r\n" % (ttype, value)
if ttype is Token.Error:
write(colorize(self.error_color, line))
else:
write(line)
else:
for ttype, value in tokensource:
write(b"%r\t%r\n" % (ttype, value))
flush()
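# Example (illustrative): the raw-token format needs a binary sink, and each
# output line is ``tokentype<TAB>repr(tokenstring)``:
# >>> import io
# >>> from pip._vendor.pygments.lexers import PythonLexer
# >>> buf = io.BytesIO()
# >>> RawTokenFormatter().format(PythonLexer().get_tokens('x = 1\n'), buf)
# >>> buf.getvalue().splitlines()[0]
# b"Token.Name\t'x'"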
TESTCASE_BEFORE = '''\
def testNeedsName(lexer):
fragment = %r
tokens = [
'''
TESTCASE_AFTER = '''\
]
assert list(lexer.get_tokens(fragment)) == tokens
'''
class TestcaseFormatter(Formatter):
"""
Format tokens as appropriate for a new testcase.
.. versionadded:: 2.0
"""
name = 'Testcase'
aliases = ['testcase']
def __init__(self, **options):
Formatter.__init__(self, **options)
if self.encoding is not None and self.encoding != 'utf-8':
raise ValueError("Only None and utf-8 are allowed encodings.")
def format(self, tokensource, outfile):
indentation = ' ' * 12
rawbuf = []
outbuf = []
for ttype, value in tokensource:
rawbuf.append(value)
outbuf.append('%s(%s, %r),\n' % (indentation, ttype, value))
before = TESTCASE_BEFORE % (''.join(rawbuf),)
during = ''.join(outbuf)
after = TESTCASE_AFTER
if self.encoding is None:
outfile.write(before + during + after)
else:
outfile.write(before.encode('utf-8'))
outfile.write(during.encode('utf-8'))
outfile.write(after.encode('utf-8'))
outfile.flush()
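# Example (illustrative sketch): writes a ready-to-paste test skeleton,
# wrapping the token list in TESTCASE_BEFORE/TESTCASE_AFTER above:
# >>> import sys
# >>> from pip._vendor.pygments.lexers import PythonLexer
# >>> TestcaseFormatter().format(PythonLexer().get_tokens('pass'), sys.stdout)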
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/formatters/other.py
|
Python
|
mit
| 5,073 |
"""
pygments.formatters.pangomarkup
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for Pango markup output.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pip._vendor.pygments.formatter import Formatter
__all__ = ['PangoMarkupFormatter']
_escape_table = {
    ord('&'): '&amp;',
    ord('<'): '&lt;',
}
def escape_special_chars(text, table=_escape_table):
"""Escape & and < for Pango Markup."""
return text.translate(table)
class PangoMarkupFormatter(Formatter):
"""
Format tokens as Pango Markup code. It can then be rendered to an SVG.
.. versionadded:: 2.9
"""
name = 'Pango Markup'
aliases = ['pango', 'pangomarkup']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.styles = {}
for token, style in self.style:
start = ''
end = ''
if style['color']:
start += '<span fgcolor="#%s">' % style['color']
end = '</span>' + end
if style['bold']:
start += '<b>'
end = '</b>' + end
if style['italic']:
start += '<i>'
end = '</i>' + end
if style['underline']:
start += '<u>'
end = '</u>' + end
self.styles[token] = (start, end)
def format_unencoded(self, tokensource, outfile):
lastval = ''
lasttype = None
outfile.write('<tt>')
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
if ttype == lasttype:
lastval += escape_special_chars(value)
else:
if lastval:
stylebegin, styleend = self.styles[lasttype]
outfile.write(stylebegin + lastval + styleend)
lastval = escape_special_chars(value)
lasttype = ttype
if lastval:
stylebegin, styleend = self.styles[lasttype]
outfile.write(stylebegin + lastval + styleend)
outfile.write('</tt>')
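# Example (illustrative): output is wrapped in <tt> and the '<' in the
# source is escaped for Pango:
# >>> from pip._vendor.pygments import highlight
# >>> from pip._vendor.pygments.lexers import PythonLexer
# >>> markup = highlight('x < 1', PythonLexer(), PangoMarkupFormatter())
# >>> markup.startswith('<tt>'), '&lt;' in markup
# (True, True)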
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py
|
Python
|
mit
| 2,212 |
"""
pygments.formatters.rtf
~~~~~~~~~~~~~~~~~~~~~~~
A formatter that generates RTF files.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pip._vendor.pygments.formatter import Formatter
from pip._vendor.pygments.util import get_int_opt, surrogatepair
__all__ = ['RtfFormatter']
class RtfFormatter(Formatter):
"""
Format tokens as RTF markup. This formatter automatically outputs full RTF
documents with color information and other useful stuff. Perfect for Copy and
Paste into Microsoft(R) Word(R) documents.
Please note that ``encoding`` and ``outencoding`` options are ignored.
The RTF format is ASCII natively, but handles unicode characters correctly
thanks to escape sequences.
.. versionadded:: 0.6
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`fontface`
The used font family, for example ``Bitstream Vera Sans``. Defaults to
some generic font which is supposed to have fixed width.
`fontsize`
Size of the font used. Size is specified in half points. The
default is 24 half-points, giving a size 12 font.
.. versionadded:: 2.0
"""
name = 'RTF'
aliases = ['rtf']
filenames = ['*.rtf']
def __init__(self, **options):
r"""
Additional options accepted:
``fontface``
Name of the font used. Could for example be ``'Courier New'``
to further specify the default which is ``'\fmodern'``. The RTF
specification claims that ``\fmodern`` are "Fixed-pitch serif
and sans serif fonts". Hope every RTF implementation thinks
the same about modern...
"""
Formatter.__init__(self, **options)
self.fontface = options.get('fontface') or ''
self.fontsize = get_int_opt(options, 'fontsize', 0)
def _escape(self, text):
return text.replace('\\', '\\\\') \
.replace('{', '\\{') \
.replace('}', '\\}')
def _escape_text(self, text):
        # skip empty strings; this should give a small performance improvement
if not text:
return ''
# escape text
text = self._escape(text)
buf = []
for c in text:
cn = ord(c)
if cn < (2**7):
# ASCII character
buf.append(str(c))
elif (2**7) <= cn < (2**16):
# single unicode escape sequence
buf.append('{\\u%d}' % cn)
elif (2**16) <= cn:
# RTF limits unicode to 16 bits.
# Force surrogate pairs
buf.append('{\\u%d}{\\u%d}' % surrogatepair(cn))
return ''.join(buf).replace('\n', '\\par\n')
def format_unencoded(self, tokensource, outfile):
# rtf 1.8 header
outfile.write('{\\rtf1\\ansi\\uc0\\deff0'
'{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
'{\\colortbl;' % (self.fontface and
' ' + self._escape(self.fontface) or
''))
# convert colors and save them in a mapping to access them later.
color_mapping = {}
offset = 1
for _, style in self.style:
for color in style['color'], style['bgcolor'], style['border']:
if color and color not in color_mapping:
color_mapping[color] = offset
outfile.write('\\red%d\\green%d\\blue%d;' % (
int(color[0:2], 16),
int(color[2:4], 16),
int(color[4:6], 16)
))
offset += 1
outfile.write('}\\f0 ')
if self.fontsize:
outfile.write('\\fs%d' % self.fontsize)
# highlight stream
for ttype, value in tokensource:
while not self.style.styles_token(ttype) and ttype.parent:
ttype = ttype.parent
style = self.style.style_for_token(ttype)
buf = []
if style['bgcolor']:
buf.append('\\cb%d' % color_mapping[style['bgcolor']])
if style['color']:
buf.append('\\cf%d' % color_mapping[style['color']])
if style['bold']:
buf.append('\\b')
if style['italic']:
buf.append('\\i')
if style['underline']:
buf.append('\\ul')
if style['border']:
buf.append('\\chbrdr\\chcfpat%d' %
color_mapping[style['border']])
start = ''.join(buf)
if start:
outfile.write('{%s ' % start)
outfile.write(self._escape_text(value))
if start:
outfile.write('}')
outfile.write('}')
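# Example (illustrative): RtfFormatter always emits a complete RTF document:
# >>> from pip._vendor.pygments import highlight
# >>> from pip._vendor.pygments.lexers import PythonLexer
# >>> rtf = highlight('x = 1', PythonLexer(), RtfFormatter(fontsize=24))
# >>> rtf.startswith('{\\rtf1')
# True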
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/formatters/rtf.py
|
Python
|
mit
| 5,014 |
"""
pygments.formatters.svg
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for SVG output.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pip._vendor.pygments.formatter import Formatter
from pip._vendor.pygments.token import Comment
from pip._vendor.pygments.util import get_bool_opt, get_int_opt
__all__ = ['SvgFormatter']
def escape_html(text):
"""Escape &, <, > as well as single and double quotes for HTML."""
    return text.replace('&', '&amp;'). \
                replace('<', '&lt;'). \
                replace('>', '&gt;'). \
                replace('"', '&quot;'). \
                replace("'", '&#39;')
class2style = {}
class SvgFormatter(Formatter):
"""
Format tokens as an SVG graphics file. This formatter is still experimental.
Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
coordinates containing ``<tspan>`` elements with the individual token styles.
By default, this formatter outputs a full SVG document including doctype
declaration and the ``<svg>`` root element.
.. versionadded:: 0.9
Additional options accepted:
`nowrap`
Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
        don't add an XML declaration and a doctype. If true, the `fontfamily`
and `fontsize` options are ignored. Defaults to ``False``.
`fontfamily`
The value to give the wrapping ``<g>`` element's ``font-family``
attribute, defaults to ``"monospace"``.
`fontsize`
The value to give the wrapping ``<g>`` element's ``font-size``
attribute, defaults to ``"14px"``.
`linenos`
If ``True``, add line numbers (default: ``False``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`linenowidth`
Maximum width devoted to line numbers (default: ``3*ystep``, sufficient
for up to 4-digit line numbers. Increase width for longer code blocks).
`xoffset`
Starting offset in X direction, defaults to ``0``.
`yoffset`
Starting offset in Y direction, defaults to the font size if it is given
        in pixels, or ``20`` otherwise. (This is necessary since text coordinates
refer to the text baseline, not the top edge.)
`ystep`
Offset to add to the Y coordinate for each subsequent line. This should
roughly be the text size plus 5. It defaults to that value if the text
        size is given in pixels, or ``25`` otherwise.
`spacehack`
        Convert spaces in the source to ``&#160;``, which are non-breaking
spaces. SVG provides the ``xml:space`` attribute to control how
whitespace inside tags is handled, in theory, the ``preserve`` value
could be used to keep all whitespace as-is. However, many current SVG
viewers don't obey that rule, so this option is provided as a workaround
and defaults to ``True``.
"""
name = 'SVG'
aliases = ['svg']
filenames = ['*.svg']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.fontfamily = options.get('fontfamily', 'monospace')
self.fontsize = options.get('fontsize', '14px')
self.xoffset = get_int_opt(options, 'xoffset', 0)
fs = self.fontsize.strip()
        if fs.endswith('px'):
            fs = fs[:-2].strip()
try:
int_fs = int(fs)
        except ValueError:
int_fs = 20
self.yoffset = get_int_opt(options, 'yoffset', int_fs)
self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
self.spacehack = get_bool_opt(options, 'spacehack', True)
self.linenos = get_bool_opt(options,'linenos',False)
self.linenostart = get_int_opt(options,'linenostart',1)
self.linenostep = get_int_opt(options,'linenostep',1)
self.linenowidth = get_int_opt(options,'linenowidth', 3*self.ystep)
self._stylecache = {}
def format_unencoded(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
For our implementation we put all lines in their own 'line group'.
"""
x = self.xoffset
y = self.yoffset
if not self.nowrap:
if self.encoding:
outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
self.encoding)
else:
outfile.write('<?xml version="1.0"?>\n')
outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
'"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
'svg10.dtd">\n')
outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
outfile.write('<g font-family="%s" font-size="%s">\n' %
(self.fontfamily, self.fontsize))
counter = self.linenostart
counter_step = self.linenostep
counter_style = self._get_style(Comment)
line_x = x
if self.linenos:
if counter % counter_step == 0:
outfile.write('<text x="%s" y="%s" %s text-anchor="end">%s</text>' %
(x+self.linenowidth,y,counter_style,counter))
line_x += self.linenowidth + self.ystep
counter += 1
outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (line_x, y))
for ttype, value in tokensource:
style = self._get_style(ttype)
tspan = style and '<tspan' + style + '>' or ''
tspanend = tspan and '</tspan>' or ''
value = escape_html(value)
if self.spacehack:
                value = value.expandtabs().replace(' ', '&#160;')
parts = value.split('\n')
for part in parts[:-1]:
outfile.write(tspan + part + tspanend)
y += self.ystep
outfile.write('</text>\n')
if self.linenos and counter % counter_step == 0:
outfile.write('<text x="%s" y="%s" text-anchor="end" %s>%s</text>' %
(x+self.linenowidth,y,counter_style,counter))
counter += 1
outfile.write('<text x="%s" y="%s" ' 'xml:space="preserve">' % (line_x,y))
outfile.write(tspan + parts[-1] + tspanend)
outfile.write('</text>')
if not self.nowrap:
outfile.write('</g></svg>\n')
def _get_style(self, tokentype):
if tokentype in self._stylecache:
return self._stylecache[tokentype]
otokentype = tokentype
while not self.style.styles_token(tokentype):
tokentype = tokentype.parent
value = self.style.style_for_token(tokentype)
result = ''
if value['color']:
result = ' fill="#' + value['color'] + '"'
if value['bold']:
result += ' font-weight="bold"'
if value['italic']:
result += ' font-style="italic"'
self._stylecache[otokentype] = result
return result
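# Example (illustrative sketch; styled tokens come out as <tspan> elements
# inside per-line <text> elements):
# >>> from pip._vendor.pygments import highlight
# >>> from pip._vendor.pygments.lexers import PythonLexer
# >>> svg = highlight('x = 1', PythonLexer(), SvgFormatter(linenos=True))
# >>> '<svg' in svg and '<tspan' in svg
# True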
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/formatters/svg.py
|
Python
|
mit
| 7,335 |
"""
pygments.formatters.terminal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for terminal output with ANSI sequences.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pip._vendor.pygments.formatter import Formatter
from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
from pip._vendor.pygments.console import ansiformat
from pip._vendor.pygments.util import get_choice_opt
__all__ = ['TerminalFormatter']
#: Map token types to a tuple of color values for light and dark
#: backgrounds.
TERMINAL_COLORS = {
Token: ('', ''),
Whitespace: ('gray', 'brightblack'),
Comment: ('gray', 'brightblack'),
Comment.Preproc: ('cyan', 'brightcyan'),
Keyword: ('blue', 'brightblue'),
Keyword.Type: ('cyan', 'brightcyan'),
Operator.Word: ('magenta', 'brightmagenta'),
Name.Builtin: ('cyan', 'brightcyan'),
Name.Function: ('green', 'brightgreen'),
Name.Namespace: ('_cyan_', '_brightcyan_'),
Name.Class: ('_green_', '_brightgreen_'),
Name.Exception: ('cyan', 'brightcyan'),
Name.Decorator: ('brightblack', 'gray'),
Name.Variable: ('red', 'brightred'),
Name.Constant: ('red', 'brightred'),
Name.Attribute: ('cyan', 'brightcyan'),
Name.Tag: ('brightblue', 'brightblue'),
String: ('yellow', 'yellow'),
Number: ('blue', 'brightblue'),
Generic.Deleted: ('brightred', 'brightred'),
Generic.Inserted: ('green', 'brightgreen'),
Generic.Heading: ('**', '**'),
Generic.Subheading: ('*magenta*', '*brightmagenta*'),
Generic.Prompt: ('**', '**'),
Generic.Error: ('brightred', 'brightred'),
Error: ('_brightred_', '_brightred_'),
}
class TerminalFormatter(Formatter):
r"""
Format tokens with ANSI color sequences, for output in a text console.
Color sequences are terminated at newlines, so that paging the output
works correctly.
The `get_style_defs()` method doesn't do anything special since there is
no support for common styles.
Options accepted:
`bg`
Set to ``"light"`` or ``"dark"`` depending on the terminal's background
(default: ``"light"``).
`colorscheme`
A dictionary mapping token types to (lightbg, darkbg) color names or
``None`` (default: ``None`` = use builtin colorscheme).
`linenos`
Set to ``True`` to have line numbers on the terminal output as well
(default: ``False`` = no line numbers).
"""
name = 'Terminal'
aliases = ['terminal', 'console']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.darkbg = get_choice_opt(options, 'bg',
['light', 'dark'], 'light') == 'dark'
self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
self.linenos = options.get('linenos', False)
self._lineno = 0
def format(self, tokensource, outfile):
return Formatter.format(self, tokensource, outfile)
def _write_lineno(self, outfile):
self._lineno += 1
outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))
def _get_color(self, ttype):
# self.colorscheme is a dict containing usually generic types, so we
# have to walk the tree of dots. The base Token type must be a key,
# even if it's empty string, as in the default above.
colors = self.colorscheme.get(ttype)
while colors is None:
ttype = ttype.parent
colors = self.colorscheme.get(ttype)
return colors[self.darkbg]
def format_unencoded(self, tokensource, outfile):
if self.linenos:
self._write_lineno(outfile)
for ttype, value in tokensource:
color = self._get_color(ttype)
for line in value.splitlines(True):
if color:
outfile.write(ansiformat(color, line.rstrip('\n')))
else:
outfile.write(line.rstrip('\n'))
if line.endswith('\n'):
if self.linenos:
self._write_lineno(outfile)
else:
outfile.write('\n')
if self.linenos:
outfile.write("\n")
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/formatters/terminal.py
|
Python
|
mit
| 4,674 |
"""
pygments.formatters.terminal256
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for 256-color terminal output with ANSI sequences.
RGB-to-XTERM color conversion routines adapted from xterm256-conv
tool (http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2)
by Wolfgang Frisch.
Formatter version 1.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# TODO:
# - Options to map style's bold/underline/italic/border attributes
# to some ANSI attributes (something like 'italic=underline')
# - An option to output "style RGB to xterm RGB/index" conversion table
# - An option to indicate that we are running in "reverse background"
# xterm. This means that default colors are white-on-black, not
# black-on-white, so colors like "white background" need to be converted
# to "white background, black foreground", etc...
from pip._vendor.pygments.formatter import Formatter
from pip._vendor.pygments.console import codes
from pip._vendor.pygments.style import ansicolors
__all__ = ['Terminal256Formatter', 'TerminalTrueColorFormatter']
class EscapeSequence:
def __init__(self, fg=None, bg=None, bold=False, underline=False, italic=False):
self.fg = fg
self.bg = bg
self.bold = bold
self.underline = underline
self.italic = italic
def escape(self, attrs):
if len(attrs):
return "\x1b[" + ";".join(attrs) + "m"
return ""
def color_string(self):
attrs = []
if self.fg is not None:
if self.fg in ansicolors:
esc = codes[self.fg.replace('ansi','')]
if ';01m' in esc:
self.bold = True
# extract fg color code.
attrs.append(esc[2:4])
else:
attrs.extend(("38", "5", "%i" % self.fg))
if self.bg is not None:
if self.bg in ansicolors:
esc = codes[self.bg.replace('ansi','')]
# extract fg color code, add 10 for bg.
attrs.append(str(int(esc[2:4])+10))
else:
attrs.extend(("48", "5", "%i" % self.bg))
if self.bold:
attrs.append("01")
if self.underline:
attrs.append("04")
if self.italic:
attrs.append("03")
return self.escape(attrs)
def true_color_string(self):
attrs = []
if self.fg:
attrs.extend(("38", "2", str(self.fg[0]), str(self.fg[1]), str(self.fg[2])))
if self.bg:
attrs.extend(("48", "2", str(self.bg[0]), str(self.bg[1]), str(self.bg[2])))
if self.bold:
attrs.append("01")
if self.underline:
attrs.append("04")
if self.italic:
attrs.append("03")
return self.escape(attrs)
def reset_string(self):
attrs = []
if self.fg is not None:
attrs.append("39")
if self.bg is not None:
attrs.append("49")
if self.bold or self.underline or self.italic:
attrs.append("00")
return self.escape(attrs)
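# Example (illustrative): a bold, 256-color foreground escape and its reset:
# >>> seq = EscapeSequence(fg=208, bold=True)
# >>> seq.color_string()
# '\x1b[38;5;208;01m'
# >>> seq.reset_string()
# '\x1b[39;00m'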
class Terminal256Formatter(Formatter):
"""
Format tokens with ANSI color sequences, for output in a 256-color
terminal or console. Like in `TerminalFormatter` color sequences
are terminated at newlines, so that paging the output works correctly.
The formatter takes colors from a style defined by the `style` option
and converts them to nearest ANSI 256-color escape sequences. Bold and
underline attributes from the style are preserved (and displayed).
.. versionadded:: 0.9
.. versionchanged:: 2.2
If the used style defines foreground colors in the form ``#ansi*``, then
`Terminal256Formatter` will map these to non extended foreground color.
See :ref:`AnsiTerminalStyle` for more information.
.. versionchanged:: 2.4
The ANSI color names have been updated with names that are easier to
understand and align with colornames of other projects and terminals.
See :ref:`this table <new-ansi-color-names>` for more information.
Options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`linenos`
Set to ``True`` to have line numbers on the terminal output as well
(default: ``False`` = no line numbers).
"""
name = 'Terminal256'
aliases = ['terminal256', 'console256', '256']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.xterm_colors = []
self.best_match = {}
self.style_string = {}
self.usebold = 'nobold' not in options
self.useunderline = 'nounderline' not in options
self.useitalic = 'noitalic' not in options
self._build_color_table() # build an RGB-to-256 color conversion table
self._setup_styles() # convert selected style's colors to term. colors
self.linenos = options.get('linenos', False)
self._lineno = 0
def _build_color_table(self):
# colors 0..15: 16 basic colors
self.xterm_colors.append((0x00, 0x00, 0x00)) # 0
self.xterm_colors.append((0xcd, 0x00, 0x00)) # 1
self.xterm_colors.append((0x00, 0xcd, 0x00)) # 2
self.xterm_colors.append((0xcd, 0xcd, 0x00)) # 3
self.xterm_colors.append((0x00, 0x00, 0xee)) # 4
self.xterm_colors.append((0xcd, 0x00, 0xcd)) # 5
self.xterm_colors.append((0x00, 0xcd, 0xcd)) # 6
self.xterm_colors.append((0xe5, 0xe5, 0xe5)) # 7
self.xterm_colors.append((0x7f, 0x7f, 0x7f)) # 8
self.xterm_colors.append((0xff, 0x00, 0x00)) # 9
self.xterm_colors.append((0x00, 0xff, 0x00)) # 10
self.xterm_colors.append((0xff, 0xff, 0x00)) # 11
self.xterm_colors.append((0x5c, 0x5c, 0xff)) # 12
self.xterm_colors.append((0xff, 0x00, 0xff)) # 13
self.xterm_colors.append((0x00, 0xff, 0xff)) # 14
self.xterm_colors.append((0xff, 0xff, 0xff)) # 15
# colors 16..232: the 6x6x6 color cube
valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
for i in range(217):
r = valuerange[(i // 36) % 6]
g = valuerange[(i // 6) % 6]
b = valuerange[i % 6]
self.xterm_colors.append((r, g, b))
# colors 233..253: grayscale
for i in range(1, 22):
v = 8 + i * 10
self.xterm_colors.append((v, v, v))
def _closest_color(self, r, g, b):
distance = 257*257*3 # "infinity" (>distance from #000000 to #ffffff)
match = 0
for i in range(0, 254):
values = self.xterm_colors[i]
rd = r - values[0]
gd = g - values[1]
bd = b - values[2]
d = rd*rd + gd*gd + bd*bd
if d < distance:
match = i
distance = d
return match
def _color_index(self, color):
index = self.best_match.get(color, None)
if color in ansicolors:
# strip the `ansi/#ansi` part and look up code
index = color
self.best_match[color] = index
if index is None:
try:
rgb = int(str(color), 16)
except ValueError:
rgb = 0
r = (rgb >> 16) & 0xff
g = (rgb >> 8) & 0xff
b = rgb & 0xff
index = self._closest_color(r, g, b)
self.best_match[color] = index
return index
def _setup_styles(self):
for ttype, ndef in self.style:
escape = EscapeSequence()
# get foreground from ansicolor if set
if ndef['ansicolor']:
escape.fg = self._color_index(ndef['ansicolor'])
elif ndef['color']:
escape.fg = self._color_index(ndef['color'])
if ndef['bgansicolor']:
escape.bg = self._color_index(ndef['bgansicolor'])
elif ndef['bgcolor']:
escape.bg = self._color_index(ndef['bgcolor'])
if self.usebold and ndef['bold']:
escape.bold = True
if self.useunderline and ndef['underline']:
escape.underline = True
if self.useitalic and ndef['italic']:
escape.italic = True
self.style_string[str(ttype)] = (escape.color_string(),
escape.reset_string())
def _write_lineno(self, outfile):
self._lineno += 1
outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))
def format(self, tokensource, outfile):
return Formatter.format(self, tokensource, outfile)
def format_unencoded(self, tokensource, outfile):
if self.linenos:
self._write_lineno(outfile)
for ttype, value in tokensource:
not_found = True
while ttype and not_found:
try:
# outfile.write( "<" + str(ttype) + ">" )
on, off = self.style_string[str(ttype)]
# Like TerminalFormatter, add "reset colors" escape sequence
# on newline.
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write(on + line + off)
if self.linenos:
self._write_lineno(outfile)
else:
outfile.write('\n')
if spl[-1]:
outfile.write(on + spl[-1] + off)
not_found = False
# outfile.write( '#' + str(ttype) + '#' )
except KeyError:
# ottype = ttype
ttype = ttype.parent
# outfile.write( '!' + str(ottype) + '->' + str(ttype) + '!' )
if not_found:
outfile.write(value)
if self.linenos:
outfile.write("\n")
class TerminalTrueColorFormatter(Terminal256Formatter):
r"""
Format tokens with ANSI color sequences, for output in a true-color
terminal or console. Like in `TerminalFormatter` color sequences
are terminated at newlines, so that paging the output works correctly.
.. versionadded:: 2.1
Options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
"""
name = 'TerminalTrueColor'
aliases = ['terminal16m', 'console16m', '16m']
filenames = []
def _build_color_table(self):
pass
def _color_tuple(self, color):
try:
rgb = int(str(color), 16)
except ValueError:
return None
r = (rgb >> 16) & 0xff
g = (rgb >> 8) & 0xff
b = rgb & 0xff
return (r, g, b)
def _setup_styles(self):
for ttype, ndef in self.style:
escape = EscapeSequence()
if ndef['color']:
escape.fg = self._color_tuple(ndef['color'])
if ndef['bgcolor']:
escape.bg = self._color_tuple(ndef['bgcolor'])
if self.usebold and ndef['bold']:
escape.bold = True
if self.useunderline and ndef['underline']:
escape.underline = True
if self.useitalic and ndef['italic']:
escape.italic = True
self.style_string[str(ttype)] = (escape.true_color_string(),
escape.reset_string())
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/formatters/terminal256.py
|
Python
|
mit
| 11,753 |
"""
pygments.lexer
~~~~~~~~~~~~~~
Base lexer classes.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import time
from pip._vendor.pygments.filter import apply_filters, Filter
from pip._vendor.pygments.filters import get_filter_by_name
from pip._vendor.pygments.token import Error, Text, Other, _TokenType
from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
make_analysator, Future, guess_decode
from pip._vendor.pygments.regexopt import regex_opt
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
'default', 'words']
_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
(b'\xff\xfe\0\0', 'utf-32'),
(b'\0\0\xfe\xff', 'utf-32be'),
(b'\xff\xfe', 'utf-16'),
(b'\xfe\xff', 'utf-16be')]
_default_analyse = staticmethod(lambda x: 0.0)
class LexerMeta(type):
"""
This metaclass automagically converts ``analyse_text`` methods into
static methods which always return float values.
"""
def __new__(mcs, name, bases, d):
if 'analyse_text' in d:
d['analyse_text'] = make_analysator(d['analyse_text'])
return type.__new__(mcs, name, bases, d)
class Lexer(metaclass=LexerMeta):
"""
Lexer for a specific language.
Basic options recognized:
``stripnl``
Strip leading and trailing newlines from the input (default: True).
``stripall``
Strip all leading and trailing whitespace from the input
(default: False).
``ensurenl``
Make sure that the input ends with a newline (default: True). This
is required for some lexers that consume input linewise.
.. versionadded:: 1.3
``tabsize``
If given and greater than 0, expand tabs in the input (default: 0).
``encoding``
If given, must be an encoding name. This encoding will be used to
convert the input string to Unicode, if it is not already a Unicode
string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
Latin1 detection. Can also be ``'chardet'`` to use the chardet
library, if it is installed.
``inencoding``
Overrides the ``encoding`` if given.
"""
#: Name of the lexer
name = None
#: URL of the language specification/definition
url = None
#: Shortcuts for the lexer
aliases = []
#: File name globs
filenames = []
#: Secondary file name globs
alias_filenames = []
#: MIME types
mimetypes = []
#: Priority, should multiple lexers match and no content is provided
priority = 0
def __init__(self, **options):
self.options = options
self.stripnl = get_bool_opt(options, 'stripnl', True)
self.stripall = get_bool_opt(options, 'stripall', False)
self.ensurenl = get_bool_opt(options, 'ensurenl', True)
self.tabsize = get_int_opt(options, 'tabsize', 0)
self.encoding = options.get('encoding', 'guess')
self.encoding = options.get('inencoding') or self.encoding
self.filters = []
for filter_ in get_list_opt(options, 'filters', ()):
self.add_filter(filter_)
def __repr__(self):
if self.options:
return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
self.options)
else:
return '<pygments.lexers.%s>' % self.__class__.__name__
def add_filter(self, filter_, **options):
"""
Add a new stream filter to this lexer.
"""
if not isinstance(filter_, Filter):
filter_ = get_filter_by_name(filter_, **options)
self.filters.append(filter_)
def analyse_text(text):
"""
Has to return a float between ``0`` and ``1`` that indicates
if a lexer wants to highlight this text. Used by ``guess_lexer``.
If this method returns ``0`` it won't highlight it in any case, if
it returns ``1`` highlighting with this lexer is guaranteed.
The `LexerMeta` metaclass automatically wraps this function so
that it works like a static method (no ``self`` or ``cls``
parameter) and the return value is automatically converted to
`float`. If the return value is an object that is boolean `False`
        it's the same as if the return value was ``0.0``.
"""
def get_tokens(self, text, unfiltered=False):
"""
Return an iterable of (tokentype, value) pairs generated from
`text`. If `unfiltered` is set to `True`, the filtering mechanism
is bypassed even if filters are defined.
Also preprocess the text, i.e. expand tabs and strip it if
wanted and applies registered filters.
"""
if not isinstance(text, str):
if self.encoding == 'guess':
text, _ = guess_decode(text)
elif self.encoding == 'chardet':
try:
from pip._vendor import chardet
except ImportError as e:
raise ImportError('To enable chardet encoding guessing, '
'please install the chardet library '
'from http://chardet.feedparser.org/') from e
# check for BOM first
decoded = None
for bom, encoding in _encoding_map:
if text.startswith(bom):
decoded = text[len(bom):].decode(encoding, 'replace')
break
# no BOM found, so use chardet
if decoded is None:
enc = chardet.detect(text[:1024]) # Guess using first 1KB
decoded = text.decode(enc.get('encoding') or 'utf-8',
'replace')
text = decoded
else:
text = text.decode(self.encoding)
if text.startswith('\ufeff'):
text = text[len('\ufeff'):]
else:
if text.startswith('\ufeff'):
text = text[len('\ufeff'):]
# text now *is* a unicode string
text = text.replace('\r\n', '\n')
text = text.replace('\r', '\n')
if self.stripall:
text = text.strip()
elif self.stripnl:
text = text.strip('\n')
if self.tabsize > 0:
text = text.expandtabs(self.tabsize)
if self.ensurenl and not text.endswith('\n'):
text += '\n'
def streamer():
for _, t, v in self.get_tokens_unprocessed(text):
yield t, v
stream = streamer()
if not unfiltered:
stream = apply_filters(stream, self.filters, self)
return stream
def get_tokens_unprocessed(self, text):
"""
Return an iterable of (index, tokentype, value) pairs where "index"
is the starting position of the token within the input text.
In subclasses, implement this method as a generator to
maximize effectiveness.
"""
raise NotImplementedError
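# Example of the preprocessing options above (an illustrative sketch using a
# concrete lexer from the vendored package):
# >>> from pip._vendor.pygments.lexers import PythonLexer
# >>> lexer = PythonLexer(stripall=True)   # strip surrounding whitespace
# >>> list(lexer.get_tokens('   x = 1   '))[0]
# (Token.Name, 'x')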
class DelegatingLexer(Lexer):
"""
    This lexer takes two lexers as arguments. A root lexer and
a language lexer. First everything is scanned using the language
lexer, afterwards all ``Other`` tokens are lexed using the root
lexer.
The lexers from the ``template`` lexer package use this base lexer.
"""
def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
self.root_lexer = _root_lexer(**options)
self.language_lexer = _language_lexer(**options)
self.needle = _needle
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
buffered = ''
insertions = []
lng_buffer = []
for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
if t is self.needle:
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
lng_buffer = []
buffered += v
else:
lng_buffer.append((i, t, v))
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
return do_insertions(insertions,
self.root_lexer.get_tokens_unprocessed(buffered))
# ------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#
class include(str): # pylint: disable=invalid-name
"""
Indicates that a state should include rules from another state.
"""
pass
class _inherit:
"""
    Indicates that a state should inherit from its superclass.
"""
def __repr__(self):
return 'inherit'
inherit = _inherit() # pylint: disable=invalid-name
class combined(tuple): # pylint: disable=invalid-name
"""
Indicates a state combined from multiple states.
"""
def __new__(cls, *args):
return tuple.__new__(cls, args)
def __init__(self, *args):
# tuple.__init__ doesn't do anything
pass
class _PseudoMatch:
"""
A pseudo match object constructed from a string.
"""
def __init__(self, start, text):
self._text = text
self._start = start
def start(self, arg=None):
return self._start
def end(self, arg=None):
return self._start + len(self._text)
def group(self, arg=None):
if arg:
raise IndexError('No such group')
return self._text
def groups(self):
return (self._text,)
def groupdict(self):
return {}
def bygroups(*args):
"""
Callback that yields multiple actions for each group in the match.
"""
def callback(lexer, match, ctx=None):
for i, action in enumerate(args):
if action is None:
continue
elif type(action) is _TokenType:
data = match.group(i + 1)
if data:
yield match.start(i + 1), action, data
else:
data = match.group(i + 1)
if data is not None:
if ctx:
ctx.pos = match.start(i + 1)
for item in action(lexer,
_PseudoMatch(match.start(i + 1), data), ctx):
if item:
yield item
if ctx:
ctx.pos = match.end()
return callback
class _This:
"""
Special singleton used for indicating the caller class.
Used by ``using``.
"""
this = _This()
def using(_other, **kwargs):
"""
Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
"""
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
gt_kwargs['stack'] = s
else:
gt_kwargs['stack'] = ('root', s)
if _other is this:
def callback(lexer, match, ctx=None):
# if keyword arguments are given the callback
# function has to create a new lexer instance
if kwargs:
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = lexer.__class__(**kwargs)
else:
lx = lexer
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
else:
def callback(lexer, match, ctx=None):
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = _other(**kwargs)
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
return callback
class default:
"""
Indicates a state or state action (e.g. #pop) to apply.
For example default('#pop') is equivalent to ('', Token, '#pop')
Note that state tuples may be used as well.
.. versionadded:: 2.0
"""
def __init__(self, state):
self.state = state
class words(Future):
"""
Indicates a list of literal words that is transformed into an optimized
regex that matches any of the words.
.. versionadded:: 2.0
"""
def __init__(self, words, prefix='', suffix=''):
self.words = words
self.prefix = prefix
self.suffix = suffix
def get(self):
return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
class RegexLexerMeta(LexerMeta):
"""
Metaclass for RegexLexer, creates the self._tokens attribute from
self.tokens on the first instantiation.
"""
def _process_regex(cls, regex, rflags, state):
"""Preprocess the regular expression component of a token definition."""
if isinstance(regex, Future):
regex = regex.get()
return re.compile(regex, rflags).match
def _process_token(cls, token):
"""Preprocess the token component of a token definition."""
assert type(token) is _TokenType or callable(token), \
'token type must be simple type or callable, not %r' % (token,)
return token
def _process_new_state(cls, new_state, unprocessed, processed):
"""Preprocess the state transition action of a token definition."""
if isinstance(new_state, str):
# an existing state
if new_state == '#pop':
return -1
elif new_state in unprocessed:
return (new_state,)
elif new_state == '#push':
return new_state
elif new_state[:5] == '#pop:':
return -int(new_state[5:])
else:
assert False, 'unknown new state %r' % new_state
elif isinstance(new_state, combined):
# combine a new state from existing ones
tmp_state = '_tmp_%d' % cls._tmpname
cls._tmpname += 1
itokens = []
for istate in new_state:
assert istate != new_state, 'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[tmp_state] = itokens
return (tmp_state,)
elif isinstance(new_state, tuple):
# push more than one state
for istate in new_state:
assert (istate in unprocessed or
istate in ('#pop', '#push')), \
'unknown new state ' + istate
return new_state
else:
assert False, 'unknown new state def %r' % new_state
def _process_state(cls, unprocessed, processed, state):
"""Preprocess a single state definition."""
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed,
str(tdef)))
continue
if isinstance(tdef, _inherit):
# should be processed already, but may not in the case of:
# 1. the state has no counterpart in any parent
# 2. the state includes more than one 'inherit'
continue
if isinstance(tdef, default):
new_state = cls._process_new_state(tdef.state, unprocessed, processed)
tokens.append((re.compile('').match, None, new_state))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = cls._process_regex(tdef[0], rflags, state)
except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err)) from err
token = cls._process_token(tdef[1])
if len(tdef) == 2:
new_state = None
else:
new_state = cls._process_new_state(tdef[2],
unprocessed, processed)
tokens.append((rex, token, new_state))
return tokens
def process_tokendef(cls, name, tokendefs=None):
"""Preprocess a dictionary of token definitions."""
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
for state in list(tokendefs):
cls._process_state(tokendefs, processed, state)
return processed
def get_tokendefs(cls):
"""
Merge tokens from superclasses in MRO order, returning a single tokendef
dictionary.
Any state that is not defined by a subclass will be inherited
automatically. States that *are* defined by subclasses will, by
default, override that state in the superclass. If a subclass wishes to
inherit definitions from a superclass, it can use the special value
"inherit", which will cause the superclass' state definition to be
included at that point in the state.
"""
tokens = {}
inheritable = {}
for c in cls.__mro__:
toks = c.__dict__.get('tokens', {})
for state, items in toks.items():
curitems = tokens.get(state)
if curitems is None:
# N.b. because this is assigned by reference, sufficiently
# deep hierarchies are processed incrementally (e.g. for
# A(B), B(C), C(RegexLexer), B will be premodified so X(B)
# will not see any inherits in B).
tokens[state] = items
try:
inherit_ndx = items.index(inherit)
except ValueError:
continue
inheritable[state] = inherit_ndx
continue
inherit_ndx = inheritable.pop(state, None)
if inherit_ndx is None:
continue
# Replace the "inherit" value with the items
curitems[inherit_ndx:inherit_ndx+1] = items
try:
# N.b. this is the index in items (that is, the superclass
# copy), so offset required when storing below.
new_inh_ndx = items.index(inherit)
except ValueError:
pass
else:
inheritable[state] = inherit_ndx + new_inh_ndx
return tokens
def __call__(cls, *args, **kwds):
"""Instantiate cls after preprocessing its token definitions."""
if '_tokens' not in cls.__dict__:
cls._all_tokens = {}
cls._tmpname = 0
if hasattr(cls, 'token_variants') and cls.token_variants:
# don't process yet
pass
else:
cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
return type.__call__(cls, *args, **kwds)
class RegexLexer(Lexer, metaclass=RegexLexerMeta):
"""
Base for simple stateful regular expression-based lexers.
Simplifies the lexing process so that you need only
provide a list of states and regular expressions.
"""
#: Flags for compiling the regular expressions.
#: Defaults to MULTILINE.
flags = re.MULTILINE
    #: At all times there is a stack of states. Initially, the stack contains
#: a single state 'root'. The top of the stack is called "the current state".
#:
#: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
#:
#: ``new_state`` can be omitted to signify no state transition.
    #: If ``new_state`` is a string, it is pushed on the stack. This ensures
#: the new current state is ``new_state``.
#: If ``new_state`` is a tuple of strings, all of those strings are pushed
#: on the stack and the current state will be the last element of the list.
#: ``new_state`` can also be ``combined('state1', 'state2', ...)``
#: to signify a new, anonymous state combined from the rules of two
#: or more existing ones.
#: Furthermore, it can be '#pop' to signify going back one step in
#: the state stack, or '#push' to push the current state on the stack
#: again. Note that if you push while in a combined state, the combined
#: state itself is pushed, and not only the state in which the rule is
#: defined.
#:
#: The tuple can also be replaced with ``include('state')``, in which
#: case the rules from the state named by the string are included in the
#: current one.
tokens = {}
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
``stack`` is the initial stack (default: ``['root']``)
"""
pos = 0
tokendefs = self._tokens
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if action is not None:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
yield from action(self, m)
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
if len(statestack) > 1:
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop, but keep at least one state on the stack
# (random code leading to unexpected pops should
# not allow exceptions)
if abs(new_state) >= len(statestack):
del statestack[1:]
else:
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
                # We get here only if all tokens for the current state
                # have been considered and none of them matched.
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Text, '\n'
pos += 1
continue
yield pos, Error, text[pos]
pos += 1
except IndexError:
break
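# Illustrative note on the fallback above (lexer name hypothetical): when no
# rule in the current state matches, a newline resets the stack to ['root']
# and is emitted as Text; any other character becomes a single Error token:
#
#     list(MyLexer().get_tokens_unprocessed('\x00'))
#     # -> [(0, Error, '\x00')]   assuming no rule of MyLexer matches '\x00'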
class LexerContext:
"""
A helper object that holds lexer position data.
"""
def __init__(self, text, pos, stack=None, end=None):
self.text = text
self.pos = pos
self.end = end or len(text) # end=0 not supported ;-)
self.stack = stack or ['root']
def __repr__(self):
return 'LexerContext(%r, %r, %r)' % (
self.text, self.pos, self.stack)
class ExtendedRegexLexer(RegexLexer):
"""
A RegexLexer that uses a context object to store its state.
"""
def get_tokens_unprocessed(self, text=None, context=None):
"""
Split ``text`` into (tokentype, text) pairs.
If ``context`` is given, use this lexer context instead.
"""
tokendefs = self._tokens
if not context:
ctx = LexerContext(text, 0)
statetokens = tokendefs['root']
else:
ctx = context
statetokens = tokendefs[ctx.stack[-1]]
text = ctx.text
        while True:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, ctx.pos, ctx.end)
if m:
if action is not None:
if type(action) is _TokenType:
yield ctx.pos, action, m.group()
ctx.pos = m.end()
else:
yield from action(self, m, ctx)
if not new_state:
# altered the state stack?
statetokens = tokendefs[ctx.stack[-1]]
# CAUTION: callback must set ctx.pos!
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
if len(ctx.stack) > 1:
ctx.stack.pop()
elif state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
ctx.stack.append(state)
elif isinstance(new_state, int):
# see RegexLexer for why this check is made
if abs(new_state) >= len(ctx.stack):
del ctx.stack[1:]
else:
del ctx.stack[new_state:]
elif new_state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[ctx.stack[-1]]
break
else:
try:
if ctx.pos >= ctx.end:
break
if text[ctx.pos] == '\n':
# at EOL, reset state to "root"
ctx.stack = ['root']
statetokens = tokendefs['root']
yield ctx.pos, Text, '\n'
ctx.pos += 1
continue
yield ctx.pos, Error, text[ctx.pos]
ctx.pos += 1
except IndexError:
break
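# A hypothetical callback for an ExtendedRegexLexer subclass. Unlike plain
# RegexLexer callbacks it receives the LexerContext, and it must advance
# ctx.pos itself (see the CAUTION comment in the loop above):
#
#     def heredoc_callback(lexer, match, ctx):
#         yield match.start(), String.Heredoc, match.group()
#         ctx.pos = match.end()   # required; the main loop will not do it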
def do_insertions(insertions, tokens):
"""
Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
index, itokens = next(insertions)
except StopIteration:
# no insertions
yield from tokens
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
        # First iteration: store the position of the first item.
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
if tmpval:
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
if oldi < len(v):
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
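# Sketch of a typical use (names hypothetical): splice a sub-lexer's token
# triples into a host stream at character index 10 of the original text:
#
#     itokens = list(sublexer.get_tokens_unprocessed(inserted_code))
#     combined = do_insertions([(10, itokens)],
#                              hostlexer.get_tokens_unprocessed(text))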
class ProfilingRegexLexerMeta(RegexLexerMeta):
"""Metaclass for ProfilingRegexLexer, collects regex timing info."""
def _process_regex(cls, regex, rflags, state):
if isinstance(regex, words):
rex = regex_opt(regex.words, prefix=regex.prefix,
suffix=regex.suffix)
else:
rex = regex
compiled = re.compile(rex, rflags)
def match_func(text, pos, endpos=sys.maxsize):
info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
t0 = time.time()
res = compiled.match(text, pos, endpos)
t1 = time.time()
info[0] += 1
info[1] += t1 - t0
return res
return match_func
class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
"""Drop-in replacement for RegexLexer that does profiling of its regexes."""
_prof_data = []
_prof_sort_index = 4 # defaults to time per call
def get_tokens_unprocessed(self, text, stack=('root',)):
# this needs to be a stack, since using(this) will produce nested calls
self.__class__._prof_data.append({})
yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
rawdata = self.__class__._prof_data.pop()
data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
n, 1000 * t, 1000 * t / n)
for ((s, r), (n, t)) in rawdata.items()),
key=lambda x: x[self._prof_sort_index],
reverse=True)
sum_total = sum(x[3] for x in data)
print()
print('Profiling result for %s lexing %d chars in %.3f ms' %
(self.__class__.__name__, len(text), sum_total))
print('=' * 110)
print('%-20s %-64s ncalls tottime percall' % ('state', 'regex'))
print('-' * 110)
for d in data:
print('%-20s %-65s %5d %8.4f %8.4f' % d)
print('=' * 110)
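# Minimal (illustrative) profiling session -- mix ProfilingRegexLexer into an
# existing RegexLexer subclass and lex some text; the timing table prints once
# the token stream is exhausted:
#
#     class ProfilingPythonLexer(PythonLexer, ProfilingRegexLexer):
#         pass
#
#     list(ProfilingPythonLexer().get_tokens('x = 1\n'))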
| castiel248/Convert | Lib/site-packages/pip/_vendor/pygments/lexer.py | Python | mit | 32,005 |
"""
pygments.lexers
~~~~~~~~~~~~~~~
Pygments lexers.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import types
from fnmatch import fnmatch
from os.path import basename
from pip._vendor.pygments.lexers._mapping import LEXERS
from pip._vendor.pygments.modeline import get_filetype_from_buffer
from pip._vendor.pygments.plugin import find_plugin_lexers
from pip._vendor.pygments.util import ClassNotFound, guess_decode
COMPAT = {
'Python3Lexer': 'PythonLexer',
'Python3TracebackLexer': 'PythonTracebackLexer',
}
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT)
_lexer_cache = {}
def _load_lexers(module_name):
"""Load a lexer (and all others in the module too)."""
mod = __import__(module_name, None, None, ['__all__'])
for lexer_name in mod.__all__:
cls = getattr(mod, lexer_name)
_lexer_cache[cls.name] = cls
def get_all_lexers(plugins=True):
"""Return a generator of tuples in the form ``(name, aliases,
filenames, mimetypes)`` of all know lexers.
If *plugins* is true (the default), plugin lexers supplied by entrypoints
are also returned. Otherwise, only builtin ones are considered.
"""
for item in LEXERS.values():
yield item[1:]
if plugins:
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
def find_lexer_class(name):
"""Lookup a lexer class by name.
Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in LEXERS.values():
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls
def find_lexer_class_by_name(_alias):
"""Lookup a lexer class by alias.
Like `get_lexer_by_name`, but does not instantiate the class.
.. versionadded:: 2.2
"""
if not _alias:
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
for module_name, name, aliases, _, _ in LEXERS.values():
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias.lower() in cls.aliases:
return cls
raise ClassNotFound('no lexer for alias %r found' % _alias)
def get_lexer_by_name(_alias, **options):
"""Get a lexer by an alias.
Raises ClassNotFound if not found.
"""
if not _alias:
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
for module_name, name, aliases, _, _ in LEXERS.values():
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name](**options)
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias.lower() in cls.aliases:
return cls(**options)
raise ClassNotFound('no lexer for alias %r found' % _alias)
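# Illustrative lookups:
#
#     get_lexer_by_name('python')              # -> a PythonLexer instance
#     get_lexer_by_name('py', stripnl=False)   # aliases work too; options
#                                              # are forwarded to the lexer
#     get_lexer_by_name('nosuchlang')          # raises ClassNotFound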
def load_lexer_from_file(filename, lexername="CustomLexer", **options):
"""Load a lexer from a file.
This method expects a file located relative to the current working
    directory, which contains a Lexer class. By default, it expects the
    Lexer to be named CustomLexer; you can specify your own class name
    as the second argument to this function.
    Users should be very careful with the input, because this method
    is equivalent to running exec on the input file.
Raises ClassNotFound if there are any problems importing the Lexer.
.. versionadded:: 2.2
"""
try:
# This empty dict will contain the namespace for the exec'd file
custom_namespace = {}
with open(filename, 'rb') as f:
exec(f.read(), custom_namespace)
# Retrieve the class `lexername` from that namespace
if lexername not in custom_namespace:
raise ClassNotFound('no valid %s class found in %s' %
(lexername, filename))
lexer_class = custom_namespace[lexername]
# And finally instantiate it with the options
return lexer_class(**options)
except OSError as err:
raise ClassNotFound('cannot read %s: %s' % (filename, err))
except ClassNotFound:
raise
except Exception as err:
raise ClassNotFound('error when loading custom lexer: %s' % err)
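# Illustrative call (file and class names hypothetical); extra keyword
# arguments are passed through to the lexer's constructor:
#
#     lexer = load_lexer_from_file('my_lexer.py', lexername='MyLexer',
#                                  stripall=True)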
def find_lexer_class_for_filename(_fn, code=None):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Returns None if not found.
"""
matches = []
fn = basename(_fn)
for modname, name, _, filenames, _ in LEXERS.values():
for filename in filenames:
if fnmatch(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
if fnmatch(fn, filename):
matches.append((cls, filename))
if isinstance(code, bytes):
# decode it, since all analyse_text functions expect unicode
code = guess_decode(code)
def get_rating(info):
cls, filename = info
# explicit patterns get a bonus
        bonus = 0.5 if '*' not in filename else 0
# The class _always_ defines analyse_text because it's included in
# the Lexer class. The default implementation returns None which
# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
# to find lexers which need it overridden.
if code:
return cls.analyse_text(code) + bonus, cls.__name__
return cls.priority + bonus, cls.__name__
if matches:
matches.sort(key=get_rating)
# print "Possible lexers, after sort:", matches
return matches[-1][0]
def get_lexer_for_filename(_fn, code=None, **options):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Raises ClassNotFound if not found.
"""
res = find_lexer_class_for_filename(_fn, code)
if not res:
raise ClassNotFound('no lexer for filename %r found' % _fn)
return res(**options)
def get_lexer_for_mimetype(_mime, **options):
"""Get a lexer for a mimetype.
Raises ClassNotFound if not found.
"""
for modname, name, _, _, mimetypes in LEXERS.values():
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime)
def _iter_lexerclasses(plugins=True):
"""Return an iterator over all lexer classes."""
for key in sorted(LEXERS):
module_name, name = LEXERS[key][:2]
if name not in _lexer_cache:
_load_lexers(module_name)
yield _lexer_cache[name]
if plugins:
yield from find_plugin_lexers()
def guess_lexer_for_filename(_fn, _text, **options):
"""
    Look up all lexers which handle the given filename as a primary
    (``filenames``) or secondary (``alias_filenames``) pattern. Then run a
    text analysis on the content and choose the best matching lexer.
    Usage::
>>> from pygments.lexers import guess_lexer_for_filename
>>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
<pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
>>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
<pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
>>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
<pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
"""
fn = basename(_fn)
primary = {}
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
if fnmatch(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = True
for filename in lexer.alias_filenames:
if fnmatch(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = False
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
if len(matching_lexers) == 1:
return matching_lexers.pop()(**options)
result = []
for lexer in matching_lexers:
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
result.append((rv, lexer))
def type_sort(t):
# sort by:
# - analyse score
# - is primary filename pattern?
# - priority
# - last resort: class name
return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
result.sort(key=type_sort)
return result[-1][1](**options)
def guess_lexer(_text, **options):
"""Guess a lexer by strong distinctions in the text (eg, shebang)."""
if not isinstance(_text, str):
inencoding = options.get('inencoding', options.get('encoding'))
if inencoding:
_text = _text.decode(inencoding or 'utf8')
else:
_text, _ = guess_decode(_text)
# try to get a vim modeline first
ft = get_filetype_from_buffer(_text)
if ft is not None:
try:
return get_lexer_by_name(ft, **options)
except ClassNotFound:
pass
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
if rv > best_lexer[0]:
best_lexer[:] = (rv, lexer)
if not best_lexer[0] or best_lexer[1] is None:
raise ClassNotFound('no lexer matching the text found')
return best_lexer[1](**options)
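# For example, a shebang line is such a "strong distinction": PythonLexer's
# analyse_text should score it 1.0, so the loop above short-circuits there:
#
#     guess_lexer('#!/usr/bin/env python\nprint(1)\n')   # -> PythonLexer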
class _automodule(types.ModuleType):
"""Automatically import lexers."""
def __getattr__(self, name):
info = LEXERS.get(name)
if info:
_load_lexers(info[0])
cls = _lexer_cache[info[1]]
setattr(self, name, cls)
return cls
if name in COMPAT:
return getattr(self, COMPAT[name])
raise AttributeError(name)
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
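# Net effect of the module swap above (illustrative): attribute access on
# this package is lazy --
#
#     from pip._vendor.pygments.lexers import PythonLexer
#
# triggers _automodule.__getattr__, which imports the submodule named in
# LEXERS on first use and caches the class on the module via setattr.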
| castiel248/Convert | Lib/site-packages/pip/_vendor/pygments/lexers/__init__.py | Python | mit | 11,174 |
# Automatically generated by scripts/gen_mapfiles.py.
# DO NOT EDIT BY HAND; run `make mapfiles` instead.
LEXERS = {
'ABAPLexer': ('pip._vendor.pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)),
'AMDGPULexer': ('pip._vendor.pygments.lexers.amdgpu', 'AMDGPU', ('amdgpu',), ('*.isa',), ()),
'APLLexer': ('pip._vendor.pygments.lexers.apl', 'APL', ('apl',), ('*.apl', '*.aplf', '*.aplo', '*.apln', '*.aplc', '*.apli', '*.dyalog'), ()),
'AbnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
'ActionScript3Lexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript 3', ('actionscript3', 'as3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'ActionScriptLexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript', ('actionscript', 'as'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'AdaLexer': ('pip._vendor.pygments.lexers.ada', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AdlLexer': ('pip._vendor.pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
'AgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AheuiLexer': ('pip._vendor.pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()),
'AlloyLexer': ('pip._vendor.pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
'AmbientTalkLexer': ('pip._vendor.pygments.lexers.ambient', 'AmbientTalk', ('ambienttalk', 'ambienttalk/2', 'at'), ('*.at',), ('text/x-ambienttalk',)),
'AmplLexer': ('pip._vendor.pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()),
'Angular2HtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()),
'Angular2Lexer': ('pip._vendor.pygments.lexers.templates', 'Angular2', ('ng2',), (), ()),
'AntlrActionScriptLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-actionscript', 'antlr-as'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pip._vendor.pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'ArduinoLexer': ('pip._vendor.pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
'ArrowLexer': ('pip._vendor.pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), ()),
'AscLexer': ('pip._vendor.pygments.lexers.asc', 'ASCII armored', ('asc', 'pem'), ('*.asc', '*.pem', 'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk', 'id_rsa'), ('application/pgp-keys', 'application/pgp-encrypted', 'application/pgp-signature')),
'AspectJLexer': ('pip._vendor.pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pip._vendor.pygments.lexers.graphics', 'Asymptote', ('asymptote', 'asy'), ('*.asy',), ('text/x-asymptote',)),
'AugeasLexer': ('pip._vendor.pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()),
'AutoItLexer': ('pip._vendor.pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pip._vendor.pygments.lexers.automation', 'autohotkey', ('autohotkey', 'ahk'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pip._vendor.pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()),
'BBCodeLexer': ('pip._vendor.pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BCLexer': ('pip._vendor.pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
'BSTLexer': ('pip._vendor.pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()),
'BareLexer': ('pip._vendor.pygments.lexers.bare', 'BARE', ('bare',), ('*.bare',), ()),
'BaseMakefileLexer': ('pip._vendor.pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pip._vendor.pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', '.kshrc', 'kshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')),
'BashSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')),
'BatchLexer': ('pip._vendor.pygments.lexers.shell', 'Batchfile', ('batch', 'bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BddLexer': ('pip._vendor.pygments.lexers.bdd', 'Bdd', ('bdd',), ('*.feature',), ('text/x-bdd',)),
'BefungeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BerryLexer': ('pip._vendor.pygments.lexers.berry', 'Berry', ('berry', 'be'), ('*.be',), ('text/x-berry', 'application/x-berry')),
'BibTeXLexer': ('pip._vendor.pygments.lexers.bibtex', 'BibTeX', ('bibtex', 'bib'), ('*.bib',), ('text/x-bibtex',)),
'BlitzBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)),
'BoaLexer': ('pip._vendor.pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()),
'BooLexer': ('pip._vendor.pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BoogieLexer': ('pip._vendor.pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()),
'BrainfuckLexer': ('pip._vendor.pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BugsLexer': ('pip._vendor.pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CAmkESLexer': ('pip._vendor.pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()),
'CLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc', '*.x[bp]m'), ('text/x-chdr', 'text/x-csrc', 'image/x-xbitmap', 'image/x-xpixmap')),
'CMakeLexer': ('pip._vendor.pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CPSALexer': ('pip._vendor.pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
'CSSUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'CSS+UL4', ('css+ul4',), ('*.cssul4',), ()),
'CSharpAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'C#', ('csharp', 'c#', 'cs'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pip._vendor.pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
'CadlLexer': ('pip._vendor.pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()),
'CapDLLexer': ('pip._vendor.pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()),
'CapnProtoLexer': ('pip._vendor.pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()),
'CbmBasicV2Lexer': ('pip._vendor.pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CddlLexer': ('pip._vendor.pygments.lexers.cddl', 'CDDL', ('cddl',), ('*.cddl',), ('text/x-cddl',)),
'CeylonLexer': ('pip._vendor.pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pip._vendor.pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'ChaiscriptLexer': ('pip._vendor.pygments.lexers.scripting', 'ChaiScript', ('chaiscript', 'chai'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
'ChapelLexer': ('pip._vendor.pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
'CharmciLexer': ('pip._vendor.pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()),
'CheetahHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Cheetah', ('javascript+cheetah', 'js+cheetah', 'javascript+spitfire', 'js+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pip._vendor.pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'CirruLexer': ('pip._vendor.pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
'ClayLexer': ('pip._vendor.pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'CleanLexer': ('pip._vendor.pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
'ClojureLexer': ('pip._vendor.pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj', '*.cljc'), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pip._vendor.pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pip._vendor.pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pip._vendor.pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'CoffeeScript', ('coffeescript', 'coffee-script', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionCFCLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
'ColdfusionHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pip._vendor.pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'Comal80Lexer': ('pip._vendor.pygments.lexers.comal', 'COMAL-80', ('comal', 'comal80'), ('*.cml', '*.comal'), ()),
'CommonLispLexer': ('pip._vendor.pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
'ComponentPascalLexer': ('pip._vendor.pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
'CoqLexer': ('pip._vendor.pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CplintLexer': ('pip._vendor.pygments.lexers.cplint', 'cplint', ('cplint',), ('*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl'), ('text/x-cplint',)),
'CppLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP', '*.tpp'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrmshLexer': ('pip._vendor.pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
'CrocLexer': ('pip._vendor.pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
'CrystalLexer': ('pip._vendor.pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)),
'CsoundDocumentLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
'CsoundOrchestraLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()),
'CsoundScoreLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
'CssDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), ('*.css.j2', '*.css.jinja2'), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Ruby', ('css+ruby', 'css+erb'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pip._vendor.pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pip._vendor.pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CypherLexer': ('pip._vendor.pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
'CythonLexer': ('pip._vendor.pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pip._vendor.pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pip._vendor.pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pip._vendor.pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'Dasm16Lexer': ('pip._vendor.pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)),
'DebianControlLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Control file', ('debcontrol', 'control'), ('control',), ()),
'DelphiLexer': ('pip._vendor.pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
'DevicetreeLexer': ('pip._vendor.pygments.lexers.devicetree', 'Devicetree', ('devicetree', 'dts'), ('*.dts', '*.dtsi'), ('text/x-c',)),
'DgLexer': ('pip._vendor.pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pip._vendor.pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pip._vendor.pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DockerLexer': ('pip._vendor.pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
'DtdLexer': ('pip._vendor.pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pip._vendor.pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pip._vendor.pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pip._vendor.pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pip._vendor.pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EarlGreyLexer': ('pip._vendor.pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)),
'EasytrieveLexer': ('pip._vendor.pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
'EbnfLexer': ('pip._vendor.pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'EiffelLexer': ('pip._vendor.pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
'ElixirConsoleLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.eex', '*.exs', '*.leex'), ('text/x-elixir',)),
'ElmLexer': ('pip._vendor.pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)),
'ElpiLexer': ('pip._vendor.pygments.lexers.elpi', 'Elpi', ('elpi',), ('*.elpi',), ('text/x-elpi',)),
'EmacsLispLexer': ('pip._vendor.pygments.lexers.lisp', 'EmacsLisp', ('emacs-lisp', 'elisp', 'emacs'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
'EmailLexer': ('pip._vendor.pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)),
'ErbLexer': ('pip._vendor.pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pip._vendor.pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'ExeclineLexer': ('pip._vendor.pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()),
'EzhilLexer': ('pip._vendor.pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
'FSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
'FStarLexer': ('pip._vendor.pygments.lexers.ml', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)),
'FactorLexer': ('pip._vendor.pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pip._vendor.pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pip._vendor.pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pip._vendor.pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FennelLexer': ('pip._vendor.pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()),
'FishShellLexer': ('pip._vendor.pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
'FlatlineLexer': ('pip._vendor.pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)),
'FloScriptLexer': ('pip._vendor.pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()),
'ForthLexer': ('pip._vendor.pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)),
'FortranFixedLexer': ('pip._vendor.pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
'FortranLexer': ('pip._vendor.pygments.lexers.fortran', 'Fortran', ('fortran', 'f90'), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pip._vendor.pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
'FreeFemLexer': ('pip._vendor.pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)),
'FutharkLexer': ('pip._vendor.pygments.lexers.futhark', 'Futhark', ('futhark',), ('*.fut',), ('text/x-futhark',)),
'GAPLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
'GDScriptLexer': ('pip._vendor.pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')),
'GLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GSQLLexer': ('pip._vendor.pygments.lexers.gsql', 'GSQL', ('gsql',), ('*.gsql',), ()),
'GasLexer': ('pip._vendor.pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GcodeLexer': ('pip._vendor.pygments.lexers.gcodelexer', 'g-code', ('gcode',), ('*.gcode',), ()),
'GenshiLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pip._vendor.pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pip._vendor.pygments.lexers.testing', 'Gherkin', ('gherkin', 'cucumber'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pip._vendor.pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pip._vendor.pygments.lexers.go', 'Go', ('go', 'golang'), ('*.go',), ('text/x-gosrc',)),
'GoloLexer': ('pip._vendor.pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
'GoodDataCLLexer': ('pip._vendor.pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GosuLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GraphvizLexer': ('pip._vendor.pygments.lexers.graphviz', 'Graphviz', ('graphviz', 'dot'), ('*.gv', '*.dot'), ('text/x-graphviz', 'text/vnd.graphviz')),
'GroffLexer': ('pip._vendor.pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1-9]', '*.man', '*.1p', '*.3pm'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pip._vendor.pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
'HLSLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)),
'HTMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'HTML+UL4', ('html+ul4',), ('*.htmlul4',), ()),
'HamlLexer': ('pip._vendor.pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
'HandlebarsHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
'HandlebarsLexer': ('pip._vendor.pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
'HaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pip._vendor.pygments.lexers.haxe', 'Haxe', ('haxe', 'hxsl', 'hx'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HexdumpLexer': ('pip._vendor.pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
'HsailLexer': ('pip._vendor.pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
'HspecLexer': ('pip._vendor.pygments.lexers.haskell', 'Hspec', ('hspec',), (), ()),
'HtmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), ('*.html.j2', '*.htm.j2', '*.xhtml.j2', '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2'), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pip._vendor.pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pip._vendor.pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pip._vendor.pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HyLexer': ('pip._vendor.pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
'HybrisLexer': ('pip._vendor.pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pip._vendor.pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IconLexer': ('pip._vendor.pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()),
'IdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
'IgorLexer': ('pip._vendor.pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'Inform6Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
'Inform6TemplateLexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
'Inform7Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
'IniLexer': ('pip._vendor.pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf', '.editorconfig', '*.service', '*.socket', '*.device', '*.mount', '*.automount', '*.swap', '*.target', '*.path', '*.timer', '*.slice', '*.scope'), ('text/x-ini', 'text/inf')),
'IoLexer': ('pip._vendor.pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pip._vendor.pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pip._vendor.pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'IsabelleLexer': ('pip._vendor.pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
'JLexer': ('pip._vendor.pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
'JMESPathLexer': ('pip._vendor.pygments.lexers.jmespath', 'JMESPath', ('jmespath', 'jp'), ('*.jp',), ()),
'JSLTLexer': ('pip._vendor.pygments.lexers.jslt', 'JSLT', ('jslt',), ('*.jslt',), ('text/x-jslt',)),
'JagsLexer': ('pip._vendor.pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JasminLexer': ('pip._vendor.pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
'JavaLexer': ('pip._vendor.pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), ('*.js.j2', '*.js.jinja2'), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Ruby', ('javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pip._vendor.pygments.lexers.javascript', 'JavaScript', ('javascript', 'js'), ('*.js', '*.jsm', '*.mjs', '*.cjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+PHP', ('javascript+php', 'js+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Smarty', ('javascript+smarty', 'js+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JavascriptUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Javascript+UL4', ('js+ul4',), ('*.jsul4',), ()),
'JclLexer': ('pip._vendor.pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
'JsgfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')),
'JsonBareObjectLexer': ('pip._vendor.pygments.lexers.data', 'JSONBareObject', (), (), ()),
'JsonLdLexer': ('pip._vendor.pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
'JsonLexer': ('pip._vendor.pygments.lexers.data', 'JSON', ('json', 'json-object'), ('*.json', 'Pipfile.lock'), ('application/json', 'application/json-object')),
'JspLexer': ('pip._vendor.pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pip._vendor.pygments.lexers.julia', 'Julia console', ('jlcon', 'julia-repl'), (), ()),
'JuliaLexer': ('pip._vendor.pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'JuttleLexer': ('pip._vendor.pygments.lexers.javascript', 'Juttle', ('juttle',), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')),
'KLexer': ('pip._vendor.pygments.lexers.q', 'K', ('k',), ('*.k',), ()),
'KalLexer': ('pip._vendor.pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
'KconfigLexer': ('pip._vendor.pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KernelLogLexer': ('pip._vendor.pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()),
'KokaLexer': ('pip._vendor.pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pip._vendor.pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt', '*.kts'), ('text/x-kotlin',)),
'KuinLexer': ('pip._vendor.pygments.lexers.kuin', 'Kuin', ('kuin',), ('*.kn',), ()),
'LSLLexer': ('pip._vendor.pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
'LassoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Lasso', ('javascript+lasso', 'js+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pip._vendor.pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LeanLexer': ('pip._vendor.pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)),
'LessCssLexer': ('pip._vendor.pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)),
'LighttpdConfLexer': ('pip._vendor.pygments.lexers.configs', 'Lighttpd configuration file', ('lighttpd', 'lighty'), ('lighttpd.conf',), ('text/x-lighttpd-conf',)),
'LilyPondLexer': ('pip._vendor.pygments.lexers.lilypond', 'LilyPond', ('lilypond',), ('*.ly',), ()),
'LimboLexer': ('pip._vendor.pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
'LiquidLexer': ('pip._vendor.pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
'LiterateAgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Agda', ('literate-agda', 'lagda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateCryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Cryptol', ('literate-cryptol', 'lcryptol', 'lcry'), ('*.lcry',), ('text/x-literate-cryptol',)),
'LiterateHaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Haskell', ('literate-haskell', 'lhaskell', 'lhs'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiterateIdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Idris', ('literate-idris', 'lidris', 'lidr'), ('*.lidr',), ('text/x-literate-idris',)),
'LiveScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'LiveScript', ('livescript', 'live-script'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LlvmMirBodyLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR Body', ('llvm-mir-body',), (), ()),
'LlvmMirLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR', ('llvm-mir',), ('*.mir',), ()),
'LogosLexer': ('pip._vendor.pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pip._vendor.pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
'LuaLexer': ('pip._vendor.pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'MCFunctionLexer': ('pip._vendor.pygments.lexers.mcfunction', 'MCFunction', ('mcfunction', 'mcf'), ('*.mcfunction',), ('text/mcfunction',)),
'MIMELexer': ('pip._vendor.pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')),
'MOOCodeLexer': ('pip._vendor.pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MSDOSSessionLexer': ('pip._vendor.pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
'Macaulay2Lexer': ('pip._vendor.pygments.lexers.macaulay2', 'Macaulay2', ('macaulay2',), ('*.m2',), ()),
'MakefileLexer': ('pip._vendor.pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Mako', ('javascript+mako', 'js+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pip._vendor.pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pip._vendor.pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MarkdownLexer': ('pip._vendor.pygments.lexers.markup', 'Markdown', ('markdown', 'md'), ('*.md', '*.markdown'), ('text/x-markdown',)),
'MaskLexer': ('pip._vendor.pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
'MasonLexer': ('pip._vendor.pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MathematicaLexer': ('pip._vendor.pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
'MatlabLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
'MaximaLexer': ('pip._vendor.pygments.lexers.maxima', 'Maxima', ('maxima', 'macsyma'), ('*.mac', '*.max'), ()),
'MesonLexer': ('pip._vendor.pygments.lexers.meson', 'Meson', ('meson', 'meson.build'), ('meson.build', 'meson_options.txt'), ('text/x-meson',)),
'MiniDLexer': ('pip._vendor.pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
'MiniScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MiniScript', ('miniscript', 'ms'), ('*.ms',), ('text/x-minicript', 'application/x-miniscript')),
'ModelicaLexer': ('pip._vendor.pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pip._vendor.pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pip._vendor.pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pip._vendor.pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MonteLexer': ('pip._vendor.pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()),
'MoonScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MoonScript', ('moonscript', 'moon'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MoselLexer': ('pip._vendor.pygments.lexers.mosel', 'Mosel', ('mosel',), ('*.mos',), ()),
'MozPreprocCssLexer': ('pip._vendor.pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
'MozPreprocHashLexer': ('pip._vendor.pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
'MozPreprocJavascriptLexer': ('pip._vendor.pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
'MozPreprocPercentLexer': ('pip._vendor.pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
'MozPreprocXulLexer': ('pip._vendor.pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
'MqlLexer': ('pip._vendor.pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
'MscgenLexer': ('pip._vendor.pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pip._vendor.pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pip._vendor.pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pip._vendor.pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Myghty', ('javascript+myghty', 'js+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
'MyghtyLexer': ('pip._vendor.pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NCLLexer': ('pip._vendor.pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)),
'NSISLexer': ('pip._vendor.pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pip._vendor.pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
'NasmObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
'NemerleLexer': ('pip._vendor.pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pip._vendor.pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NestedTextLexer': ('pip._vendor.pygments.lexers.configs', 'NestedText', ('nestedtext', 'nt'), ('*.nt',), ()),
'NewLispLexer': ('pip._vendor.pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pip._vendor.pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)),
'NimrodLexer': ('pip._vendor.pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nim',)),
'NitLexer': ('pip._vendor.pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
'NixLexer': ('pip._vendor.pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
'NodeConsoleLexer': ('pip._vendor.pygments.lexers.javascript', 'Node.js REPL console session', ('nodejsrepl',), (), ('text/x-nodejsrepl',)),
'NotmuchLexer': ('pip._vendor.pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()),
'NuSMVLexer': ('pip._vendor.pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()),
'NumPyLexer': ('pip._vendor.pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pip._vendor.pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pip._vendor.pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pip._vendor.pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OdinLexer': ('pip._vendor.pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)),
'OmgIdlLexer': ('pip._vendor.pygments.lexers.c_like', 'OMG Interface Definition Language', ('omg-idl',), ('*.idl', '*.pidl'), ()),
'OocLexer': ('pip._vendor.pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pip._vendor.pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pip._vendor.pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'OutputLexer': ('pip._vendor.pygments.lexers.special', 'Text output', ('output',), (), ()),
'PacmanConfLexer': ('pip._vendor.pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()),
'PanLexer': ('pip._vendor.pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
'ParaSailLexer': ('pip._vendor.pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
'PawnLexer': ('pip._vendor.pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
'PegLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)),
'Perl6Lexer': ('pip._vendor.pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pip._vendor.pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', '*.perl'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pip._vendor.pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PigLexer': ('pip._vendor.pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
'PikeLexer': ('pip._vendor.pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
'PkgConfigLexer': ('pip._vendor.pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()),
'PlPgsqlLexer': ('pip._vendor.pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PointlessLexer': ('pip._vendor.pygments.lexers.pointless', 'Pointless', ('pointless',), ('*.ptls',), ()),
'PonyLexer': ('pip._vendor.pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()),
'PostScriptLexer': ('pip._vendor.pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pip._vendor.pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell', ('powershell', 'pwsh', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PowerShellSessionLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell Session', ('pwsh-session', 'ps1con'), (), ()),
'PraatLexer': ('pip._vendor.pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()),
'ProcfileLexer': ('pip._vendor.pygments.lexers.procfile', 'Procfile', ('procfile',), ('Procfile',), ()),
'PrologLexer': ('pip._vendor.pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PromQLLexer': ('pip._vendor.pygments.lexers.promql', 'PromQL', ('promql',), ('*.promql',), ()),
'PropertiesLexer': ('pip._vendor.pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pip._vendor.pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PsyshConsoleLexer': ('pip._vendor.pygments.lexers.php', 'PsySH console session for PHP', ('psysh',), (), ()),
'PugLexer': ('pip._vendor.pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')),
'PuppetLexer': ('pip._vendor.pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pip._vendor.pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python2Lexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')),
'Python2TracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)),
'PythonConsoleLexer': ('pip._vendor.pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pip._vendor.pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
'PythonTracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')),
'PythonUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Python+UL4', ('py+ul4',), ('*.pyul4',), ()),
'QBasicLexer': ('pip._vendor.pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
'QLexer': ('pip._vendor.pygments.lexers.q', 'Q', ('q',), ('*.q',), ()),
'QVToLexer': ('pip._vendor.pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
'QlikLexer': ('pip._vendor.pygments.lexers.qlik', 'Qlik', ('qlik', 'qlikview', 'qliksense', 'qlikscript'), ('*.qvs', '*.qvw'), ()),
'QmlLexer': ('pip._vendor.pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
'RConsoleLexer': ('pip._vendor.pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RNCCompactLexer': ('pip._vendor.pygments.lexers.rnc', 'Relax-NG Compact', ('rng-compact', 'rnc'), ('*.rnc',), ()),
'RPMSpecLexer': ('pip._vendor.pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pip._vendor.pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pip._vendor.pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pip._vendor.pygments.lexers.special', 'Raw token data', (), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pip._vendor.pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'ReasonLexer': ('pip._vendor.pygments.lexers.ml', 'ReasonML', ('reasonml', 'reason'), ('*.re', '*.rei'), ('text/x-reasonml',)),
'RebolLexer': ('pip._vendor.pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
'RedLexer': ('pip._vendor.pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
'RedcodeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pip._vendor.pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'ResourceLexer': ('pip._vendor.pygments.lexers.resource', 'ResourceBundle', ('resourcebundle', 'resource'), (), ()),
'RexxLexer': ('pip._vendor.pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pip._vendor.pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RideLexer': ('pip._vendor.pygments.lexers.ride', 'Ride', ('ride',), ('*.ride',), ('text/x-ride',)),
'RitaLexer': ('pip._vendor.pygments.lexers.rita', 'Rita', ('rita',), ('*.rita',), ('text/rita',)),
'RoboconfGraphLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
'RoboconfInstancesLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
'RobotFrameworkLexer': ('pip._vendor.pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot', '*.resource'), ('text/x-robotframework',)),
'RqlLexer': ('pip._vendor.pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
'RslLexer': ('pip._vendor.pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
'RstLexer': ('pip._vendor.pygments.lexers.markup', 'reStructuredText', ('restructuredtext', 'rst', 'rest'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RtsLexer': ('pip._vendor.pygments.lexers.trafficscript', 'TrafficScript', ('trafficscript', 'rts'), ('*.rts',), ()),
'RubyConsoleLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby', ('ruby', 'rb', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile', 'Vagrantfile'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pip._vendor.pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust', 'text/x-rust')),
'SASLexer': ('pip._vendor.pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
'SLexer': ('pip._vendor.pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pip._vendor.pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SNBTLexer': ('pip._vendor.pygments.lexers.mcfunction', 'SNBT', ('snbt',), ('*.snbt',), ('text/snbt',)),
'SarlLexer': ('pip._vendor.pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
'SassLexer': ('pip._vendor.pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
'SaviLexer': ('pip._vendor.pygments.lexers.savi', 'Savi', ('savi',), ('*.savi',), ()),
'ScalaLexer': ('pip._vendor.pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pip._vendor.pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
'ScdocLexer': ('pip._vendor.pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()),
'SchemeLexer': ('pip._vendor.pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pip._vendor.pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pip._vendor.pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'SedLexer': ('pip._vendor.pygments.lexers.textedit', 'Sed', ('sed', 'gsed', 'ssed'), ('*.sed', '*.[gs]sed'), ('text/x-sed',)),
'ShExCLexer': ('pip._vendor.pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)),
'ShenLexer': ('pip._vendor.pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
'SieveLexer': ('pip._vendor.pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()),
'SilverLexer': ('pip._vendor.pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
'SingularityLexer': ('pip._vendor.pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()),
'SlashLexer': ('pip._vendor.pygments.lexers.slash', 'Slash', ('slash',), ('*.sla',), ()),
'SlimLexer': ('pip._vendor.pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
'SlurmBashLexer': ('pip._vendor.pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
'SmaliLexer': ('pip._vendor.pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartGameFormatLexer': ('pip._vendor.pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()),
'SmartyLexer': ('pip._vendor.pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SmithyLexer': ('pip._vendor.pygments.lexers.smithy', 'Smithy', ('smithy',), ('*.smithy',), ()),
'SnobolLexer': ('pip._vendor.pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SnowballLexer': ('pip._vendor.pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()),
'SolidityLexer': ('pip._vendor.pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()),
'SophiaLexer': ('pip._vendor.pygments.lexers.sophia', 'Sophia', ('sophia',), ('*.aes',), ()),
'SourcePawnLexer': ('pip._vendor.pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Sourcelist', ('debsources', 'sourceslist', 'sources.list'), ('sources.list',), ()),
'SparqlLexer': ('pip._vendor.pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
'SpiceLexer': ('pip._vendor.pygments.lexers.spice', 'Spice', ('spice', 'spicelang'), ('*.spice',), ('text/x-spice',)),
'SqlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'SQL+Jinja', ('sql+jinja',), ('*.sql', '*.sql.j2', '*.sql.jinja2'), ()),
'SqlLexer': ('pip._vendor.pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pip._vendor.pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SrcinfoLexer': ('pip._vendor.pygments.lexers.srcinfo', 'Srcinfo', ('srcinfo',), ('.SRCINFO',), ()),
'SspLexer': ('pip._vendor.pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pip._vendor.pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
'StataLexer': ('pip._vendor.pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')),
'SuperColliderLexer': ('pip._vendor.pygments.lexers.supercollider', 'SuperCollider', ('supercollider', 'sc'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')),
'SwiftLexer': ('pip._vendor.pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
'SwigLexer': ('pip._vendor.pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'TAPLexer': ('pip._vendor.pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
'TNTLexer': ('pip._vendor.pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()),
'TOMLLexer': ('pip._vendor.pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ()),
'Tads3Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
'TalLexer': ('pip._vendor.pygments.lexers.tal', 'Tal', ('tal', 'uxntal'), ('*.tal',), ('text/x-uxntal',)),
'TasmLexer': ('pip._vendor.pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
'TclLexer': ('pip._vendor.pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TcshSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
'TeaTemplateLexer': ('pip._vendor.pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TealLexer': ('pip._vendor.pygments.lexers.teal', 'teal', ('teal',), ('*.teal',), ()),
'TeraTermLexer': ('pip._vendor.pygments.lexers.teraterm', 'Tera Term macro', ('teratermmacro', 'teraterm', 'ttl'), ('*.ttl',), ('text/x-teratermmacro',)),
'TermcapLexer': ('pip._vendor.pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()),
'TerminfoLexer': ('pip._vendor.pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()),
'TerraformLexer': ('pip._vendor.pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')),
'TexLexer': ('pip._vendor.pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pip._vendor.pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'ThingsDBLexer': ('pip._vendor.pygments.lexers.thingsdb', 'ThingsDB', ('ti', 'thingsdb'), ('*.ti',), ()),
'ThriftLexer': ('pip._vendor.pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
'TiddlyWiki5Lexer': ('pip._vendor.pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), ('text/vnd.tiddlywiki',)),
'TodotxtLexer': ('pip._vendor.pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
'TransactSqlLexer': ('pip._vendor.pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
'TreetopLexer': ('pip._vendor.pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TurtleLexer': ('pip._vendor.pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
'TwigHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
'TwigLexer': ('pip._vendor.pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
'TypeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'TypeScript', ('typescript', 'ts'), ('*.ts',), ('application/x-typescript', 'text/x-typescript')),
'TypoScriptCssDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()),
'TypoScriptHtmlDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()),
'TypoScriptLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)),
'UL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'UL4', ('ul4',), ('*.ul4',), ()),
'UcodeLexer': ('pip._vendor.pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()),
'UniconLexer': ('pip._vendor.pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)),
'UnixConfigLexer': ('pip._vendor.pygments.lexers.configs', 'Unix/Linux config files', ('unixconfig', 'linuxconfig'), (), ()),
'UrbiscriptLexer': ('pip._vendor.pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'UsdLexer': ('pip._vendor.pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()),
'VBScriptLexer': ('pip._vendor.pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()),
'VCLLexer': ('pip._vendor.pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
'VCLSnippetLexer': ('pip._vendor.pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
'VCTreeStatusLexer': ('pip._vendor.pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
'VGLLexer': ('pip._vendor.pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pip._vendor.pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pip._vendor.pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet', 'lobas', 'oobas', 'sobas'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pip._vendor.pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pip._vendor.pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pip._vendor.pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'WDiffLexer': ('pip._vendor.pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()),
'WatLexer': ('pip._vendor.pygments.lexers.webassembly', 'WebAssembly', ('wast', 'wat'), ('*.wat', '*.wast'), ()),
'WebIDLLexer': ('pip._vendor.pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()),
'WhileyLexer': ('pip._vendor.pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)),
'X10Lexer': ('pip._vendor.pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
'XMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'XML+UL4', ('xml+ul4',), ('*.xmlul4',), ()),
'XQueryLexer': ('pip._vendor.pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), ('*.xml.j2', '*.xml.jinja2'), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Ruby', ('xml+ruby', 'xml+erb'), (), ('application/xml+ruby',)),
'XmlLexer': ('pip._vendor.pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XorgLexer': ('pip._vendor.pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()),
'XsltLexer': ('pip._vendor.pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pip._vendor.pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'XtlangLexer': ('pip._vendor.pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()),
'YamlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2'), ('text/x-yaml+jinja', 'text/x-sls')),
'YamlLexer': ('pip._vendor.pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
'YangLexer': ('pip._vendor.pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)),
'ZeekLexer': ('pip._vendor.pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()),
'ZephirLexer': ('pip._vendor.pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
'ZigLexer': ('pip._vendor.pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)),
'apdlexer': ('pip._vendor.pygments.lexers.apdlexer', 'ANSYS parametric design language', ('ansys', 'apdl'), ('*.ans',), ()),
}
| castiel248/Convert | Lib/site-packages/pip/_vendor/pygments/lexers/_mapping.py | Python | mit | 70,232 |
"""
pygments.lexers.python
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Python and related languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import keyword
from pip._vendor.pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
default, words, combined, do_insertions, this
from pip._vendor.pygments.util import get_bool_opt, shebang_matches
from pip._vendor.pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Other, Error
from pip._vendor.pygments import unistring as uni
__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
'Python2Lexer', 'Python2TracebackLexer',
'CythonLexer', 'DgLexer', 'NumPyLexer']
line_re = re.compile('.*?\n')
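# line_re yields each line of the input including its trailing newline; the
# console lexer below iterates over it to split a session line by line, e.g.
# line_re.findall('a\nb\n') == ['a\n', 'b\n'].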
class PythonLexer(RegexLexer):
"""
For Python source code (version 3.x).
.. versionadded:: 0.10
.. versionchanged:: 2.5
This is now the default ``PythonLexer``. It is still available as the
alias ``Python3Lexer``.
"""
name = 'Python'
url = 'http://www.python.org'
aliases = ['python', 'py', 'sage', 'python3', 'py3']
filenames = [
'*.py',
'*.pyw',
# Jython
'*.jy',
# Sage
'*.sage',
# SCons
'*.sc',
'SConstruct',
'SConscript',
# Skylark/Starlark (used by Bazel, Buck, and Pants)
'*.bzl',
'BUCK',
'BUILD',
'BUILD.bazel',
'WORKSPACE',
# Twisted Application infrastructure
'*.tac',
]
mimetypes = ['text/x-python', 'application/x-python',
'text/x-python3', 'application/x-python3']
uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)
def innerstring_rules(ttype):
return [
# the old style '%s' % (...) string formatting (still valid in Py3)
(r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[E-GXc-giorsaux%]', String.Interpol),
# the new style '{}'.format(...) string formatting
(r'\{'
r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name
r'(\![sra])?' # conversion
r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?'
r'\}', String.Interpol),
# backslashes, quotes and formatting signs must be parsed one at a time
(r'[^\\\'"%{\n]+', ttype),
(r'[\'"\\]', ttype),
# unhandled string formatting sign
(r'%|(\{{1,2})', ttype)
# newlines are an error (use "nl" state)
]
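    # For example, the two String.Interpol rules above match the old-style
    # specifier in "'%(name)05.2f' % d" and the new-style field in
    # "'{0.attr[key]!r:>10}'.format(obj)" respectively.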
def fstring_rules(ttype):
return [
            # Assuming that a '}' is the closing brace after a format specifier.
            # Sadly, this means that we won't detect syntax errors. But it's
            # more important to parse correct syntax correctly than to
            # highlight invalid syntax.
(r'\}', String.Interpol),
(r'\{', String.Interpol, 'expr-inside-fstring'),
# backslashes, quotes and formatting signs must be parsed one at a time
(r'[^\\\'"{}\n]+', ttype),
(r'[\'"\\]', ttype),
# newlines are an error (use "nl" state)
]
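    # Inside an f-string such as f"{x!r:>10}", the '{' rule above pushes the
    # 'expr-inside-fstring' state, which lexes 'x' as ordinary code and emits
    # '!r:' and the closing '}' as String.Interpol.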
tokens = {
'root': [
(r'\n', Text),
(r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
bygroups(Text, String.Affix, String.Doc)),
(r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
bygroups(Text, String.Affix, String.Doc)),
(r'\A#!.+$', Comment.Hashbang),
(r'#.*$', Comment.Single),
(r'\\\n', Text),
(r'\\', Text),
include('keywords'),
include('soft-keywords'),
(r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
(r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
'fromimport'),
(r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
'import'),
include('expr'),
],
'expr': [
# raw f-strings
('(?i)(rf|fr)(""")',
bygroups(String.Affix, String.Double),
combined('rfstringescape', 'tdqf')),
("(?i)(rf|fr)(''')",
bygroups(String.Affix, String.Single),
combined('rfstringescape', 'tsqf')),
('(?i)(rf|fr)(")',
bygroups(String.Affix, String.Double),
combined('rfstringescape', 'dqf')),
("(?i)(rf|fr)(')",
bygroups(String.Affix, String.Single),
combined('rfstringescape', 'sqf')),
# non-raw f-strings
('([fF])(""")', bygroups(String.Affix, String.Double),
combined('fstringescape', 'tdqf')),
("([fF])(''')", bygroups(String.Affix, String.Single),
combined('fstringescape', 'tsqf')),
('([fF])(")', bygroups(String.Affix, String.Double),
combined('fstringescape', 'dqf')),
("([fF])(')", bygroups(String.Affix, String.Single),
combined('fstringescape', 'sqf')),
# raw bytes and strings
('(?i)(rb|br|r)(""")',
bygroups(String.Affix, String.Double), 'tdqs'),
("(?i)(rb|br|r)(''')",
bygroups(String.Affix, String.Single), 'tsqs'),
('(?i)(rb|br|r)(")',
bygroups(String.Affix, String.Double), 'dqs'),
("(?i)(rb|br|r)(')",
bygroups(String.Affix, String.Single), 'sqs'),
# non-raw strings
('([uU]?)(""")', bygroups(String.Affix, String.Double),
combined('stringescape', 'tdqs')),
("([uU]?)(''')", bygroups(String.Affix, String.Single),
combined('stringescape', 'tsqs')),
('([uU]?)(")', bygroups(String.Affix, String.Double),
combined('stringescape', 'dqs')),
("([uU]?)(')", bygroups(String.Affix, String.Single),
combined('stringescape', 'sqs')),
# non-raw bytes
('([bB])(""")', bygroups(String.Affix, String.Double),
combined('bytesescape', 'tdqs')),
("([bB])(''')", bygroups(String.Affix, String.Single),
combined('bytesescape', 'tsqs')),
('([bB])(")', bygroups(String.Affix, String.Double),
combined('bytesescape', 'dqs')),
("([bB])(')", bygroups(String.Affix, String.Single),
combined('bytesescape', 'sqs')),
(r'[^\S\n]+', Text),
include('numbers'),
(r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator),
(r'[]{}:(),;[]', Punctuation),
(r'(in|is|and|or|not)\b', Operator.Word),
include('expr-keywords'),
include('builtins'),
include('magicfuncs'),
include('magicvars'),
include('name'),
],
'expr-inside-fstring': [
(r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
# without format specifier
(r'(=\s*)?' # debug (https://bugs.python.org/issue36817)
r'(\![sraf])?' # conversion
r'\}', String.Interpol, '#pop'),
# with format specifier
# we'll catch the remaining '}' in the outer scope
(r'(=\s*)?' # debug (https://bugs.python.org/issue36817)
r'(\![sraf])?' # conversion
r':', String.Interpol, '#pop'),
(r'\s+', Text), # allow new lines
include('expr'),
],
'expr-inside-fstring-inner': [
(r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
(r'[])}]', Punctuation, '#pop'),
(r'\s+', Text), # allow new lines
include('expr'),
],
'expr-keywords': [
# Based on https://docs.python.org/3/reference/expressions.html
(words((
'async for', 'await', 'else', 'for', 'if', 'lambda',
'yield', 'yield from'), suffix=r'\b'),
Keyword),
(words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
],
'keywords': [
(words((
'assert', 'async', 'await', 'break', 'continue', 'del', 'elif',
'else', 'except', 'finally', 'for', 'global', 'if', 'lambda',
'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield',
'yield from', 'as', 'with'), suffix=r'\b'),
Keyword),
(words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
],
'soft-keywords': [
# `match`, `case` and `_` soft keywords
(r'(^[ \t]*)' # at beginning of line + possible indentation
r'(match|case)\b' # a possible keyword
r'(?![ \t]*(?:' # not followed by...
r'[:,;=^&|@~)\]}]|(?:' + # characters and keywords that mean this isn't
r'|'.join(keyword.kwlist) + r')\b))', # pattern matching
bygroups(Text, Keyword), 'soft-keywords-inner'),
],
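        # For example, 'match point:' at the start of a line matches here and
        # highlights 'match' as a keyword, while 'match = 3' does not, because
        # '=' appears in the negative lookahead above.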
'soft-keywords-inner': [
# optional `_` keyword
(r'(\s+)([^\n_]*)(_\b)', bygroups(Text, using(this), Keyword)),
default('#pop')
],
'builtins': [
(words((
'__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray',
'breakpoint', 'bytes', 'chr', 'classmethod', 'compile', 'complex',
'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'filter',
'float', 'format', 'frozenset', 'getattr', 'globals', 'hasattr',
'hash', 'hex', 'id', 'input', 'int', 'isinstance', 'issubclass',
'iter', 'len', 'list', 'locals', 'map', 'max', 'memoryview',
'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'print',
'property', 'range', 'repr', 'reversed', 'round', 'set', 'setattr',
'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple',
'type', 'vars', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Builtin),
(r'(?<!\.)(self|Ellipsis|NotImplemented|cls)\b', Name.Builtin.Pseudo),
(words((
'ArithmeticError', 'AssertionError', 'AttributeError',
'BaseException', 'BufferError', 'BytesWarning', 'DeprecationWarning',
'EOFError', 'EnvironmentError', 'Exception', 'FloatingPointError',
'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError',
'ImportWarning', 'IndentationError', 'IndexError', 'KeyError',
'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError',
'NotImplementedError', 'OSError', 'OverflowError',
'PendingDeprecationWarning', 'ReferenceError', 'ResourceWarning',
'RuntimeError', 'RuntimeWarning', 'StopIteration',
'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit',
'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError',
'Warning', 'WindowsError', 'ZeroDivisionError',
# new builtin exceptions from PEP 3151
'BlockingIOError', 'ChildProcessError', 'ConnectionError',
'BrokenPipeError', 'ConnectionAbortedError', 'ConnectionRefusedError',
'ConnectionResetError', 'FileExistsError', 'FileNotFoundError',
'InterruptedError', 'IsADirectoryError', 'NotADirectoryError',
'PermissionError', 'ProcessLookupError', 'TimeoutError',
# others new in Python 3
'StopAsyncIteration', 'ModuleNotFoundError', 'RecursionError',
'EncodingWarning'),
prefix=r'(?<!\.)', suffix=r'\b'),
Name.Exception),
],
'magicfuncs': [
(words((
'__abs__', '__add__', '__aenter__', '__aexit__', '__aiter__',
'__and__', '__anext__', '__await__', '__bool__', '__bytes__',
'__call__', '__complex__', '__contains__', '__del__', '__delattr__',
'__delete__', '__delitem__', '__dir__', '__divmod__', '__enter__',
'__eq__', '__exit__', '__float__', '__floordiv__', '__format__',
'__ge__', '__get__', '__getattr__', '__getattribute__',
'__getitem__', '__gt__', '__hash__', '__iadd__', '__iand__',
'__ifloordiv__', '__ilshift__', '__imatmul__', '__imod__',
'__imul__', '__index__', '__init__', '__instancecheck__',
'__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
'__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__',
'__len__', '__length_hint__', '__lshift__', '__lt__', '__matmul__',
'__missing__', '__mod__', '__mul__', '__ne__', '__neg__',
'__new__', '__next__', '__or__', '__pos__', '__pow__',
'__prepare__', '__radd__', '__rand__', '__rdivmod__', '__repr__',
'__reversed__', '__rfloordiv__', '__rlshift__', '__rmatmul__',
'__rmod__', '__rmul__', '__ror__', '__round__', '__rpow__',
'__rrshift__', '__rshift__', '__rsub__', '__rtruediv__',
'__rxor__', '__set__', '__setattr__', '__setitem__', '__str__',
'__sub__', '__subclasscheck__', '__truediv__',
'__xor__'), suffix=r'\b'),
Name.Function.Magic),
],
'magicvars': [
(words((
'__annotations__', '__bases__', '__class__', '__closure__',
'__code__', '__defaults__', '__dict__', '__doc__', '__file__',
'__func__', '__globals__', '__kwdefaults__', '__module__',
'__mro__', '__name__', '__objclass__', '__qualname__',
'__self__', '__slots__', '__weakref__'), suffix=r'\b'),
Name.Variable.Magic),
],
'numbers': [
(r'(\d(?:_?\d)*\.(?:\d(?:_?\d)*)?|(?:\d(?:_?\d)*)?\.\d(?:_?\d)*)'
r'([eE][+-]?\d(?:_?\d)*)?', Number.Float),
(r'\d(?:_?\d)*[eE][+-]?\d(?:_?\d)*j?', Number.Float),
(r'0[oO](?:_?[0-7])+', Number.Oct),
(r'0[bB](?:_?[01])+', Number.Bin),
(r'0[xX](?:_?[a-fA-F0-9])+', Number.Hex),
(r'\d(?:_?\d)*', Number.Integer),
],
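        # These rules accept PEP 515 underscore grouping, e.g. 1_000.5e-3,
        # 0o777, 0b1010, 0x_FF and 42 are all matched above.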
'name': [
(r'@' + uni_name, Name.Decorator),
(r'@', Operator), # new matrix multiplication operator
(uni_name, Name),
],
'funcname': [
include('magicfuncs'),
(uni_name, Name.Function, '#pop'),
default('#pop'),
],
'classname': [
(uni_name, Name.Class, '#pop'),
],
'import': [
(r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
(r'\.', Name.Namespace),
(uni_name, Name.Namespace),
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
default('#pop') # all else: go back
],
'fromimport': [
(r'(\s+)(import)\b', bygroups(Text, Keyword.Namespace), '#pop'),
(r'\.', Name.Namespace),
# if None occurs here, it's "raise x from None", since None can
# never be a module name
(r'None\b', Name.Builtin.Pseudo, '#pop'),
(uni_name, Name.Namespace),
default('#pop'),
],
'rfstringescape': [
(r'\{\{', String.Escape),
(r'\}\}', String.Escape),
],
'fstringescape': [
include('rfstringescape'),
include('stringescape'),
],
'bytesescape': [
(r'\\([\\abfnrtv"\']|\n|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'stringescape': [
(r'\\(N\{.*?\}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8})', String.Escape),
include('bytesescape')
],
'fstrings-single': fstring_rules(String.Single),
'fstrings-double': fstring_rules(String.Double),
'strings-single': innerstring_rules(String.Single),
'strings-double': innerstring_rules(String.Double),
'dqf': [
(r'"', String.Double, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
include('fstrings-double')
],
'sqf': [
(r"'", String.Single, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
include('fstrings-single')
],
'dqs': [
(r'"', String.Double, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
include('strings-double')
],
'sqs': [
(r"'", String.Single, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
include('strings-single')
],
'tdqf': [
(r'"""', String.Double, '#pop'),
include('fstrings-double'),
(r'\n', String.Double)
],
'tsqf': [
(r"'''", String.Single, '#pop'),
include('fstrings-single'),
(r'\n', String.Single)
],
'tdqs': [
(r'"""', String.Double, '#pop'),
include('strings-double'),
(r'\n', String.Double)
],
'tsqs': [
(r"'''", String.Single, '#pop'),
include('strings-single'),
(r'\n', String.Single)
],
}
def analyse_text(text):
return shebang_matches(text, r'pythonw?(3(\.\d)?)?') or \
'import ' in text[:1000]
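    # For illustration: both scripts starting with '#!/usr/bin/env python3'
    # and any text containing 'import ' in its first 1000 characters score
    # as Python under this heuristic.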
Python3Lexer = PythonLexer
class Python2Lexer(RegexLexer):
"""
For Python 2.x source code.
.. versionchanged:: 2.5
This class has been renamed from ``PythonLexer``. ``PythonLexer`` now
refers to the Python 3 variant. File name patterns like ``*.py`` have
been moved to Python 3 as well.
"""
name = 'Python 2.x'
url = 'http://www.python.org'
aliases = ['python2', 'py2']
filenames = [] # now taken over by PythonLexer (3.x)
mimetypes = ['text/x-python2', 'application/x-python2']
def innerstring_rules(ttype):
return [
# the old style '%s' % (...) string formatting
(r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[E-GXc-giorsux%]', String.Interpol),
# backslashes, quotes and formatting signs must be parsed one at a time
(r'[^\\\'"%\n]+', ttype),
(r'[\'"\\]', ttype),
# unhandled string formatting sign
(r'%', ttype),
# newlines are an error (use "nl" state)
]
tokens = {
'root': [
(r'\n', Text),
(r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
bygroups(Text, String.Affix, String.Doc)),
(r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
bygroups(Text, String.Affix, String.Doc)),
(r'[^\S\n]+', Text),
(r'\A#!.+$', Comment.Hashbang),
(r'#.*$', Comment.Single),
(r'[]{}:(),;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
include('keywords'),
(r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
(r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
'fromimport'),
(r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
'import'),
include('builtins'),
include('magicfuncs'),
include('magicvars'),
include('backtick'),
('([rR]|[uUbB][rR]|[rR][uUbB])(""")',
bygroups(String.Affix, String.Double), 'tdqs'),
("([rR]|[uUbB][rR]|[rR][uUbB])(''')",
bygroups(String.Affix, String.Single), 'tsqs'),
('([rR]|[uUbB][rR]|[rR][uUbB])(")',
bygroups(String.Affix, String.Double), 'dqs'),
("([rR]|[uUbB][rR]|[rR][uUbB])(')",
bygroups(String.Affix, String.Single), 'sqs'),
('([uUbB]?)(""")', bygroups(String.Affix, String.Double),
combined('stringescape', 'tdqs')),
("([uUbB]?)(''')", bygroups(String.Affix, String.Single),
combined('stringescape', 'tsqs')),
('([uUbB]?)(")', bygroups(String.Affix, String.Double),
combined('stringescape', 'dqs')),
("([uUbB]?)(')", bygroups(String.Affix, String.Single),
combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
],
'keywords': [
(words((
'assert', 'break', 'continue', 'del', 'elif', 'else', 'except',
'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass',
'print', 'raise', 'return', 'try', 'while', 'yield',
'yield from', 'as', 'with'), suffix=r'\b'),
Keyword),
],
'builtins': [
(words((
'__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin',
'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod',
'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod',
'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float',
'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len',
'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object',
'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce',
'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice',
'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
'unichr', 'unicode', 'vars', 'xrange', 'zip'),
prefix=r'(?<!\.)', suffix=r'\b'),
Name.Builtin),
(r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|cls'
r')\b', Name.Builtin.Pseudo),
(words((
'ArithmeticError', 'AssertionError', 'AttributeError',
'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError',
'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit',
'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
'MemoryError', 'NameError',
'NotImplementedError', 'OSError', 'OverflowError', 'OverflowWarning',
'PendingDeprecationWarning', 'ReferenceError',
'RuntimeError', 'RuntimeWarning', 'StandardError', 'StopIteration',
'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit',
'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', 'Warning',
'WindowsError', 'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Exception),
],
'magicfuncs': [
(words((
'__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
'__complex__', '__contains__', '__del__', '__delattr__', '__delete__',
'__delitem__', '__delslice__', '__div__', '__divmod__', '__enter__',
'__eq__', '__exit__', '__float__', '__floordiv__', '__ge__', '__get__',
'__getattr__', '__getattribute__', '__getitem__', '__getslice__', '__gt__',
'__hash__', '__hex__', '__iadd__', '__iand__', '__idiv__', '__ifloordiv__',
'__ilshift__', '__imod__', '__imul__', '__index__', '__init__',
'__instancecheck__', '__int__', '__invert__', '__iop__', '__ior__',
'__ipow__', '__irshift__', '__isub__', '__iter__', '__itruediv__',
'__ixor__', '__le__', '__len__', '__long__', '__lshift__', '__lt__',
'__missing__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__',
'__nonzero__', '__oct__', '__op__', '__or__', '__pos__', '__pow__',
'__radd__', '__rand__', '__rcmp__', '__rdiv__', '__rdivmod__', '__repr__',
'__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__',
'__rop__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
'__rtruediv__', '__rxor__', '__set__', '__setattr__', '__setitem__',
'__setslice__', '__str__', '__sub__', '__subclasscheck__', '__truediv__',
'__unicode__', '__xor__'), suffix=r'\b'),
Name.Function.Magic),
],
'magicvars': [
(words((
'__bases__', '__class__', '__closure__', '__code__', '__defaults__',
'__dict__', '__doc__', '__file__', '__func__', '__globals__',
'__metaclass__', '__module__', '__mro__', '__name__', '__self__',
'__slots__', '__weakref__'),
suffix=r'\b'),
Name.Variable.Magic),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[bB][01]+', Number.Bin),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+j?', Number.Integer)
],
'backtick': [
('`.*?`', String.Backtick),
],
'name': [
(r'@[\w.]+', Name.Decorator),
(r'[a-zA-Z_]\w*', Name),
],
'funcname': [
include('magicfuncs'),
(r'[a-zA-Z_]\w*', Name.Function, '#pop'),
default('#pop'),
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'(?:[ \t]|\\\n)+', Text),
(r'as\b', Keyword.Namespace),
(r',', Operator),
(r'[a-zA-Z_][\w.]*', Name.Namespace),
default('#pop') # all else: go back
],
'fromimport': [
(r'(?:[ \t]|\\\n)+', Text),
(r'import\b', Keyword.Namespace, '#pop'),
# if None occurs here, it's "raise x from None", since None can
# never be a module name
(r'None\b', Name.Builtin.Pseudo, '#pop'),
# sadly, in "raise x from y" y will be highlighted as namespace too
(r'[a-zA-Z_.][\w.]*', Name.Namespace),
# anything else here also means "raise x from y" and is therefore
# not an error
default('#pop'),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings-single': innerstring_rules(String.Single),
'strings-double': innerstring_rules(String.Double),
'dqs': [
(r'"', String.Double, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
include('strings-double')
],
'sqs': [
(r"'", String.Single, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
include('strings-single')
],
'tdqs': [
(r'"""', String.Double, '#pop'),
include('strings-double'),
(r'\n', String.Double)
],
'tsqs': [
(r"'''", String.Single, '#pop'),
include('strings-single'),
(r'\n', String.Single)
],
}
def analyse_text(text):
return shebang_matches(text, r'pythonw?2(\.\d)?')
class PythonConsoleLexer(Lexer):
"""
For Python console output or doctests, such as:
.. sourcecode:: pycon
>>> a = 'foo'
>>> print a
foo
>>> 1 / 0
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ZeroDivisionError: integer division or modulo by zero
Additional options:
`python3`
Use Python 3 lexer for code. Default is ``True``.
.. versionadded:: 1.0
.. versionchanged:: 2.5
Now defaults to ``True``.
"""
name = 'Python console session'
aliases = ['pycon']
mimetypes = ['text/x-python-doctest']
def __init__(self, **options):
self.python3 = get_bool_opt(options, 'python3', True)
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
if self.python3:
pylexer = PythonLexer(**self.options)
tblexer = PythonTracebackLexer(**self.options)
else:
pylexer = Python2Lexer(**self.options)
tblexer = Python2TracebackLexer(**self.options)
curcode = ''
insertions = []
curtb = ''
tbindex = 0
tb = 0
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>>> ') or line.startswith('... '):
tb = 0
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:4])]))
curcode += line[4:]
elif line.rstrip() == '...' and not tb:
                # Only a new '>>> ' prompt can end an exception block;
                # otherwise an ellipsis in place of the traceback frames
                # will be mishandled.
insertions.append((len(curcode),
[(0, Generic.Prompt, '...')]))
curcode += line[3:]
else:
if curcode:
yield from do_insertions(
insertions, pylexer.get_tokens_unprocessed(curcode))
curcode = ''
insertions = []
if (line.startswith('Traceback (most recent call last):') or
re.match(' File "[^"]+", line \\d+\\n$', line)):
tb = 1
curtb = line
tbindex = match.start()
elif line == 'KeyboardInterrupt\n':
yield match.start(), Name.Class, line
elif tb:
curtb += line
if not (line.startswith(' ') or line.strip() == '...'):
tb = 0
for i, t, v in tblexer.get_tokens_unprocessed(curtb):
yield tbindex+i, t, v
curtb = ''
else:
yield match.start(), Generic.Output, line
if curcode:
yield from do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode))
if curtb:
for i, t, v in tblexer.get_tokens_unprocessed(curtb):
yield tbindex+i, t, v
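    # The method above buffers consecutive '>>> '/'... ' source lines in
    # curcode, records the prompt tokens as insertions, and lets
    # do_insertions() splice those prompts back into the Python lexer's
    # token stream at the recorded offsets.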
class PythonTracebackLexer(RegexLexer):
"""
For Python 3.x tracebacks, with support for chained exceptions.
.. versionadded:: 1.0
.. versionchanged:: 2.5
This is now the default ``PythonTracebackLexer``. It is still available
as the alias ``Python3TracebackLexer``.
"""
name = 'Python Traceback'
aliases = ['pytb', 'py3tb']
filenames = ['*.pytb', '*.py3tb']
mimetypes = ['text/x-python-traceback', 'text/x-python3-traceback']
tokens = {
'root': [
(r'\n', Text),
(r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
(r'^During handling of the above exception, another '
r'exception occurred:\n\n', Generic.Traceback),
(r'^The above exception was the direct cause of the '
r'following exception:\n\n', Generic.Traceback),
(r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
(r'^.*\n', Other),
],
'intb': [
(r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)),
(r'^( File )("[^"]+")(, line )(\d+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text)),
(r'^( )(.+)(\n)',
bygroups(Text, using(PythonLexer), Text), 'markers'),
(r'^([ \t]*)(\.\.\.)(\n)',
bygroups(Text, Comment, Text)), # for doctests...
(r'^([^:]+)(: )(.+)(\n)',
bygroups(Generic.Error, Text, Name, Text), '#pop'),
(r'^([a-zA-Z_][\w.]*)(:?\n)',
bygroups(Generic.Error, Text), '#pop')
],
'markers': [
# Either `PEP 657 <https://www.python.org/dev/peps/pep-0657/>`
# error locations in Python 3.11+, or single-caret markers
# for syntax errors before that.
(r'^( {4,})([~^]+)(\n)',
bygroups(Text, Punctuation.Marker, Text),
'#pop'),
default('#pop'),
],
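        # e.g. a Python 3.11+ frame such as
        #     print(1 / 0)
        #           ~~^~~
        # yields Punctuation.Marker for the '~~^~~' line.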
}
Python3TracebackLexer = PythonTracebackLexer
class Python2TracebackLexer(RegexLexer):
"""
For Python tracebacks.
.. versionadded:: 0.7
.. versionchanged:: 2.5
This class has been renamed from ``PythonTracebackLexer``.
``PythonTracebackLexer`` now refers to the Python 3 variant.
"""
name = 'Python 2.x Traceback'
aliases = ['py2tb']
filenames = ['*.py2tb']
mimetypes = ['text/x-python2-traceback']
tokens = {
'root': [
            # Cover both (most recent call last) and (innermost last).
# The optional ^C allows us to catch keyboard interrupt signals.
(r'^(\^C)?(Traceback.*\n)',
bygroups(Text, Generic.Traceback), 'intb'),
# SyntaxError starts with this.
(r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
(r'^.*\n', Other),
],
'intb': [
(r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)),
(r'^( File )("[^"]+")(, line )(\d+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text)),
(r'^( )(.+)(\n)',
bygroups(Text, using(Python2Lexer), Text), 'marker'),
(r'^([ \t]*)(\.\.\.)(\n)',
bygroups(Text, Comment, Text)), # for doctests...
(r'^([^:]+)(: )(.+)(\n)',
bygroups(Generic.Error, Text, Name, Text), '#pop'),
(r'^([a-zA-Z_]\w*)(:?\n)',
bygroups(Generic.Error, Text), '#pop')
],
'marker': [
# For syntax errors.
(r'( {4,})(\^)', bygroups(Text, Punctuation.Marker), '#pop'),
default('#pop'),
],
}
class CythonLexer(RegexLexer):
"""
For Pyrex and Cython source code.
.. versionadded:: 1.1
"""
name = 'Cython'
url = 'http://cython.org'
aliases = ['cython', 'pyx', 'pyrex']
filenames = ['*.pyx', '*.pxd', '*.pxi']
mimetypes = ['text/x-cython', 'application/x-cython']
tokens = {
'root': [
(r'\n', Text),
(r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
(r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
(r'[^\S\n]+', Text),
(r'#.*$', Comment),
(r'[]{}:(),;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'(<)([a-zA-Z0-9.?]+)(>)',
bygroups(Punctuation, Keyword.Type, Punctuation)),
(r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator),
(r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)',
bygroups(Keyword, Number.Integer, Operator, Name, Operator,
Name, Punctuation)),
include('keywords'),
(r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'),
# (should actually start a block with only cdefs)
(r'(cdef)(:)', bygroups(Keyword, Punctuation)),
(r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'),
(r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'),
include('builtins'),
include('backtick'),
('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
('[uU]?"""', String, combined('stringescape', 'tdqs')),
("[uU]?'''", String, combined('stringescape', 'tsqs')),
('[uU]?"', String, combined('stringescape', 'dqs')),
("[uU]?'", String, combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
],
'keywords': [
(words((
'assert', 'async', 'await', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif',
'else', 'except', 'except?', 'exec', 'finally', 'for', 'fused', 'gil',
'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print',
'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'),
Keyword),
(r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc),
],
'builtins': [
(words((
'__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', 'bint',
'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr',
'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr',
'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit',
'file', 'filter', 'float', 'frozenset', 'getattr', 'globals',
'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance',
'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max',
'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', 'Py_ssize_t',
'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed',
'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod',
'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', 'unsigned',
'vars', 'xrange', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Builtin),
(r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|NULL'
r')\b', Name.Builtin.Pseudo),
(words((
'ArithmeticError', 'AssertionError', 'AttributeError',
'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError',
'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit',
'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
'MemoryError', 'NameError', 'NotImplemented', 'NotImplementedError',
'OSError', 'OverflowError', 'OverflowWarning',
'PendingDeprecationWarning', 'ReferenceError', 'RuntimeError',
'RuntimeWarning', 'StandardError', 'StopIteration', 'SyntaxError',
'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError',
'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
'UnicodeWarning', 'UserWarning', 'ValueError', 'Warning',
'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Exception),
],
'numbers': [
(r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'backtick': [
('`.*?`', String.Backtick),
],
'name': [
(r'@\w+', Name.Decorator),
(r'[a-zA-Z_]\w*', Name),
],
'funcname': [
(r'[a-zA-Z_]\w*', Name.Function, '#pop')
],
'cdef': [
(r'(public|readonly|extern|api|inline)\b', Keyword.Reserved),
(r'(struct|enum|union|class)\b', Keyword),
(r'([a-zA-Z_]\w*)(\s*)(?=[(:#=]|$)',
bygroups(Name.Function, Text), '#pop'),
(r'([a-zA-Z_]\w*)(\s*)(,)',
bygroups(Name.Function, Text, Punctuation)),
(r'from\b', Keyword, '#pop'),
(r'as\b', Keyword),
(r':', Punctuation, '#pop'),
(r'(?=["\'])', Text, '#pop'),
(r'[a-zA-Z_]\w*', Keyword.Type),
(r'.', Text),
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
(r'[a-zA-Z_][\w.]*', Name.Namespace),
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
default('#pop') # all else: go back
],
'fromimport': [
(r'(\s+)(c?import)\b', bygroups(Text, Keyword), '#pop'),
(r'[a-zA-Z_.][\w.]*', Name.Namespace),
# ``cdef foo from "header"``, or ``for foo from 0 < i < 10``
default('#pop'),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings': [
(r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[E-GXc-giorsux%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
],
'nl': [
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here again for raw strings
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here again for raw strings
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl')
],
'tsqs': [
(r"'''", String, '#pop'),
include('strings'),
include('nl')
],
}
class DgLexer(RegexLexer):
"""
Lexer for dg,
a functional and object-oriented programming language
running on the CPython 3 VM.
.. versionadded:: 1.6
"""
name = 'dg'
aliases = ['dg']
filenames = ['*.dg']
mimetypes = ['text/x-dg']
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?$', Comment.Single),
(r'(?i)0b[01]+', Number.Bin),
(r'(?i)0o[0-7]+', Number.Oct),
(r'(?i)0x[0-9a-f]+', Number.Hex),
(r'(?i)[+-]?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?j?', Number.Float),
(r'(?i)[+-]?[0-9]+e[+-]?\d+j?', Number.Float),
(r'(?i)[+-]?[0-9]+j?', Number.Integer),
(r"(?i)(br|r?b?)'''", String, combined('stringescape', 'tsqs', 'string')),
(r'(?i)(br|r?b?)"""', String, combined('stringescape', 'tdqs', 'string')),
(r"(?i)(br|r?b?)'", String, combined('stringescape', 'sqs', 'string')),
(r'(?i)(br|r?b?)"', String, combined('stringescape', 'dqs', 'string')),
(r"`\w+'*`", Operator),
(r'\b(and|in|is|or|where)\b', Operator.Word),
(r'[!$%&*+\-./:<-@\\^|~;,]+', Operator),
(words((
'bool', 'bytearray', 'bytes', 'classmethod', 'complex', 'dict', 'dict\'',
'float', 'frozenset', 'int', 'list', 'list\'', 'memoryview', 'object',
'property', 'range', 'set', 'set\'', 'slice', 'staticmethod', 'str',
'super', 'tuple', 'tuple\'', 'type'),
prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
Name.Builtin),
(words((
'__import__', 'abs', 'all', 'any', 'bin', 'bind', 'chr', 'cmp', 'compile',
'complex', 'delattr', 'dir', 'divmod', 'drop', 'dropwhile', 'enumerate',
'eval', 'exhaust', 'filter', 'flip', 'foldl1?', 'format', 'fst',
'getattr', 'globals', 'hasattr', 'hash', 'head', 'hex', 'id', 'init',
'input', 'isinstance', 'issubclass', 'iter', 'iterate', 'last', 'len',
'locals', 'map', 'max', 'min', 'next', 'oct', 'open', 'ord', 'pow',
'print', 'repr', 'reversed', 'round', 'setattr', 'scanl1?', 'snd',
'sorted', 'sum', 'tail', 'take', 'takewhile', 'vars', 'zip'),
prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
Name.Builtin),
(r"(?<!\.)(self|Ellipsis|NotImplemented|None|True|False)(?!['\w])",
Name.Builtin.Pseudo),
(r"(?<!\.)[A-Z]\w*(Error|Exception|Warning)'*(?!['\w])",
Name.Exception),
(r"(?<!\.)(Exception|GeneratorExit|KeyboardInterrupt|StopIteration|"
r"SystemExit)(?!['\w])", Name.Exception),
(r"(?<![\w.])(except|finally|for|if|import|not|otherwise|raise|"
r"subclass|while|with|yield)(?!['\w])", Keyword.Reserved),
(r"[A-Z_]+'*(?!['\w])", Name),
(r"[A-Z]\w+'*(?!['\w])", Keyword.Type),
(r"\w+'*", Name),
(r'[()]', Punctuation),
(r'.', Error),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'string': [
(r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[E-GXc-giorsux%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String),
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop')
],
'sqs': [
(r"'", String, '#pop')
],
'tdqs': [
(r'"""', String, '#pop')
],
'tsqs': [
(r"'''", String, '#pop')
],
}
class NumPyLexer(PythonLexer):
"""
A Python lexer recognizing Numerical Python builtins.
.. versionadded:: 0.10
"""
name = 'NumPy'
url = 'https://numpy.org/'
aliases = ['numpy']
# override the mimetypes to not inherit them from python
mimetypes = []
filenames = []
EXTRA_KEYWORDS = {
'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
'set_numeric_ops', 'set_printoptions', 'set_string_function',
'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
}
def get_tokens_unprocessed(self, text):
for index, token, value in \
PythonLexer.get_tokens_unprocessed(self, text):
if token is Name and value in self.EXTRA_KEYWORDS:
yield index, Keyword.Pseudo, value
else:
yield index, token, value
def analyse_text(text):
ltext = text[:1000]
return (shebang_matches(text, r'pythonw?(3(\.\d)?)?') or
'import ' in ltext) \
and ('import numpy' in ltext or 'from numpy import' in ltext)
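# Usage sketch (editor's illustration, not part of the vendored module):
# run the lexer over a small NumPy snippet so that names such as ``zeros``
# come out as ``Keyword.Pseudo`` instead of plain ``Name``. TerminalFormatter
# is assumed to be importable from the vendored formatters package.
if __name__ == '__main__':
    from pip._vendor.pygments import highlight
    from pip._vendor.pygments.formatters import TerminalFormatter

    demo = 'import numpy\nprint(numpy.zeros(3))\n'
    print(highlight(demo, NumPyLexer(), TerminalFormatter()))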
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/lexers/python.py
|
Python
|
mit
| 53,376 |
"""
pygments.modeline
~~~~~~~~~~~~~~~~~
A simple modeline parser (based on pymodeline).
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
__all__ = ['get_filetype_from_buffer']
modeline_re = re.compile(r'''
(?: vi | vim | ex ) (?: [<=>]? \d* )? :
.* (?: ft | filetype | syn | syntax ) = ( [^:\s]+ )
''', re.VERBOSE)
def get_filetype_from_line(l):
m = modeline_re.search(l)
if m:
return m.group(1)
def get_filetype_from_buffer(buf, max_lines=5):
"""
Scan the buffer for modelines and return filetype if one is found.
"""
lines = buf.splitlines()
for l in lines[-1:-max_lines-1:-1]:
ret = get_filetype_from_line(l)
if ret:
return ret
for i in range(max_lines, -1, -1):
if i < len(lines):
ret = get_filetype_from_line(lines[i])
if ret:
return ret
return None
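# Usage sketch (editor's illustration): a buffer whose last line carries a
# Vim modeline yields the declared filetype; buffers without one yield None.
if __name__ == '__main__':
    source = 'x = 1\n# vim: set ft=python :\n'
    assert get_filetype_from_buffer(source) == 'python'
    assert get_filetype_from_buffer('no modeline here\n') is None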
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/modeline.py
|
Python
|
mit
| 986 |
"""
pygments.plugin
~~~~~~~~~~~~~~~
Pygments plugin interface. By default, this tries to use
``importlib.metadata``, which is in the Python standard
library since Python 3.8, or its ``importlib_metadata``
backport for earlier versions of Python. It falls back on
``pkg_resources`` if not found. Finally, if ``pkg_resources``
is not found either, no plugins are loaded at all.
lexer plugins::
[pygments.lexers]
yourlexer = yourmodule:YourLexer
formatter plugins::
[pygments.formatters]
yourformatter = yourformatter:YourFormatter
/.ext = yourformatter:YourFormatter
As you can see, you can define extensions for the formatter
with a leading slash.
syntax plugins::
[pygments.styles]
yourstyle = yourstyle:YourStyle
filter plugin::
[pygments.filter]
yourfilter = yourfilter:YourFilter
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'
def iter_entry_points(group_name):
try:
from importlib.metadata import entry_points
except ImportError:
try:
from importlib_metadata import entry_points
except ImportError:
try:
from pip._vendor.pkg_resources import iter_entry_points
except (ImportError, OSError):
return []
else:
return iter_entry_points(group_name)
groups = entry_points()
if hasattr(groups, 'select'):
# New interface in Python 3.10 and newer versions of the
# importlib_metadata backport.
return groups.select(group=group_name)
else:
# Older interface, deprecated in Python 3.10 and recent
# importlib_metadata, but we need it in Python 3.8 and 3.9.
return groups.get(group_name, [])
def find_plugin_lexers():
for entrypoint in iter_entry_points(LEXER_ENTRY_POINT):
yield entrypoint.load()
def find_plugin_formatters():
for entrypoint in iter_entry_points(FORMATTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_styles():
for entrypoint in iter_entry_points(STYLE_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_filters():
for entrypoint in iter_entry_points(FILTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
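# Usage sketch (editor's illustration): list whatever third-party lexers are
# registered under the ``pygments.lexers`` entry-point group. In a bare
# installation this normally prints nothing, since no plugins ship with pip.
if __name__ == '__main__':
    for lexer_cls in find_plugin_lexers():
        print(lexer_cls.__name__, getattr(lexer_cls, 'name', ''))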
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/plugin.py
|
Python
|
mit
| 2,591 |
"""
pygments.regexopt
~~~~~~~~~~~~~~~~~
An algorithm that generates optimized regexes for matching long lists of
literal strings.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from re import escape
from os.path import commonprefix
from itertools import groupby
from operator import itemgetter
CS_ESCAPE = re.compile(r'[\[\^\\\-\]]')
FIRST_ELEMENT = itemgetter(0)
def make_charset(letters):
return '[' + CS_ESCAPE.sub(lambda m: '\\' + m.group(), ''.join(letters)) + ']'
def regex_opt_inner(strings, open_paren):
"""Return a regex that matches any string in the sorted list of strings."""
close_paren = open_paren and ')' or ''
# print strings, repr(open_paren)
if not strings:
# print '-> nothing left'
return ''
first = strings[0]
if len(strings) == 1:
# print '-> only 1 string'
return open_paren + escape(first) + close_paren
if not first:
# print '-> first string empty'
return open_paren + regex_opt_inner(strings[1:], '(?:') \
+ '?' + close_paren
if len(first) == 1:
# multiple one-char strings? make a charset
oneletter = []
rest = []
for s in strings:
if len(s) == 1:
oneletter.append(s)
else:
rest.append(s)
if len(oneletter) > 1: # do we have more than one oneletter string?
if rest:
# print '-> 1-character + rest'
return open_paren + regex_opt_inner(rest, '') + '|' \
+ make_charset(oneletter) + close_paren
# print '-> only 1-character'
return open_paren + make_charset(oneletter) + close_paren
prefix = commonprefix(strings)
if prefix:
plen = len(prefix)
# we have a prefix for all strings
# print '-> prefix:', prefix
return open_paren + escape(prefix) \
+ regex_opt_inner([s[plen:] for s in strings], '(?:') \
+ close_paren
# is there a suffix?
strings_rev = [s[::-1] for s in strings]
suffix = commonprefix(strings_rev)
if suffix:
slen = len(suffix)
# print '-> suffix:', suffix[::-1]
return open_paren \
+ regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
+ escape(suffix[::-1]) + close_paren
# recurse on common 1-string prefixes
# print '-> last resort'
return open_paren + \
'|'.join(regex_opt_inner(list(group[1]), '')
for group in groupby(strings, lambda s: s[0] == first[0])) \
+ close_paren
def regex_opt(strings, prefix='', suffix=''):
"""Return a compiled regex that matches any string in the given list.
The strings to match must be literal strings, not regexes. They will be
regex-escaped.
*prefix* and *suffix* are pre- and appended to the final regex.
"""
strings = sorted(strings)
return prefix + regex_opt_inner(strings, '(') + suffix
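# Usage sketch (editor's illustration): regex_opt factors common prefixes and
# suffixes, so ['bar', 'baz', 'foo'] collapses to something like
# '(ba(?:[rz])|foo)'. The exact pattern is an implementation detail; only the
# set of matched strings is guaranteed.
if __name__ == '__main__':
    rx = re.compile(regex_opt(['bar', 'baz', 'foo']) + r'\b')
    assert all(rx.match(word) for word in ('bar', 'baz', 'foo'))
    assert not rx.match('ba')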
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/regexopt.py
|
Python
|
mit
| 3,072 |
"""
pygments.scanner
~~~~~~~~~~~~~~~~
This library implements a regex based scanner. Some languages
like Pascal are easy to parse but have some keywords that
depend on the context. Because of this it's impossible to lex
that just by using a regular expression lexer like the
`RegexLexer`.
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
class EndOfText(RuntimeError):
"""
    Raised if the end of the text is reached and the user
    tried to call a match function.
"""
class Scanner:
"""
    Simple scanner.
All method patterns are regular expression strings (not
compiled expressions!)
"""
def __init__(self, text, flags=0):
"""
:param text: The text which should be scanned
:param flags: default regular expression flags
"""
self.data = text
self.data_length = len(text)
self.start_pos = 0
self.pos = 0
self.flags = flags
self.last = None
self.match = None
self._re_cache = {}
def eos(self):
"""`True` if the scanner reached the end of text."""
return self.pos >= self.data_length
    eos = property(eos, doc=eos.__doc__)
def check(self, pattern):
"""
Apply `pattern` on the current position and return
the match object. (Doesn't touch pos). Use this for
lookahead.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
return self._re_cache[pattern].match(self.data, self.pos)
def test(self, pattern):
"""Apply a pattern on the current position and check
        if it matches. Doesn't touch pos.
"""
return self.check(pattern) is not None
def scan(self, pattern):
"""
Scan the text for the given pattern and update pos/match
and related fields. The return value is a boolean that
indicates if the pattern matched. The matched value is
stored on the instance as ``match``, the last value is
stored as ``last``. ``start_pos`` is the position of the
pointer before the pattern was matched, ``pos`` is the
end position.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
self.last = self.match
m = self._re_cache[pattern].match(self.data, self.pos)
if m is None:
return False
self.start_pos = m.start()
self.pos = m.end()
self.match = m.group()
return True
def get_char(self):
"""Scan exactly one char."""
self.scan('.')
def __repr__(self):
return '<%s %d/%d>' % (
self.__class__.__name__,
self.pos,
self.data_length
)
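# Usage sketch (editor's illustration): a tiny tokenizer built on Scanner.
# Each successful scan() advances ``pos`` and stores the matched text in
# ``match``; scanning past the end raises EndOfText.
if __name__ == '__main__':
    s = Scanner('let x = 42')
    tokens = []
    while not s.eos:
        if s.scan(r'\s+'):
            continue
        if s.scan(r'\w+') or s.scan(r'='):
            tokens.append(s.match)
    assert tokens == ['let', 'x', '=', '42']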
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/scanner.py
|
Python
|
mit
| 3,092 |
"""
pygments.sphinxext
~~~~~~~~~~~~~~~~~~
Sphinx extension to generate automatic documentation of lexers,
formatters and filters.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from docutils import nodes
from docutils.statemachine import ViewList
from docutils.parsers.rst import Directive
from sphinx.util.nodes import nested_parse_with_titles
MODULEDOC = '''
.. module:: %s
%s
%s
'''
LEXERDOC = '''
.. class:: %s
:Short names: %s
:Filenames: %s
:MIME types: %s
%s
'''
FMTERDOC = '''
.. class:: %s
:Short names: %s
:Filenames: %s
%s
'''
FILTERDOC = '''
.. class:: %s
:Name: %s
%s
'''
class PygmentsDoc(Directive):
"""
A directive to collect all lexers/formatters/filters and generate
autoclass directives for them.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self):
self.filenames = set()
if self.arguments[0] == 'lexers':
out = self.document_lexers()
elif self.arguments[0] == 'formatters':
out = self.document_formatters()
elif self.arguments[0] == 'filters':
out = self.document_filters()
else:
raise Exception('invalid argument for "pygmentsdoc" directive')
node = nodes.compound()
vl = ViewList(out.split('\n'), source='')
nested_parse_with_titles(self.state, vl, node)
for fn in self.filenames:
self.state.document.settings.record_dependencies.add(fn)
return node.children
def document_lexers(self):
from pip._vendor.pygments.lexers._mapping import LEXERS
out = []
modules = {}
moduledocstrings = {}
for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
module = data[0]
mod = __import__(module, None, None, [classname])
self.filenames.add(mod.__file__)
cls = getattr(mod, classname)
if not cls.__doc__:
print("Warning: %s does not have a docstring." % classname)
docstring = cls.__doc__
if isinstance(docstring, bytes):
docstring = docstring.decode('utf8')
modules.setdefault(module, []).append((
classname,
', '.join(data[2]) or 'None',
                ', '.join(data[3]).replace('*', '\\*').replace('_', '\\_') or 'None',
', '.join(data[4]) or 'None',
docstring))
if module not in moduledocstrings:
moddoc = mod.__doc__
if isinstance(moddoc, bytes):
moddoc = moddoc.decode('utf8')
moduledocstrings[module] = moddoc
for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
if moduledocstrings[module] is None:
raise Exception("Missing docstring for %s" % (module,))
heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
out.append(MODULEDOC % (module, heading, '-'*len(heading)))
for data in lexers:
out.append(LEXERDOC % data)
return ''.join(out)
def document_formatters(self):
from pip._vendor.pygments.formatters import FORMATTERS
out = []
for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]):
module = data[0]
mod = __import__(module, None, None, [classname])
self.filenames.add(mod.__file__)
cls = getattr(mod, classname)
docstring = cls.__doc__
if isinstance(docstring, bytes):
docstring = docstring.decode('utf8')
heading = cls.__name__
out.append(FMTERDOC % (heading, ', '.join(data[2]) or 'None',
', '.join(data[3]).replace('*', '\\*') or 'None',
docstring))
return ''.join(out)
def document_filters(self):
from pip._vendor.pygments.filters import FILTERS
out = []
for name, cls in FILTERS.items():
self.filenames.add(sys.modules[cls.__module__].__file__)
docstring = cls.__doc__
if isinstance(docstring, bytes):
docstring = docstring.decode('utf8')
out.append(FILTERDOC % (cls.__name__, name, docstring))
return ''.join(out)
def setup(app):
app.add_directive('pygmentsdoc', PygmentsDoc)
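# Usage sketch (editor's illustration): wiring this extension into a Sphinx
# project. In conf.py::
#
#     extensions = ['pygments.sphinxext']
#
# and then, in a reST document, one of::
#
#     .. pygmentsdoc:: lexers
#     .. pygmentsdoc:: formatters
#     .. pygmentsdoc:: filters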
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/sphinxext.py
|
Python
|
mit
| 4,630 |
"""
pygments.style
~~~~~~~~~~~~~~
Basic style object.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pip._vendor.pygments.token import Token, STANDARD_TYPES
# Default mapping of ansixxx to RGB colors.
_ansimap = {
# dark
'ansiblack': '000000',
'ansired': '7f0000',
'ansigreen': '007f00',
'ansiyellow': '7f7fe0',
'ansiblue': '00007f',
'ansimagenta': '7f007f',
'ansicyan': '007f7f',
'ansigray': 'e5e5e5',
# normal
'ansibrightblack': '555555',
'ansibrightred': 'ff0000',
'ansibrightgreen': '00ff00',
'ansibrightyellow': 'ffff00',
'ansibrightblue': '0000ff',
'ansibrightmagenta': 'ff00ff',
'ansibrightcyan': '00ffff',
'ansiwhite': 'ffffff',
}
# mapping of deprecated #ansixxx colors to new color names
_deprecated_ansicolors = {
# dark
'#ansiblack': 'ansiblack',
'#ansidarkred': 'ansired',
'#ansidarkgreen': 'ansigreen',
'#ansibrown': 'ansiyellow',
'#ansidarkblue': 'ansiblue',
'#ansipurple': 'ansimagenta',
'#ansiteal': 'ansicyan',
'#ansilightgray': 'ansigray',
# normal
'#ansidarkgray': 'ansibrightblack',
'#ansired': 'ansibrightred',
'#ansigreen': 'ansibrightgreen',
'#ansiyellow': 'ansibrightyellow',
'#ansiblue': 'ansibrightblue',
'#ansifuchsia': 'ansibrightmagenta',
'#ansiturquoise': 'ansibrightcyan',
'#ansiwhite': 'ansiwhite',
}
ansicolors = set(_ansimap)
class StyleMeta(type):
def __new__(mcs, name, bases, dct):
obj = type.__new__(mcs, name, bases, dct)
for token in STANDARD_TYPES:
if token not in obj.styles:
obj.styles[token] = ''
def colorformat(text):
if text in ansicolors:
return text
if text[0:1] == '#':
col = text[1:]
if len(col) == 6:
return col
elif len(col) == 3:
return col[0] * 2 + col[1] * 2 + col[2] * 2
elif text == '':
return ''
elif text.startswith('var') or text.startswith('calc'):
return text
assert False, "wrong color format %r" % text
_styles = obj._styles = {}
for ttype in obj.styles:
for token in ttype.split():
if token in _styles:
continue
ndef = _styles.get(token.parent, None)
styledefs = obj.styles.get(token, '').split()
if not ndef or token is None:
ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
elif 'noinherit' in styledefs and token is not Token:
ndef = _styles[Token][:]
else:
ndef = ndef[:]
_styles[token] = ndef
for styledef in obj.styles.get(token, '').split():
if styledef == 'noinherit':
pass
elif styledef == 'bold':
ndef[1] = 1
elif styledef == 'nobold':
ndef[1] = 0
elif styledef == 'italic':
ndef[2] = 1
elif styledef == 'noitalic':
ndef[2] = 0
elif styledef == 'underline':
ndef[3] = 1
elif styledef == 'nounderline':
ndef[3] = 0
elif styledef[:3] == 'bg:':
ndef[4] = colorformat(styledef[3:])
elif styledef[:7] == 'border:':
ndef[5] = colorformat(styledef[7:])
elif styledef == 'roman':
ndef[6] = 1
elif styledef == 'sans':
ndef[7] = 1
elif styledef == 'mono':
ndef[8] = 1
else:
ndef[0] = colorformat(styledef)
return obj
def style_for_token(cls, token):
t = cls._styles[token]
ansicolor = bgansicolor = None
color = t[0]
if color in _deprecated_ansicolors:
color = _deprecated_ansicolors[color]
if color in ansicolors:
ansicolor = color
color = _ansimap[color]
bgcolor = t[4]
if bgcolor in _deprecated_ansicolors:
bgcolor = _deprecated_ansicolors[bgcolor]
if bgcolor in ansicolors:
bgansicolor = bgcolor
bgcolor = _ansimap[bgcolor]
return {
'color': color or None,
'bold': bool(t[1]),
'italic': bool(t[2]),
'underline': bool(t[3]),
'bgcolor': bgcolor or None,
'border': t[5] or None,
'roman': bool(t[6]) or None,
'sans': bool(t[7]) or None,
'mono': bool(t[8]) or None,
'ansicolor': ansicolor,
'bgansicolor': bgansicolor,
}
def list_styles(cls):
return list(cls)
def styles_token(cls, ttype):
return ttype in cls._styles
def __iter__(cls):
for token in cls._styles:
yield token, cls.style_for_token(token)
def __len__(cls):
return len(cls._styles)
class Style(metaclass=StyleMeta):
#: overall background color (``None`` means transparent)
background_color = '#ffffff'
#: highlight background color
highlight_color = '#ffffcc'
#: line number font color
line_number_color = 'inherit'
#: line number background color
line_number_background_color = 'transparent'
#: special line number font color
line_number_special_color = '#000000'
#: special line number background color
line_number_special_background_color = '#ffffc0'
#: Style definitions for individual token types.
styles = {}
# Attribute for lexers defined within Pygments. If set
# to True, the style is not shown in the style gallery
# on the website. This is intended for language-specific
# styles.
web_style_gallery_exclude = False
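# Usage sketch (editor's illustration): a minimal custom style. StyleMeta
# expands the shorthand strings below, so style_for_token() hands back a
# fully populated dict for any token type.
if __name__ == '__main__':
    from pip._vendor.pygments.token import Comment, Keyword

    class DemoStyle(Style):
        styles = {
            Comment: 'italic #888888',
            Keyword: 'bold #004461',
        }

    entry = DemoStyle.style_for_token(Keyword)
    assert entry['color'] == '004461' and entry['bold'] and not entry['italic']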
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/style.py
|
Python
|
mit
| 6,257 |
"""
pygments.styles
~~~~~~~~~~~~~~~
Contains built-in styles.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pip._vendor.pygments.plugin import find_plugin_styles
from pip._vendor.pygments.util import ClassNotFound
#: Maps style names to 'submodule::classname'.
STYLE_MAP = {
'default': 'default::DefaultStyle',
'emacs': 'emacs::EmacsStyle',
'friendly': 'friendly::FriendlyStyle',
'friendly_grayscale': 'friendly_grayscale::FriendlyGrayscaleStyle',
'colorful': 'colorful::ColorfulStyle',
'autumn': 'autumn::AutumnStyle',
'murphy': 'murphy::MurphyStyle',
'manni': 'manni::ManniStyle',
'material': 'material::MaterialStyle',
'monokai': 'monokai::MonokaiStyle',
'perldoc': 'perldoc::PerldocStyle',
'pastie': 'pastie::PastieStyle',
'borland': 'borland::BorlandStyle',
'trac': 'trac::TracStyle',
'native': 'native::NativeStyle',
'fruity': 'fruity::FruityStyle',
'bw': 'bw::BlackWhiteStyle',
'vim': 'vim::VimStyle',
'vs': 'vs::VisualStudioStyle',
'tango': 'tango::TangoStyle',
'rrt': 'rrt::RrtStyle',
'xcode': 'xcode::XcodeStyle',
'igor': 'igor::IgorStyle',
'paraiso-light': 'paraiso_light::ParaisoLightStyle',
'paraiso-dark': 'paraiso_dark::ParaisoDarkStyle',
'lovelace': 'lovelace::LovelaceStyle',
'algol': 'algol::AlgolStyle',
'algol_nu': 'algol_nu::Algol_NuStyle',
'arduino': 'arduino::ArduinoStyle',
'rainbow_dash': 'rainbow_dash::RainbowDashStyle',
'abap': 'abap::AbapStyle',
'solarized-dark': 'solarized::SolarizedDarkStyle',
'solarized-light': 'solarized::SolarizedLightStyle',
'sas': 'sas::SasStyle',
'staroffice' : 'staroffice::StarofficeStyle',
'stata': 'stata_light::StataLightStyle',
'stata-light': 'stata_light::StataLightStyle',
'stata-dark': 'stata_dark::StataDarkStyle',
'inkpot': 'inkpot::InkPotStyle',
'zenburn': 'zenburn::ZenburnStyle',
'gruvbox-dark': 'gruvbox::GruvboxDarkStyle',
'gruvbox-light': 'gruvbox::GruvboxLightStyle',
'dracula': 'dracula::DraculaStyle',
'one-dark': 'onedark::OneDarkStyle',
'lilypond' : 'lilypond::LilyPondStyle',
'nord': 'nord::NordStyle',
'nord-darker': 'nord::NordDarkerStyle',
'github-dark': 'gh_dark::GhDarkStyle'
}
def get_style_by_name(name):
if name in STYLE_MAP:
mod, cls = STYLE_MAP[name].split('::')
builtin = "yes"
else:
for found_name, style in find_plugin_styles():
if name == found_name:
return style
# perhaps it got dropped into our styles package
builtin = ""
mod = name
cls = name.title() + "Style"
try:
mod = __import__('pygments.styles.' + mod, None, None, [cls])
except ImportError:
raise ClassNotFound("Could not find style module %r" % mod +
(builtin and ", though it should be builtin") + ".")
try:
return getattr(mod, cls)
except AttributeError:
raise ClassNotFound("Could not find style class %r in style module." % cls)
def get_all_styles():
"""Return a generator for all styles by name,
both builtin and plugin."""
yield from STYLE_MAP
for name, _ in find_plugin_styles():
yield name
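# Usage sketch (editor's illustration): enumerate the built-in style names.
# Note that get_style_by_name() imports from the top-level ``pygments.styles``
# package, so actually resolving a class assumes a regular pygments install;
# the name listing below works inside the vendored tree as well.
if __name__ == '__main__':
    names = sorted(get_all_styles())
    assert 'default' in names and 'monokai' in names
    print(', '.join(names[:5]))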
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/styles/__init__.py
|
Python
|
mit
| 3,419 |
"""
pygments.token
~~~~~~~~~~~~~~
Basic token types and the standard tokens.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class _TokenType(tuple):
parent = None
def split(self):
buf = []
node = self
while node is not None:
buf.append(node)
node = node.parent
buf.reverse()
return buf
def __init__(self, *args):
# no need to call super.__init__
self.subtypes = set()
def __contains__(self, val):
return self is val or (
type(val) is self.__class__ and
val[:len(self)] == self
)
def __getattr__(self, val):
if not val or not val[0].isupper():
return tuple.__getattribute__(self, val)
new = _TokenType(self + (val,))
setattr(self, val, new)
self.subtypes.add(new)
new.parent = self
return new
def __repr__(self):
return 'Token' + (self and '.' or '') + '.'.join(self)
def __copy__(self):
# These instances are supposed to be singletons
return self
def __deepcopy__(self, memo):
# These instances are supposed to be singletons
return self
Token = _TokenType()
# Special token types
Text = Token.Text
Whitespace = Text.Whitespace
Escape = Token.Escape
Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
Other = Token.Other
# Common token types for source code
Keyword = Token.Keyword
Name = Token.Name
Literal = Token.Literal
String = Literal.String
Number = Literal.Number
Punctuation = Token.Punctuation
Operator = Token.Operator
Comment = Token.Comment
# Generic types for non-source code
Generic = Token.Generic
# String and some others are not direct children of Token.
# alias them:
Token.Token = Token
Token.String = String
Token.Number = Number
def is_token_subtype(ttype, other):
"""
Return True if ``ttype`` is a subtype of ``other``.
    Exists for backwards compatibility; use ``ttype in other`` instead.
"""
return ttype in other
def string_to_tokentype(s):
"""
Convert a string into a token type::
        >>> string_to_tokentype('String.Double')
        Token.Literal.String.Double
        >>> string_to_tokentype('Token.Literal.Number')
        Token.Literal.Number
        >>> string_to_tokentype('')
        Token
    Tokens that are already tokens are returned unchanged:
        >>> string_to_tokentype(String)
Token.Literal.String
"""
if isinstance(s, _TokenType):
return s
if not s:
return Token
node = Token
for item in s.split('.'):
node = getattr(node, item)
return node
# Map standard token types to short names, used in CSS class naming.
# If you add a new item, please be sure to run this file to perform
# a consistency check for duplicate values.
STANDARD_TYPES = {
Token: '',
Text: '',
Whitespace: 'w',
Escape: 'esc',
Error: 'err',
Other: 'x',
Keyword: 'k',
Keyword.Constant: 'kc',
Keyword.Declaration: 'kd',
Keyword.Namespace: 'kn',
Keyword.Pseudo: 'kp',
Keyword.Reserved: 'kr',
Keyword.Type: 'kt',
Name: 'n',
Name.Attribute: 'na',
Name.Builtin: 'nb',
Name.Builtin.Pseudo: 'bp',
Name.Class: 'nc',
Name.Constant: 'no',
Name.Decorator: 'nd',
Name.Entity: 'ni',
Name.Exception: 'ne',
Name.Function: 'nf',
Name.Function.Magic: 'fm',
Name.Property: 'py',
Name.Label: 'nl',
Name.Namespace: 'nn',
Name.Other: 'nx',
Name.Tag: 'nt',
Name.Variable: 'nv',
Name.Variable.Class: 'vc',
Name.Variable.Global: 'vg',
Name.Variable.Instance: 'vi',
Name.Variable.Magic: 'vm',
Literal: 'l',
Literal.Date: 'ld',
String: 's',
String.Affix: 'sa',
String.Backtick: 'sb',
String.Char: 'sc',
String.Delimiter: 'dl',
String.Doc: 'sd',
String.Double: 's2',
String.Escape: 'se',
String.Heredoc: 'sh',
String.Interpol: 'si',
String.Other: 'sx',
String.Regex: 'sr',
String.Single: 's1',
String.Symbol: 'ss',
Number: 'm',
Number.Bin: 'mb',
Number.Float: 'mf',
Number.Hex: 'mh',
Number.Integer: 'mi',
Number.Integer.Long: 'il',
Number.Oct: 'mo',
Operator: 'o',
Operator.Word: 'ow',
Punctuation: 'p',
Punctuation.Marker: 'pm',
Comment: 'c',
Comment.Hashbang: 'ch',
Comment.Multiline: 'cm',
Comment.Preproc: 'cp',
Comment.PreprocFile: 'cpf',
Comment.Single: 'c1',
Comment.Special: 'cs',
Generic: 'g',
Generic.Deleted: 'gd',
Generic.Emph: 'ge',
Generic.Error: 'gr',
Generic.Heading: 'gh',
Generic.Inserted: 'gi',
Generic.Output: 'go',
Generic.Prompt: 'gp',
Generic.Strong: 'gs',
Generic.Subheading: 'gu',
Generic.Traceback: 'gt',
}
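# Consistency sketch (editor's illustration): the note above STANDARD_TYPES
# asks for a duplicate-value check when new short names are added; this is
# one way to run it, plus a demonstration of token containment and the
# singleton behaviour of token types.
if __name__ == '__main__':
    from collections import Counter
    counts = Counter(v for v in STANDARD_TYPES.values() if v)
    dupes = [v for v, n in counts.items() if n > 1]
    assert not dupes, 'duplicate short names: %r' % dupes
    assert String.Double in String and String in Token
    assert string_to_tokentype('String.Double') is String.Double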
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/token.py
|
Python
|
mit
| 6,184 |
"""
pygments.util
~~~~~~~~~~~~~
Utility functions.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from io import TextIOWrapper
split_path_re = re.compile(r'[/\\ ]')
doctype_lookup_re = re.compile(r'''
<!DOCTYPE\s+(
[a-zA-Z_][a-zA-Z0-9]*
(?: \s+ # optional in HTML5
[a-zA-Z_][a-zA-Z0-9]*\s+
"[^"]*")?
)
[^>]*>
''', re.DOTALL | re.MULTILINE | re.VERBOSE)
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>',
re.IGNORECASE | re.DOTALL | re.MULTILINE)
xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)
class ClassNotFound(ValueError):
"""Raised if one of the lookup functions didn't find a matching class."""
class OptionError(Exception):
pass
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
string = options.get(optname, default)
if normcase:
string = string.lower()
if string not in allowed:
raise OptionError('Value for option %s must be one of %s' %
(optname, ', '.join(map(str, allowed))))
return string
def get_bool_opt(options, optname, default=None):
string = options.get(optname, default)
if isinstance(string, bool):
return string
elif isinstance(string, int):
return bool(string)
elif not isinstance(string, str):
raise OptionError('Invalid type %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
elif string.lower() in ('1', 'yes', 'true', 'on'):
return True
elif string.lower() in ('0', 'no', 'false', 'off'):
return False
else:
raise OptionError('Invalid value %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
def get_int_opt(options, optname, default=None):
string = options.get(optname, default)
try:
return int(string)
except TypeError:
raise OptionError('Invalid type %r for option %s; you '
'must give an integer value' % (
string, optname))
except ValueError:
raise OptionError('Invalid value %r for option %s; you '
'must give an integer value' % (
string, optname))
def get_list_opt(options, optname, default=None):
val = options.get(optname, default)
if isinstance(val, str):
return val.split()
elif isinstance(val, (list, tuple)):
return list(val)
else:
raise OptionError('Invalid type %r for option %s; you '
'must give a list value' % (
val, optname))
def docstring_headline(obj):
if not obj.__doc__:
return ''
res = []
for line in obj.__doc__.strip().splitlines():
if line.strip():
res.append(" " + line.strip())
else:
break
return ''.join(res).lstrip()
def make_analysator(f):
"""Return a static text analyser function that returns float values."""
def text_analyse(text):
try:
rv = f(text)
except Exception:
return 0.0
if not rv:
return 0.0
try:
return min(1.0, max(0.0, float(rv)))
except (ValueError, TypeError):
return 0.0
text_analyse.__doc__ = f.__doc__
return staticmethod(text_analyse)
def shebang_matches(text, regex):
r"""Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
    Note that this method automatically searches the whole string (i.e.
    the regular expression is wrapped in ``'^$'``).
"""
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except IndexError:
return False
regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
if regex.search(found) is not None:
return True
return False
def doctype_matches(text, regex):
"""Check if the doctype matches a regular expression (if present).
Note that this method only checks the first part of a DOCTYPE.
    e.g. 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
"""
m = doctype_lookup_re.search(text)
if m is None:
return False
doctype = m.group(1)
return re.compile(regex, re.I).match(doctype.strip()) is not None
def html_doctype_matches(text):
"""Check if the file looks like it has a html doctype."""
return doctype_matches(text, r'html')
_looks_like_xml_cache = {}
def looks_like_xml(text):
"""Check if a doctype exists or if we have some tags."""
if xml_decl_re.match(text):
return True
key = hash(text)
try:
return _looks_like_xml_cache[key]
except KeyError:
m = doctype_lookup_re.search(text)
if m is not None:
return True
rv = tag_re.search(text[:1000]) is not None
_looks_like_xml_cache[key] = rv
return rv
def surrogatepair(c):
"""Given a unicode character code with length greater than 16 bits,
return the two 16 bit surrogate pair.
"""
# From example D28 of:
# http://www.unicode.org/book/ch03.pdf
return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))
def format_lines(var_name, seq, raw=False, indent_level=0):
"""Formats a sequence of strings for output."""
lines = []
base_indent = ' ' * indent_level * 4
inner_indent = ' ' * (indent_level + 1) * 4
lines.append(base_indent + var_name + ' = (')
if raw:
# These should be preformatted reprs of, say, tuples.
for i in seq:
lines.append(inner_indent + i + ',')
else:
for i in seq:
# Force use of single quotes
r = repr(i + '"')
lines.append(inner_indent + r[:-2] + r[-1] + ',')
lines.append(base_indent + ')')
return '\n'.join(lines)
def duplicates_removed(it, already_seen=()):
"""
Returns a list with duplicates removed from the iterable `it`.
Order is preserved.
"""
lst = []
seen = set()
for i in it:
if i in seen or i in already_seen:
continue
lst.append(i)
seen.add(i)
return lst
class Future:
"""Generic class to defer some work.
Handled specially in RegexLexerMeta, to support regex string construction at
first use.
"""
def get(self):
raise NotImplementedError
def guess_decode(text):
"""Decode *text* with guessed encoding.
First try UTF-8; this should fail for non-UTF-8 encodings.
Then try the preferred locale encoding.
Fall back to latin-1, which always works.
"""
try:
text = text.decode('utf-8')
return text, 'utf-8'
except UnicodeDecodeError:
try:
import locale
prefencoding = locale.getpreferredencoding()
            text = text.decode(prefencoding)
return text, prefencoding
except (UnicodeDecodeError, LookupError):
text = text.decode('latin1')
return text, 'latin1'
def guess_decode_from_terminal(text, term):
"""Decode *text* coming from terminal *term*.
First try the terminal encoding, if given.
Then try UTF-8. Then try the preferred locale encoding.
Fall back to latin-1, which always works.
"""
if getattr(term, 'encoding', None):
try:
text = text.decode(term.encoding)
except UnicodeDecodeError:
pass
else:
return text, term.encoding
return guess_decode(text)
def terminal_encoding(term):
"""Return our best guess of encoding for the given *term*."""
if getattr(term, 'encoding', None):
return term.encoding
import locale
return locale.getpreferredencoding()
class UnclosingTextIOWrapper(TextIOWrapper):
# Don't close underlying buffer on destruction.
def close(self):
self.flush()
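# Usage sketch (editor's illustration): the get_*_opt helpers normalize the
# loosely typed option dicts that lexers and formatters receive.
if __name__ == '__main__':
    opts = {'linenos': 'yes', 'wrap': '80', 'styles': 'default monokai'}
    assert get_bool_opt(opts, 'linenos', False) is True
    assert get_int_opt(opts, 'wrap', 0) == 80
    assert get_list_opt(opts, 'styles') == ['default', 'monokai']
    assert shebang_matches('#!/usr/bin/env python3', r'python(3(\.\d)?)?')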
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pygments/util.py
|
Python
|
mit
| 9,110 |
# module pyparsing.py
#
# Copyright (c) 2003-2022 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================
The pyparsing module is an alternative approach to creating and
executing simple grammars, vs. the traditional lex/yacc approach, or the
use of regular expressions. With pyparsing, you don't need to learn
a new syntax for defining grammars or matching expressions - the parsing
module provides a library of classes that you use to construct the
grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
``"<salutation>, <addressee>!"``), built up using :class:`Word`,
:class:`Literal`, and :class:`And` elements
(the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
and the strings are auto-converted to :class:`Literal` expressions)::
from pip._vendor.pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print(hello, "->", greet.parse_string(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the
self-explanatory class names, and the use of :class:`'+'<And>`,
:class:`'|'<MatchFirst>`, :class:`'^'<Or>` and :class:`'&'<Each>` operators.
The :class:`ParseResults` object returned from
:class:`ParserElement.parseString` can be
accessed as a nested list, a dictionary, or an object with named
attributes.
The pyparsing module handles some of the problems that are typically
vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle
"Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
Getting Started -
-----------------
Visit the classes :class:`ParserElement` and :class:`ParseResults` to
see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
- construct literal match expressions from :class:`Literal` and
:class:`CaselessLiteral` classes
- construct character word-group expressions using the :class:`Word`
class
- see how to create repetitive expressions using :class:`ZeroOrMore`
and :class:`OneOrMore` classes
- use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
and :class:`'&'<Each>` operators to combine simple expressions into
more complex ones
- associate names with your parsed results using
:class:`ParserElement.setResultsName`
- access the parsed data, which is returned as a :class:`ParseResults`
object
- find some helpful expression short-cuts like :class:`delimitedList`
and :class:`oneOf`
- find more useful common expressions in the :class:`pyparsing_common`
namespace class
"""
from typing import NamedTuple
class version_info(NamedTuple):
major: int
minor: int
micro: int
releaselevel: str
serial: int
@property
def __version__(self):
return (
"{}.{}.{}".format(self.major, self.minor, self.micro)
+ (
"{}{}{}".format(
"r" if self.releaselevel[0] == "c" else "",
self.releaselevel[0],
self.serial,
),
"",
)[self.releaselevel == "final"]
)
def __str__(self):
return "{} {} / {}".format(__name__, self.__version__, __version_time__)
def __repr__(self):
return "{}.{}({})".format(
__name__,
type(self).__name__,
", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)),
)
__version_info__ = version_info(3, 0, 9, "final", 0)
__version_time__ = "05 May 2022 07:02 UTC"
__version__ = __version_info__.__version__
__versionTime__ = __version_time__
__author__ = "Paul McGuire <ptmcg.gm+pyparsing@gmail.com>"
from .util import *
from .exceptions import *
from .actions import *
from .core import __diag__, __compat__
from .results import *
from .core import *
from .core import _builtin_exprs as core_builtin_exprs
from .helpers import *
from .helpers import _builtin_exprs as helper_builtin_exprs
from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
from .testing import pyparsing_test as testing
from .common import (
pyparsing_common as common,
_builtin_exprs as common_builtin_exprs,
)
# define backward compat synonyms
if "pyparsing_unicode" not in globals():
pyparsing_unicode = unicode
if "pyparsing_common" not in globals():
pyparsing_common = common
if "pyparsing_test" not in globals():
pyparsing_test = testing
core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
__all__ = [
"__version__",
"__version_time__",
"__author__",
"__compat__",
"__diag__",
"And",
"AtLineStart",
"AtStringStart",
"CaselessKeyword",
"CaselessLiteral",
"CharsNotIn",
"Combine",
"Dict",
"Each",
"Empty",
"FollowedBy",
"Forward",
"GoToColumn",
"Group",
"IndentedBlock",
"Keyword",
"LineEnd",
"LineStart",
"Literal",
"Located",
"PrecededBy",
"MatchFirst",
"NoMatch",
"NotAny",
"OneOrMore",
"OnlyOnce",
"OpAssoc",
"Opt",
"Optional",
"Or",
"ParseBaseException",
"ParseElementEnhance",
"ParseException",
"ParseExpression",
"ParseFatalException",
"ParseResults",
"ParseSyntaxException",
"ParserElement",
"PositionToken",
"QuotedString",
"RecursiveGrammarException",
"Regex",
"SkipTo",
"StringEnd",
"StringStart",
"Suppress",
"Token",
"TokenConverter",
"White",
"Word",
"WordEnd",
"WordStart",
"ZeroOrMore",
"Char",
"alphanums",
"alphas",
"alphas8bit",
"any_close_tag",
"any_open_tag",
"c_style_comment",
"col",
"common_html_entity",
"counted_array",
"cpp_style_comment",
"dbl_quoted_string",
"dbl_slash_comment",
"delimited_list",
"dict_of",
"empty",
"hexnums",
"html_comment",
"identchars",
"identbodychars",
"java_style_comment",
"line",
"line_end",
"line_start",
"lineno",
"make_html_tags",
"make_xml_tags",
"match_only_at_col",
"match_previous_expr",
"match_previous_literal",
"nested_expr",
"null_debug_action",
"nums",
"one_of",
"printables",
"punc8bit",
"python_style_comment",
"quoted_string",
"remove_quotes",
"replace_with",
"replace_html_entity",
"rest_of_line",
"sgl_quoted_string",
"srange",
"string_end",
"string_start",
"trace_parse_action",
"unicode_string",
"with_attribute",
"indentedBlock",
"original_text_for",
"ungroup",
"infix_notation",
"locatedExpr",
"with_class",
"CloseMatch",
"token_map",
"pyparsing_common",
"pyparsing_unicode",
"unicode_set",
"condition_as_parse_action",
"pyparsing_test",
# pre-PEP8 compatibility names
"__versionTime__",
"anyCloseTag",
"anyOpenTag",
"cStyleComment",
"commonHTMLEntity",
"countedArray",
"cppStyleComment",
"dblQuotedString",
"dblSlashComment",
"delimitedList",
"dictOf",
"htmlComment",
"javaStyleComment",
"lineEnd",
"lineStart",
"makeHTMLTags",
"makeXMLTags",
"matchOnlyAtCol",
"matchPreviousExpr",
"matchPreviousLiteral",
"nestedExpr",
"nullDebugAction",
"oneOf",
"opAssoc",
"pythonStyleComment",
"quotedString",
"removeQuotes",
"replaceHTMLEntity",
"replaceWith",
"restOfLine",
"sglQuotedString",
"stringEnd",
"stringStart",
"traceParseAction",
"unicodeString",
"withAttribute",
"indentedBlock",
"originalTextFor",
"infixNotation",
"locatedExpr",
"withClass",
"tokenMap",
"conditionAsParseAction",
"autoname_elements",
]
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pyparsing/__init__.py
|
Python
|
mit
| 9,171 |
# actions.py
from .exceptions import ParseException
from .util import col
class OnlyOnce:
"""
Wrapper for parse actions, to ensure they are only called once.
"""
def __init__(self, method_call):
from .core import _trim_arity
self.callable = _trim_arity(method_call)
self.called = False
def __call__(self, s, l, t):
if not self.called:
results = self.callable(s, l, t)
self.called = True
return results
raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset")
def reset(self):
"""
Allow the associated parse action to be called once more.
"""
self.called = False
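# Usage sketch (editor's illustration): OnlyOnce guards a parse action so a
# second invocation fails loudly until reset() is called, e.g.::
#
#     from pip._vendor.pyparsing import Word, nums
#     once = OnlyOnce(lambda t: int(t[0]))
#     num = Word(nums).set_parse_action(once)
#     num.parse_string('42')    # -> [42]
#     num.parse_string('43')    # raises ParseException
#     once.reset()
#     num.parse_string('43')    # -> [43]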
def match_only_at_col(n):
"""
Helper method for defining parse actions that require matching at
a specific column in the input text.
"""
def verify_col(strg, locn, toks):
if col(locn, strg) != n:
raise ParseException(strg, locn, "matched token not at column {}".format(n))
return verify_col
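# Usage sketch (editor's illustration): reject matches that do not start in
# a given (1-based) column::
#
#     from pip._vendor.pyparsing import Word, alphas
#     first_col = Word(alphas).add_parse_action(match_only_at_col(1))
#     first_col.parse_string('ok')        # matches at column 1
#     first_col.parse_string('   no')     # raises ParseException: the match
#                                         # starts at column 4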
def replace_with(repl_str):
"""
Helper method for common parse actions that simply return
a literal value. Especially useful when used with
:class:`transform_string<ParserElement.transform_string>` ().
Example::
num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
term = na | num
term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
"""
return lambda s, l, t: [repl_str]
def remove_quotes(s, l, t):
"""
Helper parse action for removing quotation marks from parsed
quoted strings.
Example::
# by default, quotation marks are included in parsed results
quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
# use remove_quotes to strip quotation marks from parsed results
quoted_string.set_parse_action(remove_quotes)
quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
"""
return t[0][1:-1]
def with_attribute(*args, **attr_dict):
"""
Helper to create a validating parse action to be used with start
tags created with :class:`make_xml_tags` or
:class:`make_html_tags`. Use ``with_attribute`` to qualify
a starting tag with a required attribute value, to avoid false
matches on common tags such as ``<TD>`` or ``<DIV>``.
Call ``with_attribute`` with a series of attribute names and
    values. Specify the list of filter attribute names and values as:
- keyword arguments, as in ``(align="right")``, or
- as an explicit dict with ``**`` operator, when an attribute
name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
- a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
For attribute names with a namespace prefix, you must use the second
form. Attribute names are matched insensitive to upper/lower case.
If just testing for ``class`` (with or without a namespace), use
:class:`with_class`.
To verify that the attribute exists, but without specifying a value,
pass ``with_attribute.ANY_VALUE`` as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = make_html_tags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().set_parse_action(with_attribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.search_string(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.search_string(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
if args:
attrs = args[:]
else:
attrs = attr_dict.items()
attrs = [(k, v) for k, v in attrs]
def pa(s, l, tokens):
for attrName, attrValue in attrs:
if attrName not in tokens:
raise ParseException(s, l, "no matching attribute " + attrName)
if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(
s,
l,
"attribute {!r} has value {!r}, must be {!r}".format(
attrName, tokens[attrName], attrValue
),
)
return pa
with_attribute.ANY_VALUE = object()
def with_class(classname, namespace=""):
"""
Simplified version of :class:`with_attribute` when
matching on a div class - made difficult because ``class`` is
a reserved word in Python.
Example::
html = '''
<div>
Some text
<div class="grid">1 4 0 1 0</div>
<div class="graph">1,3 2,3 1,1</div>
<div>this <div> has no class</div>
</div>
'''
div,div_end = make_html_tags("div")
div_grid = div().set_parse_action(with_class("grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.search_string(html):
print(grid_header.body)
        div_any_type = div().set_parse_action(with_class(with_attribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.search_string(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
classattr = "{}:class".format(namespace) if namespace else "class"
return with_attribute(**{classattr: classname})
# pre-PEP8 compatibility symbols
replaceWith = replace_with
removeQuotes = remove_quotes
withAttribute = with_attribute
withClass = with_class
matchOnlyAtCol = match_only_at_col
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pyparsing/actions.py
|
Python
|
mit
| 6,426 |
# common.py
from .core import *
from .helpers import delimited_list, any_open_tag, any_close_tag
from datetime import datetime
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
"""Here are some common low-level expressions that may be useful in
jump-starting parser development:
- numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
:class:`scientific notation<sci_real>`)
- common :class:`programming identifiers<identifier>`
- network addresses (:class:`MAC<mac_address>`,
:class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
- ISO8601 :class:`dates<iso8601_date>` and
:class:`datetime<iso8601_datetime>`
- :class:`UUID<uuid>`
- :class:`comma-separated list<comma_separated_list>`
- :class:`url`
Parse actions:
- :class:`convertToInteger`
- :class:`convertToFloat`
- :class:`convertToDate`
- :class:`convertToDatetime`
- :class:`stripHTMLTags`
- :class:`upcaseTokens`
- :class:`downcaseTokens`
Example::
pyparsing_common.number.runTests('''
# any int or real number, returned as the appropriate type
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.fnumber.runTests('''
# any int or real number, returned as float
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.hex_integer.runTests('''
# hex numbers
100
FF
''')
pyparsing_common.fraction.runTests('''
# fractions
1/2
-3/4
''')
pyparsing_common.mixed_integer.runTests('''
# mixed fractions
1
1/2
-3/4
1-3/4
''')
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests('''
# uuid
12345678-1234-5678-1234-567812345678
''')
prints::
# any int or real number, returned as the appropriate type
100
[100]
-100
[-100]
+100
[100]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# any int or real number, returned as float
100
[100.0]
-100
[-100.0]
+100
[100.0]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# hex numbers
100
[256]
FF
[255]
# fractions
1/2
[0.5]
-3/4
[-0.75]
# mixed fractions
1
[1]
1/2
[0.5]
-3/4
[-0.75]
1-3/4
[1.75]
# uuid
12345678-1234-5678-1234-567812345678
[UUID('12345678-1234-5678-1234-567812345678')]
"""
convert_to_integer = token_map(int)
"""
Parse action for converting parsed integers to Python int
"""
convert_to_float = token_map(float)
"""
Parse action for converting parsed numbers to Python float
"""
integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer)
"""expression that parses an unsigned integer, returns an int"""
hex_integer = (
Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16))
)
"""expression that parses a hexadecimal integer, returns an int"""
signed_integer = (
Regex(r"[+-]?\d+")
.set_name("signed integer")
.set_parse_action(convert_to_integer)
)
"""expression that parses an integer with optional leading sign, returns an int"""
fraction = (
signed_integer().set_parse_action(convert_to_float)
+ "/"
+ signed_integer().set_parse_action(convert_to_float)
).set_name("fraction")
"""fractional expression of an integer divided by an integer, returns a float"""
fraction.add_parse_action(lambda tt: tt[0] / tt[-1])
mixed_integer = (
fraction | signed_integer + Opt(Opt("-").suppress() + fraction)
).set_name("fraction or mixed integer-fraction")
"""mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
mixed_integer.add_parse_action(sum)
real = (
Regex(r"[+-]?(?:\d+\.\d*|\.\d+)")
.set_name("real number")
.set_parse_action(convert_to_float)
)
"""expression that parses a floating point number and returns a float"""
sci_real = (
Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)")
.set_name("real number with scientific notation")
.set_parse_action(convert_to_float)
)
"""expression that parses a floating point number with optional
scientific notation and returns a float"""
# streamlining this expression makes the docs nicer-looking
    number = (sci_real | real | signed_integer).set_name("number").streamline()
"""any numeric expression, returns the corresponding Python type"""
fnumber = (
Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?")
.set_name("fnumber")
.set_parse_action(convert_to_float)
)
"""any int or real number, returned as float"""
identifier = Word(identchars, identbodychars).set_name("identifier")
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
ipv4_address = Regex(
r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
).set_name("IPv4 address")
"IPv4 address (``0.0.0.0 - 255.255.255.255``)"
_ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer")
_full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name(
"full IPv6 address"
)
_short_ipv6_address = (
Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
+ "::"
+ Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
).set_name("short IPv6 address")
_short_ipv6_address.add_condition(
lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
)
_mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address")
ipv6_address = Combine(
(_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name(
"IPv6 address"
)
).set_name("IPv6 address")
"IPv6 address (long, short, or mixed form)"
mac_address = Regex(
r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
).set_name("MAC address")
"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
@staticmethod
def convert_to_date(fmt: str = "%Y-%m-%d"):
"""
Helper to create a parse action for converting parsed date string to Python datetime.date
Params -
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
Example::
date_expr = pyparsing_common.iso8601_date.copy()
date_expr.setParseAction(pyparsing_common.convertToDate())
print(date_expr.parseString("1999-12-31"))
prints::
[datetime.date(1999, 12, 31)]
"""
def cvt_fn(ss, ll, tt):
try:
return datetime.strptime(tt[0], fmt).date()
except ValueError as ve:
raise ParseException(ss, ll, str(ve))
return cvt_fn
@staticmethod
def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"):
"""Helper to create a parse action for converting parsed
datetime string to Python datetime.datetime
Params -
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
Example::
dt_expr = pyparsing_common.iso8601_datetime.copy()
dt_expr.setParseAction(pyparsing_common.convertToDatetime())
print(dt_expr.parseString("1999-12-31T23:59:59.999"))
prints::
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
"""
def cvt_fn(s, l, t):
try:
return datetime.strptime(t[0], fmt)
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
iso8601_date = Regex(
r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
).set_name("ISO8601 date")
"ISO8601 date (``yyyy-mm-dd``)"
iso8601_datetime = Regex(
r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
).set_name("ISO8601 datetime")
"ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID")
"UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
_html_stripper = any_open_tag.suppress() | any_close_tag.suppress()
@staticmethod
def strip_html_tags(s: str, l: int, tokens: ParseResults):
"""Parse action to remove HTML tags from web page HTML source
Example::
# strip HTML links from normal text
text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
td, td_end = makeHTMLTags("TD")
table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
print(table_text.parseString(text).body)
Prints::
More info at the pyparsing wiki page
"""
return pyparsing_common._html_stripper.transform_string(tokens[0])
_commasepitem = (
Combine(
OneOrMore(
~Literal(",")
+ ~LineEnd()
+ Word(printables, exclude_chars=",")
+ Opt(White(" \t") + ~FollowedBy(LineEnd() | ","))
)
)
.streamline()
.set_name("commaItem")
)
comma_separated_list = delimited_list(
Opt(quoted_string.copy() | _commasepitem, default="")
).set_name("comma separated list")
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
upcase_tokens = staticmethod(token_map(lambda t: t.upper()))
"""Parse action to convert tokens to upper case."""
downcase_tokens = staticmethod(token_map(lambda t: t.lower()))
"""Parse action to convert tokens to lower case."""
# fmt: off
url = Regex(
# https://mathiasbynens.be/demo/url-regex
# https://gist.github.com/dperini/729294
r"^" +
# protocol identifier (optional)
# short syntax // still required
r"(?:(?:(?P<scheme>https?|ftp):)?\/\/)" +
# user:pass BasicAuth (optional)
r"(?:(?P<auth>\S+(?::\S*)?)@)?" +
r"(?P<host>" +
# IP address exclusion
# private & local networks
r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
# IP address dotted notation octets
# excludes loopback network 0.0.0.0
# excludes reserved space >= 224.0.0.0
# excludes network & broadcast addresses
# (first & last IP address of each class)
r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
r"|" +
# host & domain names, may end with dot
# can be replaced by a shortest alternative
# (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
r"(?:" +
r"(?:" +
r"[a-z0-9\u00a1-\uffff]" +
r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
r")?" +
r"[a-z0-9\u00a1-\uffff]\." +
r")+" +
# TLD identifier name, may end with dot
r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
r")" +
# port number (optional)
r"(:(?P<port>\d{2,5}))?" +
# resource path (optional)
r"(?P<path>\/[^?# ]*)?" +
# query string (optional)
r"(\?(?P<query>[^#]*))?" +
# fragment (optional)
r"(#(?P<fragment>\S*))?" +
r"$"
).set_name("url")
# fmt: on
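    # Illustrative sketch (not part of the original module): the named regex
    # groups above become results names, e.g.:
    #   result = pyparsing_common.url.parse_string("https://user@example.com:8080/a/b?q=1#top")
    #   result.scheme, result.host, result.port   # -> 'https', 'example.com', '8080'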
# pre-PEP8 compatibility names
convertToInteger = convert_to_integer
convertToFloat = convert_to_float
convertToDate = convert_to_date
convertToDatetime = convert_to_datetime
stripHTMLTags = strip_html_tags
upcaseTokens = upcase_tokens
downcaseTokens = downcase_tokens
_builtin_exprs = [
v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)
]
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/pyparsing/common.py
|
Python
|
mit
| 12,936 |
#
# core.py
#
import os
import typing
from typing import (
NamedTuple,
Union,
Callable,
Any,
Generator,
Tuple,
List,
TextIO,
Set,
Sequence,
)
from abc import ABC, abstractmethod
from enum import Enum
import string
import copy
import warnings
import re
import sys
from collections.abc import Iterable
import traceback
import types
from operator import itemgetter
from functools import wraps
from threading import RLock
from pathlib import Path
from .util import (
_FifoCache,
_UnboundedCache,
__config_flags,
_collapse_string_to_ranges,
_escape_regex_range_chars,
_bslash,
_flatten,
LRUMemo as _LRUMemo,
UnboundedMemo as _UnboundedMemo,
)
from .exceptions import *
from .actions import *
from .results import ParseResults, _ParseResultsWithOffset
from .unicode import pyparsing_unicode
_MAX_INT = sys.maxsize
str_type: Tuple[type, ...] = (str, bytes)
#
# Copyright (c) 2003-2022 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
if sys.version_info >= (3, 8):
from functools import cached_property
else:
class cached_property:
def __init__(self, func):
self._func = func
def __get__(self, instance, owner=None):
ret = instance.__dict__[self._func.__name__] = self._func(instance)
return ret
class __compat__(__config_flags):
"""
A cross-version compatibility configuration for pyparsing features that will be
released in a future version. By setting values in this configuration to True,
those features can be enabled in prior versions for compatibility development
and testing.
- ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping
of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`;
maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1
behavior
"""
_type_desc = "compatibility"
collect_all_And_tokens = True
_all_names = [__ for __ in locals() if not __.startswith("_")]
_fixed_names = """
collect_all_And_tokens
""".split()
class __diag__(__config_flags):
_type_desc = "diagnostic"
warn_multiple_tokens_in_named_alternation = False
warn_ungrouped_named_tokens_in_collection = False
warn_name_set_on_empty_Forward = False
warn_on_parse_using_empty_Forward = False
warn_on_assignment_to_Forward = False
warn_on_multiple_string_args_to_oneof = False
warn_on_match_first_with_lshift_operator = False
enable_debug_on_named_expressions = False
_all_names = [__ for __ in locals() if not __.startswith("_")]
_warning_names = [name for name in _all_names if name.startswith("warn")]
_debug_names = [name for name in _all_names if name.startswith("enable_debug")]
@classmethod
def enable_all_warnings(cls) -> None:
for name in cls._warning_names:
cls.enable(name)
class Diagnostics(Enum):
"""
Diagnostic configuration (all default to disabled)
- ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results
name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions
- ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results
name is defined on a containing expression with ungrouped subexpressions that also
have results names
- ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined
with a results name, but has no contents defined
- ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is
defined in a grammar but has never had an expression attached to it
- ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined
but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'``
- ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is
incorrectly called with multiple str arguments
- ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent
calls to :class:`ParserElement.set_name`
Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`.
All warnings can be enabled by calling :class:`enable_all_warnings`.
"""
warn_multiple_tokens_in_named_alternation = 0
warn_ungrouped_named_tokens_in_collection = 1
warn_name_set_on_empty_Forward = 2
warn_on_parse_using_empty_Forward = 3
warn_on_assignment_to_Forward = 4
warn_on_multiple_string_args_to_oneof = 5
warn_on_match_first_with_lshift_operator = 6
enable_debug_on_named_expressions = 7
def enable_diag(diag_enum: Diagnostics) -> None:
"""
Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
"""
__diag__.enable(diag_enum.name)
def disable_diag(diag_enum: Diagnostics) -> None:
"""
Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
"""
__diag__.disable(diag_enum.name)
def enable_all_warnings() -> None:
"""
Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`).
"""
__diag__.enable_all_warnings()
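# Illustrative usage sketch (not part of the original module):
#   enable_diag(Diagnostics.warn_ungrouped_named_tokens_in_collection)
#   disable_diag(Diagnostics.warn_ungrouped_named_tokens_in_collection)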
# hide abstract class
del __config_flags
def _should_enable_warnings(
cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str]
) -> bool:
enable = bool(warn_env_var)
for warn_opt in cmd_line_warn_options:
w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split(
":"
)[:5]
if not w_action.lower().startswith("i") and (
not (w_message or w_category or w_module) or w_module == "pyparsing"
):
enable = True
elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""):
enable = False
return enable
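# Illustrative sketch (not part of the original module): pyparsing warnings can
# be switched on from the command line or the environment, e.g.:
#   python -W error::UserWarning:pyparsing my_parser.py
#   PYPARSINGENABLEALLWARNINGS=1 python my_parser.py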
if _should_enable_warnings(
sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS")
):
enable_all_warnings()
# build list of single arg builtins, that can be used as parse actions
_single_arg_builtins = {
sum,
len,
sorted,
reversed,
list,
tuple,
set,
any,
all,
min,
max,
}
_generatorType = types.GeneratorType
ParseAction = Union[
Callable[[], Any],
Callable[[ParseResults], Any],
Callable[[int, ParseResults], Any],
Callable[[str, int, ParseResults], Any],
]
ParseCondition = Union[
Callable[[], bool],
Callable[[ParseResults], bool],
Callable[[int, ParseResults], bool],
Callable[[str, int, ParseResults], bool],
]
ParseFailAction = Callable[[str, int, "ParserElement", Exception], None]
DebugStartAction = Callable[[str, int, "ParserElement", bool], None]
DebugSuccessAction = Callable[
[str, int, int, "ParserElement", ParseResults, bool], None
]
DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None]
alphas = string.ascii_uppercase + string.ascii_lowercase
identchars = pyparsing_unicode.Latin1.identchars
identbodychars = pyparsing_unicode.Latin1.identbodychars
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
printables = "".join([c for c in string.printable if c not in string.whitespace])
_trim_arity_call_line: traceback.StackSummary = None
def _trim_arity(func, max_limit=3):
"""decorator to trim function calls to match the arity of the target"""
global _trim_arity_call_line
if func in _single_arg_builtins:
return lambda s, l, t: func(t)
limit = 0
found_arity = False
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
return [frame_summary[:2]]
# synthesize what would be returned by traceback.extract_stack at the call to
# user's parse action 'func', so that we don't incur call penalty at parse time
# fmt: off
LINE_DIFF = 7
# IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
# THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
_trim_arity_call_line = (_trim_arity_call_line or traceback.extract_stack(limit=2)[-1])
pa_call_line_synth = (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF)
def wrapper(*args):
nonlocal found_arity, limit
while 1:
try:
ret = func(*args[limit:])
found_arity = True
return ret
except TypeError as te:
# re-raise TypeErrors if they did not come from our arity testing
if found_arity:
raise
else:
tb = te.__traceback__
trim_arity_type_error = (
extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth
)
del tb
if trim_arity_type_error:
if limit < max_limit:
limit += 1
continue
raise
# fmt: on
# copy func name to wrapper for sensible debug output
# (can't use functools.wraps, since that messes with function signature)
func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
wrapper.__name__ = func_name
wrapper.__doc__ = func.__doc__
return wrapper
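# Illustrative sketch (not part of the original module): thanks to _trim_arity,
# parse actions may be declared with 0 to 3 arguments, e.g. any of:
#   expr.set_parse_action(lambda s, l, t: t[0].upper())
#   expr.set_parse_action(lambda l, t: t[0].upper())
#   expr.set_parse_action(lambda t: t[0].upper())
#   expr.set_parse_action(lambda: "replacement")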
def condition_as_parse_action(
fn: ParseCondition, message: str = None, fatal: bool = False
) -> ParseAction:
"""
Function to convert a simple predicate function that returns ``True`` or ``False``
into a parse action. Can be used in places when a parse action is required
and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition
to an operator level in :class:`infix_notation`).
Optional keyword arguments:
- ``message`` - define a custom message to be used in the raised exception
- ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately;
otherwise will raise :class:`ParseException`
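    Example (illustrative sketch, not from the original docs)::
        # reject out-of-range values where add_condition cannot be used
        small_int = Word(nums).add_parse_action(
            condition_as_parse_action(lambda t: int(t[0]) < 100,
                                      message="expected value < 100")
        )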
"""
msg = message if message is not None else "failed user-defined condition"
exc_type = ParseFatalException if fatal else ParseException
fn = _trim_arity(fn)
@wraps(fn)
def pa(s, l, t):
if not bool(fn(s, l, t)):
raise exc_type(s, l, msg)
return pa
def _default_start_debug_action(
instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False
):
cache_hit_str = "*" if cache_hit else ""
print(
(
"{}Match {} at loc {}({},{})\n {}\n {}^".format(
cache_hit_str,
expr,
loc,
lineno(loc, instring),
col(loc, instring),
line(loc, instring),
" " * (col(loc, instring) - 1),
)
)
)
def _default_success_debug_action(
instring: str,
startloc: int,
endloc: int,
expr: "ParserElement",
toks: ParseResults,
cache_hit: bool = False,
):
cache_hit_str = "*" if cache_hit else ""
print("{}Matched {} -> {}".format(cache_hit_str, expr, toks.as_list()))
def _default_exception_debug_action(
instring: str,
loc: int,
expr: "ParserElement",
exc: Exception,
cache_hit: bool = False,
):
cache_hit_str = "*" if cache_hit else ""
print(
"{}Match {} failed, {} raised: {}".format(
cache_hit_str, expr, type(exc).__name__, exc
)
)
def null_debug_action(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
class ParserElement(ABC):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS: str = " \n\t\r"
verbose_stacktrace: bool = False
_literalStringClass: typing.Optional[type] = None
@staticmethod
def set_default_whitespace_chars(chars: str) -> None:
r"""
Overrides the default whitespace chars
Example::
# default whitespace chars are space, <TAB> and newline
Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
# change to just treat newline as significant
ParserElement.set_default_whitespace_chars(" \t")
Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def']
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
# update whitespace all parse expressions defined in this module
for expr in _builtin_exprs:
if expr.copyDefaultWhiteChars:
expr.whiteChars = set(chars)
@staticmethod
def inline_literals_using(cls: type) -> None:
"""
Set class to be used for inclusion of string literals into a parser.
Example::
# default literal class used is Literal
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# change to Suppress
ParserElement.inline_literals_using(Suppress)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parse_string("1999/12/31") # -> ['1999', '12', '31']
"""
ParserElement._literalStringClass = cls
class DebugActions(NamedTuple):
debug_try: typing.Optional[DebugStartAction]
debug_match: typing.Optional[DebugSuccessAction]
debug_fail: typing.Optional[DebugExceptionAction]
def __init__(self, savelist: bool = False):
self.parseAction: List[ParseAction] = list()
self.failAction: typing.Optional[ParseFailAction] = None
self.customName = None
self._defaultName = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
self.copyDefaultWhiteChars = True
# used when checking for left-recursion
self.mayReturnEmpty = False
self.keepTabs = False
self.ignoreExprs: List["ParserElement"] = list()
self.debug = False
self.streamlined = False
# optimize exception handling for subclasses that don't advance parse index
self.mayIndexError = True
self.errmsg = ""
# mark results names as modal (report only last) or cumulative (list all)
self.modalResults = True
# custom debug actions
self.debugActions = self.DebugActions(None, None, None)
# avoid redundant calls to preParse
self.callPreparse = True
self.callDuringTry = False
self.suppress_warnings_: List[Diagnostics] = []
def suppress_warning(self, warning_type: Diagnostics) -> "ParserElement":
"""
Suppress warnings emitted for a particular diagnostic on this expression.
Example::
base = pp.Forward()
base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward)
# statement would normally raise a warning, but is now suppressed
print(base.parseString("x"))
"""
self.suppress_warnings_.append(warning_type)
return self
def copy(self) -> "ParserElement":
"""
Make a copy of this :class:`ParserElement`. Useful for defining
different parse actions for the same parsing pattern, using copies of
the original parse element.
Example::
integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K")
integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of ``expr.copy()`` is just ``expr()``::
integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
"""
cpy = copy.copy(self)
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
return cpy
def set_results_name(
self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False
) -> "ParserElement":
"""
Define name for referencing matching tokens as a nested attribute
of the returned parse results.
Normally, results names are assigned as you would assign keys in a dict:
any existing value is overwritten by later values. If it is necessary to
keep all values captured for a particular results name, call ``set_results_name``
with ``list_all_matches`` = True.
NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
``expr("name")`` in place of ``expr.set_results_name("name")``
- see :class:`__call__`. If ``list_all_matches`` is required, use
``expr("name*")``.
Example::
date_str = (integer.set_results_name("year") + '/'
+ integer.set_results_name("month") + '/'
+ integer.set_results_name("day"))
# equivalent form:
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
"""
listAllMatches = listAllMatches or list_all_matches
return self._setResultsName(name, listAllMatches)
def _setResultsName(self, name, listAllMatches=False):
if name is None:
return self
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches = True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def set_break(self, break_flag: bool = True) -> "ParserElement":
"""
Method to invoke the Python pdb debugger when this element is
about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to
disable.
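        Example (illustrative sketch, not from the original docs)::
            # drop into pdb whenever this element is about to be parsed
            trouble_spot = Word(nums).set_break()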
"""
if break_flag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
# this call to pdb.set_trace() is intentional, not a checkin error
pdb.set_trace()
return _parseMethod(instring, loc, doActions, callPreParse)
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse, "_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def set_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
"""
Define one or more actions to perform when successfully matching parse element definition.
Parse actions can be called to perform data conversions, do extra validation,
update external data structures, or enhance or replace the parsed tokens.
Each parse action ``fn`` is a callable method with 0-3 arguments, called as
``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
The parsed tokens are passed to the parse action as ParseResults. They can be
modified in place using list-style append, extend, and pop operations to update
the parsed list elements; and with dictionary-style item set and del operations
to add, update, or remove any named results. If the tokens are modified in place,
it is not necessary to return them with a return statement.
Parse actions can also completely replace the given tokens, with another ``ParseResults``
object, or with some entirely different object (common for parse actions that perform data
conversions). A convenient way to build a new parse result is to define the values
using a dict, and then create the return value using :class:`ParseResults.from_dict`.
If None is passed as the ``fn`` parse action, all previously added parse actions for this
expression are cleared.
Optional keyword arguments:
- call_during_try = (default= ``False``) indicate if parse action should be run during
lookaheads and alternate testing. For parse actions that have side effects, it is
important to only call the parse action once it is determined that it is being
called as part of a successful parse. For parse actions that perform additional
validation, then call_during_try should be passed as True, so that the validation
code is included in the preliminary "try" parses.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See :class:`parse_string` for more
information on parsing strings containing ``<TAB>`` s, and suggested
methods to maintain a consistent view of the parsed string, the parse
location, and line and column positions within the parsed string.
Example::
# parse dates in the form YYYY/MM/DD
# use parse action to convert toks from str to int at parse time
def convert_to_int(toks):
return int(toks[0])
# use a parse action to verify that the date is a valid date
def is_valid_date(instring, loc, toks):
from datetime import date
year, month, day = toks[::2]
try:
date(year, month, day)
except ValueError:
raise ParseException(instring, loc, "invalid date given")
integer = Word(nums)
date_str = integer + '/' + integer + '/' + integer
# add parse actions
integer.set_parse_action(convert_to_int)
date_str.set_parse_action(is_valid_date)
# note that integer fields are now ints, not strings
date_str.run_tests('''
# successful parse - note that integer fields were converted to ints
1999/12/31
# fail - invalid date
1999/13/31
''')
"""
if list(fns) == [None]:
self.parseAction = []
else:
if not all(callable(fn) for fn in fns):
raise TypeError("parse actions must be callable")
self.parseAction = [_trim_arity(fn) for fn in fns]
self.callDuringTry = kwargs.get(
"call_during_try", kwargs.get("callDuringTry", False)
)
return self
def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
"""
Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`.
See examples in :class:`copy`.
"""
self.parseAction += [_trim_arity(fn) for fn in fns]
self.callDuringTry = self.callDuringTry or kwargs.get(
"call_during_try", kwargs.get("callDuringTry", False)
)
return self
def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement":
"""Add a boolean predicate function to expression's list of parse actions. See
:class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``,
functions passed to ``add_condition`` need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise
ParseException
- call_during_try = boolean to indicate if this method should be called during internal tryParse calls,
default=False
Example::
integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0),
(line:1, col:1)
"""
for fn in fns:
self.parseAction.append(
condition_as_parse_action(
fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False)
)
)
self.callDuringTry = self.callDuringTry or kwargs.get(
"call_during_try", kwargs.get("callDuringTry", False)
)
return self
def set_fail_action(self, fn: ParseFailAction) -> "ParserElement":
"""
Define action to perform if parsing fails at this expression.
        Fail action ``fn`` is a callable function that takes the arguments
``fn(s, loc, expr, err)`` where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw :class:`ParseFatalException`
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
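    # Illustrative sketch (not part of the original module): a fail action can
    # be used to report diagnostics, e.g.:
    #
    #   def report_failure(s, loc, expr, err):
    #       print("failed to match {!r} at loc {}".format(str(expr), loc))
    #
    #   key_value = Word(alphas) + "=" + Word(nums)
    #   key_value.set_fail_action(report_failure)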
def _skipIgnorables(self, instring, loc):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc, dummy = e._parse(instring, loc)
exprsFound = True
except ParseException:
pass
return loc
def preParse(self, instring, loc):
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
if self.skipWhitespace:
instrlen = len(instring)
white_chars = self.whiteChars
while loc < instrlen and instring[loc] in white_chars:
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
return loc, []
def postParse(self, instring, loc, tokenlist):
return tokenlist
# @profile
def _parseNoCache(
self, instring, loc, doActions=True, callPreParse=True
) -> Tuple[int, ParseResults]:
TRY, MATCH, FAIL = 0, 1, 2
debugging = self.debug # and doActions)
len_instring = len(instring)
if debugging or self.failAction:
# print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring)))
try:
if callPreParse and self.callPreparse:
pre_loc = self.preParse(instring, loc)
else:
pre_loc = loc
tokens_start = pre_loc
if self.debugActions.debug_try:
self.debugActions.debug_try(instring, tokens_start, self, False)
if self.mayIndexError or pre_loc >= len_instring:
try:
loc, tokens = self.parseImpl(instring, pre_loc, doActions)
except IndexError:
raise ParseException(instring, len_instring, self.errmsg, self)
else:
loc, tokens = self.parseImpl(instring, pre_loc, doActions)
except Exception as err:
# print("Exception raised:", err)
if self.debugActions.debug_fail:
self.debugActions.debug_fail(
instring, tokens_start, self, err, False
)
if self.failAction:
self.failAction(instring, tokens_start, self, err)
raise
else:
if callPreParse and self.callPreparse:
pre_loc = self.preParse(instring, loc)
else:
pre_loc = loc
tokens_start = pre_loc
if self.mayIndexError or pre_loc >= len_instring:
try:
loc, tokens = self.parseImpl(instring, pre_loc, doActions)
except IndexError:
raise ParseException(instring, len_instring, self.errmsg, self)
else:
loc, tokens = self.parseImpl(instring, pre_loc, doActions)
tokens = self.postParse(instring, loc, tokens)
ret_tokens = ParseResults(
tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults
)
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
try:
tokens = fn(instring, tokens_start, ret_tokens)
except IndexError as parse_action_exc:
exc = ParseException("exception raised in parse action")
raise exc from parse_action_exc
if tokens is not None and tokens is not ret_tokens:
ret_tokens = ParseResults(
tokens,
self.resultsName,
asList=self.saveAsList
and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults,
)
except Exception as err:
# print "Exception raised in user parse action:", err
if self.debugActions.debug_fail:
self.debugActions.debug_fail(
instring, tokens_start, self, err, False
)
raise
else:
for fn in self.parseAction:
try:
tokens = fn(instring, tokens_start, ret_tokens)
except IndexError as parse_action_exc:
exc = ParseException("exception raised in parse action")
raise exc from parse_action_exc
if tokens is not None and tokens is not ret_tokens:
ret_tokens = ParseResults(
tokens,
self.resultsName,
asList=self.saveAsList
and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults,
)
if debugging:
# print("Matched", self, "->", ret_tokens.as_list())
if self.debugActions.debug_match:
self.debugActions.debug_match(
instring, tokens_start, loc, self, ret_tokens, False
)
return loc, ret_tokens
def try_parse(self, instring: str, loc: int, raise_fatal: bool = False) -> int:
try:
return self._parse(instring, loc, doActions=False)[0]
except ParseFatalException:
if raise_fatal:
raise
raise ParseException(instring, loc, self.errmsg, self)
def can_parse_next(self, instring: str, loc: int) -> bool:
try:
self.try_parse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
# cache for left-recursion in Forward references
recursion_lock = RLock()
recursion_memos: typing.Dict[
Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]]
] = {}
# argument cache for optimizing repeated calls when backtracking through recursive expressions
packrat_cache = (
{}
    )  # this is set later by enable_packrat(); this is here so that reset_cache() doesn't fail
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache(
self, instring, loc, doActions=True, callPreParse=True
) -> Tuple[int, ParseResults]:
HIT, MISS = 0, 1
TRY, MATCH, FAIL = 0, 1, 2
lookup = (self, instring, loc, callPreParse, doActions)
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
value = self._parseNoCache(instring, loc, doActions, callPreParse)
except ParseBaseException as pe:
# cache a copy of the exception, without the traceback
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy(), loc))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if self.debug and self.debugActions.debug_try:
try:
self.debugActions.debug_try(instring, loc, self, cache_hit=True)
except TypeError:
pass
if isinstance(value, Exception):
if self.debug and self.debugActions.debug_fail:
try:
self.debugActions.debug_fail(
instring, loc, self, value, cache_hit=True
)
except TypeError:
pass
raise value
loc_, result, endloc = value[0], value[1].copy(), value[2]
if self.debug and self.debugActions.debug_match:
try:
self.debugActions.debug_match(
instring, loc_, endloc, self, result, cache_hit=True
)
except TypeError:
pass
return loc_, result
_parse = _parseNoCache
@staticmethod
def reset_cache() -> None:
ParserElement.packrat_cache.clear()
ParserElement.packrat_cache_stats[:] = [0] * len(
ParserElement.packrat_cache_stats
)
ParserElement.recursion_memos.clear()
_packratEnabled = False
_left_recursion_enabled = False
@staticmethod
def disable_memoization() -> None:
"""
        Disables active Packrat or Left Recursion parsing and their memoization.
        This method also works if neither Packrat nor Left Recursion are enabled.
        This makes it safe to call before activating Packrat or Left Recursion
to clear any previous settings.
"""
ParserElement.reset_cache()
ParserElement._left_recursion_enabled = False
ParserElement._packratEnabled = False
ParserElement._parse = ParserElement._parseNoCache
@staticmethod
def enable_left_recursion(
cache_size_limit: typing.Optional[int] = None, *, force=False
) -> None:
"""
Enables "bounded recursion" parsing, which allows for both direct and indirect
left-recursion. During parsing, left-recursive :class:`Forward` elements are
repeatedly matched with a fixed recursion depth that is gradually increased
until finding the longest match.
Example::
from pip._vendor import pyparsing as pp
pp.ParserElement.enable_left_recursion()
E = pp.Forward("E")
num = pp.Word(pp.nums)
# match `num`, or `num '+' num`, or `num '+' num '+' num`, ...
E <<= E + '+' - num | num
print(E.parse_string("1+2+3"))
Recursion search naturally memoizes matches of ``Forward`` elements and may
thus skip reevaluation of parse actions during backtracking. This may break
programs with parse actions which rely on strict ordering of side-effects.
Parameters:
- cache_size_limit - (default=``None``) - memoize at most this many
``Forward`` elements during matching; if ``None`` (the default),
memoize all ``Forward`` elements.
        Bounded Recursion parsing works similarly to, but not identically to, Packrat parsing,
thus the two cannot be used together. Use ``force=True`` to disable any
previous, conflicting settings.
"""
if force:
ParserElement.disable_memoization()
elif ParserElement._packratEnabled:
raise RuntimeError("Packrat and Bounded Recursion are not compatible")
if cache_size_limit is None:
ParserElement.recursion_memos = _UnboundedMemo()
elif cache_size_limit > 0:
ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit)
else:
raise NotImplementedError("Memo size of %s" % cache_size_limit)
ParserElement._left_recursion_enabled = True
@staticmethod
def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None:
"""
Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
Parameters:
- cache_size_limit - (default= ``128``) - if an integer value is provided
will limit the size of the packrat cache; if None is passed, then
the cache size will be unbounded; if 0 is passed, the cache will
be effectively disabled.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method :class:`ParserElement.enable_packrat`.
For best results, call ``enable_packrat()`` immediately after
importing pyparsing.
Example::
from pip._vendor import pyparsing
pyparsing.ParserElement.enable_packrat()
        Packrat parsing works similarly to, but not identically to, Bounded Recursion parsing,
thus the two cannot be used together. Use ``force=True`` to disable any
previous, conflicting settings.
"""
if force:
ParserElement.disable_memoization()
elif ParserElement._left_recursion_enabled:
raise RuntimeError("Packrat and Bounded Recursion are not compatible")
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
if cache_size_limit is None:
ParserElement.packrat_cache = _UnboundedCache()
else:
ParserElement.packrat_cache = _FifoCache(cache_size_limit)
ParserElement._parse = ParserElement._parseCache
def parse_string(
self, instring: str, parse_all: bool = False, *, parseAll: bool = False
) -> ParseResults:
"""
Parse a string with respect to the parser definition. This function is intended as the primary interface to the
client code.
:param instring: The input string to be parsed.
:param parse_all: If set, the entire input string must match the grammar.
:param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release.
:raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar.
:returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or
an object with attributes if the given parser includes results names.
If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This
is also equivalent to ending the grammar with :class:`StringEnd`().
To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are
converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string
contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string
being parsed, one can ensure a consistent view of the input string by doing one of the following:
- calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`),
- define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the
parse action's ``s`` argument, or
- explicitly expand the tabs in your input string before calling ``parse_string``.
Examples:
By default, partial matches are OK.
>>> res = Word('a').parse_string('aaaaabaaa')
>>> print(res)
['aaaaa']
The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children
directly to see more examples.
It raises an exception if parse_all flag is set and instring does not match the whole grammar.
>>> res = Word('a').parse_string('aaaaabaaa', parse_all=True)
Traceback (most recent call last):
...
pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6)
"""
parseAll = parse_all or parseAll
ParserElement.reset_cache()
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse(instring, 0)
if parseAll:
loc = self.preParse(instring, loc)
se = Empty() + StringEnd()
se._parse(instring, loc)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clearing out pyparsing internal stack trace
raise exc.with_traceback(None)
else:
return tokens
def scan_string(
self,
instring: str,
max_matches: int = _MAX_INT,
overlap: bool = False,
*,
debug: bool = False,
maxMatches: int = _MAX_INT,
) -> Generator[Tuple[ParseResults, int, int], None, None]:
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
``max_matches`` argument, to clip scanning after 'n' matches are found. If
``overlap`` is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See :class:`parse_string` for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens, start, end in Word(alphas).scan_string(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
maxMatches = min(maxMatches, max_matches)
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = str(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
        ParserElement.reset_cache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn(instring, loc)
nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
except ParseException:
loc = preloc + 1
else:
if nextLoc > loc:
matches += 1
if debug:
print(
{
"tokens": tokens.asList(),
"start": preloc,
"end": nextLoc,
}
)
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn(instring, loc)
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc + 1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc.with_traceback(None)
def transform_string(self, instring: str, *, debug: bool = False) -> str:
"""
Extension to :class:`scan_string`, to modify matching text with modified tokens that may
be returned from a parse action. To use ``transform_string``, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking ``transform_string()`` on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. ``transform_string()`` returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.set_parse_action(lambda toks: toks[0].title())
print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york."))
prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
"""
out: List[str] = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transform_string and scan_string
self.keepTabs = True
try:
for t, s, e in self.scan_string(instring, debug=debug):
out.append(instring[lastE:s])
if t:
if isinstance(t, ParseResults):
out += t.as_list()
elif isinstance(t, Iterable) and not isinstance(t, str_type):
out.extend(t)
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join([str(s) for s in _flatten(out)])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc.with_traceback(None)
def search_string(
self,
instring: str,
max_matches: int = _MAX_INT,
*,
debug: bool = False,
maxMatches: int = _MAX_INT,
) -> ParseResults:
"""
Another extension to :class:`scan_string`, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
``max_matches`` argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
"""
maxMatches = min(maxMatches, max_matches)
try:
return ParseResults(
[t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)]
)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc.with_traceback(None)
def split(
self,
instring: str,
maxsplit: int = _MAX_INT,
include_separators: bool = False,
*,
includeSeparators=False,
) -> Generator[str, None, None]:
"""
Generator method to split a string using the given expression as a separator.
May be called with optional ``maxsplit`` argument, to limit the number of splits;
        and the optional ``include_separators`` argument (default= ``False``), indicating
        whether the separating matched text should be included in the split results.
Example::
punc = one_of(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
includeSeparators = includeSeparators or include_separators
last = 0
for t, s, e in self.scan_string(instring, max_matches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other) -> "ParserElement":
"""
Implementation of ``+`` operator - returns :class:`And`. Adding strings to a :class:`ParserElement`
converts them to :class:`Literal`s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print(hello, "->", greet.parse_string(hello))
prints::
Hello, World! -> ['Hello', ',', 'World', '!']
``...`` may be used as a parse expression as a short form of :class:`SkipTo`.
Literal('start') + ... + Literal('end')
is equivalent to:
Literal('start') + SkipTo('end')("_skipped*") + Literal('end')
Note that the skipped text is returned with '_skipped' as a results name,
and to support having multiple skips in the same parser, the value returned is
a list of all skipped text.
"""
if other is Ellipsis:
return _PendingSkip(self)
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return And([self, other])
def __radd__(self, other) -> "ParserElement":
"""
Implementation of ``+`` operator when left operand is not a :class:`ParserElement`
"""
if other is Ellipsis:
return SkipTo(self)("_skipped*") + self
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return other + self
def __sub__(self, other) -> "ParserElement":
"""
Implementation of ``-`` operator, returns :class:`And` with error stop
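        Example (illustrative sketch, not from the original docs)::
            # after "port" and "=" match, a missing number is a fatal error,
            # reported at the point of failure instead of backtracking
            port_setting = Literal("port") + "=" - Word(nums)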
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return self + And._ErrorStop() + other
def __rsub__(self, other) -> "ParserElement":
"""
Implementation of ``-`` operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return other - self
def __mul__(self, other) -> "ParserElement":
"""
Implementation of ``*`` operator, allows use of ``expr * 3`` in place of
``expr + expr + expr``. Expressions may also be multiplied by a 2-integer
tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples
may also include ``None`` as in:
- ``expr*(n, None)`` or ``expr*(n, )`` is equivalent
to ``expr*n + ZeroOrMore(expr)``
(read as "at least n instances of ``expr``")
- ``expr*(None, n)`` is equivalent to ``expr*(0, n)``
(read as "0 to n instances of ``expr``")
- ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)``
- ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)``
Note that ``expr*(None, n)`` does not raise an exception if
more than n exprs exist in the input stream; that is,
``expr*(None, n)`` does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
``expr*(None, n) + ~expr``
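        Example (illustrative sketch, not from the original docs)::
            Word(nums) * 3          # match exactly 3 numbers
            Word(nums) * (1, 3)     # match 1 to 3 numbers
            Word(nums) * (2, None)  # match 2 or more numbers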
"""
if other is Ellipsis:
other = (0, None)
elif isinstance(other, tuple) and other[:1] == (Ellipsis,):
other = ((0,) + other[1:] + (None,))[:2]
if isinstance(other, int):
minElements, optElements = other, 0
elif isinstance(other, tuple):
other = tuple(o if o is not Ellipsis else None for o in other)
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0], int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self * other[0] + ZeroOrMore(self)
elif isinstance(other[0], int) and isinstance(other[1], int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError(
"cannot multiply ParserElement and ({}) objects".format(
",".join(type(item).__name__ for item in other)
)
)
else:
raise TypeError(
"cannot multiply ParserElement and {} objects".format(
type(other).__name__
)
)
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError(
"second tuple value must be greater or equal to first tuple value"
)
if minElements == optElements == 0:
return And([])
if optElements:
def makeOptionalList(n):
if n > 1:
return Opt(self + makeOptionalList(n - 1))
else:
return Opt(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self] * minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self] * minElements)
return ret
def __rmul__(self, other) -> "ParserElement":
return self.__mul__(other)
def __or__(self, other) -> "ParserElement":
"""
Implementation of ``|`` operator - returns :class:`MatchFirst`
"""
if other is Ellipsis:
return _PendingSkip(self, must_skip=True)
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return MatchFirst([self, other])
def __ror__(self, other) -> "ParserElement":
"""
Implementation of ``|`` operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return other | self
def __xor__(self, other) -> "ParserElement":
"""
Implementation of ``^`` operator - returns :class:`Or`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return Or([self, other])
def __rxor__(self, other) -> "ParserElement":
"""
Implementation of ``^`` operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return other ^ self
def __and__(self, other) -> "ParserElement":
"""
Implementation of ``&`` operator - returns :class:`Each`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return Each([self, other])
def __rand__(self, other) -> "ParserElement":
"""
Implementation of ``&`` operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return other & self
def __invert__(self) -> "ParserElement":
"""
Implementation of ``~`` operator - returns :class:`NotAny`
"""
return NotAny(self)
# disable __iter__ to override legacy use of sequential access to __getitem__ to
# iterate over a sequence
__iter__ = None
def __getitem__(self, key):
"""
use ``[]`` indexing notation as a short form for expression repetition:
- ``expr[n]`` is equivalent to ``expr*n``
- ``expr[m, n]`` is equivalent to ``expr*(m, n)``
- ``expr[n, ...]`` or ``expr[n,]`` is equivalent
to ``expr*n + ZeroOrMore(expr)``
(read as "at least n instances of ``expr``")
- ``expr[..., n]`` is equivalent to ``expr*(0, n)``
(read as "0 to n instances of ``expr``")
- ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``
- ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``
``None`` may be used in place of ``...``.
Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception
if more than ``n`` ``expr``s exist in the input stream. If this behavior is
desired, then write ``expr[..., n] + ~expr``.
"""
# convert single arg keys to tuples
try:
if isinstance(key, str_type):
key = (key,)
iter(key)
except TypeError:
key = (key, key)
if len(key) > 2:
raise TypeError(
"only 1 or 2 index arguments supported ({}{})".format(
key[:5], "... [{}]".format(len(key)) if len(key) > 5 else ""
)
)
# clip to 2 elements
ret = self * tuple(key[:2])
return ret
def __call__(self, name: typing.Optional[str] = None) -> "ParserElement":
"""
Shortcut for :class:`set_results_name`, with ``list_all_matches=False``.
If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be
passed as ``True``.
If ``name`` is omitted, same as calling :class:`copy`.
Example::
# these are equivalent
userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno")
userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
"""
if name is not None:
return self._setResultsName(name)
else:
return self.copy()
def suppress(self) -> "ParserElement":
"""
Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
cluttering up returned output.
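Example (a minimal usage sketch; the grammar shown is illustrative)::
wd_list = Word(alphas) + (Suppress(",") + Word(alphas))[...]
wd_list.parse_string("a, b, c")  # -> ['a', 'b', 'c']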
"""
return Suppress(self)
def ignore_whitespace(self, recursive: bool = True) -> "ParserElement":
"""
Enables the skipping of whitespace before matching the characters in the
:class:`ParserElement`'s defined pattern.
:param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any)
"""
self.skipWhitespace = True
return self
def leave_whitespace(self, recursive: bool = True) -> "ParserElement":
"""
Disables the skipping of whitespace before matching the characters in the
:class:`ParserElement`'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
:param recursive: If ``True`` (the default), also disable whitespace skipping in child elements (if any)
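Example (a short sketch of the effect)::
# by default, leading whitespace is skipped before matching
Word(alphas).parse_string("   abc")  # -> ['abc']
# after leave_whitespace(), the leading spaces cause a parse failure
Word(alphas).leave_whitespace().parse_string("   abc")  # raises ParseException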
"""
self.skipWhitespace = False
return self
def set_whitespace_chars(
self, chars: Union[Set[str], str], copy_defaults: bool = False
) -> "ParserElement":
"""
Overrides the default whitespace chars
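Example (an illustrative sketch)::
# treat only spaces and tabs as ignorable, making newlines significant
expr = Word(alphas).set_whitespace_chars(" \t")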
"""
self.skipWhitespace = True
self.whiteChars = set(chars)
self.copyDefaultWhiteChars = copy_defaults
return self
def parse_with_tabs(self) -> "ParserElement":
"""
Overrides default behavior to expand tab (``<TAB>``) characters to spaces before parsing the input string.
Must be called before ``parse_string`` when the input grammar contains elements that
match ``<TAB>`` characters.
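Example (a rough sketch; ``White`` is used so the tab itself can be matched)::
row = Word(alphas) + White("\t") + Word(nums)
row.parse_with_tabs().parse_string("score\t42")  # -> ['score', '\t', '42']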
"""
self.keepTabs = True
return self
def ignore(self, other: "ParserElement") -> "ParserElement":
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = Word(alphas)[1, ...]
patt.parse_string('ablaj /* comment */ lskjd')
# -> ['ablaj']
patt.ignore(c_style_comment)
patt.parse_string('ablaj /* comment */ lskjd')
# -> ['ablaj', 'lskjd']
"""
if isinstance(other, str_type):
other = Suppress(other)
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append(Suppress(other.copy()))
return self
def set_debug_actions(
self,
start_action: DebugStartAction,
success_action: DebugSuccessAction,
exception_action: DebugExceptionAction,
) -> "ParserElement":
"""
Customize display of debugging messages while doing pattern matching:
- ``start_action`` - method to be called when an expression is about to be parsed;
should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)``
- ``success_action`` - method to be called when an expression has successfully parsed;
should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserElement, parsed_tokens: ParseResults, cache_hit: bool)``
- ``exception_action`` - method to be called when expression fails to parse;
should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)``
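Example (a rough sketch of custom actions matching the signatures above)::
def show_start(instring, loc, expr, cache_hit=False):
    print("trying {} at loc {}".format(expr, loc))
def show_success(instring, start_loc, end_loc, expr, toks, cache_hit=False):
    print("matched {} -> {}".format(expr, toks.as_list()))
def show_failure(instring, loc, expr, exc, cache_hit=False):
    print("failed {}: {}".format(expr, exc))
wd = Word(alphas).set_debug_actions(show_start, show_success, show_failure)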
"""
self.debugActions = self.DebugActions(
start_action or _default_start_debug_action,
success_action or _default_success_debug_action,
exception_action or _default_exception_debug_action,
)
self.debug = True
return self
def set_debug(self, flag: bool = True) -> "ParserElement":
"""
Enable display of debugging messages while doing pattern matching.
Set ``flag`` to ``True`` to enable, ``False`` to disable.
Example::
wd = Word(alphas).set_name("alphaword")
integer = Word(nums).set_name("numword")
term = wd | integer
# turn on debugging for wd
wd.set_debug()
term[1, ...].parse_string("abc 123 xyz 890")
prints::
Match alphaword at loc 0(1,1)
Matched alphaword -> ['abc']
Match alphaword at loc 3(1,4)
Exception raised:Expected alphaword (at char 4), (line:1, col:5)
Match alphaword at loc 7(1,8)
Matched alphaword -> ['xyz']
Match alphaword at loc 11(1,12)
Exception raised:Expected alphaword (at char 12), (line:1, col:13)
Match alphaword at loc 15(1,16)
Exception raised:Expected alphaword (at char 15), (line:1, col:16)
The output shown is that produced by the default debug actions - custom debug actions can be
specified using :class:`set_debug_actions`. Prior to attempting
to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``
message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression,
which makes debugging and exception messages easier to understand - for instance, the default
name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``.
"""
if flag:
self.set_debug_actions(
_default_start_debug_action,
_default_success_debug_action,
_default_exception_debug_action,
)
else:
self.debug = False
return self
@property
def default_name(self) -> str:
if self._defaultName is None:
self._defaultName = self._generateDefaultName()
return self._defaultName
@abstractmethod
def _generateDefaultName(self):
"""
Child classes must define this method, which defines how the ``default_name`` is set.
"""
def set_name(self, name: str) -> "ParserElement":
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1)
Word(nums).set_name("integer").parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.customName = name
self.errmsg = "Expected " + self.name
if __diag__.enable_debug_on_named_expressions:
self.set_debug()
return self
@property
def name(self) -> str:
# This will use a user-defined name if available, but otherwise defaults back to the auto-generated name
return self.customName if self.customName is not None else self.default_name
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return str(self)
def streamline(self) -> "ParserElement":
self.streamlined = True
self._defaultName = None
return self
def recurse(self) -> Sequence["ParserElement"]:
return []
def _checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.recurse():
e._checkRecursion(subRecCheckList)
def validate(self, validateTrace=None) -> None:
"""
Check defined expressions for valid structure, check for infinite recursive definitions.
"""
self._checkRecursion([])
def parse_file(
self,
file_or_filename: Union[str, Path, TextIO],
encoding: str = "utf-8",
parse_all: bool = False,
*,
parseAll: bool = False,
) -> ParseResults:
"""
Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
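Example (a minimal sketch; assumes a file ``data.txt`` beginning with a word)::
greeting = Word(alphas)
result = greeting.parse_file("data.txt")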
"""
parseAll = parseAll or parse_all
try:
file_contents = file_or_filename.read()
except AttributeError:
with open(file_or_filename, "r", encoding=encoding) as f:
file_contents = f.read()
try:
return self.parse_string(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc.with_traceback(None)
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, str_type):
return self.matches(other, parse_all=True)
elif isinstance(other, ParserElement):
return vars(self) == vars(other)
return False
def __hash__(self):
return id(self)
def matches(
self, test_string: str, parse_all: bool = True, *, parseAll: bool = True
) -> bool:
"""
Method for quick testing of a parser against a test string. Good for simple
inline microtests of sub expressions while building up larger parser.
Parameters:
- ``test_string`` - to test against this expression for a match
- ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
parseAll = parseAll and parse_all
try:
self.parse_string(str(test_string), parse_all=parseAll)
return True
except ParseBaseException:
return False
def run_tests(
self,
tests: Union[str, List[str]],
parse_all: bool = True,
comment: typing.Optional[Union["ParserElement", str]] = "#",
full_dump: bool = True,
print_results: bool = True,
failure_tests: bool = False,
post_parse: Callable[[str, ParseResults], str] = None,
file: typing.Optional[TextIO] = None,
with_line_numbers: bool = False,
*,
parseAll: bool = True,
fullDump: bool = True,
printResults: bool = True,
failureTests: bool = False,
postParse: Callable[[str, ParseResults], str] = None,
) -> Tuple[bool, List[Tuple[str, Union[ParseResults, Exception]]]]:
"""
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- ``tests`` - a list of separate test strings, or a multiline string of test strings
- ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
- ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- ``print_results`` - (default= ``True``) prints test output to stdout
- ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing
- ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as
`fn(test_string, parse_results)` and returns a string to be added to the test output
- ``file`` - (default= ``None``) optional file-like object to which test output will be written;
if None, will default to ``sys.stdout``
- ``with_line_numbers`` - (default= ``False``) show test strings with line and column numbers
Returns: a (success, results) tuple, where success indicates that all tests succeeded
(or failed if ``failure_tests`` is True), and the results contain a list of lines of each
test's output
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.run_tests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.run_tests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failure_tests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal; you must include the leading ``'r'``.)
"""
from .testing import pyparsing_test
parseAll = parseAll and parse_all
fullDump = fullDump and full_dump
printResults = printResults and print_results
failureTests = failureTests or failure_tests
postParse = postParse or post_parse
if isinstance(tests, str_type):
line_strip = type(tests).strip
tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()]
if isinstance(comment, str_type):
comment = Literal(comment)
if file is None:
file = sys.stdout
print_ = file.write
result: Union[ParseResults, Exception]
allResults = []
comments = []
success = True
NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string)
BOM = "\ufeff"
for t in tests:
if comment is not None and comment.matches(t, False) or comments and not t:
comments.append(
pyparsing_test.with_line_numbers(t) if with_line_numbers else t
)
continue
if not t:
continue
out = [
"\n" + "\n".join(comments) if comments else "",
pyparsing_test.with_line_numbers(t) if with_line_numbers else t,
]
comments = []
try:
# convert newline marks to actual newlines, and strip leading BOM if present
t = NL.transform_string(t.lstrip(BOM))
result = self.parse_string(t, parse_all=parseAll)
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
out.append(pe.explain())
out.append("FAIL: " + str(pe))
if ParserElement.verbose_stacktrace:
out.extend(traceback.format_tb(pe.__traceback__))
success = success and failureTests
result = pe
except Exception as exc:
out.append("FAIL-EXCEPTION: {}: {}".format(type(exc).__name__, exc))
if ParserElement.verbose_stacktrace:
out.extend(traceback.format_tb(exc.__traceback__))
success = success and failureTests
result = exc
else:
success = success and not failureTests
if postParse is not None:
try:
pp_value = postParse(t, result)
if pp_value is not None:
if isinstance(pp_value, ParseResults):
out.append(pp_value.dump())
else:
out.append(str(pp_value))
else:
out.append(result.dump())
except Exception as e:
out.append(result.dump(full=fullDump))
out.append(
"{} failed: {}: {}".format(
postParse.__name__, type(e).__name__, e
)
)
else:
out.append(result.dump(full=fullDump))
out.append("")
if printResults:
print_("\n".join(out))
allResults.append((t, result))
return success, allResults
def create_diagram(
self,
output_html: Union[TextIO, Path, str],
vertical: int = 3,
show_results_names: bool = False,
show_groups: bool = False,
**kwargs,
) -> None:
"""
Create a railroad diagram for the parser.
Parameters:
- output_html (str or file-like object) - output target for generated
diagram HTML
- vertical (int) - threshold for formatting multiple alternatives vertically
instead of horizontally (default=3)
- show_results_names - bool flag whether diagram should show annotations for
defined results names
- show_groups - bool flag whether groups should be highlighted with an unlabeled surrounding box
Additional diagram-formatting keyword arguments can also be included;
see railroad.Diagram class.
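Example (a minimal sketch; requires the ``pyparsing[diagrams]`` extra)::
expr = Word(alphas)("word") + Word(nums)("number")
expr.create_diagram("my_parser.html", show_results_names=True)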
"""
try:
from .diagram import to_railroad, railroad_to_html
except ImportError as ie:
raise Exception(
"must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams"
) from ie
self.streamline()
railroad = to_railroad(
self,
vertical=vertical,
show_results_names=show_results_names,
show_groups=show_groups,
diagram_kwargs=kwargs,
)
if isinstance(output_html, (str, Path)):
with open(output_html, "w", encoding="utf-8") as diag_file:
diag_file.write(railroad_to_html(railroad))
else:
# we were passed a file-like object, just write to it
output_html.write(railroad_to_html(railroad))
setDefaultWhitespaceChars = set_default_whitespace_chars
inlineLiteralsUsing = inline_literals_using
setResultsName = set_results_name
setBreak = set_break
setParseAction = set_parse_action
addParseAction = add_parse_action
addCondition = add_condition
setFailAction = set_fail_action
tryParse = try_parse
canParseNext = can_parse_next
resetCache = reset_cache
enableLeftRecursion = enable_left_recursion
enablePackrat = enable_packrat
parseString = parse_string
scanString = scan_string
searchString = search_string
transformString = transform_string
setWhitespaceChars = set_whitespace_chars
parseWithTabs = parse_with_tabs
setDebugActions = set_debug_actions
setDebug = set_debug
defaultName = default_name
setName = set_name
parseFile = parse_file
runTests = run_tests
ignoreWhitespace = ignore_whitespace
leaveWhitespace = leave_whitespace
class _PendingSkip(ParserElement):
# internal placeholder class to hold a place where '...' is added to a parser element,
# once another ParserElement is added, this placeholder will be replaced with a SkipTo
def __init__(self, expr: ParserElement, must_skip: bool = False):
super().__init__()
self.anchor = expr
self.must_skip = must_skip
def _generateDefaultName(self):
return str(self.anchor + Empty()).replace("Empty", "...")
def __add__(self, other) -> "ParserElement":
skipper = SkipTo(other).set_name("...")("_skipped*")
if self.must_skip:
def must_skip(t):
if not t._skipped or t._skipped.as_list() == [""]:
del t[0]
t.pop("_skipped", None)
def show_skip(t):
if t._skipped.as_list()[-1:] == [""]:
t.pop("_skipped")
t["_skipped"] = "missing <" + repr(self.anchor) + ">"
return (
self.anchor + skipper().add_parse_action(must_skip)
| skipper().add_parse_action(show_skip)
) + other
return self.anchor + skipper + other
def __repr__(self):
return self.defaultName
def parseImpl(self, *args):
raise Exception(
"use of `...` expression without following SkipTo target expression"
)
class Token(ParserElement):
"""Abstract :class:`ParserElement` subclass, for defining atomic
matching patterns.
"""
def __init__(self):
super().__init__(savelist=False)
def _generateDefaultName(self):
return type(self).__name__
class Empty(Token):
"""
An empty token, will always match.
"""
def __init__(self):
super().__init__()
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""
A token that will never match.
"""
def __init__(self):
super().__init__()
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl(self, instring, loc, doActions=True):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""
Token to exactly match a specified string.
Example::
Literal('blah').parse_string('blah') # -> ['blah']
Literal('blah').parse_string('blahfooblah') # -> ['blah']
Literal('blah').parse_string('bla') # -> Exception: Expected "blah"
For case-insensitive matching, use :class:`CaselessLiteral`.
For keyword matching (force word break before and after the matched string),
use :class:`Keyword` or :class:`CaselessKeyword`.
"""
def __init__(self, match_string: str = "", *, matchString: str = ""):
super().__init__()
match_string = matchString or match_string
self.match = match_string
self.matchLen = len(match_string)
try:
self.firstMatchChar = match_string[0]
except IndexError:
raise ValueError("null string passed to Literal; use Empty() instead")
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: modify __class__ to select
# a parseImpl optimized for single-character check
if self.matchLen == 1 and type(self) is Literal:
self.__class__ = _SingleCharLiteral
def _generateDefaultName(self):
return repr(self.match)
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] == self.firstMatchChar and instring.startswith(
self.match, loc
):
return loc + self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class _SingleCharLiteral(Literal):
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] == self.firstMatchChar:
return loc + 1, self.match
raise ParseException(instring, loc, self.errmsg, self)
ParserElement._literalStringClass = Literal
class Keyword(Token):
"""
Token to exactly match a specified string as a keyword, that is,
it must be immediately followed by a non-keyword character. Compare
with :class:`Literal`:
- ``Literal("if")`` will match the leading ``'if'`` in
``'ifAndOnlyIf'``.
- ``Keyword("if")`` will not; it will only match the leading
``'if'`` in ``'if x=1'``, or ``'if(y==2)'``
Accepts two optional constructor arguments in addition to the
keyword string:
- ``identChars`` is a string of characters that would be valid
identifier characters, defaulting to all alphanumerics + "_" and
"$"
- ``caseless`` allows case-insensitive matching, default is ``False``.
Example::
Keyword("start").parse_string("start") # -> ['start']
Keyword("start").parse_string("starting") # -> Exception
For case-insensitive matching, use :class:`CaselessKeyword`.
"""
DEFAULT_KEYWORD_CHARS = alphanums + "_$"
def __init__(
self,
match_string: str = "",
ident_chars: typing.Optional[str] = None,
caseless: bool = False,
*,
matchString: str = "",
identChars: typing.Optional[str] = None,
):
super().__init__()
identChars = identChars or ident_chars
if identChars is None:
identChars = Keyword.DEFAULT_KEYWORD_CHARS
match_string = matchString or match_string
self.match = match_string
self.matchLen = len(match_string)
try:
self.firstMatchChar = match_string[0]
except IndexError:
raise ValueError("null string passed to Keyword; use Empty() instead")
self.errmsg = "Expected {} {}".format(type(self).__name__, self.name)
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = match_string.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def _generateDefaultName(self):
return repr(self.match)
def parseImpl(self, instring, loc, doActions=True):
errmsg = self.errmsg
errloc = loc
if self.caseless:
if instring[loc : loc + self.matchLen].upper() == self.caselessmatch:
if loc == 0 or instring[loc - 1].upper() not in self.identChars:
if (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen].upper() not in self.identChars
):
return loc + self.matchLen, self.match
else:
# followed by keyword char
errmsg += ", was immediately followed by keyword character"
errloc = loc + self.matchLen
else:
# preceded by keyword char
errmsg += ", keyword was immediately preceded by keyword character"
errloc = loc - 1
# else no match just raise plain exception
else:
if (
instring[loc] == self.firstMatchChar
and self.matchLen == 1
or instring.startswith(self.match, loc)
):
if loc == 0 or instring[loc - 1] not in self.identChars:
if (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen] not in self.identChars
):
return loc + self.matchLen, self.match
else:
# followed by keyword char
errmsg += (
", keyword was immediately followed by keyword character"
)
errloc = loc + self.matchLen
else:
# preceded by keyword char
errmsg += ", keyword was immediately preceded by keyword character"
errloc = loc - 1
# else no match just raise plain exception
raise ParseException(instring, errloc, errmsg, self)
@staticmethod
def set_default_keyword_chars(chars) -> None:
"""
Overrides the default characters used by :class:`Keyword` expressions.
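Example (an illustrative sketch)::
# include '-' as a keyword character, so that Keyword("if")
# will no longer match the leading 'if' in 'if-else'
Keyword.set_default_keyword_chars(alphanums + "_$-")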
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
setDefaultKeywordChars = set_default_keyword_chars
class CaselessLiteral(Literal):
"""
Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
Example::
CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10")
# -> ['CMD', 'CMD', 'CMD']
(Contrast with example for :class:`CaselessKeyword`.)
"""
def __init__(self, match_string: str = "", *, matchString: str = ""):
match_string = matchString or match_string
super().__init__(match_string.upper())
# Preserve the defining literal.
self.returnString = match_string
self.errmsg = "Expected " + self.name
def parseImpl(self, instring, loc, doActions=True):
if instring[loc : loc + self.matchLen].upper() == self.match:
return loc + self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
"""
Caseless version of :class:`Keyword`.
Example::
CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10")
# -> ['CMD', 'CMD']
(Contrast with example for :class:`CaselessLiteral`.)
"""
def __init__(
self,
match_string: str = "",
ident_chars: typing.Optional[str] = None,
*,
matchString: str = "",
identChars: typing.Optional[str] = None,
):
identChars = identChars or ident_chars
match_string = matchString or match_string
super().__init__(match_string, identChars, caseless=True)
class CloseMatch(Token):
"""A variation on :class:`Literal` which matches "close" matches,
that is, strings with at most 'n' mismatching characters.
:class:`CloseMatch` takes parameters:
- ``match_string`` - string to be matched
- ``caseless`` - a boolean indicating whether to ignore casing when comparing characters
- ``max_mismatches`` - (``default=1``) maximum number of
mismatches allowed to count as a match
The results from a successful parse will contain the matched text
from the input string and the following named results:
- ``mismatches`` - a list of the positions within the
match_string where mismatches were found
- ``original`` - the original match_string used to compare
against the input string
If ``mismatches`` is an empty list, then the match was an exact
match.
Example::
patt = CloseMatch("ATCATCGAATGGA")
patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
# exact match
patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
# close match allowing up to 2 mismatches
patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2)
patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
"""
def __init__(
self,
match_string: str,
max_mismatches: typing.Optional[int] = None,
*,
maxMismatches: int = 1,
caseless=False,
):
maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches
super().__init__()
self.match_string = match_string
self.maxMismatches = maxMismatches
self.errmsg = "Expected {!r} (with up to {} mismatches)".format(
self.match_string, self.maxMismatches
)
self.caseless = caseless
self.mayIndexError = False
self.mayReturnEmpty = False
def _generateDefaultName(self):
return "{}:{!r}".format(type(self).__name__, self.match_string)
def parseImpl(self, instring, loc, doActions=True):
start = loc
instrlen = len(instring)
maxloc = start + len(self.match_string)
if maxloc <= instrlen:
match_string = self.match_string
match_stringloc = 0
mismatches = []
maxMismatches = self.maxMismatches
for match_stringloc, s_m in enumerate(
zip(instring[loc:maxloc], match_string)
):
src, mat = s_m
if self.caseless:
src, mat = src.lower(), mat.lower()
if src != mat:
mismatches.append(match_stringloc)
if len(mismatches) > maxMismatches:
break
else:
loc = start + match_stringloc + 1
results = ParseResults([instring[start:loc]])
results["original"] = match_string
results["mismatches"] = mismatches
return loc, results
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""Token for matching words composed of allowed character sets.
Parameters:
- ``init_chars`` - string of all characters that should be used to
match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.;
if ``body_chars`` is also specified, then this is the string of
initial characters
- ``body_chars`` - string of characters that
can be used for matching after a matched initial character as
given in ``init_chars``; if omitted, same as the initial characters
(default=``None``)
- ``min`` - minimum number of characters to match (default=1)
- ``max`` - maximum number of characters to match (default=0)
- ``exact`` - exact number of characters to match (default=0)
- ``as_keyword`` - match as a keyword (default=``False``)
- ``exclude_chars`` - characters that might be
found in the input ``body_chars`` string but which should not be
accepted for matching; useful to define a word of all
printables except for one or two characters, for instance
(default=``None``)
:class:`srange` is useful for building custom character set strings
for :class:`Word` expressions, using range notation from
regular expression character sets.
A common mistake is to use :class:`Word` to match a specific literal
string, as in ``Word("Address")``. Remember that :class:`Word`
uses the string argument to define *sets* of matchable characters.
This expression would match "Add", "AAA", "dAred", or any other word
made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
exact literal string, use :class:`Literal` or :class:`Keyword`.
pyparsing includes helper strings for building Words:
- :class:`alphas`
- :class:`nums`
- :class:`alphanums`
- :class:`hexnums`
- :class:`alphas8bit` (alphabetic characters in ASCII range 128-255
- accented, tilded, umlauted, etc.)
- :class:`punc8bit` (non-alphabetic characters in ASCII range
128-255 - currency, symbols, superscripts, diacriticals, etc.)
- :class:`printables` (any non-whitespace character)
``alphas``, ``nums``, and ``printables`` are also defined in several
Unicode sets - see :class:`pyparsing_unicode`.
Example::
# a word composed of digits
integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
# a word with a leading capital, and zero or more lowercase
capital_word = Word(alphas.upper(), alphas.lower())
# hostnames are alphanumeric, with leading alpha, and '-'
hostname = Word(alphas, alphanums + '-')
# roman numeral (not a strict parser, accepts invalid mix of characters)
roman = Word("IVXLCDM")
# any string of non-whitespace characters, except for ','
csv_value = Word(printables, exclude_chars=",")
"""
def __init__(
self,
init_chars: str = "",
body_chars: typing.Optional[str] = None,
min: int = 1,
max: int = 0,
exact: int = 0,
as_keyword: bool = False,
exclude_chars: typing.Optional[str] = None,
*,
initChars: typing.Optional[str] = None,
bodyChars: typing.Optional[str] = None,
asKeyword: bool = False,
excludeChars: typing.Optional[str] = None,
):
initChars = initChars or init_chars
bodyChars = bodyChars or body_chars
asKeyword = asKeyword or as_keyword
excludeChars = excludeChars or exclude_chars
super().__init__()
if not initChars:
raise ValueError(
"invalid {}, initChars cannot be empty string".format(
type(self).__name__
)
)
initChars = set(initChars)
self.initChars = initChars
if excludeChars:
excludeChars = set(excludeChars)
initChars -= excludeChars
if bodyChars:
bodyChars = set(bodyChars) - excludeChars
self.initCharsOrig = "".join(sorted(initChars))
if bodyChars:
self.bodyCharsOrig = "".join(sorted(bodyChars))
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = "".join(sorted(initChars))
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError(
"cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted"
)
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
# see if we can make a regex for this Word
if " " not in self.initChars | self.bodyChars and (min == 1 and exact == 0):
if self.bodyChars == self.initChars:
if max == 0:
repeat = "+"
elif max == 1:
repeat = ""
else:
repeat = "{{{},{}}}".format(
self.minLen, "" if self.maxLen == _MAX_INT else self.maxLen
)
self.reString = "[{}]{}".format(
_collapse_string_to_ranges(self.initChars),
repeat,
)
elif len(self.initChars) == 1:
if max == 0:
repeat = "*"
else:
repeat = "{{0,{}}}".format(max - 1)
self.reString = "{}[{}]{}".format(
re.escape(self.initCharsOrig),
_collapse_string_to_ranges(self.bodyChars),
repeat,
)
else:
if max == 0:
repeat = "*"
elif max == 2:
repeat = ""
else:
repeat = "{{0,{}}}".format(max - 1)
self.reString = "[{}][{}]{}".format(
_collapse_string_to_ranges(self.initChars),
_collapse_string_to_ranges(self.bodyChars),
repeat,
)
if self.asKeyword:
self.reString = r"\b" + self.reString + r"\b"
try:
self.re = re.compile(self.reString)
except re.error:
self.re = None
else:
self.re_match = self.re.match
self.__class__ = _WordRegex
def _generateDefaultName(self):
def charsAsStr(s):
max_repr_len = 16
s = _collapse_string_to_ranges(s, re_escape=False)
if len(s) > max_repr_len:
return s[: max_repr_len - 3] + "..."
else:
return s
if self.initChars != self.bodyChars:
base = "W:({}, {})".format(
charsAsStr(self.initChars), charsAsStr(self.bodyChars)
)
else:
base = "W:({})".format(charsAsStr(self.initChars))
# add length specification
if self.minLen > 1 or self.maxLen != _MAX_INT:
if self.minLen == self.maxLen:
if self.minLen == 1:
return base[2:]
else:
return base + "{{{}}}".format(self.minLen)
elif self.maxLen == _MAX_INT:
return base + "{{{},...}}".format(self.minLen)
else:
return base + "{{{},{}}}".format(self.minLen, self.maxLen)
return base
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] not in self.initChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min(maxloc, instrlen)
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
elif self.asKeyword:
if (
start > 0
and instring[start - 1] in bodychars
or loc < instrlen
and instring[loc] in bodychars
):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class _WordRegex(Word):
def parseImpl(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
class Char(_WordRegex):
"""A short-cut class for defining :class:`Word` ``(characters, exact=1)``,
when defining a match of any single character in a string of
characters.
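Example (a small sketch)::
vowel = Char("aeiou")
vowel[1, ...].parse_string("aei")  # -> ['a', 'e', 'i']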
"""
def __init__(
self,
charset: str,
as_keyword: bool = False,
exclude_chars: typing.Optional[str] = None,
*,
asKeyword: bool = False,
excludeChars: typing.Optional[str] = None,
):
asKeyword = asKeyword or as_keyword
excludeChars = excludeChars or exclude_chars
super().__init__(
charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars
)
self.reString = "[{}]".format(_collapse_string_to_ranges(self.initChars))
if asKeyword:
self.reString = r"\b{}\b".format(self.reString)
self.re = re.compile(self.reString)
self.re_match = self.re.match
class Regex(Token):
r"""Token for matching strings that match a given regular
expression. Defined with string specifying the regular expression in
a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.
If the given regex contains named groups (defined using ``(?P<name>...)``),
these will be preserved as named :class:`ParseResults`.
If instead of the Python stdlib ``re`` module you wish to use a different RE module
(such as the ``regex`` module), you can do so by building your ``Regex`` object with
a compiled RE that was compiled using ``regex``.
Example::
realnum = Regex(r"[+-]?\d+\.\d*")
# ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
# named fields in a regex will be returned as named results
date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
# the Regex class will accept re's compiled using the regex module
import regex
parser = pp.Regex(regex.compile(r'[0-9]'))
"""
def __init__(
self,
pattern: Any,
flags: Union[re.RegexFlag, int] = 0,
as_group_list: bool = False,
as_match: bool = False,
*,
asGroupList: bool = False,
asMatch: bool = False,
):
"""The parameters ``pattern`` and ``flags`` are passed
to the ``re.compile()`` function as-is. See the Python
`re module <https://docs.python.org/3/library/re.html>`_ documentation for an
explanation of the acceptable patterns and flags.
"""
super().__init__()
asGroupList = asGroupList or as_group_list
asMatch = asMatch or as_match
if isinstance(pattern, str_type):
if not pattern:
raise ValueError("null string passed to Regex; use Empty() instead")
self._re = None
self.reString = self.pattern = pattern
self.flags = flags
elif hasattr(pattern, "pattern") and hasattr(pattern, "match"):
self._re = pattern
self.pattern = self.reString = pattern.pattern
self.flags = flags
else:
raise TypeError(
"Regex may only be constructed with a string or a compiled RE object"
)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asGroupList = asGroupList
self.asMatch = asMatch
if self.asGroupList:
self.parseImpl = self.parseImplAsGroupList
if self.asMatch:
self.parseImpl = self.parseImplAsMatch
@cached_property
def re(self):
if self._re:
return self._re
else:
try:
return re.compile(self.pattern, self.flags)
except re.error:
raise ValueError(
"invalid pattern ({!r}) passed to Regex".format(self.pattern)
)
@cached_property
def re_match(self):
return self.re.match
@cached_property
def mayReturnEmpty(self):
return self.re_match("") is not None
def _generateDefaultName(self):
return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\"))
def parseImpl(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = ParseResults(result.group())
d = result.groupdict()
if d:
for k, v in d.items():
ret[k] = v
return loc, ret
def parseImplAsGroupList(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.groups()
return loc, ret
def parseImplAsMatch(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result
return loc, ret
def sub(self, repl: str) -> ParserElement:
r"""
Return :class:`Regex` with an attached parse action to transform the parsed
result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
Example::
make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
print(make_html.transform_string("h1:main title:"))
# prints "<h1>main title</h1>"
"""
if self.asGroupList:
raise TypeError("cannot use sub() with Regex(asGroupList=True)")
if self.asMatch and callable(repl):
raise TypeError("cannot use sub() with a callable with Regex(asMatch=True)")
if self.asMatch:
def pa(tokens):
return tokens[0].expand(repl)
else:
def pa(tokens):
return self.re.sub(repl, tokens[0])
return self.add_parse_action(pa)
class QuotedString(Token):
r"""
Token for matching strings that are delimited by quoting characters.
Defined with the following parameters:
- ``quote_char`` - string of one or more characters defining the
quote delimiting string
- ``esc_char`` - character to escape quotes, typically backslash
(default= ``None``)
- ``esc_quote`` - special quote sequence to escape an embedded quote
string (such as SQL's ``""`` to escape an embedded ``"``)
(default= ``None``)
- ``multiline`` - boolean indicating whether quotes can span
multiple lines (default= ``False``)
- ``unquote_results`` - boolean indicating whether the matched text
should be unquoted (default= ``True``)
- ``end_quote_char`` - string of one or more characters defining the
end of the quote delimited string (default= ``None`` => same as
quote_char)
- ``convert_whitespace_escapes`` - convert escaped whitespace
(``'\t'``, ``'\n'``, etc.) to actual whitespace
(default= ``True``)
Example::
qs = QuotedString('"')
print(qs.search_string('lsjdf "This is the quote" sldjf'))
complex_qs = QuotedString('{{', end_quote_char='}}')
print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf'))
sql_qs = QuotedString('"', esc_quote='""')
print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
prints::
[['This is the quote']]
[['This is the "quote"']]
[['This is the quote with "embedded" quotes']]
"""
ws_map = ((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r"))
def __init__(
self,
quote_char: str = "",
esc_char: typing.Optional[str] = None,
esc_quote: typing.Optional[str] = None,
multiline: bool = False,
unquote_results: bool = True,
end_quote_char: typing.Optional[str] = None,
convert_whitespace_escapes: bool = True,
*,
quoteChar: str = "",
escChar: typing.Optional[str] = None,
escQuote: typing.Optional[str] = None,
unquoteResults: bool = True,
endQuoteChar: typing.Optional[str] = None,
convertWhitespaceEscapes: bool = True,
):
super().__init__()
escChar = escChar or esc_char
escQuote = escQuote or esc_quote
unquoteResults = unquoteResults and unquote_results
endQuoteChar = endQuoteChar or end_quote_char
convertWhitespaceEscapes = (
convertWhitespaceEscapes and convert_whitespace_escapes
)
quote_char = quoteChar or quote_char
# remove white space from quote chars - won't work anyway
quote_char = quote_char.strip()
if not quote_char:
raise ValueError("quote_char cannot be the empty string")
if endQuoteChar is None:
endQuoteChar = quote_char
else:
endQuoteChar = endQuoteChar.strip()
if not endQuoteChar:
raise ValueError("endQuoteChar cannot be the empty string")
self.quoteChar = quote_char
self.quoteCharLen = len(quote_char)
self.firstQuoteChar = quote_char[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
self.convertWhitespaceEscapes = convertWhitespaceEscapes
sep = ""
inner_pattern = ""
if escQuote:
inner_pattern += r"{}(?:{})".format(sep, re.escape(escQuote))
sep = "|"
if escChar:
inner_pattern += r"{}(?:{}.)".format(sep, re.escape(escChar))
sep = "|"
self.escCharReplacePattern = re.escape(self.escChar) + "(.)"
if len(self.endQuoteChar) > 1:
inner_pattern += (
"{}(?:".format(sep)
+ "|".join(
"(?:{}(?!{}))".format(
re.escape(self.endQuoteChar[:i]),
re.escape(self.endQuoteChar[i:]),
)
for i in range(len(self.endQuoteChar) - 1, 0, -1)
)
+ ")"
)
sep = "|"
if multiline:
self.flags = re.MULTILINE | re.DOTALL
inner_pattern += r"{}(?:[^{}{}])".format(
sep,
_escape_regex_range_chars(self.endQuoteChar[0]),
(_escape_regex_range_chars(escChar) if escChar is not None else ""),
)
else:
self.flags = 0
inner_pattern += r"{}(?:[^{}\n\r{}])".format(
sep,
_escape_regex_range_chars(self.endQuoteChar[0]),
(_escape_regex_range_chars(escChar) if escChar is not None else ""),
)
self.pattern = "".join(
[
re.escape(self.quoteChar),
"(?:",
inner_pattern,
")*",
re.escape(self.endQuoteChar),
]
)
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
self.re_match = self.re.match
except re.error:
raise ValueError(
"invalid pattern {!r} passed to Regex".format(self.pattern)
)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def _generateDefaultName(self):
if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type):
return "string enclosed in {!r}".format(self.quoteChar)
return "quoted string, starting with {} ending with {}".format(
self.quoteChar, self.endQuoteChar
)
def parseImpl(self, instring, loc, doActions=True):
result = (
instring[loc] == self.firstQuoteChar
and self.re_match(instring, loc)
or None
)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen : -self.endQuoteCharLen]
if isinstance(ret, str_type):
# replace escaped whitespace
if "\\" in ret and self.convertWhitespaceEscapes:
for wslit, wschar in self.ws_map:
ret = ret.replace(wslit, wschar)
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
class CharsNotIn(Token):
"""Token for matching words composed of characters *not* in a given
set (will include whitespace in matched characters if not listed in
the provided exclusion set - see example). Defined with string
containing all disallowed characters, and an optional minimum,
maximum, and/or exact length. The default value for ``min`` is
1 (a minimum value < 1 is not valid); the default values for
``max`` and ``exact`` are 0, meaning no maximum or exact
length restriction.
Example::
# define a comma-separated-value as anything that is not a ','
csv_value = CharsNotIn(',')
print(delimited_list(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213"))
prints::
['dkls', 'lsdkjf', 's12 34', '@!#', '213']
"""
def __init__(
self,
not_chars: str = "",
min: int = 1,
max: int = 0,
exact: int = 0,
*,
notChars: str = "",
):
super().__init__()
self.skipWhitespace = False
self.notChars = not_chars or notChars
self.notCharsSet = set(self.notChars)
if min < 1:
raise ValueError(
"cannot specify a minimum length < 1; use "
"Opt(CharsNotIn()) if zero-length char group is permitted"
)
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = self.minLen == 0
self.mayIndexError = False
def _generateDefaultName(self):
not_chars_str = _collapse_string_to_ranges(self.notChars)
if len(not_chars_str) > 16:
return "!W:({}...)".format(self.notChars[: 16 - 3])
else:
return "!W:({})".format(self.notChars)
def parseImpl(self, instring, loc, doActions=True):
notchars = self.notCharsSet
if instring[loc] in notchars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxlen = min(start + self.maxLen, len(instring))
while loc < maxlen and instring[loc] not in notchars:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class White(Token):
"""Special matching class for matching whitespace. Normally,
whitespace is ignored by pyparsing grammars. This class is included
when some whitespace structures are significant. Define with
a string containing the whitespace characters to be matched; default
is ``" \\t\\r\\n"``. Also takes optional ``min``,
``max``, and ``exact`` arguments, as defined for the
:class:`Word` class.
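Example (a small sketch; also demonstrates the ``min`` argument)::
# require at least two literal spaces between the two words
two_spaces = White(" ", min=2)
pair = Word(alphas) + two_spaces + Word(alphas)
pair.parse_string("ab  cd")  # -> ['ab', '  ', 'cd']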
"""
whiteStrs = {
" ": "<SP>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
"\u00A0": "<NBSP>",
"\u1680": "<OGHAM_SPACE_MARK>",
"\u180E": "<MONGOLIAN_VOWEL_SEPARATOR>",
"\u2000": "<EN_QUAD>",
"\u2001": "<EM_QUAD>",
"\u2002": "<EN_SPACE>",
"\u2003": "<EM_SPACE>",
"\u2004": "<THREE-PER-EM_SPACE>",
"\u2005": "<FOUR-PER-EM_SPACE>",
"\u2006": "<SIX-PER-EM_SPACE>",
"\u2007": "<FIGURE_SPACE>",
"\u2008": "<PUNCTUATION_SPACE>",
"\u2009": "<THIN_SPACE>",
"\u200A": "<HAIR_SPACE>",
"\u200B": "<ZERO_WIDTH_SPACE>",
"\u202F": "<NNBSP>",
"\u205F": "<MMSP>",
"\u3000": "<IDEOGRAPHIC_SPACE>",
}
def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0):
super().__init__()
self.matchWhite = ws
self.set_whitespace_chars(
"".join(c for c in self.whiteStrs if c not in self.matchWhite),
copy_defaults=True,
)
# self.leave_whitespace()
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def _generateDefaultName(self):
return "".join(White.whiteStrs[c] for c in self.matchWhite)
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] not in self.matchWhite:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min(maxloc, len(instring))
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class PositionToken(Token):
def __init__(self):
super().__init__()
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(PositionToken):
"""Token to advance to a specific column of input text; useful for
tabular report scraping.
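Example (a rough sketch for fixed-column input; the column number is illustrative)::
# a name in column 1, and an amount that always starts in column 20
report_line = Word(alphas) + GoToColumn(20).suppress() + Word(nums)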
"""
def __init__(self, colno: int):
super().__init__()
self.col = colno
def preParse(self, instring, loc):
if col(loc, instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
while (
loc < instrlen
and instring[loc].isspace()
and col(loc, instring) != self.col
):
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
thiscol = col(loc, instring)
if thiscol > self.col:
raise ParseException(instring, loc, "Text not in expected column", self)
newloc = loc + self.col - thiscol
ret = instring[loc:newloc]
return newloc, ret
class LineStart(PositionToken):
r"""Matches if current position is at the beginning of a line within
the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (LineStart() + 'AAA' + restOfLine).search_string(test):
print(t)
prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__(self):
super().__init__()
self.leave_whitespace()
self.orig_whiteChars = set() | self.whiteChars
self.whiteChars.discard("\n")
self.skipper = Empty().set_whitespace_chars(self.whiteChars)
self.errmsg = "Expected start of line"
def preParse(self, instring, loc):
if loc == 0:
return loc
else:
ret = self.skipper.preParse(instring, loc)
if "\n" in self.orig_whiteChars:
while instring[ret : ret + 1] == "\n":
ret = self.skipper.preParse(instring, ret + 1)
return ret
def parseImpl(self, instring, loc, doActions=True):
if col(loc, instring) == 1:
return loc, []
raise ParseException(instring, loc, self.errmsg, self)
class LineEnd(PositionToken):
"""Matches if current position is at the end of a line within the
parse string
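Example (a brief sketch)::
# capture exactly one integer per line
value_line = Word(nums) + LineEnd().suppress()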
"""
def __init__(self):
super().__init__()
self.whiteChars.discard("\n")
self.set_whitespace_chars(self.whiteChars, copy_defaults=False)
self.errmsg = "Expected end of line"
def parseImpl(self, instring, loc, doActions=True):
if loc < len(instring):
if instring[loc] == "\n":
return loc + 1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc + 1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class StringStart(PositionToken):
"""Matches if current position is at the beginning of the parse
string
"""
def __init__(self):
super().__init__()
self.errmsg = "Expected start of text"
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse(instring, 0):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(PositionToken):
"""
Matches if current position is at the end of the parse string
"""
def __init__(self):
super().__init__()
self.errmsg = "Expected end of text"
def parseImpl(self, instring, loc, doActions=True):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc + 1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class WordStart(PositionToken):
"""Matches if the current position is at the beginning of a
:class:`Word`, and is not preceded by any character in a given
set of ``word_chars`` (default= ``printables``). To emulate the
``\b`` behavior of regular expressions, use
``WordStart(alphanums)``. ``WordStart`` will also match at
the beginning of the string being parsed, or at the beginning of
a line.
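Example (a small sketch)::
# match '123' in 'abc 123', but not the '123' in 'abc123'
num_token = WordStart() + Word(nums)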
"""
def __init__(self, word_chars: str = printables, *, wordChars: str = printables):
wordChars = word_chars if wordChars == printables else wordChars
super().__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
if (
instring[loc - 1] in self.wordChars
or instring[loc] not in self.wordChars
):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(PositionToken):
"""Matches if the current position is at the end of a :class:`Word`,
and is not followed by any character in a given set of ``word_chars``
(default= ``printables``). To emulate the ``\b`` behavior of
regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``
will also match at the end of the string being parsed, or at the end
of a line.
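Example (a companion sketch to :class:`WordStart`)::
# match 'stop' in 'stop here', but not the leading 'stop' in 'stopped'
stop_word = Literal("stop") + WordEnd()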
"""
def __init__(self, word_chars: str = printables, *, wordChars: str = printables):
wordChars = word_chars if wordChars == printables else wordChars
super().__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True):
instrlen = len(instring)
if instrlen > 0 and loc < instrlen:
if (
instring[loc] in self.wordChars
or instring[loc - 1] not in self.wordChars
):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class ParseExpression(ParserElement):
"""Abstract subclass of ParserElement, for combining and
post-processing parsed tokens.
"""
def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
super().__init__(savelist)
self.exprs: List[ParserElement]
if isinstance(exprs, _generatorType):
exprs = list(exprs)
if isinstance(exprs, str_type):
self.exprs = [self._literalStringClass(exprs)]
elif isinstance(exprs, ParserElement):
self.exprs = [exprs]
elif isinstance(exprs, Iterable):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if any(isinstance(expr, str_type) for expr in exprs):
exprs = (
self._literalStringClass(e) if isinstance(e, str_type) else e
for e in exprs
)
self.exprs = list(exprs)
else:
try:
self.exprs = list(exprs)
except TypeError:
self.exprs = [exprs]
self.callPreparse = False
def recurse(self) -> Sequence[ParserElement]:
return self.exprs[:]
def append(self, other) -> ParserElement:
self.exprs.append(other)
self._defaultName = None
return self
def leave_whitespace(self, recursive: bool = True) -> ParserElement:
"""
Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on
all contained expressions.
"""
super().leave_whitespace(recursive)
if recursive:
self.exprs = [e.copy() for e in self.exprs]
for e in self.exprs:
e.leave_whitespace(recursive)
return self
def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
"""
Extends ``ignore_whitespace`` defined in base class, and also invokes ``ignore_whitespace`` on
all contained expressions.
"""
super().ignore_whitespace(recursive)
if recursive:
self.exprs = [e.copy() for e in self.exprs]
for e in self.exprs:
e.ignore_whitespace(recursive)
return self
def ignore(self, other) -> ParserElement:
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
super().ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
else:
super().ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
return self
def _generateDefaultName(self):
return "{}:({})".format(self.__class__.__name__, str(self.exprs))
def streamline(self) -> ParserElement:
if self.streamlined:
return self
super().streamline()
for e in self.exprs:
e.streamline()
# collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)``
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for :class:`Or`'s and :class:`MatchFirst`'s)
if len(self.exprs) == 2:
other = self.exprs[0]
if (
isinstance(other, self.__class__)
and not other.parseAction
and other.resultsName is None
and not other.debug
):
self.exprs = other.exprs[:] + [self.exprs[1]]
self._defaultName = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if (
isinstance(other, self.__class__)
and not other.parseAction
and other.resultsName is None
and not other.debug
):
self.exprs = self.exprs[:-1] + other.exprs[:]
self._defaultName = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = "Expected " + str(self)
return self
def validate(self, validateTrace=None) -> None:
tmp = (validateTrace if validateTrace is not None else [])[:] + [self]
for e in self.exprs:
e.validate(tmp)
self._checkRecursion([])
def copy(self) -> ParserElement:
ret = super().copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
def _setResultsName(self, name, listAllMatches=False):
if (
__diag__.warn_ungrouped_named_tokens_in_collection
and Diagnostics.warn_ungrouped_named_tokens_in_collection
not in self.suppress_warnings_
):
for e in self.exprs:
if (
isinstance(e, ParserElement)
and e.resultsName
and Diagnostics.warn_ungrouped_named_tokens_in_collection
not in e.suppress_warnings_
):
warnings.warn(
"{}: setting results name {!r} on {} expression "
"collides with {!r} on contained expression".format(
"warn_ungrouped_named_tokens_in_collection",
name,
type(self).__name__,
e.resultsName,
),
stacklevel=3,
)
return super()._setResultsName(name, listAllMatches)
ignoreWhitespace = ignore_whitespace
leaveWhitespace = leave_whitespace
class And(ParseExpression):
"""
Requires all given :class:`ParseExpression` s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the ``'+'`` operator.
May also be constructed using the ``'-'`` operator, which will
suppress backtracking.
Example::
integer = Word(nums)
name_expr = Word(alphas)[1, ...]
expr = And([integer("id"), name_expr("name"), integer("age")])
# more easily written as:
expr = integer("id") + name_expr("name") + integer("age")
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.leave_whitespace()
def _generateDefaultName(self):
return "-"
def __init__(
self, exprs_arg: typing.Iterable[ParserElement], savelist: bool = True
):
exprs: List[ParserElement] = list(exprs_arg)
if exprs and Ellipsis in exprs:
tmp = []
for i, expr in enumerate(exprs):
if expr is Ellipsis:
if i < len(exprs) - 1:
skipto_arg: ParserElement = (Empty() + exprs[i + 1]).exprs[-1]
tmp.append(SkipTo(skipto_arg)("_skipped*"))
else:
raise Exception(
"cannot construct And with sequence ending in ..."
)
else:
tmp.append(expr)
exprs[:] = tmp
super().__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
if not isinstance(self.exprs[0], White):
self.set_whitespace_chars(
self.exprs[0].whiteChars,
copy_defaults=self.exprs[0].copyDefaultWhiteChars,
)
self.skipWhitespace = self.exprs[0].skipWhitespace
else:
self.skipWhitespace = False
else:
self.mayReturnEmpty = True
self.callPreparse = True
def streamline(self) -> ParserElement:
# collapse any _PendingSkip's
if self.exprs:
if any(
isinstance(e, ParseExpression)
and e.exprs
and isinstance(e.exprs[-1], _PendingSkip)
for e in self.exprs[:-1]
):
for i, e in enumerate(self.exprs[:-1]):
if e is None:
continue
if (
isinstance(e, ParseExpression)
and e.exprs
and isinstance(e.exprs[-1], _PendingSkip)
):
e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1]
self.exprs[i + 1] = None
self.exprs = [e for e in self.exprs if e is not None]
super().streamline()
# link any IndentedBlocks to the prior expression
for prev, cur in zip(self.exprs, self.exprs[1:]):
# traverse cur or any first embedded expr of cur looking for an IndentedBlock
# (but watch out for recursive grammar)
seen = set()
while cur:
if id(cur) in seen:
break
seen.add(id(cur))
if isinstance(cur, IndentedBlock):
prev.add_parse_action(
lambda s, l, t, cur_=cur: setattr(
cur_, "parent_anchor", col(l, s)
)
)
break
subs = cur.recurse()
cur = next(iter(subs), None)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
return self
def parseImpl(self, instring, loc, doActions=True):
# pass False as callPreParse arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse(
instring, loc, doActions, callPreParse=False
)
errorStop = False
for e in self.exprs[1:]:
# if isinstance(e, And._ErrorStop):
if type(e) is And._ErrorStop:
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse(instring, loc, doActions)
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException._from_exception(pe)
except IndexError:
raise ParseSyntaxException(
instring, len(instring), self.errmsg, self
)
else:
loc, exprtokens = e._parse(instring, loc, doActions)
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other):
if isinstance(other, str_type):
other = self._literalStringClass(other)
return self.append(other) # And([self, other])
def _checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e._checkRecursion(subRecCheckList)
if not e.mayReturnEmpty:
break
def _generateDefaultName(self):
inner = " ".join(str(e) for e in self.exprs)
# strip off redundant inner {}'s
while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
inner = inner[1:-1]
return "{" + inner + "}"
class Or(ParseExpression):
"""Requires that at least one :class:`ParseExpression` is found. If
two expressions match, the expression that matches the longest
string will be used. May be constructed using the ``'^'``
operator.
Example::
# construct Or using '^' operator
number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
print(number.search_string("123 3.1416 789"))
prints::
[['123'], ['3.1416'], ['789']]
"""
def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
super().__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
else:
self.mayReturnEmpty = True
def streamline(self) -> ParserElement:
super().streamline()
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
self.saveAsList = any(e.saveAsList for e in self.exprs)
self.skipWhitespace = all(
e.skipWhitespace and not isinstance(e, White) for e in self.exprs
)
else:
self.saveAsList = False
return self
def parseImpl(self, instring, loc, doActions=True):
maxExcLoc = -1
maxException = None
matches = []
fatals = []
if all(e.callPreparse for e in self.exprs):
loc = self.preParse(instring, loc)
for e in self.exprs:
try:
loc2 = e.try_parse(instring, loc, raise_fatal=True)
except ParseFatalException as pfe:
pfe.__traceback__ = None
pfe.parserElement = e
fatals.append(pfe)
maxException = None
maxExcLoc = -1
except ParseException as err:
if not fatals:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(
instring, len(instring), e.errmsg, self
)
maxExcLoc = len(instring)
else:
# save match among all matches, to retry longest to shortest
matches.append((loc2, e))
if matches:
# re-evaluate all matches in descending order of length of match, in case attached actions
            # might change whether or how much of the input they match.
matches.sort(key=itemgetter(0), reverse=True)
if not doActions:
# no further conditions or parse actions to change the selection of
# alternative, so the first match will be the best match
best_expr = matches[0][1]
return best_expr._parse(instring, loc, doActions)
longest = -1, None
for loc1, expr1 in matches:
if loc1 <= longest[0]:
# already have a longer match than this one will deliver, we are done
return longest
try:
loc2, toks = expr1._parse(instring, loc, doActions)
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
else:
if loc2 >= loc1:
return loc2, toks
# didn't match as much as before
elif loc2 > longest[0]:
longest = loc2, toks
if longest != (-1, None):
return longest
if fatals:
if len(fatals) > 1:
fatals.sort(key=lambda e: -e.loc)
if fatals[0].loc == fatals[1].loc:
fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement))))
max_fatal = fatals[0]
raise max_fatal
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(
instring, loc, "no defined alternatives to match", self
)
def __ixor__(self, other):
if isinstance(other, str_type):
other = self._literalStringClass(other)
return self.append(other) # Or([self, other])
def _generateDefaultName(self):
return "{" + " ^ ".join(str(e) for e in self.exprs) + "}"
def _setResultsName(self, name, listAllMatches=False):
if (
__diag__.warn_multiple_tokens_in_named_alternation
and Diagnostics.warn_multiple_tokens_in_named_alternation
not in self.suppress_warnings_
):
if any(
isinstance(e, And)
and Diagnostics.warn_multiple_tokens_in_named_alternation
not in e.suppress_warnings_
for e in self.exprs
):
warnings.warn(
"{}: setting results name {!r} on {} expression "
"will return a list of all parsed tokens in an And alternative, "
"in prior versions only the first token was returned; enclose "
"contained argument in Group".format(
"warn_multiple_tokens_in_named_alternation",
name,
type(self).__name__,
),
stacklevel=3,
)
return super()._setResultsName(name, listAllMatches)
class MatchFirst(ParseExpression):
"""Requires that at least one :class:`ParseExpression` is found. If
more than one expression matches, the first one listed is the one that will
match. May be constructed using the ``'|'`` operator.
Example::
# construct MatchFirst using '|' operator
# watch the order of expressions to match
number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
# put more selective expression first
number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
"""
def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
super().__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
else:
self.mayReturnEmpty = True
def streamline(self) -> ParserElement:
if self.streamlined:
return self
super().streamline()
if self.exprs:
self.saveAsList = any(e.saveAsList for e in self.exprs)
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = all(
e.skipWhitespace and not isinstance(e, White) for e in self.exprs
)
else:
self.saveAsList = False
self.mayReturnEmpty = True
return self
def parseImpl(self, instring, loc, doActions=True):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
return e._parse(
instring,
loc,
doActions,
)
except ParseFatalException as pfe:
pfe.__traceback__ = None
pfe.parserElement = e
raise
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(
instring, len(instring), e.errmsg, self
)
maxExcLoc = len(instring)
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(
instring, loc, "no defined alternatives to match", self
)
def __ior__(self, other):
if isinstance(other, str_type):
other = self._literalStringClass(other)
return self.append(other) # MatchFirst([self, other])
def _generateDefaultName(self):
return "{" + " | ".join(str(e) for e in self.exprs) + "}"
def _setResultsName(self, name, listAllMatches=False):
if (
__diag__.warn_multiple_tokens_in_named_alternation
and Diagnostics.warn_multiple_tokens_in_named_alternation
not in self.suppress_warnings_
):
if any(
isinstance(e, And)
and Diagnostics.warn_multiple_tokens_in_named_alternation
not in e.suppress_warnings_
for e in self.exprs
):
warnings.warn(
"{}: setting results name {!r} on {} expression "
"will return a list of all parsed tokens in an And alternative, "
"in prior versions only the first token was returned; enclose "
"contained argument in Group".format(
"warn_multiple_tokens_in_named_alternation",
name,
type(self).__name__,
),
stacklevel=3,
)
return super()._setResultsName(name, listAllMatches)
class Each(ParseExpression):
"""Requires all given :class:`ParseExpression` s to be found, but in
any order. Expressions may be separated by whitespace.
May be constructed using the ``'&'`` operator.
Example::
color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
integer = Word(nums)
shape_attr = "shape:" + shape_type("shape")
posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
color_attr = "color:" + color("color")
size_attr = "size:" + integer("size")
# use Each (using operator '&') to accept attributes in any order
# (shape and posn are required, color and size are optional)
shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr)
shape_spec.run_tests('''
shape: SQUARE color: BLACK posn: 100, 120
shape: CIRCLE size: 50 color: BLUE posn: 50,80
color:GREEN size:20 shape:TRIANGLE posn:20,40
'''
)
prints::
shape: SQUARE color: BLACK posn: 100, 120
['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- color: BLACK
- posn: ['100', ',', '120']
- x: 100
- y: 120
- shape: SQUARE
shape: CIRCLE size: 50 color: BLUE posn: 50,80
['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- color: BLUE
- posn: ['50', ',', '80']
- x: 50
- y: 80
- shape: CIRCLE
- size: 50
color: GREEN size: 20 shape: TRIANGLE posn: 20,40
['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- color: GREEN
- posn: ['20', ',', '40']
- x: 20
- y: 40
- shape: TRIANGLE
- size: 20
"""
def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = True):
super().__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
self.skipWhitespace = True
self.initExprGroups = True
self.saveAsList = True
def streamline(self) -> ParserElement:
super().streamline()
if self.exprs:
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
return self
def parseImpl(self, instring, loc, doActions=True):
if self.initExprGroups:
self.opt1map = dict(
(id(e.expr), e) for e in self.exprs if isinstance(e, Opt)
)
opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)]
opt2 = [
e
for e in self.exprs
if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore))
]
self.optionals = opt1 + opt2
self.multioptionals = [
e.expr.set_results_name(e.resultsName, list_all_matches=True)
for e in self.exprs
if isinstance(e, _MultipleMatch)
]
self.multirequired = [
e.expr.set_results_name(e.resultsName, list_all_matches=True)
for e in self.exprs
if isinstance(e, OneOrMore)
]
self.required = [
e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore))
]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
multis = self.multioptionals[:]
matchOrder = []
keepMatching = True
failed = []
fatals = []
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + multis
failed.clear()
fatals.clear()
for e in tmpExprs:
try:
tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True)
except ParseFatalException as pfe:
pfe.__traceback__ = None
pfe.parserElement = e
fatals.append(pfe)
failed.append(e)
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e), e))
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
# look for any ParseFatalExceptions
if fatals:
if len(fatals) > 1:
fatals.sort(key=lambda e: -e.loc)
if fatals[0].loc == fatals[1].loc:
fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement))))
max_fatal = fatals[0]
raise max_fatal
if tmpReqd:
missing = ", ".join([str(e) for e in tmpReqd])
raise ParseException(
instring,
loc,
"Missing one or more required elements ({})".format(missing),
)
# add any unmatched Opts, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt]
total_results = ParseResults([])
for e in matchOrder:
loc, results = e._parse(instring, loc, doActions)
total_results += results
return loc, total_results
def _generateDefaultName(self):
return "{" + " & ".join(str(e) for e in self.exprs) + "}"
class ParseElementEnhance(ParserElement):
"""Abstract subclass of :class:`ParserElement`, for combining and
post-processing parsed tokens.
"""
def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
super().__init__(savelist)
if isinstance(expr, str_type):
if issubclass(self._literalStringClass, Token):
expr = self._literalStringClass(expr)
elif issubclass(type(self), self._literalStringClass):
expr = Literal(expr)
else:
expr = self._literalStringClass(Literal(expr))
self.expr = expr
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.set_whitespace_chars(
expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars
)
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def recurse(self) -> Sequence[ParserElement]:
return [self.expr] if self.expr is not None else []
def parseImpl(self, instring, loc, doActions=True):
if self.expr is not None:
return self.expr._parse(instring, loc, doActions, callPreParse=False)
else:
raise ParseException(instring, loc, "No expression defined", self)
def leave_whitespace(self, recursive: bool = True) -> ParserElement:
super().leave_whitespace(recursive)
if recursive:
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leave_whitespace(recursive)
return self
def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
super().ignore_whitespace(recursive)
if recursive:
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.ignore_whitespace(recursive)
return self
def ignore(self, other) -> ParserElement:
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
super().ignore(other)
if self.expr is not None:
self.expr.ignore(self.ignoreExprs[-1])
else:
super().ignore(other)
if self.expr is not None:
self.expr.ignore(self.ignoreExprs[-1])
return self
def streamline(self) -> ParserElement:
super().streamline()
if self.expr is not None:
self.expr.streamline()
return self
def _checkRecursion(self, parseElementList):
if self in parseElementList:
raise RecursiveGrammarException(parseElementList + [self])
subRecCheckList = parseElementList[:] + [self]
if self.expr is not None:
self.expr._checkRecursion(subRecCheckList)
def validate(self, validateTrace=None) -> None:
if validateTrace is None:
validateTrace = []
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self._checkRecursion([])
def _generateDefaultName(self):
return "{}:({})".format(self.__class__.__name__, str(self.expr))
ignoreWhitespace = ignore_whitespace
leaveWhitespace = leave_whitespace
class IndentedBlock(ParseElementEnhance):
"""
Expression to match one or more expressions at a given indentation level.
Useful for parsing text where structure is implied by indentation (like Python source code).
"""
class _Indent(Empty):
def __init__(self, ref_col: int):
super().__init__()
self.errmsg = "expected indent at column {}".format(ref_col)
self.add_condition(lambda s, l, t: col(l, s) == ref_col)
class _IndentGreater(Empty):
def __init__(self, ref_col: int):
super().__init__()
self.errmsg = "expected indent at column greater than {}".format(ref_col)
self.add_condition(lambda s, l, t: col(l, s) > ref_col)
def __init__(
self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True
):
super().__init__(expr, savelist=True)
# if recursive:
# raise NotImplementedError("IndentedBlock with recursive is not implemented")
self._recursive = recursive
self._grouped = grouped
self.parent_anchor = 1
def parseImpl(self, instring, loc, doActions=True):
# advance parse position to non-whitespace by using an Empty()
# this should be the column to be used for all subsequent indented lines
anchor_loc = Empty().preParse(instring, loc)
# see if self.expr matches at the current location - if not it will raise an exception
# and no further work is necessary
self.expr.try_parse(instring, anchor_loc, doActions)
indent_col = col(anchor_loc, instring)
peer_detect_expr = self._Indent(indent_col)
inner_expr = Empty() + peer_detect_expr + self.expr
if self._recursive:
sub_indent = self._IndentGreater(indent_col)
nested_block = IndentedBlock(
self.expr, recursive=self._recursive, grouped=self._grouped
)
nested_block.set_debug(self.debug)
nested_block.parent_anchor = indent_col
inner_expr += Opt(sub_indent + nested_block)
inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}")
block = OneOrMore(inner_expr)
trailing_undent = self._Indent(self.parent_anchor) | StringEnd()
if self._grouped:
wrapper = Group
else:
wrapper = lambda expr: expr
return (wrapper(block) + Optional(trailing_undent)).parseImpl(
instring, anchor_loc, doActions
)
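# Illustrative sketch (editorial addition): IndentedBlock parsing a simple
# indented suite; with grouped=True (the default) the block is returned as
# its own sublist:
#
#   from pip._vendor.pyparsing import Word, alphas, Group, IndentedBlock
#   stmt = Word(alphas)
#   suite = Group(stmt + IndentedBlock(stmt))
#   print(suite.parse_string("if\n    a\n    b"))  # -> [['if', ['a', 'b']]]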
class AtStringStart(ParseElementEnhance):
"""Matches if expression matches at the beginning of the parse
string::
AtStringStart(Word(nums)).parse_string("123")
# prints ["123"]
AtStringStart(Word(nums)).parse_string(" 123")
# raises ParseException
"""
def __init__(self, expr: Union[ParserElement, str]):
super().__init__(expr)
self.callPreparse = False
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
raise ParseException(instring, loc, "not found at string start")
return super().parseImpl(instring, loc, doActions)
class AtLineStart(ParseElementEnhance):
r"""Matches if an expression matches at the beginning of a line within
the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (AtLineStart('AAA') + restOfLine).search_string(test):
print(t)
prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__(self, expr: Union[ParserElement, str]):
super().__init__(expr)
self.callPreparse = False
def parseImpl(self, instring, loc, doActions=True):
if col(loc, instring) != 1:
raise ParseException(instring, loc, "not found at line start")
return super().parseImpl(instring, loc, doActions)
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression.
``FollowedBy`` does *not* advance the parsing position within
the input string, it only verifies that the specified parse
expression matches at the current position. ``FollowedBy``
always returns a null token list. If any results names are defined
in the lookahead expression, those *will* be returned for access by
name.
Example::
# use FollowedBy to match a label only if it is followed by a ':'
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint()
prints::
[['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
"""
def __init__(self, expr: Union[ParserElement, str]):
super().__init__(expr)
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
# by using self._expr.parse and deleting the contents of the returned ParseResults list
# we keep any named results that were defined in the FollowedBy expression
_, ret = self.expr._parse(instring, loc, doActions=doActions)
del ret[:]
return loc, ret
class PrecededBy(ParseElementEnhance):
"""Lookbehind matching of the given parse expression.
``PrecededBy`` does not advance the parsing position within the
input string, it only verifies that the specified parse expression
matches prior to the current position. ``PrecededBy`` always
returns a null token list, but if a results name is defined on the
given expression, it is returned.
Parameters:
- expr - expression that must match prior to the current parse
location
- retreat - (default= ``None``) - (int) maximum number of characters
to lookbehind prior to the current parse location
If the lookbehind expression is a string, :class:`Literal`,
:class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn`
with a specified exact or maximum length, then the retreat
parameter is not required. Otherwise, retreat must be specified to
give a maximum number of characters to look back from
the current parse position for a lookbehind match.
Example::
# VB-style variable names with type prefixes
int_var = PrecededBy("#") + pyparsing_common.identifier
str_var = PrecededBy("$") + pyparsing_common.identifier
"""
def __init__(
self, expr: Union[ParserElement, str], retreat: typing.Optional[int] = None
):
super().__init__(expr)
self.expr = self.expr().leave_whitespace()
self.mayReturnEmpty = True
self.mayIndexError = False
self.exact = False
if isinstance(expr, str_type):
retreat = len(expr)
self.exact = True
elif isinstance(expr, (Literal, Keyword)):
retreat = expr.matchLen
self.exact = True
elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
retreat = expr.maxLen
self.exact = True
elif isinstance(expr, PositionToken):
retreat = 0
self.exact = True
self.retreat = retreat
self.errmsg = "not preceded by " + str(expr)
self.skipWhitespace = False
self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
def parseImpl(self, instring, loc=0, doActions=True):
if self.exact:
if loc < self.retreat:
raise ParseException(instring, loc, self.errmsg)
start = loc - self.retreat
_, ret = self.expr._parse(instring, start)
else:
# retreat specified a maximum lookbehind window, iterate
test_expr = self.expr + StringEnd()
instring_slice = instring[max(0, loc - self.retreat) : loc]
last_expr = ParseException(instring, loc, self.errmsg)
for offset in range(1, min(loc, self.retreat + 1) + 1):
try:
# print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
_, ret = test_expr._parse(
instring_slice, len(instring_slice) - offset
)
except ParseBaseException as pbe:
last_expr = pbe
else:
break
else:
raise last_expr
return loc, ret
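# Illustrative sketch (editorial addition): PrecededBy as a lookbehind; a
# variable-width expression such as Word needs an explicit retreat window:
#
#   from pip._vendor.pyparsing import PrecededBy, Word, nums, alphas
#   unit = PrecededBy(Word(nums), retreat=10) + Word(alphas)
#   print(unit.search_string("12px 5em"))  # -> [['px'], ['em']]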
class Located(ParseElementEnhance):
"""
Decorates a returned token with its starting and ending
locations in the input string.
This helper adds the following results names:
- ``locn_start`` - location where matched expression begins
- ``locn_end`` - location where matched expression ends
- ``value`` - the actual parsed results
Be careful if the input text contains ``<TAB>`` characters, you
may want to call :class:`ParserElement.parse_with_tabs`
Example::
wd = Word(alphas)
for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[0, ['ljsdf'], 5]
[8, ['lksdjjf'], 15]
[18, ['lkkjj'], 23]
"""
def parseImpl(self, instring, loc, doActions=True):
start = loc
loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False)
ret_tokens = ParseResults([start, tokens, loc])
ret_tokens["locn_start"] = start
ret_tokens["value"] = tokens
ret_tokens["locn_end"] = loc
if self.resultsName:
# must return as a list, so that the name will be attached to the complete group
return loc, [ret_tokens]
else:
return loc, ret_tokens
class NotAny(ParseElementEnhance):
"""
Lookahead to disallow matching with the given parse expression.
``NotAny`` does *not* advance the parsing position within the
input string, it only verifies that the specified parse expression
does *not* match at the current position. Also, ``NotAny`` does
*not* skip over leading whitespace. ``NotAny`` always returns
a null token list. May be constructed using the ``'~'`` operator.
Example::
AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
# take care not to mistake keywords for identifiers
ident = ~(AND | OR | NOT) + Word(alphas)
boolean_term = Opt(NOT) + ident
# very crude boolean expression - to support parenthesis groups and
# operation hierarchy, use infix_notation
boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...]
# integers that are followed by "." are actually floats
integer = Word(nums) + ~Char(".")
"""
def __init__(self, expr: Union[ParserElement, str]):
super().__init__(expr)
# do NOT use self.leave_whitespace(), don't want to propagate to exprs
# self.leave_whitespace()
self.skipWhitespace = False
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, " + str(self.expr)
def parseImpl(self, instring, loc, doActions=True):
if self.expr.can_parse_next(instring, loc):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def _generateDefaultName(self):
return "~{" + str(self.expr) + "}"
class _MultipleMatch(ParseElementEnhance):
def __init__(
self,
expr: ParserElement,
stop_on: typing.Optional[Union[ParserElement, str]] = None,
*,
stopOn: typing.Optional[Union[ParserElement, str]] = None,
):
super().__init__(expr)
stopOn = stopOn or stop_on
self.saveAsList = True
ender = stopOn
if isinstance(ender, str_type):
ender = self._literalStringClass(ender)
self.stopOn(ender)
def stopOn(self, ender) -> ParserElement:
if isinstance(ender, str_type):
ender = self._literalStringClass(ender)
self.not_ender = ~ender if ender is not None else None
return self
def parseImpl(self, instring, loc, doActions=True):
self_expr_parse = self.expr._parse
self_skip_ignorables = self._skipIgnorables
check_ender = self.not_ender is not None
if check_ender:
try_not_ender = self.not_ender.tryParse
# must be at least one (but first see if we are the stopOn sentinel;
# if so, fail)
if check_ender:
try_not_ender(instring, loc)
loc, tokens = self_expr_parse(instring, loc, doActions)
try:
            hasIgnoreExprs = bool(self.ignoreExprs)
while 1:
if check_ender:
try_not_ender(instring, loc)
if hasIgnoreExprs:
preloc = self_skip_ignorables(instring, loc)
else:
preloc = loc
loc, tmptokens = self_expr_parse(instring, preloc, doActions)
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException, IndexError):
pass
return loc, tokens
def _setResultsName(self, name, listAllMatches=False):
if (
__diag__.warn_ungrouped_named_tokens_in_collection
and Diagnostics.warn_ungrouped_named_tokens_in_collection
not in self.suppress_warnings_
):
for e in [self.expr] + self.expr.recurse():
if (
isinstance(e, ParserElement)
and e.resultsName
and Diagnostics.warn_ungrouped_named_tokens_in_collection
not in e.suppress_warnings_
):
warnings.warn(
"{}: setting results name {!r} on {} expression "
"collides with {!r} on contained expression".format(
"warn_ungrouped_named_tokens_in_collection",
name,
type(self).__name__,
e.resultsName,
),
stacklevel=3,
)
return super()._setResultsName(name, listAllMatches)
class OneOrMore(_MultipleMatch):
"""
Repetition of one or more of the given expression.
Parameters:
- expr - expression that must match one or more times
- stop_on - (default= ``None``) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join))
text = "shape: SQUARE posn: upper left color: BLACK"
attr_expr[1, ...].parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
# use stop_on attribute for OneOrMore to avoid reading label string as part of the data
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
# could also be written as
(attr_expr * (1,)).parse_string(text).pprint()
"""
def _generateDefaultName(self):
return "{" + str(self.expr) + "}..."
class ZeroOrMore(_MultipleMatch):
"""
Optional repetition of zero or more of the given expression.
Parameters:
- ``expr`` - expression that must match zero or more times
- ``stop_on`` - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression) - (default= ``None``)
Example: similar to :class:`OneOrMore`
"""
def __init__(
self,
expr: ParserElement,
stop_on: typing.Optional[Union[ParserElement, str]] = None,
*,
stopOn: typing.Optional[Union[ParserElement, str]] = None,
):
super().__init__(expr, stopOn=stopOn or stop_on)
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
try:
return super().parseImpl(instring, loc, doActions)
except (ParseException, IndexError):
return loc, ParseResults([], name=self.resultsName)
def _generateDefaultName(self):
return "[" + str(self.expr) + "]..."
class _NullToken:
def __bool__(self):
return False
def __str__(self):
return ""
class Opt(ParseElementEnhance):
"""
Optional matching of the given expression.
Parameters:
- ``expr`` - expression that must match zero or more times
- ``default`` (optional) - value to be returned if the optional expression is not found.
Example::
# US postal code can be a 5-digit zip, plus optional 4-digit qualifier
zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4)))
zip.run_tests('''
# traditional ZIP code
12345
# ZIP+4 form
12101-0001
# invalid ZIP
98765-
''')
prints::
# traditional ZIP code
12345
['12345']
# ZIP+4 form
12101-0001
['12101-0001']
# invalid ZIP
98765-
^
FAIL: Expected end of text (at char 5), (line:1, col:6)
"""
__optionalNotMatched = _NullToken()
def __init__(
self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched
):
super().__init__(expr, savelist=False)
self.saveAsList = self.expr.saveAsList
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
self_expr = self.expr
try:
loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False)
except (ParseException, IndexError):
default_value = self.defaultValue
if default_value is not self.__optionalNotMatched:
if self_expr.resultsName:
tokens = ParseResults([default_value])
tokens[self_expr.resultsName] = default_value
else:
tokens = [default_value]
else:
tokens = []
return loc, tokens
def _generateDefaultName(self):
inner = str(self.expr)
# strip off redundant inner {}'s
while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
inner = inner[1:-1]
return "[" + inner + "]"
Optional = Opt
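# Illustrative sketch (editorial addition): the ``default`` argument of Opt
# supplies a stand-in token when the optional expression is absent:
#
#   from pip._vendor.pyparsing import Word, alphas, nums, Opt
#   expr = Word(nums) + Opt(Word(alphas), default="none")
#   print(expr.parse_string("42 ab"))  # -> ['42', 'ab']
#   print(expr.parse_string("42"))     # -> ['42', 'none']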
class SkipTo(ParseElementEnhance):
"""
Token for skipping over all undefined text until the matched
expression is found.
Parameters:
- ``expr`` - target expression marking the end of the data to be skipped
- ``include`` - if ``True``, the target expression is also parsed
(the skipped text and target expression are returned as a 2-element
list) (default= ``False``).
- ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and
comments) that might contain false matches to the target expression
- ``fail_on`` - (default= ``None``) define expressions that are not allowed to be
      included in the skipped text; if found before the target expression is found,
the :class:`SkipTo` is not a match
Example::
report = '''
Outstanding Issues Report - 1 Jan 2000
# | Severity | Description | Days Open
-----+----------+-------------------------------------------+-----------
101 | Critical | Intermittent system crash | 6
94 | Cosmetic | Spelling error on Login ('log|n') | 14
79 | Minor | System slow when running too many reports | 47
'''
integer = Word(nums)
SEP = Suppress('|')
# use SkipTo to simply match everything up until the next SEP
# - ignore quoted strings, so that a '|' character inside a quoted string does not match
# - parse action will call token.strip() for each matched token, i.e., the description body
string_data = SkipTo(SEP, ignore=quoted_string)
string_data.set_parse_action(token_map(str.strip))
ticket_expr = (integer("issue_num") + SEP
+ string_data("sev") + SEP
+ string_data("desc") + SEP
+ integer("days_open"))
for tkt in ticket_expr.search_string(report):
            print(tkt.dump())
prints::
['101', 'Critical', 'Intermittent system crash', '6']
- days_open: '6'
- desc: 'Intermittent system crash'
- issue_num: '101'
- sev: 'Critical'
['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- days_open: '14'
- desc: "Spelling error on Login ('log|n')"
- issue_num: '94'
- sev: 'Cosmetic'
['79', 'Minor', 'System slow when running too many reports', '47']
- days_open: '47'
- desc: 'System slow when running too many reports'
- issue_num: '79'
- sev: 'Minor'
"""
def __init__(
self,
other: Union[ParserElement, str],
include: bool = False,
        ignore: typing.Optional[Union[ParserElement, str]] = None,
fail_on: typing.Optional[Union[ParserElement, str]] = None,
*,
        failOn: typing.Optional[Union[ParserElement, str]] = None,
):
super().__init__(other)
failOn = failOn or fail_on
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.saveAsList = False
if isinstance(failOn, str_type):
self.failOn = self._literalStringClass(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for " + str(self.expr)
def parseImpl(self, instring, loc, doActions=True):
startloc = loc
instrlen = len(instring)
self_expr_parse = self.expr._parse
self_failOn_canParseNext = (
self.failOn.canParseNext if self.failOn is not None else None
)
self_ignoreExpr_tryParse = (
self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
)
tmploc = loc
while tmploc <= instrlen:
if self_failOn_canParseNext is not None:
# break if failOn expression matches
if self_failOn_canParseNext(instring, tmploc):
break
if self_ignoreExpr_tryParse is not None:
# advance past ignore expressions
while 1:
try:
tmploc = self_ignoreExpr_tryParse(instring, tmploc)
except ParseBaseException:
break
try:
self_expr_parse(instring, tmploc, doActions=False, callPreParse=False)
except (ParseException, IndexError):
# no match, advance loc in string
tmploc += 1
else:
# matched skipto expr, done
break
else:
# ran off the end of the input string without matching skipto expr, fail
raise ParseException(instring, loc, self.errmsg, self)
# build up return values
loc = tmploc
skiptext = instring[startloc:loc]
skipresult = ParseResults(skiptext)
if self.includeMatch:
loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False)
skipresult += mat
return loc, skipresult
class Forward(ParseElementEnhance):
"""
Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the ``Forward``
variable using the ``'<<'`` operator.
Note: take care when assigning to ``Forward`` not to overlook
precedence of operators.
Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that::
fwd_expr << a | b | c
will actually be evaluated as::
(fwd_expr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the ``Forward``::
fwd_expr << (a | b | c)
Converting to use the ``'<<='`` operator instead will avoid this problem.
See :class:`ParseResults.pprint` for an example of a recursive
parser created using ``Forward``.
"""
def __init__(self, other: typing.Optional[Union[ParserElement, str]] = None):
self.caller_frame = traceback.extract_stack(limit=2)[0]
super().__init__(other, savelist=False)
self.lshift_line = None
def __lshift__(self, other):
if hasattr(self, "caller_frame"):
del self.caller_frame
if isinstance(other, str_type):
other = self._literalStringClass(other)
self.expr = other
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.set_whitespace_chars(
self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars
)
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
self.lshift_line = traceback.extract_stack(limit=2)[-2]
return self
def __ilshift__(self, other):
return self << other
def __or__(self, other):
caller_line = traceback.extract_stack(limit=2)[-2]
if (
__diag__.warn_on_match_first_with_lshift_operator
and caller_line == self.lshift_line
and Diagnostics.warn_on_match_first_with_lshift_operator
not in self.suppress_warnings_
):
warnings.warn(
"using '<<' operator with '|' is probably an error, use '<<='",
stacklevel=2,
)
ret = super().__or__(other)
return ret
def __del__(self):
# see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<'
if (
self.expr is None
and __diag__.warn_on_assignment_to_Forward
and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_
):
warnings.warn_explicit(
"Forward defined here but no expression attached later using '<<=' or '<<'",
UserWarning,
filename=self.caller_frame.filename,
lineno=self.caller_frame.lineno,
)
def parseImpl(self, instring, loc, doActions=True):
if (
self.expr is None
and __diag__.warn_on_parse_using_empty_Forward
and Diagnostics.warn_on_parse_using_empty_Forward
not in self.suppress_warnings_
):
# walk stack until parse_string, scan_string, search_string, or transform_string is found
parse_fns = [
"parse_string",
"scan_string",
"search_string",
"transform_string",
]
tb = traceback.extract_stack(limit=200)
for i, frm in enumerate(reversed(tb), start=1):
if frm.name in parse_fns:
stacklevel = i + 1
break
else:
stacklevel = 2
warnings.warn(
"Forward expression was never assigned a value, will not parse any input",
stacklevel=stacklevel,
)
if not ParserElement._left_recursion_enabled:
return super().parseImpl(instring, loc, doActions)
# ## Bounded Recursion algorithm ##
# Recursion only needs to be processed at ``Forward`` elements, since they are
# the only ones that can actually refer to themselves. The general idea is
# to handle recursion stepwise: We start at no recursion, then recurse once,
# recurse twice, ..., until more recursion offers no benefit (we hit the bound).
#
# The "trick" here is that each ``Forward`` gets evaluated in two contexts
# - to *match* a specific recursion level, and
# - to *search* the bounded recursion level
# and the two run concurrently. The *search* must *match* each recursion level
# to find the best possible match. This is handled by a memo table, which
# provides the previous match to the next level match attempt.
#
# See also "Left Recursion in Parsing Expression Grammars", Medeiros et al.
#
# There is a complication since we not only *parse* but also *transform* via
# actions: We do not want to run the actions too often while expanding. Thus,
# we expand using `doActions=False` and only run `doActions=True` if the next
# recursion level is acceptable.
with ParserElement.recursion_lock:
memo = ParserElement.recursion_memos
try:
# we are parsing at a specific recursion expansion - use it as-is
prev_loc, prev_result = memo[loc, self, doActions]
if isinstance(prev_result, Exception):
raise prev_result
return prev_loc, prev_result.copy()
except KeyError:
act_key = (loc, self, True)
peek_key = (loc, self, False)
# we are searching for the best recursion expansion - keep on improving
# both `doActions` cases must be tracked separately here!
prev_loc, prev_peek = memo[peek_key] = (
loc - 1,
ParseException(
instring, loc, "Forward recursion without base case", self
),
)
if doActions:
memo[act_key] = memo[peek_key]
while True:
try:
new_loc, new_peek = super().parseImpl(instring, loc, False)
except ParseException:
# we failed before getting any match – do not hide the error
if isinstance(prev_peek, Exception):
raise
new_loc, new_peek = prev_loc, prev_peek
# the match did not get better: we are done
if new_loc <= prev_loc:
if doActions:
# replace the match for doActions=False as well,
# in case the action did backtrack
prev_loc, prev_result = memo[peek_key] = memo[act_key]
del memo[peek_key], memo[act_key]
return prev_loc, prev_result.copy()
del memo[peek_key]
return prev_loc, prev_peek.copy()
# the match did get better: see if we can improve further
else:
if doActions:
try:
memo[act_key] = super().parseImpl(instring, loc, True)
except ParseException as e:
memo[peek_key] = memo[act_key] = (new_loc, e)
raise
prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek
def leave_whitespace(self, recursive: bool = True) -> ParserElement:
self.skipWhitespace = False
return self
def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
self.skipWhitespace = True
return self
def streamline(self) -> ParserElement:
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate(self, validateTrace=None) -> None:
if validateTrace is None:
validateTrace = []
if self not in validateTrace:
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self._checkRecursion([])
def _generateDefaultName(self):
# Avoid infinite recursion by setting a temporary _defaultName
self._defaultName = ": ..."
# Use the string representation of main expression.
retString = "..."
try:
if self.expr is not None:
retString = str(self.expr)[:1000]
else:
retString = "None"
finally:
return self.__class__.__name__ + ": " + retString
def copy(self) -> ParserElement:
if self.expr is not None:
return super().copy()
else:
ret = Forward()
ret <<= self
return ret
def _setResultsName(self, name, list_all_matches=False):
if (
__diag__.warn_name_set_on_empty_Forward
and Diagnostics.warn_name_set_on_empty_Forward
not in self.suppress_warnings_
):
if self.expr is None:
warnings.warn(
"{}: setting results name {!r} on {} expression "
"that has no contained expression".format(
"warn_name_set_on_empty_Forward", name, type(self).__name__
),
stacklevel=3,
)
return super()._setResultsName(name, list_all_matches)
ignoreWhitespace = ignore_whitespace
leaveWhitespace = leave_whitespace
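# Illustrative sketch (editorial addition): a recursive nested-parentheses
# grammar defined with Forward and the '<<=' operator:
#
#   from pip._vendor.pyparsing import Forward, Word, nums, Group, Suppress
#   expr = Forward()
#   atom = Word(nums) | Group(Suppress("(") + expr + Suppress(")"))
#   expr <<= atom[1, ...]
#   print(expr.parse_string("1 (2 (3 4)) 5"))  # -> ['1', ['2', ['3', '4']], '5']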
class TokenConverter(ParseElementEnhance):
"""
Abstract subclass of :class:`ParseExpression`, for converting parsed results.
"""
def __init__(self, expr: Union[ParserElement, str], savelist=False):
super().__init__(expr) # , savelist)
self.saveAsList = False
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the
input string; this can be disabled by specifying
``'adjacent=False'`` in the constructor.
Example::
real = Word(nums) + '.' + Word(nums)
print(real.parse_string('3.1416')) # -> ['3', '.', '1416']
# will also erroneously match the following
print(real.parse_string('3. 1416')) # -> ['3', '.', '1416']
real = Combine(Word(nums) + '.' + Word(nums))
print(real.parse_string('3.1416')) # -> ['3.1416']
# no match when there are internal spaces
print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...)
"""
def __init__(
self,
expr: ParserElement,
join_string: str = "",
adjacent: bool = True,
*,
joinString: typing.Optional[str] = None,
):
super().__init__(expr)
joinString = joinString if joinString is not None else join_string
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leave_whitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore(self, other) -> ParserElement:
if self.adjacent:
ParserElement.ignore(self, other)
else:
super().ignore(other)
return self
def postParse(self, instring, loc, tokenlist):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults(
["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults
)
if self.resultsName and retToks.haskeys():
return [retToks]
else:
return retToks
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for
returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.
The optional ``aslist`` argument when set to True will return the
parsed tokens as a Python list instead of a pyparsing ParseResults.
Example::
ident = Word(alphas)
num = Word(nums)
term = ident | num
func = ident + Opt(delimited_list(term))
print(func.parse_string("fn a, b, 100"))
# -> ['fn', 'a', 'b', '100']
func = ident + Group(Opt(delimited_list(term)))
print(func.parse_string("fn a, b, 100"))
# -> ['fn', ['a', 'b', '100']]
"""
def __init__(self, expr: ParserElement, aslist: bool = False):
super().__init__(expr)
self.saveAsList = True
self._asPythonList = aslist
def postParse(self, instring, loc, tokenlist):
if self._asPythonList:
return ParseResults.List(
tokenlist.asList()
if isinstance(tokenlist, ParseResults)
else list(tokenlist)
)
else:
return [tokenlist]
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also
as a dictionary. Each element can also be referenced using the first
token in the expression as its key. Useful for tabular report
    scraping when the first column can be used as an item key.
The optional ``asdict`` argument when set to True will return the
parsed tokens as a Python dict instead of a pyparsing ParseResults.
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
# print attributes as plain groups
print(attr_expr[1, ...].parse_string(text).dump())
# instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names
result = Dict(Group(attr_expr)[1, ...]).parse_string(text)
print(result.dump())
# access named fields as dict entries, or output as dict
print(result['shape'])
print(result.as_dict())
prints::
['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: 'light blue'
- posn: 'upper left'
- shape: 'SQUARE'
- texture: 'burlap'
SQUARE
{'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
See more examples at :class:`ParseResults` of accessing fields by results name.
"""
def __init__(self, expr: ParserElement, asdict: bool = False):
super().__init__(expr)
self.saveAsList = True
self._asPythonDict = asdict
def postParse(self, instring, loc, tokenlist):
for i, tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey, int):
ikey = str(ikey).strip()
if len(tok) == 1:
tokenlist[ikey] = _ParseResultsWithOffset("", i)
elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
else:
try:
dictvalue = tok.copy() # ParseResults(i)
except Exception:
exc = TypeError(
"could not extract dict values from parsed results"
" - Dict expression must contain Grouped expressions"
)
raise exc from None
del dictvalue[0]
if len(dictvalue) != 1 or (
isinstance(dictvalue, ParseResults) and dictvalue.haskeys()
):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
if self._asPythonDict:
return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict()
else:
return [tokenlist] if self.resultsName else tokenlist
class Suppress(TokenConverter):
"""Converter for ignoring the results of a parsed expression.
Example::
source = "a, b, c,d"
wd = Word(alphas)
wd_list1 = wd + (',' + wd)[...]
print(wd_list1.parse_string(source))
# often, delimiters that are useful during parsing are just in the
# way afterward - use Suppress to keep them out of the parsed output
wd_list2 = wd + (Suppress(',') + wd)[...]
print(wd_list2.parse_string(source))
# Skipped text (using '...') can be suppressed as well
source = "lead in START relevant text END trailing text"
start_marker = Keyword("START")
end_marker = Keyword("END")
find_body = Suppress(...) + start_marker + ... + end_marker
        print(find_body.parse_string(source))
prints::
['a', ',', 'b', ',', 'c', ',', 'd']
['a', 'b', 'c', 'd']
['START', 'relevant text ', 'END']
(See also :class:`delimited_list`.)
"""
def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
if expr is ...:
expr = _PendingSkip(NoMatch())
super().__init__(expr)
def __add__(self, other) -> "ParserElement":
if isinstance(self.expr, _PendingSkip):
return Suppress(SkipTo(other)) + other
else:
return super().__add__(other)
def __sub__(self, other) -> "ParserElement":
if isinstance(self.expr, _PendingSkip):
return Suppress(SkipTo(other)) - other
else:
return super().__sub__(other)
def postParse(self, instring, loc, tokenlist):
return []
def suppress(self) -> ParserElement:
return self
def trace_parse_action(f: ParseAction) -> ParseAction:
"""Decorator for debugging parse actions.
When the parse action is called, this decorator will print
``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
When the parse action completes, the decorator will print
``"<<"`` followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@trace_parse_action
def remove_duplicate_chars(tokens):
return ''.join(sorted(set(''.join(tokens))))
wds = wd[1, ...].set_parse_action(remove_duplicate_chars)
print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s, l, t = paArgs[-3:]
if len(paArgs) > 3:
thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc
sys.stderr.write(
">>entering {}(line: {!r}, {}, {!r})\n".format(thisFunc, line(l, s), l, t)
)
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write("<<leaving {} (exception: {})\n".format(thisFunc, exc))
raise
sys.stderr.write("<<leaving {} (ret: {!r})\n".format(thisFunc, ret))
return ret
z.__name__ = f.__name__
return z
# convenience constants for positional expressions
empty = Empty().set_name("empty")
line_start = LineStart().set_name("line_start")
line_end = LineEnd().set_name("line_end")
string_start = StringStart().set_name("string_start")
string_end = StringEnd().set_name("string_end")
_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).set_parse_action(
lambda s, l, t: t[0][1]
)
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").set_parse_action(
lambda s, l, t: chr(int(t[0].lstrip(r"\0x"), 16))
)
_escapedOctChar = Regex(r"\\0[0-7]+").set_parse_action(
lambda s, l, t: chr(int(t[0][1:], 8))
)
_singleChar = (
_escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r"\]", exact=1)
)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = (
Literal("[")
+ Opt("^").set_results_name("negate")
+ Group(OneOrMore(_charRange | _singleChar)).set_results_name("body")
+ "]"
)
def srange(s: str) -> str:
r"""Helper to easily define string ranges for use in :class:`Word`
construction. Borrows syntax from regexp ``'[]'`` string range
definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string
is the expanded character set joined into a single string. The
values enclosed in the []'s may be:
- a single character
- an escaped character with a leading backslash (such as ``\-``
or ``\]``)
- an escaped hex character with a leading ``'\x'``
(``\x21``, which is a ``'!'`` character) (``\0x##``
is also supported for backwards compatibility)
- an escaped octal character with a leading ``'\0'``
(``\041``, which is a ``'!'`` character)
- a range of any of the above, separated by a dash (``'a-z'``,
etc.)
- any combination of the above (``'aeiouy'``,
``'a-zA-Z0-9_$'``, etc.)
"""
_expanded = (
lambda p: p
if not isinstance(p, ParseResults)
else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
)
try:
return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body)
except Exception:
return ""
def token_map(func, *args) -> ParseAction:
"""Helper to define a parse action by mapping a function to all
elements of a :class:`ParseResults` list. If any additional args are passed,
they are forwarded to the given function as additional arguments
after the token, as in
``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``,
which will convert the parsed data to an integer using base 16.
Example (compare the last example to the one in :class:`ParserElement.transform_string`)::
hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16))
hex_ints.run_tests('''
00 11 22 aa FF 0a 0d 1a
''')
upperword = Word(alphas).set_parse_action(token_map(str.upper))
upperword[1, ...].run_tests('''
my kingdom for a horse
''')
wd = Word(alphas).set_parse_action(token_map(str.title))
wd[1, ...].set_parse_action(' '.join).run_tests('''
now is the winter of our discontent made glorious summer by this sun of york
''')
prints::
00 11 22 aa FF 0a 0d 1a
[0, 17, 34, 170, 255, 10, 13, 26]
my kingdom for a horse
['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
now is the winter of our discontent made glorious summer by this sun of york
['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
"""
def pa(s, l, t):
return [func(tokn, *args) for tokn in t]
func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
pa.__name__ = func_name
return pa
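# An illustrative sketch (the demo function is hypothetical): token_map turns
# an ordinary callable into a parse action applied to every matched token,
# with any extra arguments forwarded after the token.
def _demo_token_map():  # pragma: no cover - illustrative only
    hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))
    print(hex_integer[1, ...].parse_string("ff 10 0a"))  # -> [255, 16, 10]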
def autoname_elements() -> None:
"""
Utility to simplify mass-naming of parser elements, for
generating railroad diagram with named subdiagrams.
"""
for name, var in sys._getframe().f_back.f_locals.items():
if isinstance(var, ParserElement) and not var.customName:
var.set_name(name)
dbl_quoted_string = Combine(
Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
).set_name("string enclosed in double quotes")
sgl_quoted_string = Combine(
Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
).set_name("string enclosed in single quotes")
quoted_string = Combine(
Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
| Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
).set_name("quotedString using single or double quotes")
unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal")
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
# build list of built-in expressions, for future reference if a global default value
# gets updated
_builtin_exprs: List[ParserElement] = [
v for v in vars().values() if isinstance(v, ParserElement)
]
# backward compatibility names
tokenMap = token_map
conditionAsParseAction = condition_as_parse_action
nullDebugAction = null_debug_action
sglQuotedString = sgl_quoted_string
dblQuotedString = dbl_quoted_string
quotedString = quoted_string
unicodeString = unicode_string
lineStart = line_start
lineEnd = line_end
stringStart = string_start
stringEnd = string_end
traceParseAction = trace_parse_action
| castiel248/Convert | Lib/site-packages/pip/_vendor/pyparsing/core.py | Python | mit | 213,344 |
import railroad
from pip._vendor import pyparsing
import typing
from typing import (
List,
NamedTuple,
Generic,
TypeVar,
Dict,
Callable,
Set,
Iterable,
)
from jinja2 import Template
from io import StringIO
import inspect
jinja2_template_source = """\
<!DOCTYPE html>
<html>
<head>
{% if not head %}
<style type="text/css">
.railroad-heading {
font-family: monospace;
}
</style>
{% else %}
{{ head | safe }}
{% endif %}
</head>
<body>
{{ body | safe }}
{% for diagram in diagrams %}
<div class="railroad-group">
<h1 class="railroad-heading">{{ diagram.title }}</h1>
<div class="railroad-description">{{ diagram.text }}</div>
<div class="railroad-svg">
{{ diagram.svg }}
</div>
</div>
{% endfor %}
</body>
</html>
"""
template = Template(jinja2_template_source)
# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet
NamedDiagram = NamedTuple(
"NamedDiagram",
[("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)],
)
"""
A simple structure for associating a name with a railroad diagram
"""
T = TypeVar("T")
class EachItem(railroad.Group):
"""
Custom railroad item to compose a:
- Group containing a
- OneOrMore containing a
- Choice of the elements in the Each
with the group label indicating that all must be matched
"""
all_label = "[ALL]"
def __init__(self, *items):
choice_item = railroad.Choice(len(items) - 1, *items)
one_or_more_item = railroad.OneOrMore(item=choice_item)
super().__init__(one_or_more_item, label=self.all_label)
class AnnotatedItem(railroad.Group):
"""
Simple subclass of Group that creates an annotation label
"""
def __init__(self, label: str, item):
super().__init__(item=item, label="[{}]".format(label) if label else label)
class EditablePartial(Generic[T]):
"""
Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been
constructed.
"""
# We need this here because the railroad constructors actually transform the data, so can't be called until the
# entire tree is assembled
def __init__(self, func: Callable[..., T], args: list, kwargs: dict):
self.func = func
self.args = args
self.kwargs = kwargs
@classmethod
def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]":
"""
If you call this function in the same way that you would call the constructor, it will store the arguments
as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3)
"""
return EditablePartial(func=func, args=list(args), kwargs=kwargs)
@property
def name(self):
return self.kwargs["name"]
def __call__(self) -> T:
"""
Evaluate the partial and return the result
"""
args = self.args.copy()
kwargs = self.kwargs.copy()
# This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g.
# args=['list', 'of', 'things'])
arg_spec = inspect.getfullargspec(self.func)
if arg_spec.varargs in self.kwargs:
args += kwargs.pop(arg_spec.varargs)
return self.func(*args, **kwargs)
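# An illustrative sketch (the demo function is hypothetical): an
# EditablePartial defers construction, so its kwargs can still be edited
# before the call - which is what the diagram builder relies on when it
# extracts subdiagrams.
def _demo_editable_partial():  # pragma: no cover - illustrative only
    part = EditablePartial.from_call(railroad.Terminal, text="expr")
    part.kwargs["text"] = "integer"  # edit the pending call
    return part()                    # only now is railroad.Terminal built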
def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str:
"""
Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams
:param kwargs: kwargs to be passed in to the template
"""
data = []
for diagram in diagrams:
if diagram.diagram is None:
continue
io = StringIO()
diagram.diagram.writeSvg(io.write)
title = diagram.name
if diagram.index == 0:
title += " (root)"
data.append({"title": title, "text": "", "svg": io.getvalue()})
return template.render(diagrams=data, **kwargs)
def resolve_partial(partial: "EditablePartial[T]") -> T:
"""
Recursively resolves a collection of Partials into whatever type they are
"""
if isinstance(partial, EditablePartial):
partial.args = resolve_partial(partial.args)
partial.kwargs = resolve_partial(partial.kwargs)
return partial()
elif isinstance(partial, list):
return [resolve_partial(x) for x in partial]
elif isinstance(partial, dict):
return {key: resolve_partial(x) for key, x in partial.items()}
else:
return partial
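# An illustrative sketch (the demo function and its helper are hypothetical):
# resolve_partial walks nested EditablePartials depth-first and constructs
# them from the leaves up.
def _demo_resolve_partial():  # pragma: no cover - illustrative only
    def pair(a, b):
        return (a, b)

    inner = EditablePartial.from_call(pair, 1, 2)
    outer = EditablePartial.from_call(pair, inner, 3)
    print(resolve_partial(outer))  # -> ((1, 2), 3)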
def to_railroad(
element: pyparsing.ParserElement,
diagram_kwargs: typing.Optional[dict] = None,
vertical: int = 3,
show_results_names: bool = False,
show_groups: bool = False,
) -> List[NamedDiagram]:
"""
Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram
creation if you want to access the Railroad tree before it is converted to HTML
:param element: base element of the parser being diagrammed
:param diagram_kwargs: kwargs to pass to the Diagram() constructor
:param vertical: (optional) - int - limit at which number of alternatives should be
shown vertically instead of horizontally
:param show_results_names - bool to indicate whether results name annotations should be
included in the diagram
:param show_groups - bool to indicate whether groups should be highlighted with an unlabeled
surrounding box
"""
# Convert the whole tree underneath the root
lookup = ConverterState(diagram_kwargs=diagram_kwargs or {})
_to_diagram_element(
element,
lookup=lookup,
parent=None,
vertical=vertical,
show_results_names=show_results_names,
show_groups=show_groups,
)
root_id = id(element)
# Convert the root if it hasn't been already
if root_id in lookup:
if not element.customName:
lookup[root_id].name = ""
lookup[root_id].mark_for_extraction(root_id, lookup, force=True)
# Now that we're finished, we can convert from intermediate structures into Railroad elements
diags = list(lookup.diagrams.values())
if len(diags) > 1:
# collapse out duplicate diags with the same name
seen = set()
deduped_diags = []
for d in diags:
# don't extract SkipTo elements, they are uninformative as subdiagrams
if d.name == "...":
continue
if d.name is not None and d.name not in seen:
seen.add(d.name)
deduped_diags.append(d)
resolved = [resolve_partial(partial) for partial in deduped_diags]
else:
# special case - if just one diagram, always display it, even if
# it has no name
resolved = [resolve_partial(partial) for partial in diags]
return sorted(resolved, key=lambda diag: diag.index)
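# An illustrative sketch (the demo function is hypothetical) of the
# end-to-end flow: build a small grammar, convert it with to_railroad, then
# render the resulting diagrams with railroad_to_html.
def _demo_to_railroad():  # pragma: no cover - illustrative only
    integer = pyparsing.Word(pyparsing.nums).set_name("integer")
    csv_row = pyparsing.delimited_list(integer).set_name("csv_row")
    diagrams = to_railroad(csv_row, show_results_names=True)
    return railroad_to_html(diagrams)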
def _should_vertical(
specification: int, exprs: Iterable[pyparsing.ParserElement]
) -> bool:
"""
Returns true if we should return a vertical list of elements
"""
if specification is None:
return False
else:
return len(_visible_exprs(exprs)) >= specification
class ElementState:
"""
State recorded for an individual pyparsing Element
"""
# Note: this should be a dataclass, but we have to support Python 3.5
def __init__(
self,
element: pyparsing.ParserElement,
converted: EditablePartial,
parent: EditablePartial,
number: int,
name: str = None,
parent_index: typing.Optional[int] = None,
):
#: The pyparsing element that this represents
self.element: pyparsing.ParserElement = element
#: The name of the element
self.name: typing.Optional[str] = name
#: The output Railroad element in an unconverted state
self.converted: EditablePartial = converted
#: The parent Railroad element, which we store so that we can extract this if it's duplicated
self.parent: EditablePartial = parent
#: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram
self.number: int = number
#: The index of this inside its parent
self.parent_index: typing.Optional[int] = parent_index
#: If true, we should extract this out into a subdiagram
self.extract: bool = False
#: If true, all of this element's children have been filled out
self.complete: bool = False
def mark_for_extraction(
self, el_id: int, state: "ConverterState", name: str = None, force: bool = False
):
"""
Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram
:param el_id: id of the element
:param state: element/diagram state tracker
:param name: name to use for this element's text
:param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the
root element when we know we're finished
"""
self.extract = True
# Set the name
if not self.name:
if name:
# Allow forcing a custom name
self.name = name
elif self.element.customName:
self.name = self.element.customName
else:
self.name = ""
# Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children
# to be added
# Also, if this is just a string literal etc, don't bother extracting it
if force or (self.complete and _worth_extracting(self.element)):
state.extract_into_diagram(el_id)
class ConverterState:
"""
Stores some state that persists between recursions into the element tree
"""
def __init__(self, diagram_kwargs: typing.Optional[dict] = None):
#: A dictionary mapping ParserElements to state relating to them
self._element_diagram_states: Dict[int, ElementState] = {}
#: A dictionary mapping ParserElement IDs to subdiagrams generated from them
self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {}
#: The index of the next unnamed element
self.unnamed_index: int = 1
#: The index of the next element. This is used for sorting
self.index: int = 0
#: Shared kwargs that are used to customize the construction of diagrams
self.diagram_kwargs: dict = diagram_kwargs or {}
self.extracted_diagram_names: Set[str] = set()
def __setitem__(self, key: int, value: ElementState):
self._element_diagram_states[key] = value
def __getitem__(self, key: int) -> ElementState:
return self._element_diagram_states[key]
def __delitem__(self, key: int):
del self._element_diagram_states[key]
def __contains__(self, key: int):
return key in self._element_diagram_states
def generate_unnamed(self) -> int:
"""
Generate a number used in the name of an otherwise unnamed diagram
"""
self.unnamed_index += 1
return self.unnamed_index
def generate_index(self) -> int:
"""
Generate a number used to index a diagram
"""
self.index += 1
return self.index
def extract_into_diagram(self, el_id: int):
"""
Used when we encounter the same token twice in the same tree. When this
happens, we replace all instances of that token with a terminal, and
create a new subdiagram for the token
"""
position = self[el_id]
# Replace the original definition of this element with a regular block
if position.parent:
ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name)
if "item" in position.parent.kwargs:
position.parent.kwargs["item"] = ret
elif "items" in position.parent.kwargs:
position.parent.kwargs["items"][position.parent_index] = ret
# If the element we're extracting is a group, skip to its content but keep the title
if position.converted.func == railroad.Group:
content = position.converted.kwargs["item"]
else:
content = position.converted
self.diagrams[el_id] = EditablePartial.from_call(
NamedDiagram,
name=position.name,
diagram=EditablePartial.from_call(
railroad.Diagram, content, **self.diagram_kwargs
),
index=position.number,
)
del self[el_id]
def _worth_extracting(element: pyparsing.ParserElement) -> bool:
"""
Returns true if this element is worth having its own sub-diagram. Simply, if any of its children
themselves have children, then it's complex enough to extract
"""
children = element.recurse()
return any(child.recurse() for child in children)
def _apply_diagram_item_enhancements(fn):
"""
decorator to ensure enhancements to a diagram item (such as results name annotations)
get applied on return from _to_diagram_element (we do this since there are several
returns in _to_diagram_element)
"""
def _inner(
element: pyparsing.ParserElement,
parent: typing.Optional[EditablePartial],
lookup: ConverterState = None,
vertical: int = None,
index: int = 0,
name_hint: str = None,
show_results_names: bool = False,
show_groups: bool = False,
) -> typing.Optional[EditablePartial]:
ret = fn(
element,
parent,
lookup,
vertical,
index,
name_hint,
show_results_names,
show_groups,
)
# apply annotation for results name, if present
if show_results_names and ret is not None:
element_results_name = element.resultsName
if element_results_name:
# add "*" to indicate if this is a "list all results" name
element_results_name += "" if element.modalResults else "*"
ret = EditablePartial.from_call(
railroad.Group, item=ret, label=element_results_name
)
return ret
return _inner
def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]):
non_diagramming_exprs = (
pyparsing.ParseElementEnhance,
pyparsing.PositionToken,
pyparsing.And._ErrorStop,
)
return [
e
for e in exprs
if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs))
]
@_apply_diagram_item_enhancements
def _to_diagram_element(
element: pyparsing.ParserElement,
parent: typing.Optional[EditablePartial],
lookup: ConverterState = None,
vertical: int = None,
index: int = 0,
name_hint: str = None,
show_results_names: bool = False,
show_groups: bool = False,
) -> typing.Optional[EditablePartial]:
"""
Recursively converts a PyParsing Element to a railroad Element
:param lookup: The shared converter state that keeps track of useful things
:param index: The index of this element within the parent
:param parent: The parent of this element in the output tree
:param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default),
it sets the threshold of the number of items before we go vertical. If True, always go vertical, if False, never
do so
:param name_hint: If provided, this will override the generated name
:param show_results_names: bool flag indicating whether to add annotations for results names
:param show_groups: bool flag indicating whether to show groups using bounding box
:returns: The converted version of the input element, but as a Partial that hasn't yet been constructed
"""
exprs = element.recurse()
name = name_hint or element.customName or element.__class__.__name__
# Python's id() is used to provide a unique identifier for elements
el_id = id(element)
element_results_name = element.resultsName
# Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram
if not element.customName:
if isinstance(
element,
(
# pyparsing.TokenConverter,
# pyparsing.Forward,
pyparsing.Located,
),
):
# However, if this element has a useful custom name, and its child does not, we can pass it on to the child
if exprs:
if not exprs[0].customName:
propagated_name = name
else:
propagated_name = None
return _to_diagram_element(
element.expr,
parent=parent,
lookup=lookup,
vertical=vertical,
index=index,
name_hint=propagated_name,
show_results_names=show_results_names,
show_groups=show_groups,
)
# If the element isn't worth extracting, we always treat it as the first time we see it
if _worth_extracting(element):
if el_id in lookup:
# If we've seen this element exactly once before, we are only just now finding out that it's a duplicate,
# so we have to extract it into a new diagram.
looked_up = lookup[el_id]
looked_up.mark_for_extraction(el_id, lookup, name=name_hint)
ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name)
return ret
elif el_id in lookup.diagrams:
# If we have seen the element at least twice before, and have already extracted it into a subdiagram, we
# just put in a marker element that refers to the sub-diagram
ret = EditablePartial.from_call(
railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
)
return ret
# Recursively convert child elements
# Here we find the most relevant Railroad element for matching pyparsing Element
# We use ``items=[]`` here to hold the place for where the child elements will go once created
if isinstance(element, pyparsing.And):
# detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat
# (all will have the same name, and resultsName)
if not exprs:
return None
if len(set((e.name, e.resultsName) for e in exprs)) == 1:
ret = EditablePartial.from_call(
railroad.OneOrMore, item="", repeat=str(len(exprs))
)
elif _should_vertical(vertical, exprs):
ret = EditablePartial.from_call(railroad.Stack, items=[])
else:
ret = EditablePartial.from_call(railroad.Sequence, items=[])
elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)):
if not exprs:
return None
if _should_vertical(vertical, exprs):
ret = EditablePartial.from_call(railroad.Choice, 0, items=[])
else:
ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[])
elif isinstance(element, pyparsing.Each):
if not exprs:
return None
ret = EditablePartial.from_call(EachItem, items=[])
elif isinstance(element, pyparsing.NotAny):
ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="")
elif isinstance(element, pyparsing.FollowedBy):
ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="")
elif isinstance(element, pyparsing.PrecededBy):
ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="")
elif isinstance(element, pyparsing.Group):
if show_groups:
ret = EditablePartial.from_call(AnnotatedItem, label="", item="")
else:
ret = EditablePartial.from_call(railroad.Group, label="", item="")
elif isinstance(element, pyparsing.TokenConverter):
ret = EditablePartial.from_call(
AnnotatedItem, label=type(element).__name__.lower(), item=""
)
elif isinstance(element, pyparsing.Opt):
ret = EditablePartial.from_call(railroad.Optional, item="")
elif isinstance(element, pyparsing.OneOrMore):
ret = EditablePartial.from_call(railroad.OneOrMore, item="")
elif isinstance(element, pyparsing.ZeroOrMore):
ret = EditablePartial.from_call(railroad.ZeroOrMore, item="")
elif isinstance(element, pyparsing.Group):
ret = EditablePartial.from_call(
railroad.Group, item=None, label=element_results_name
)
elif isinstance(element, pyparsing.Empty) and not element.customName:
# Skip unnamed "Empty" elements
ret = None
elif len(exprs) > 1:
ret = EditablePartial.from_call(railroad.Sequence, items=[])
elif len(exprs) > 0 and not element_results_name:
ret = EditablePartial.from_call(railroad.Group, item="", label=name)
else:
terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName)
ret = terminal
if ret is None:
return
# Indicate this element's position in the tree so we can extract it if necessary
lookup[el_id] = ElementState(
element=element,
converted=ret,
parent=parent,
parent_index=index,
number=lookup.generate_index(),
)
if element.customName:
lookup[el_id].mark_for_extraction(el_id, lookup, element.customName)
i = 0
for expr in exprs:
# Add a placeholder index in case we have to extract the child before we even add it to the parent
if "items" in ret.kwargs:
ret.kwargs["items"].insert(i, None)
item = _to_diagram_element(
expr,
parent=ret,
lookup=lookup,
vertical=vertical,
index=i,
show_results_names=show_results_names,
show_groups=show_groups,
)
# Some elements don't need to be shown in the diagram
if item is not None:
if "item" in ret.kwargs:
ret.kwargs["item"] = item
elif "items" in ret.kwargs:
# If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal
ret.kwargs["items"][i] = item
i += 1
elif "items" in ret.kwargs:
# If we're supposed to skip this element, remove it from the parent
del ret.kwargs["items"][i]
# If all of this item's children are None, skip this item
if ret and (
("items" in ret.kwargs and len(ret.kwargs["items"]) == 0)
or ("item" in ret.kwargs and ret.kwargs["item"] is None)
):
ret = EditablePartial.from_call(railroad.Terminal, name)
# Mark this element as "complete", ie it has all of its children
if el_id in lookup:
lookup[el_id].complete = True
if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete:
lookup.extract_into_diagram(el_id)
if ret is not None:
ret = EditablePartial.from_call(
railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
)
return ret
| castiel248/Convert | Lib/site-packages/pip/_vendor/pyparsing/diagram/__init__.py | Python | mit | 23,685 |
# exceptions.py
import re
import sys
import typing
from .util import col, line, lineno, _collapse_string_to_ranges
from .unicode import pyparsing_unicode as ppu
class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic):
pass
_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums)
_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.")
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__(
self,
pstr: str,
loc: int = 0,
msg: typing.Optional[str] = None,
elem=None,
):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parser_element = self.parserElement = elem
self.args = (pstr, loc, msg)
@staticmethod
def explain_exception(exc, depth=16):
"""
Method to take an exception and translate the Python internal traceback into a list
of the pyparsing expressions that caused the exception to be raised.
Parameters:
- exc - exception raised during parsing (need not be a ParseException, in support
of Python exceptions that might be raised in a parse action)
- depth (default=16) - number of levels back in the stack trace to list expression
and function names; if None, the full stack trace names will be listed; if 0, only
the failing input line, marker, and exception string will be shown
Returns a multi-line string listing the ParserElements and/or function names in the
exception's stack trace.
"""
import inspect
from .core import ParserElement
if depth is None:
depth = sys.getrecursionlimit()
ret = []
if isinstance(exc, ParseBaseException):
ret.append(exc.line)
ret.append(" " * (exc.column - 1) + "^")
ret.append("{}: {}".format(type(exc).__name__, exc))
if depth > 0:
callers = inspect.getinnerframes(exc.__traceback__, context=depth)
seen = set()
for i, ff in enumerate(callers[-depth:]):
frm = ff[0]
f_self = frm.f_locals.get("self", None)
if isinstance(f_self, ParserElement):
if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"):
continue
if id(f_self) in seen:
continue
seen.add(id(f_self))
self_type = type(f_self)
ret.append(
"{}.{} - {}".format(
self_type.__module__, self_type.__name__, f_self
)
)
elif f_self is not None:
self_type = type(f_self)
ret.append("{}.{}".format(self_type.__module__, self_type.__name__))
else:
code = frm.f_code
if code.co_name in ("wrapper", "<module>"):
continue
ret.append("{}".format(code.co_name))
depth -= 1
if not depth:
break
return "\n".join(ret)
@classmethod
def _from_exception(cls, pe):
"""
internal factory method to simplify creating one type of ParseException
from another - avoids having __init__ signature conflicts among subclasses
"""
return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
@property
def line(self) -> str:
"""
Return the line of text where the exception occurred.
"""
return line(self.loc, self.pstr)
@property
def lineno(self) -> int:
"""
Return the 1-based line number of text where the exception occurred.
"""
return lineno(self.loc, self.pstr)
@property
def col(self) -> int:
"""
Return the 1-based column on the line of text where the exception occurred.
"""
return col(self.loc, self.pstr)
@property
def column(self) -> int:
"""
Return the 1-based column on the line of text where the exception occurred.
"""
return col(self.loc, self.pstr)
def __str__(self) -> str:
if self.pstr:
if self.loc >= len(self.pstr):
foundstr = ", found end of text"
else:
# pull out next word at error location
found_match = _exception_word_extractor.match(self.pstr, self.loc)
if found_match is not None:
found = found_match.group(0)
else:
found = self.pstr[self.loc : self.loc + 1]
foundstr = (", found %r" % found).replace(r"\\", "\\")
else:
foundstr = ""
return "{}{} (at char {}), (line:{}, col:{})".format(
self.msg, foundstr, self.loc, self.lineno, self.column
)
def __repr__(self):
return str(self)
def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str:
"""
Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
markerString = marker_string if marker_string is not None else markerString
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join(
(line_str[:line_column], markerString, line_str[line_column:])
)
return line_str.strip()
def explain(self, depth=16) -> str:
"""
Method to translate the Python internal traceback into a list
of the pyparsing expressions that caused the exception to be raised.
Parameters:
- depth (default=16) - number of levels back in the stack trace to list expression
and function names; if None, the full stack trace names will be listed; if 0, only
the failing input line, marker, and exception string will be shown
Returns a multi-line string listing the ParserElements and/or function names in the
exception's stack trace.
Example::
expr = pp.Word(pp.nums) * 3
try:
expr.parse_string("123 456 A789")
except pp.ParseException as pe:
print(pe.explain(depth=0))
prints::
123 456 A789
^
ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9)
Note: the diagnostic output will include string representations of the expressions
that failed to parse. These representations will be more helpful if you use `set_name` to
give identifiable names to your expressions. Otherwise they will use the default string
forms, which may be cryptic to read.
Note: pyparsing's default truncation of exception tracebacks may also truncate the
stack of expressions that are displayed in the ``explain`` output. To get the full listing
of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True``
"""
return self.explain_exception(self, depth)
markInputline = mark_input_line
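# An illustrative sketch (the demo function is hypothetical): the
# loc/line/column bookkeeping on ParseBaseException makes error reporting
# straightforward.
def _demo_parse_base_exception():  # pragma: no cover - illustrative only
    exc = ParseBaseException("price: $12", loc=7, msg="expected a digit")
    print(exc)                    # ... (at char 7), (line:1, col:8)
    print(exc.mark_input_line())  # -> price: >!<$12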
class ParseException(ParseBaseException):
"""
Exception thrown when a parse expression doesn't match the input string
Example::
try:
Word(nums).set_name("integer").parse_string("ABC")
except ParseException as pe:
print(pe)
print("column: {}".format(pe.column))
prints::
Expected integer (at char 0), (line:1, col:1)
column: 1
"""
class ParseFatalException(ParseBaseException):
"""
User-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately
"""
class ParseSyntaxException(ParseFatalException):
"""
Just like :class:`ParseFatalException`, but thrown internally
when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
that parsing is to stop immediately because an unbacktrackable
syntax error has been found.
"""
class RecursiveGrammarException(Exception):
"""
Exception thrown by :class:`ParserElement.validate` if the
grammar could be left-recursive; parser may need to enable
left recursion using :class:`ParserElement.enable_left_recursion<ParserElement.enable_left_recursion>`
"""
def __init__(self, parseElementList):
self.parseElementTrace = parseElementList
def __str__(self) -> str:
return "RecursiveGrammarException: {}".format(self.parseElementTrace)
| castiel248/Convert | Lib/site-packages/pip/_vendor/pyparsing/exceptions.py | Python | mit | 9,023 |
# helpers.py
import html.entities
import re
import typing
from . import __diag__
from .core import *
from .util import _bslash, _flatten, _escape_regex_range_chars
#
# global helpers
#
def delimited_list(
expr: Union[str, ParserElement],
delim: Union[str, ParserElement] = ",",
combine: bool = False,
min: typing.Optional[int] = None,
max: typing.Optional[int] = None,
*,
allow_trailing_delim: bool = False,
) -> ParserElement:
"""Helper to define a delimited list of expressions - the delimiter
defaults to ','. By default, the list elements and delimiters can
have intervening whitespace, and comments, but this can be
overridden by passing ``combine=True`` in the constructor. If
``combine`` is set to ``True``, the matching tokens are
returned as a single token string, with the delimiters included;
otherwise, the matching tokens are returned as a list of tokens,
with the delimiters suppressed.
If ``allow_trailing_delim`` is set to True, then the list may end with
a delimiter.
Example::
delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
"""
if isinstance(expr, str_type):
expr = ParserElement._literalStringClass(expr)
dlName = "{expr} [{delim} {expr}]...{end}".format(
expr=str(expr.copy().streamline()),
delim=str(delim),
end=" [{}]".format(str(delim)) if allow_trailing_delim else "",
)
if not combine:
delim = Suppress(delim)
if min is not None:
if min < 1:
raise ValueError("min must be greater than 0")
min -= 1
if max is not None:
if min is not None and max <= min:
raise ValueError("max must be greater than, or equal to min")
max -= 1
delimited_list_expr = expr + (delim + expr)[min, max]
if allow_trailing_delim:
delimited_list_expr += Opt(delim)
if combine:
return Combine(delimited_list_expr).set_name(dlName)
else:
return delimited_list_expr.set_name(dlName)
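# An illustrative sketch (the demo function is hypothetical): delimiters are
# suppressed by default; combine=True keeps them and returns one string, and
# min/max constrain the number of list elements.
def _demo_delimited_list():  # pragma: no cover - illustrative only
    print(delimited_list(Word(alphas)).parse_string("aa, bb, cc"))
    # -> ['aa', 'bb', 'cc']
    print(delimited_list(Word(hexnums), delim=":", combine=True)
          .parse_string("AA:BB:CC"))
    # -> ['AA:BB:CC']
    print(delimited_list(Word(alphas), min=2).parse_string("one, two"))
    # -> ['one', 'two']; a single element would raise a ParseException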
def counted_array(
expr: ParserElement,
int_expr: typing.Optional[ParserElement] = None,
*,
intExpr: typing.Optional[ParserElement] = None,
) -> ParserElement:
"""Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
The matched tokens are returned as a list of expr tokens - the
leading count token is suppressed.
If ``int_expr`` is specified, it should be a pyparsing expression
that produces an integer value.
Example::
counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd']
# in this parser, the leading integer value is given in binary,
# '10' indicating that 2 values are in the array
binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2))
counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd']
# if other fields must be parsed after the count but before the
# list items, give the fields results names and they will
# be preserved in the returned ParseResults:
count_with_metadata = integer + Word(alphas)("type")
typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items")
result = typed_array.parse_string("3 bool True True False")
print(result.dump())
# prints
# ['True', 'True', 'False']
# - items: ['True', 'True', 'False']
# - type: 'bool'
"""
intExpr = intExpr or int_expr
array_expr = Forward()
def count_field_parse_action(s, l, t):
nonlocal array_expr
n = t[0]
array_expr <<= (expr * n) if n else Empty()
# clear list contents, but keep any named results
del t[:]
if intExpr is None:
intExpr = Word(nums).set_parse_action(lambda t: int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.set_name("arrayLen")
intExpr.add_parse_action(count_field_parse_action, call_during_try=True)
return (intExpr + array_expr).set_name("(len) " + str(expr) + "...")
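# An illustrative sketch (the demo function is hypothetical): the leading
# count is parsed, used to size the array, and then suppressed from the
# results.
def _demo_counted_array():  # pragma: no cover - illustrative only
    print(counted_array(Word(alphas)).parse_string("2 ab cd ef"))
    # -> ['ab', 'cd'] ('ef' lies beyond the count and is left unparsed)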
def match_previous_literal(expr: ParserElement) -> ParserElement:
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks for
a 'repeat' of a previous expression. For example::
first = Word(nums)
second = match_previous_literal(first)
match_expr = first + ":" + second
will match ``"1:1"``, but not ``"1:2"``. Because this
matches a previous literal, it will also match the leading
``"1:1"`` in ``"1:10"``. If this is not desired, use
:class:`match_previous_expr`. Do *not* use with packrat parsing
enabled.
"""
rep = Forward()
def copy_token_to_repeater(s, l, t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.as_list())
rep << And(Literal(tt) for tt in tflat)
else:
rep << Empty()
expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
rep.set_name("(prev) " + str(expr))
return rep
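# An illustrative sketch (the demo function is hypothetical): the second
# occurrence must literally repeat whatever the first expression matched.
def _demo_match_previous_literal():  # pragma: no cover - illustrative only
    first = Word(nums)
    second = match_previous_literal(first)
    matcher = first + ":" + second
    print(matcher.parse_string("12:12"))  # -> ['12', ':', '12']
    # matcher.parse_string("12:34") raises ParseException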
def match_previous_expr(expr: ParserElement) -> ParserElement:
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks for
a 'repeat' of a previous expression. For example::
first = Word(nums)
second = match_previous_expr(first)
match_expr = first + ":" + second
will match ``"1:1"``, but not ``"1:2"``. Because this
matches by expressions, it will *not* match the leading ``"1:1"``
in ``"1:10"``; the expressions are evaluated first, and then
compared, so ``"1"`` is compared with ``"10"``. Do *not* use
with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copy_token_to_repeater(s, l, t):
matchTokens = _flatten(t.as_list())
def must_match_these_tokens(s, l, t):
theseTokens = _flatten(t.as_list())
if theseTokens != matchTokens:
raise ParseException(
s, l, "Expected {}, found{}".format(matchTokens, theseTokens)
)
rep.set_parse_action(must_match_these_tokens, callDuringTry=True)
expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
rep.set_name("(prev) " + str(expr))
return rep
def one_of(
strs: Union[typing.Iterable[str], str],
caseless: bool = False,
use_regex: bool = True,
as_keyword: bool = False,
*,
useRegex: bool = True,
asKeyword: bool = False,
) -> ParserElement:
"""Helper to quickly define a set of alternative :class:`Literal` s,
and makes sure to do longest-first testing when there is a conflict,
regardless of the input order, but returns
a :class:`MatchFirst` for best performance.
Parameters:
- ``strs`` - a string of space-delimited literals, or a collection of
string literals
- ``caseless`` - treat all literals as caseless - (default= ``False``)
- ``use_regex`` - as an optimization, will
generate a :class:`Regex` object; otherwise, will generate
a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if
creating a :class:`Regex` raises an exception) - (default= ``True``)
- ``as_keyword`` - enforce :class:`Keyword`-style matching on the
generated expressions - (default= ``False``)
- ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility,
but will be removed in a future release
Example::
comp_oper = one_of("< = > <= >= !=")
var = Word(alphas)
number = Word(nums)
term = var | number
comparison_expr = term + comp_oper + term
print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12"))
prints::
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
"""
asKeyword = asKeyword or as_keyword
useRegex = useRegex and use_regex
if (
isinstance(caseless, str_type)
and __diag__.warn_on_multiple_string_args_to_oneof
):
warnings.warn(
"More than one string argument passed to one_of, pass"
" choices as a list or space-delimited string",
stacklevel=2,
)
if caseless:
isequal = lambda a, b: a.upper() == b.upper()
masks = lambda a, b: b.upper().startswith(a.upper())
parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
else:
isequal = lambda a, b: a == b
masks = lambda a, b: b.startswith(a)
parseElementClass = Keyword if asKeyword else Literal
symbols: List[str] = []
if isinstance(strs, str_type):
symbols = strs.split()
elif isinstance(strs, Iterable):
symbols = list(strs)
else:
raise TypeError("Invalid argument to one_of, expected string or iterable")
if not symbols:
return NoMatch()
# reorder given symbols to take care to avoid masking longer choices with shorter ones
# (but only if the given symbols are not just single characters)
if any(len(sym) > 1 for sym in symbols):
i = 0
while i < len(symbols) - 1:
cur = symbols[i]
for j, other in enumerate(symbols[i + 1 :]):
if isequal(other, cur):
del symbols[i + j + 1]
break
elif masks(cur, other):
del symbols[i + j + 1]
symbols.insert(i, other)
break
else:
i += 1
if useRegex:
re_flags: int = re.IGNORECASE if caseless else 0
try:
if all(len(sym) == 1 for sym in symbols):
# symbols are just single characters, create range regex pattern
patt = "[{}]".format(
"".join(_escape_regex_range_chars(sym) for sym in symbols)
)
else:
patt = "|".join(re.escape(sym) for sym in symbols)
# wrap with \b word break markers if defining as keywords
if asKeyword:
patt = r"\b(?:{})\b".format(patt)
ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols))
if caseless:
# add parse action to return symbols as specified, not in random
# casing as found in input string
symbol_map = {sym.lower(): sym for sym in symbols}
ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()])
return ret
except re.error:
warnings.warn(
"Exception creating Regex for one_of, building MatchFirst", stacklevel=2
)
# last resort, just use MatchFirst
return MatchFirst(parseElementClass(sym) for sym in symbols).set_name(
" | ".join(symbols)
)
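# An illustrative sketch (the demo function is hypothetical): one_of reorders
# the choices so longer literals are tried first, and as_keyword=True adds
# whole-word boundaries.
def _demo_one_of():  # pragma: no cover - illustrative only
    comp = one_of("< <= > >= = !=")
    print(comp.parse_string("<="))  # -> ['<='], not a premature ['<']
    kw = one_of("if else while", as_keyword=True)
    print(kw.parse_string("if"))    # -> ['if']; "iffy" would not match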
def dict_of(key: ParserElement, value: ParserElement) -> ParserElement:
"""Helper to easily and clearly define a dictionary by specifying
the respective patterns for the key and value. Takes care of
defining the :class:`Dict`, :class:`ZeroOrMore`, and
:class:`Group` tokens in the proper order. The key pattern
can include delimiting markers or punctuation, as long as they are
suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the :class:`Dict` results
can include named token fields.
Example::
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
print(attr_expr[1, ...].parse_string(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)
# similar to Dict, but simpler call format
result = dict_of(attr_label, attr_value).parse_string(text)
print(result.dump())
print(result['shape'])
print(result.shape) # object attribute access works too
print(result.as_dict())
prints::
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: 'light blue'
- posn: 'upper left'
- shape: 'SQUARE'
- texture: 'burlap'
SQUARE
SQUARE
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
"""
return Dict(OneOrMore(Group(key + value)))
def original_text_for(
expr: ParserElement, as_string: bool = True, *, asString: bool = True
) -> ParserElement:
"""Helper to return the original, untokenized text for a given
expression. Useful to restore the parsed fields of an HTML start
tag into the raw tag text itself, or to revert separate tokens with
intervening whitespace back to the original matching input text. By
default, returns a string containing the original parsed text.
If the optional ``as_string`` argument is passed as
``False``, then the return value is
a :class:`ParseResults` containing any results names that
were originally matched, and a single token containing the original
matched text from the input string. So if the expression passed to
:class:`original_text_for` contains expressions with defined
results names, you must set ``as_string`` to ``False`` if you
want to preserve those results name values.
The ``asString`` pre-PEP8 argument is retained for compatibility,
but will be removed in a future release.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b", "i"):
opener, closer = make_html_tags(tag)
patt = original_text_for(opener + SkipTo(closer) + closer)
print(patt.search_string(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
asString = asString and as_string
locMarker = Empty().set_parse_action(lambda s, loc, t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s, l, t: s[t._original_start : t._original_end]
else:
def extractText(s, l, t):
t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]
matchExpr.set_parse_action(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection)
return matchExpr
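# An illustrative sketch (the demo function is hypothetical): recover the
# exact source slice covered by an expression instead of its tokenized pieces.
def _demo_original_text_for():  # pragma: no cover - illustrative only
    key_value = Word(alphas) + "=" + Word(nums)
    raw = original_text_for(key_value)
    print(raw.parse_string("answer = 42"))  # -> ['answer = 42']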
def ungroup(expr: ParserElement) -> ParserElement:
"""Helper to undo pyparsing's default grouping of And expressions,
even if all but one are non-empty.
"""
return TokenConverter(expr).add_parse_action(lambda t: t[0])
def locatedExpr(expr: ParserElement) -> ParserElement:
"""
(DEPRECATED - future code should use the Located class)
Helper to decorate a returned token with its starting and ending
locations in the input string.
This helper adds the following results names:
- ``locn_start`` - location where matched expression begins
- ``locn_end`` - location where matched expression ends
- ``value`` - the actual parsed results
Be careful if the input text contains ``<TAB>`` characters, you
may want to call :class:`ParserElement.parseWithTabs`
Example::
wd = Word(alphas)
for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[[0, 'ljsdf', 5]]
[[8, 'lksdjjf', 15]]
[[18, 'lkkjj', 23]]
"""
locator = Empty().set_parse_action(lambda ss, ll, tt: ll)
return Group(
locator("locn_start")
+ expr("value")
+ locator.copy().leaveWhitespace()("locn_end")
)
def nested_expr(
opener: Union[str, ParserElement] = "(",
closer: Union[str, ParserElement] = ")",
content: typing.Optional[ParserElement] = None,
ignore_expr: ParserElement = quoted_string(),
*,
ignoreExpr: ParserElement = quoted_string(),
) -> ParserElement:
"""Helper method for defining nested lists enclosed in opening and
closing delimiters (``"("`` and ``")"`` are the default).
Parameters:
- ``opener`` - opening character for a nested list
(default= ``"("``); can also be a pyparsing expression
- ``closer`` - closing character for a nested list
(default= ``")"``); can also be a pyparsing expression
- ``content`` - expression for items within the nested lists
(default= ``None``)
- ``ignore_expr`` - expression for ignoring opening and closing delimiters
(default= :class:`quoted_string`)
- ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility
but will be removed in a future release
If an expression is not provided for the content argument, the
nested expression will capture all whitespace-delimited content
between delimiters as a list of separate values.
Use the ``ignore_expr`` argument to define expressions that may
contain opening or closing characters that should not be treated as
opening or closing characters for nesting, such as quoted_string or
a comment expression. Specify multiple expressions using an
:class:`Or` or :class:`MatchFirst`. The default is
:class:`quoted_string`, but if no expressions are to be ignored, then
pass ``None`` for this argument.
Example::
data_type = one_of("void int short long char float double")
decl_data_type = Combine(data_type + Opt(Word('*')))
ident = Word(alphas+'_', alphanums+'_')
number = pyparsing_common.number
arg = Group(decl_data_type + ident)
LPAR, RPAR = map(Suppress, "()")
code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment))
c_function = (decl_data_type("type")
+ ident("name")
+ LPAR + Opt(delimited_list(arg), [])("args") + RPAR
+ code_body("body"))
c_function.ignore(c_style_comment)
source_code = '''
int is_odd(int x) {
return (x%2);
}
int dec_to_hex(char hchar) {
if (hchar >= '0' && hchar <= '9') {
return (ord(hchar)-ord('0'));
} else {
return (10+ord(hchar)-ord('A'));
}
}
'''
for func in c_function.search_string(source_code):
print("%(name)s (%(type)s) args: %(args)s" % func)
prints::
is_odd (int) args: [['int', 'x']]
dec_to_hex (int) args: [['char', 'hchar']]
"""
if ignoreExpr != ignore_expr:
ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener, str_type) and isinstance(closer, str_type):
if len(opener) == 1 and len(closer) == 1:
if ignoreExpr is not None:
content = Combine(
OneOrMore(
~ignoreExpr
+ CharsNotIn(
opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
exact=1,
)
)
).set_parse_action(lambda t: t[0].strip())
else:
content = empty.copy() + CharsNotIn(
opener + closer + ParserElement.DEFAULT_WHITE_CHARS
).set_parse_action(lambda t: t[0].strip())
else:
if ignoreExpr is not None:
content = Combine(
OneOrMore(
~ignoreExpr
+ ~Literal(opener)
+ ~Literal(closer)
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
)
).set_parse_action(lambda t: t[0].strip())
else:
content = Combine(
OneOrMore(
~Literal(opener)
+ ~Literal(closer)
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
)
).set_parse_action(lambda t: t[0].strip())
else:
raise ValueError(
"opening and closing arguments must be strings if no content expression is given"
)
ret = Forward()
if ignoreExpr is not None:
ret <<= Group(
Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
)
else:
ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
ret.set_name("nested %s%s expression" % (opener, closer))
return ret
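# An illustrative sketch (the demo function is hypothetical): with no content
# expression, nested_expr captures whitespace-delimited words as nested lists.
def _demo_nested_expr():  # pragma: no cover - illustrative only
    print(nested_expr().parse_string("(a (b c) d)"))
    # -> [['a', ['b', 'c'], 'd']]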
def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr, str_type):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas, alphanums + "_-:")
if xml:
tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)
openTag = (
suppress_LT
+ tagStr("tag")
+ Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
+ Opt("/", default=[False])("empty").set_parse_action(
lambda s, l, t: t[0] == "/"
)
+ suppress_GT
)
else:
tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(
printables, exclude_chars=">"
)
openTag = (
suppress_LT
+ tagStr("tag")
+ Dict(
ZeroOrMore(
Group(
tagAttrName.set_parse_action(lambda t: t[0].lower())
+ Opt(Suppress("=") + tagAttrValue)
)
)
)
+ Opt("/", default=[False])("empty").set_parse_action(
lambda s, l, t: t[0] == "/"
)
+ suppress_GT
)
closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)
openTag.set_name("<%s>" % resname)
# add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
openTag.add_parse_action(
lambda t: t.__setitem__(
"start" + "".join(resname.replace(":", " ").title().split()), t.copy()
)
)
closeTag = closeTag(
"end" + "".join(resname.replace(":", " ").title().split())
).set_name("</%s>" % resname)
openTag.tag = resname
closeTag.tag = resname
openTag.tag_body = SkipTo(closeTag())
return openTag, closeTag
def make_html_tags(
tag_str: Union[str, ParserElement]
) -> Tuple[ParserElement, ParserElement]:
"""Helper to construct opening and closing tag expressions for HTML,
given a tag name. Matches tags in either upper or lower case,
attributes with namespaces and with quoted or unquoted values.
Example::
text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
# make_html_tags returns pyparsing expressions for the opening and
# closing tags as a 2-tuple
a, a_end = make_html_tags("A")
link_expr = a + SkipTo(a_end)("link_text") + a_end
for link in link_expr.search_string(text):
# attributes in the <A> tag (like "href" shown here) are
# also accessible as named results
print(link.link_text, '->', link.href)
prints::
pyparsing -> https://github.com/pyparsing/pyparsing/wiki
"""
return _makeTags(tag_str, False)
def make_xml_tags(
tag_str: Union[str, ParserElement]
) -> Tuple[ParserElement, ParserElement]:
"""Helper to construct opening and closing tag expressions for XML,
given a tag name. Matches tags only in the given upper/lower case.
Example: similar to :class:`make_html_tags`
"""
return _makeTags(tag_str, True)
any_open_tag: ParserElement
any_close_tag: ParserElement
any_open_tag, any_close_tag = make_html_tags(
Word(alphas, alphanums + "_:").set_name("any tag")
)
_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()}
common_html_entity = Regex("&(?P<entity>" + "|".join(_htmlEntityMap) + ");").set_name(
"common HTML entity"
)
def replace_html_entity(t):
"""Helper parser action to replace common HTML entities with their special characters"""
return _htmlEntityMap.get(t.entity)
class OpAssoc(Enum):
LEFT = 1
RIGHT = 2
InfixNotationOperatorArgType = Union[
ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]]
]
InfixNotationOperatorSpec = Union[
Tuple[
InfixNotationOperatorArgType,
int,
OpAssoc,
typing.Optional[ParseAction],
],
Tuple[
InfixNotationOperatorArgType,
int,
OpAssoc,
],
]
def infix_notation(
base_expr: ParserElement,
op_list: List[InfixNotationOperatorSpec],
lpar: Union[str, ParserElement] = Suppress("("),
rpar: Union[str, ParserElement] = Suppress(")"),
) -> ParserElement:
"""Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary
or binary, left- or right-associative. Parse actions can also be
attached to operator expressions. The generated parser will also
recognize the use of parentheses to override operator precedences
(see example below).
Note: if you define a deep operator list, you may see performance
issues when using infix_notation. See
:class:`ParserElement.enable_packrat` for a mechanism to potentially
improve your parser performance.
Parameters:
- ``base_expr`` - expression representing the most basic operand to
be used in the expression
- ``op_list`` - list of tuples, one for each operator precedence level
in the expression grammar; each tuple is of the form ``(op_expr,
num_operands, right_left_assoc, (optional)parse_action)``, where:
- ``op_expr`` is the pyparsing expression for the operator; may also
be a string, which will be converted to a Literal; if ``num_operands``
is 3, ``op_expr`` is a tuple of two expressions, for the two
operators separating the 3 terms
- ``num_operands`` is the number of terms for this operator (must be 1,
2, or 3)
- ``right_left_assoc`` is the indicator whether the operator is right
or left associative, using the pyparsing-defined constants
``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``.
- ``parse_action`` is the parse action to be associated with
expressions matching this operator expression (the parse action
tuple member may be omitted); if the parse action is passed
a tuple or list of functions, this is equivalent to calling
``set_parse_action(*fn)``
(:class:`ParserElement.set_parse_action`)
- ``lpar`` - expression for matching left-parentheses; if passed as a
str, then will be parsed as Suppress(lpar). If lpar is passed as
an expression (such as ``Literal('(')``), then it will be kept in
the parsed results, and grouped with them. (default= ``Suppress('(')``)
- ``rpar`` - expression for matching right-parentheses; if passed as a
str, then will be parsed as Suppress(rpar). If rpar is passed as
an expression (such as ``Literal(')')``), then it will be kept in
the parsed results, and grouped with them. (default= ``Suppress(')')``)
Example::
# simple example of four-function arithmetic with ints and
# variable names
integer = pyparsing_common.signed_integer
varname = pyparsing_common.identifier
arith_expr = infix_notation(integer | varname,
[
('-', 1, OpAssoc.RIGHT),
(one_of('* /'), 2, OpAssoc.LEFT),
(one_of('+ -'), 2, OpAssoc.LEFT),
])
arith_expr.run_tests('''
5+3*6
(5+3)*6
-2--11
''', full_dump=False)
prints::
5+3*6
[[5, '+', [3, '*', 6]]]
(5+3)*6
[[[5, '+', 3], '*', 6]]
-2--11
[[['-', 2], '-', ['-', 11]]]
"""
# captive version of FollowedBy that does not do parse actions or capture results names
class _FB(FollowedBy):
def parseImpl(self, instring, loc, doActions=True):
self.expr.try_parse(instring, loc)
return loc, []
_FB.__name__ = "FollowedBy>"
ret = Forward()
if isinstance(lpar, str):
lpar = Suppress(lpar)
if isinstance(rpar, str):
rpar = Suppress(rpar)
# if lpar and rpar are not suppressed, wrap in group
if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)):
lastExpr = base_expr | Group(lpar + ret + rpar)
else:
lastExpr = base_expr | (lpar + ret + rpar)
for i, operDef in enumerate(op_list):
opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]
if isinstance(opExpr, str_type):
opExpr = ParserElement._literalStringClass(opExpr)
if arity == 3:
if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2:
raise ValueError(
"if numterms=3, opExpr must be a tuple or list of two expressions"
)
opExpr1, opExpr2 = opExpr
term_name = "{}{} term".format(opExpr1, opExpr2)
else:
term_name = "{} term".format(opExpr)
if not 1 <= arity <= 3:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT):
raise ValueError("operator must indicate right or left associativity")
thisExpr: Forward = Forward().set_name(term_name)
if rightLeftAssoc is OpAssoc.LEFT:
if arity == 1:
matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...])
elif arity == 2:
if opExpr is not None:
matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(
lastExpr + (opExpr + lastExpr)[1, ...]
)
else:
matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...])
elif arity == 3:
matchExpr = _FB(
lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))
elif rightLeftAssoc is OpAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Opt):
opExpr = Opt(opExpr)
matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
elif arity == 2:
if opExpr is not None:
matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(
lastExpr + (opExpr + thisExpr)[1, ...]
)
else:
matchExpr = _FB(lastExpr + thisExpr) + Group(
lastExpr + thisExpr[1, ...]
)
elif arity == 3:
matchExpr = _FB(
lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
if pa:
if isinstance(pa, (tuple, list)):
matchExpr.set_parse_action(*pa)
else:
matchExpr.set_parse_action(pa)
thisExpr <<= (matchExpr | lastExpr).set_name(term_name)
lastExpr = thisExpr
ret <<= lastExpr
return ret
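# An illustrative sketch (the demo function is hypothetical): a two-level
# arithmetic grammar in which '*' and '/' bind tighter than '+' and '-', and
# parentheses override precedence.
def _demo_infix_notation():  # pragma: no cover - illustrative only
    operand = Word(nums).set_parse_action(lambda t: int(t[0]))
    arith = infix_notation(
        operand,
        [
            (one_of("* /"), 2, OpAssoc.LEFT),
            (one_of("+ -"), 2, OpAssoc.LEFT),
        ],
    )
    print(arith.parse_string("1+2*3"))    # -> [[1, '+', [2, '*', 3]]]
    print(arith.parse_string("(1+2)*3"))  # -> [[[1, '+', 2], '*', 3]]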
def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]):
"""
(DEPRECATED - use IndentedBlock class instead)
Helper method for defining space-delimited indentation blocks,
such as those used to define block statements in Python source code.
Parameters:
- ``blockStatementExpr`` - expression defining syntax of statement that
is repeated within the indented block
- ``indentStack`` - list created by caller to manage indentation stack
(multiple ``statementWithIndentedBlock`` expressions within a single
grammar should share a common ``indentStack``)
- ``indent`` - boolean indicating whether block must be indented beyond
the current level; set to ``False`` for block of left-most statements
(default= ``True``)
A valid block must contain at least one ``blockStatement``.
(Note that indentedBlock uses internal parse actions which make it
incompatible with packrat parsing.)
Example::
data = '''
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
'''
indentStack = [1]
stmt = Forward()
identifier = Word(alphas, alphanums)
funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":")
func_body = indentedBlock(stmt, indentStack)
funcDef = Group(funcDecl + func_body)
rvalue = Forward()
funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")")
rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << (funcDef | assignment | identifier)
module_body = stmt[1, ...]
parseTree = module_body.parseString(data)
parseTree.pprint()
prints::
[['def',
'A',
['(', 'z', ')'],
':',
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
'B',
['def',
'BB',
['(', 'a', 'b', 'c', ')'],
':',
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
'C',
'D',
['def',
'spam',
['(', 'x', 'y', ')'],
':',
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
"""
backup_stacks.append(indentStack[:])
def reset_stack():
indentStack[:] = backup_stacks[-1]
def checkPeerIndent(s, l, t):
if l >= len(s):
return
curCol = col(l, s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseException(s, l, "illegal nesting")
raise ParseException(s, l, "not a peer entry")
def checkSubIndent(s, l, t):
curCol = col(l, s)
if curCol > indentStack[-1]:
indentStack.append(curCol)
else:
raise ParseException(s, l, "not a subentry")
def checkUnindent(s, l, t):
if l >= len(s):
return
curCol = col(l, s)
if not (indentStack and curCol in indentStack):
raise ParseException(s, l, "not an unindent")
if curCol < indentStack[-1]:
indentStack.pop()
NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT")
PEER = Empty().set_parse_action(checkPeerIndent).set_name("")
UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT")
if indent:
smExpr = Group(
Opt(NL)
+ INDENT
+ OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
+ UNDENT
)
else:
smExpr = Group(
Opt(NL)
+ OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
+ Opt(UNDENT)
)
# add a parse action to remove backup_stack from list of backups
smExpr.add_parse_action(
lambda: backup_stacks.pop(-1) and None if backup_stacks else None
)
smExpr.set_fail_action(lambda a, b, c, d: reset_stack())
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr.set_name("indented block")
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name(
"C style comment"
)
"Comment of the form ``/* ... */``"
html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
"Comment of the form ``<!-- ... -->``"
rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")
dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
"Comment of the form ``// ... (to end of line)``"
cpp_style_comment = Combine(
Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment
).set_name("C++ style comment")
"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"
java_style_comment = cpp_style_comment
"Same as :class:`cpp_style_comment`"
python_style_comment = Regex(r"#.*").set_name("Python style comment")
"Comment of the form ``# ... (to end of line)``"
# build list of built-in expressions, for future reference if a global default value
# gets updated
_builtin_exprs: List[ParserElement] = [
v for v in vars().values() if isinstance(v, ParserElement)
]
# pre-PEP8 compatible names
delimitedList = delimited_list
countedArray = counted_array
matchPreviousLiteral = match_previous_literal
matchPreviousExpr = match_previous_expr
oneOf = one_of
dictOf = dict_of
originalTextFor = original_text_for
nestedExpr = nested_expr
makeHTMLTags = make_html_tags
makeXMLTags = make_xml_tags
anyOpenTag, anyCloseTag = any_open_tag, any_close_tag
commonHTMLEntity = common_html_entity
replaceHTMLEntity = replace_html_entity
opAssoc = OpAssoc
infixNotation = infix_notation
cStyleComment = c_style_comment
htmlComment = html_comment
restOfLine = rest_of_line
dblSlashComment = dbl_slash_comment
cppStyleComment = cpp_style_comment
javaStyleComment = java_style_comment
pythonStyleComment = python_style_comment
| castiel248/Convert | Lib/site-packages/pip/_vendor/pyparsing/helpers.py | Python | mit | 39,129 |
# results.py
from collections.abc import MutableMapping, Mapping, MutableSequence, Iterator
import pprint
from weakref import ref as wkref
from typing import Tuple, Any
str_type: Tuple[type, ...] = (str, bytes)
_generator_type = type((_ for _ in ()))
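# Internal helper pairing a named result value with the offset in the token
# list where it was captured; ParseResults._tokdict stores these so that
# name -> value entries can be re-aligned when tokens are inserted or deleted.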
class _ParseResultsWithOffset:
__slots__ = ["tup"]
def __init__(self, p1, p2):
self.tup = (p1, p2)
def __getitem__(self, i):
return self.tup[i]
def __getstate__(self):
return self.tup
def __setstate__(self, *args):
self.tup = args[0]
class ParseResults:
"""Structured parse results, to provide multiple means of access to
the parsed data:
- as a list (``len(results)``)
- by list index (``results[0], results[1]``, etc.)
- by attribute (``results.<results_name>`` - see :class:`ParserElement.set_results_name`)
Example::
integer = Word(nums)
date_str = (integer.set_results_name("year") + '/'
+ integer.set_results_name("month") + '/'
+ integer.set_results_name("day"))
# equivalent form:
# date_str = (integer("year") + '/'
# + integer("month") + '/'
# + integer("day"))
# parse_string returns a ParseResults object
result = date_str.parse_string("1999/12/31")
def test(s, fn=repr):
print("{} -> {}".format(s, fn(eval(s))))
test("list(result)")
test("result[0]")
test("result['month']")
test("result.day")
test("'month' in result")
test("'minutes' in result")
test("result.dump()", str)
prints::
list(result) -> ['1999', '/', '12', '/', '31']
result[0] -> '1999'
result['month'] -> '12'
result.day -> '31'
'month' in result -> True
'minutes' in result -> False
result.dump() -> ['1999', '/', '12', '/', '31']
- day: '31'
- month: '12'
- year: '1999'
"""
_null_values: Tuple[Any, ...] = (None, [], "", ())
__slots__ = [
"_name",
"_parent",
"_all_names",
"_modal",
"_toklist",
"_tokdict",
"__weakref__",
]
class List(list):
"""
Simple wrapper class to distinguish parsed list results that should be preserved
as actual Python lists, instead of being converted to :class:`ParseResults`:
LBRACK, RBRACK = map(pp.Suppress, "[]")
element = pp.Forward()
item = ppc.integer
element_list = LBRACK + pp.delimited_list(element) + RBRACK
# add parse actions to convert from ParseResults to actual Python collection types
def as_python_list(t):
return pp.ParseResults.List(t.as_list())
element_list.add_parse_action(as_python_list)
element <<= item | element_list
element.run_tests('''
100
[2,3,4]
[[2, 1],3,4]
[(2, 1),3,4]
(2,3,4)
''', post_parse=lambda s, r: (r[0], type(r[0])))
        prints::
100
(100, <class 'int'>)
[2,3,4]
([2, 3, 4], <class 'list'>)
[[2, 1],3,4]
([[2, 1], 3, 4], <class 'list'>)
(Used internally by :class:`Group` when `aslist=True`.)
"""
def __new__(cls, contained=None):
if contained is None:
contained = []
if not isinstance(contained, list):
raise TypeError(
"{} may only be constructed with a list,"
" not {}".format(cls.__name__, type(contained).__name__)
)
return list.__new__(cls)
def __new__(cls, toklist=None, name=None, **kwargs):
if isinstance(toklist, ParseResults):
return toklist
self = object.__new__(cls)
self._name = None
self._parent = None
self._all_names = set()
if toklist is None:
self._toklist = []
elif isinstance(toklist, (list, _generator_type)):
self._toklist = (
[toklist[:]]
if isinstance(toklist, ParseResults.List)
else list(toklist)
)
else:
self._toklist = [toklist]
self._tokdict = dict()
return self
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__(
self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance
):
self._modal = modal
if name is not None and name != "":
if isinstance(name, int):
name = str(name)
if not modal:
self._all_names = {name}
self._name = name
if toklist not in self._null_values:
if isinstance(toklist, (str_type, type)):
toklist = [toklist]
if asList:
if isinstance(toklist, ParseResults):
self[name] = _ParseResultsWithOffset(
ParseResults(toklist._toklist), 0
)
else:
self[name] = _ParseResultsWithOffset(
ParseResults(toklist[0]), 0
)
self[name]._name = name
else:
try:
self[name] = toklist[0]
except (KeyError, TypeError, IndexError):
if toklist is not self:
self[name] = toklist
else:
self._name = name
def __getitem__(self, i):
if isinstance(i, (int, slice)):
return self._toklist[i]
else:
if i not in self._all_names:
return self._tokdict[i][-1][0]
else:
return ParseResults([v[0] for v in self._tokdict[i]])
def __setitem__(self, k, v, isinstance=isinstance):
if isinstance(v, _ParseResultsWithOffset):
self._tokdict[k] = self._tokdict.get(k, list()) + [v]
sub = v[0]
elif isinstance(k, (int, slice)):
self._toklist[k] = v
sub = v
else:
self._tokdict[k] = self._tokdict.get(k, list()) + [
_ParseResultsWithOffset(v, 0)
]
sub = v
if isinstance(sub, ParseResults):
sub._parent = wkref(self)
def __delitem__(self, i):
if isinstance(i, (int, slice)):
mylen = len(self._toklist)
del self._toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i + 1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name, occurrences in self._tokdict.items():
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(
value, position - (position > j)
)
else:
del self._tokdict[i]
def __contains__(self, k) -> bool:
return k in self._tokdict
def __len__(self) -> int:
return len(self._toklist)
def __bool__(self) -> bool:
return not not (self._toklist or self._tokdict)
def __iter__(self) -> Iterator:
return iter(self._toklist)
def __reversed__(self) -> Iterator:
return iter(self._toklist[::-1])
def keys(self):
return iter(self._tokdict)
def values(self):
return (self[k] for k in self.keys())
def items(self):
return ((k, self[k]) for k in self.keys())
def haskeys(self) -> bool:
"""
Since ``keys()`` returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self._tokdict)
def pop(self, *args, **kwargs):
"""
Removes and returns item at specified index (default= ``last``).
Supports both ``list`` and ``dict`` semantics for ``pop()``. If
passed no argument or an integer argument, it will use ``list``
semantics and pop tokens from the list of parsed tokens. If passed
a non-integer argument (most likely a string), it will use ``dict``
semantics and pop the corresponding value from any defined results
names. A second default return value argument is supported, just as in
``dict.pop()``.
Example::
numlist = Word(nums)[...]
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
def remove_first(tokens):
tokens.pop(0)
numlist.add_parse_action(remove_first)
print(numlist.parse_string("0 123 321")) # -> ['123', '321']
label = Word(alphas)
patt = label("LABEL") + Word(nums)[1, ...]
print(patt.parse_string("AAB 123 321").dump())
# Use pop() in a parse action to remove named result (note that corresponding value is not
# removed from list form of results)
def remove_LABEL(tokens):
tokens.pop("LABEL")
return tokens
patt.add_parse_action(remove_LABEL)
print(patt.parse_string("AAB 123 321").dump())
prints::
['AAB', '123', '321']
- LABEL: 'AAB'
['AAB', '123', '321']
"""
if not args:
args = [-1]
for k, v in kwargs.items():
if k == "default":
args = (args[0], v)
else:
raise TypeError(
"pop() got an unexpected keyword argument {!r}".format(k)
)
if isinstance(args[0], int) or len(args) == 1 or args[0] in self:
index = args[0]
ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, default_value=None):
"""
Returns named result matching the given key, or if there is no
such name, then returns the given ``default_value`` or ``None`` if no
``default_value`` is specified.
Similar to ``dict.get()``.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parse_string("1999/12/31")
print(result.get("year")) # -> '1999'
print(result.get("hour", "not specified")) # -> 'not specified'
print(result.get("hour")) # -> None
"""
if key in self:
return self[key]
else:
return default_value
def insert(self, index, ins_string):
"""
Inserts new element at location index in the list of parsed tokens.
Similar to ``list.insert()``.
Example::
numlist = Word(nums)[...]
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
# use a parse action to insert the parse location in the front of the parsed results
def insert_locn(locn, tokens):
tokens.insert(0, locn)
numlist.add_parse_action(insert_locn)
print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321']
"""
self._toklist.insert(index, ins_string)
# fixup indices in token dictionary
for name, occurrences in self._tokdict.items():
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(
value, position + (position > index)
)
def append(self, item):
"""
Add single element to end of ``ParseResults`` list of elements.
Example::
numlist = Word(nums)[...]
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
numlist.add_parse_action(append_sum)
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444]
"""
self._toklist.append(item)
def extend(self, itemseq):
"""
Add sequence of elements to end of ``ParseResults`` list of elements.
Example::
patt = Word(alphas)[1, ...]
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
return ''.join(tokens)
patt.add_parse_action(make_palindrome)
print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
"""
if isinstance(itemseq, ParseResults):
self.__iadd__(itemseq)
else:
self._toklist.extend(itemseq)
def clear(self):
"""
Clear all elements and results names.
"""
del self._toklist[:]
self._tokdict.clear()
def __getattr__(self, name):
try:
return self[name]
except KeyError:
if name.startswith("__"):
raise AttributeError(name)
return ""
def __add__(self, other) -> "ParseResults":
ret = self.copy()
ret += other
return ret
def __iadd__(self, other) -> "ParseResults":
if other._tokdict:
offset = len(self._toklist)
addoffset = lambda a: offset if a < 0 else a + offset
otheritems = other._tokdict.items()
otherdictitems = [
(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
for k, vlist in otheritems
for v in vlist
]
for k, v in otherdictitems:
self[k] = v
if isinstance(v[0], ParseResults):
v[0]._parent = wkref(self)
self._toklist += other._toklist
self._all_names |= other._all_names
return self
def __radd__(self, other) -> "ParseResults":
if isinstance(other, int) and other == 0:
# useful for merging many ParseResults using sum() builtin
return self.copy()
else:
# this may raise a TypeError - so be it
return other + self
def __repr__(self) -> str:
return "{}({!r}, {})".format(type(self).__name__, self._toklist, self.as_dict())
def __str__(self) -> str:
return (
"["
+ ", ".join(
[
str(i) if isinstance(i, ParseResults) else repr(i)
for i in self._toklist
]
)
+ "]"
)
def _asStringList(self, sep=""):
out = []
for item in self._toklist:
if out and sep:
out.append(sep)
if isinstance(item, ParseResults):
out += item._asStringList()
else:
out.append(str(item))
return out
def as_list(self) -> list:
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = Word(alphas)[1, ...]
result = patt.parse_string("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use as_list() to create an actual list
result_list = result.as_list()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [
res.as_list() if isinstance(res, ParseResults) else res
for res in self._toklist
]
def as_dict(self) -> dict:
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parse_string('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.as_dict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
def to_item(obj):
if isinstance(obj, ParseResults):
return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj]
else:
return obj
return dict((k, to_item(v)) for k, v in self.items())
def copy(self) -> "ParseResults":
"""
Returns a new copy of a :class:`ParseResults` object.
"""
ret = ParseResults(self._toklist)
ret._tokdict = self._tokdict.copy()
ret._parent = self._parent
ret._all_names |= self._all_names
ret._name = self._name
return ret
def get_name(self):
r"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
Example::
integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
user_info = user_data[1, ...]
result = user_info.parse_string("22 111-22-3333 #221B")
for item in result:
print(item.get_name(), ':', item[0])
prints::
age : 22
ssn : 111-22-3333
house_number : 221B
"""
if self._name:
return self._name
elif self._parent:
par = self._parent()
def find_in_parent(sub):
return next(
(
k
for k, vlist in par._tokdict.items()
for v, loc in vlist
if sub is v
),
None,
)
return find_in_parent(self) if par else None
elif (
len(self) == 1
and len(self._tokdict) == 1
and next(iter(self._tokdict.values()))[0][1] in (0, -1)
):
return next(iter(self._tokdict.keys()))
else:
return None
def dump(self, indent="", full=True, include_list=True, _depth=0) -> str:
"""
Diagnostic method for listing out the contents of
a :class:`ParseResults`. Accepts an optional ``indent`` argument so
that this string can be embedded in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parse_string('1999/12/31')
print(result.dump())
prints::
['1999', '/', '12', '/', '31']
- day: '31'
- month: '12'
- year: '1999'
"""
out = []
NL = "\n"
out.append(indent + str(self.as_list()) if include_list else "")
if full:
if self.haskeys():
items = sorted((str(k), v) for k, v in self.items())
for k, v in items:
if out:
out.append(NL)
out.append("{}{}- {}: ".format(indent, (" " * _depth), k))
if isinstance(v, ParseResults):
if v:
out.append(
v.dump(
indent=indent,
full=full,
include_list=include_list,
_depth=_depth + 1,
)
)
else:
out.append(str(v))
else:
out.append(repr(v))
if any(isinstance(vv, ParseResults) for vv in self):
v = self
for i, vv in enumerate(v):
if isinstance(vv, ParseResults):
out.append(
"\n{}{}[{}]:\n{}{}{}".format(
indent,
(" " * (_depth)),
i,
indent,
(" " * (_depth + 1)),
vv.dump(
indent=indent,
full=full,
include_list=include_list,
_depth=_depth + 1,
),
)
)
else:
out.append(
"\n%s%s[%d]:\n%s%s%s"
% (
indent,
(" " * (_depth)),
i,
indent,
(" " * (_depth + 1)),
str(vv),
)
)
return "".join(out)
def pprint(self, *args, **kwargs):
"""
Pretty-printer for parsed results as a list, using the
`pprint <https://docs.python.org/3/library/pprint.html>`_ module.
Accepts additional positional or keyword args as defined for
`pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimited_list(term)))
result = func.parse_string("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']]
"""
pprint.pprint(self.as_list(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return (
self._toklist,
(
self._tokdict.copy(),
self._parent is not None and self._parent() or None,
self._all_names,
self._name,
),
)
def __setstate__(self, state):
self._toklist, (self._tokdict, par, inAccumNames, self._name) = state
self._all_names = set(inAccumNames)
if par is not None:
self._parent = wkref(par)
else:
self._parent = None
def __getnewargs__(self):
return self._toklist, self._name
def __dir__(self):
return dir(type(self)) + list(self.keys())
@classmethod
def from_dict(cls, other, name=None) -> "ParseResults":
"""
Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the
name-value relations as results names. If an optional ``name`` argument is
given, a nested ``ParseResults`` will be returned.
"""
def is_iterable(obj):
try:
iter(obj)
except Exception:
return False
else:
return not isinstance(obj, str_type)
ret = cls([])
for k, v in other.items():
if isinstance(v, Mapping):
ret += cls.from_dict(v, name=k)
else:
ret += cls([v], name=k, asList=is_iterable(v))
if name is not None:
ret = cls([ret], name=name)
return ret
asList = as_list
asDict = as_dict
getName = get_name
MutableMapping.register(ParseResults)
MutableSequence.register(ParseResults)
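# Illustrative sketch (not part of the module): the main access patterns from
# the ParseResults docstrings, driven directly rather than via a parser.
if __name__ == "__main__":
    demo = ParseResults(["1999", "/", "12"])
    demo["year"] = "1999"
    print(demo.as_list())   # -> ['1999', '/', '12']
    print(demo.as_dict())   # -> {'year': '1999'}
    print(demo.year)        # -> '1999'
    print("month" in demo)  # -> False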
| castiel248/Convert | Lib/site-packages/pip/_vendor/pyparsing/results.py | Python | mit | 25,341 |
# testing.py
from contextlib import contextmanager
import typing
from .core import (
ParserElement,
ParseException,
Keyword,
__diag__,
__compat__,
)
class pyparsing_test:
"""
namespace class for classes useful in writing unit tests
"""
class reset_pyparsing_context:
"""
Context manager to be used when writing unit tests that modify pyparsing config values:
- packrat parsing
- bounded recursion parsing
        - default whitespace characters
- default keyword characters
- literal string auto-conversion class
- __diag__ settings
Example::
with reset_pyparsing_context():
# test that literals used to construct a grammar are automatically suppressed
ParserElement.inlineLiteralsUsing(Suppress)
term = Word(alphas) | Word(nums)
group = Group('(' + term[...] + ')')
# assert that the '()' characters are not included in the parsed tokens
self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
# after exiting context manager, literals are converted to Literal expressions again
"""
def __init__(self):
self._save_context = {}
def save(self):
self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
self._save_context[
"literal_string_class"
] = ParserElement._literalStringClass
self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace
self._save_context["packrat_enabled"] = ParserElement._packratEnabled
if ParserElement._packratEnabled:
self._save_context[
"packrat_cache_size"
] = ParserElement.packrat_cache.size
else:
self._save_context["packrat_cache_size"] = None
self._save_context["packrat_parse"] = ParserElement._parse
self._save_context[
"recursion_enabled"
] = ParserElement._left_recursion_enabled
self._save_context["__diag__"] = {
name: getattr(__diag__, name) for name in __diag__._all_names
}
self._save_context["__compat__"] = {
"collect_all_And_tokens": __compat__.collect_all_And_tokens
}
return self
def restore(self):
# reset pyparsing global state
if (
ParserElement.DEFAULT_WHITE_CHARS
!= self._save_context["default_whitespace"]
):
ParserElement.set_default_whitespace_chars(
self._save_context["default_whitespace"]
)
ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]
Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
ParserElement.inlineLiteralsUsing(
self._save_context["literal_string_class"]
)
for name, value in self._save_context["__diag__"].items():
(__diag__.enable if value else __diag__.disable)(name)
ParserElement._packratEnabled = False
if self._save_context["packrat_enabled"]:
ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
else:
ParserElement._parse = self._save_context["packrat_parse"]
ParserElement._left_recursion_enabled = self._save_context[
"recursion_enabled"
]
            __compat__.collect_all_And_tokens = self._save_context["__compat__"][
                "collect_all_And_tokens"
            ]
return self
def copy(self):
ret = type(self)()
ret._save_context.update(self._save_context)
return ret
def __enter__(self):
return self.save()
def __exit__(self, *args):
self.restore()
class TestParseResultsAsserts:
"""
A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
"""
def assertParseResultsEquals(
self, result, expected_list=None, expected_dict=None, msg=None
):
"""
Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``,
and compare any defined results names with an optional ``expected_dict``.
"""
if expected_list is not None:
self.assertEqual(expected_list, result.as_list(), msg=msg)
if expected_dict is not None:
self.assertEqual(expected_dict, result.as_dict(), msg=msg)
def assertParseAndCheckList(
self, expr, test_string, expected_list, msg=None, verbose=True
):
"""
Convenience wrapper assert to test a parser element and input string, and assert that
the resulting ``ParseResults.asList()`` is equal to the ``expected_list``.
"""
result = expr.parse_string(test_string, parse_all=True)
if verbose:
print(result.dump())
else:
print(result.as_list())
self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
def assertParseAndCheckDict(
self, expr, test_string, expected_dict, msg=None, verbose=True
):
"""
Convenience wrapper assert to test a parser element and input string, and assert that
the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``.
"""
        result = expr.parse_string(test_string, parse_all=True)
if verbose:
print(result.dump())
else:
print(result.as_list())
self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
def assertRunTestResults(
self, run_tests_report, expected_parse_results=None, msg=None
):
"""
Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of
list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped
with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``.
Finally, asserts that the overall ``runTests()`` success value is ``True``.
:param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
:param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
"""
run_test_success, run_test_results = run_tests_report
if expected_parse_results is not None:
merged = [
(*rpt, expected)
for rpt, expected in zip(run_test_results, expected_parse_results)
]
for test_string, result, expected in merged:
# expected should be a tuple containing a list and/or a dict or an exception,
# and optional failure message string
# an empty tuple will skip any result validation
fail_msg = next(
(exp for exp in expected if isinstance(exp, str)), None
)
expected_exception = next(
(
exp
for exp in expected
if isinstance(exp, type) and issubclass(exp, Exception)
),
None,
)
if expected_exception is not None:
with self.assertRaises(
expected_exception=expected_exception, msg=fail_msg or msg
):
if isinstance(result, Exception):
raise result
else:
expected_list = next(
(exp for exp in expected if isinstance(exp, list)), None
)
expected_dict = next(
(exp for exp in expected if isinstance(exp, dict)), None
)
if (expected_list, expected_dict) != (None, None):
self.assertParseResultsEquals(
result,
expected_list=expected_list,
expected_dict=expected_dict,
msg=fail_msg or msg,
)
else:
# warning here maybe?
print("no validation for {!r}".format(test_string))
# do this last, in case some specific test results can be reported instead
self.assertTrue(
run_test_success, msg=msg if msg is not None else "failed runTests"
)
@contextmanager
def assertRaisesParseException(self, exc_type=ParseException, msg=None):
with self.assertRaises(exc_type, msg=msg):
yield
@staticmethod
def with_line_numbers(
s: str,
start_line: typing.Optional[int] = None,
end_line: typing.Optional[int] = None,
expand_tabs: bool = True,
eol_mark: str = "|",
mark_spaces: typing.Optional[str] = None,
mark_control: typing.Optional[str] = None,
) -> str:
"""
Helpful method for debugging a parser - prints a string with line and column numbers.
(Line and column numbers are 1-based.)
        :param s: str - string to be printed with line and column numbers
:param start_line: int - (optional) starting line number in s to print (default=1)
:param end_line: int - (optional) ending line number in s to print (default=len(s))
:param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default
:param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
:param mark_spaces: str - (optional) special character to display in place of spaces
:param mark_control: str - (optional) convert non-printing control characters to a placeholding
character; valid values:
- "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊"
- any single character string - replace control characters with given string
- None (default) - string is displayed as-is
:return: str - input string with leading line numbers and column number headers
"""
if expand_tabs:
s = s.expandtabs()
if mark_control is not None:
if mark_control == "unicode":
tbl = str.maketrans(
{c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))}
| {127: 0x2421}
)
eol_mark = ""
else:
tbl = str.maketrans(
{c: mark_control for c in list(range(0, 32)) + [127]}
)
s = s.translate(tbl)
if mark_spaces is not None and mark_spaces != " ":
if mark_spaces == "unicode":
tbl = str.maketrans({9: 0x2409, 32: 0x2423})
s = s.translate(tbl)
else:
s = s.replace(" ", mark_spaces)
if start_line is None:
start_line = 1
if end_line is None:
end_line = len(s)
end_line = min(end_line, len(s))
start_line = min(max(1, start_line), end_line)
if mark_control != "unicode":
s_lines = s.splitlines()[start_line - 1 : end_line]
else:
s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]]
if not s_lines:
return ""
lineno_width = len(str(end_line))
max_line_len = max(len(line) for line in s_lines)
lead = " " * (lineno_width + 1)
if max_line_len >= 99:
header0 = (
lead
+ "".join(
"{}{}".format(" " * 99, (i + 1) % 100)
for i in range(max(max_line_len // 100, 1))
)
+ "\n"
)
else:
header0 = ""
header1 = (
header0
+ lead
+ "".join(
" {}".format((i + 1) % 10)
for i in range(-(-max_line_len // 10))
)
+ "\n"
)
header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n"
return (
header1
+ header2
+ "\n".join(
"{:{}d}:{}{}".format(i, lineno_width, line, eol_mark)
for i, line in enumerate(s_lines, start=start_line)
)
+ "\n"
)
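# Illustrative sketch (not part of the module): print the numbered view of a
# short two-line string; the header rows carry the column ruler and "|" marks
# each end of line.
if __name__ == "__main__":
    print(pyparsing_test.with_line_numbers("abc\ndef"))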
| castiel248/Convert | Lib/site-packages/pip/_vendor/pyparsing/testing.py | Python | mit | 13,402 |
# unicode.py
import sys
from itertools import filterfalse
from typing import List, Tuple, Union
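# Cached class-level property: the wrapped function runs once per class (not
# per instance) and the result is memoized in cls._intern, keyed by function
# name, so the character-set strings below are only built on first access.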
class _lazyclassproperty:
def __init__(self, fn):
self.fn = fn
self.__doc__ = fn.__doc__
self.__name__ = fn.__name__
def __get__(self, obj, cls):
if cls is None:
cls = type(obj)
if not hasattr(cls, "_intern") or any(
cls._intern is getattr(superclass, "_intern", [])
for superclass in cls.__mro__[1:]
):
cls._intern = {}
attrname = self.fn.__name__
if attrname not in cls._intern:
cls._intern[attrname] = self.fn(cls)
return cls._intern[attrname]
UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]]
class unicode_set:
"""
    A set of Unicode characters, used to build language-specific strings for
``alphas``, ``nums``, ``alphanums``, and ``printables``.
A unicode_set is defined by a list of ranges in the Unicode character
set, in a class attribute ``_ranges``. Ranges can be specified using
2-tuples or a 1-tuple, such as::
_ranges = [
(0x0020, 0x007e),
(0x00a0, 0x00ff),
(0x0100,),
]
Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x).
A unicode set can also be defined using multiple inheritance of other unicode sets::
class CJK(Chinese, Japanese, Korean):
pass
"""
_ranges: UnicodeRangeList = []
@_lazyclassproperty
def _chars_for_ranges(cls):
ret = []
for cc in cls.__mro__:
if cc is unicode_set:
break
for rr in getattr(cc, "_ranges", ()):
ret.extend(range(rr[0], rr[-1] + 1))
return [chr(c) for c in sorted(set(ret))]
@_lazyclassproperty
def printables(cls):
"all non-whitespace characters in this range"
return "".join(filterfalse(str.isspace, cls._chars_for_ranges))
@_lazyclassproperty
def alphas(cls):
"all alphabetic characters in this range"
return "".join(filter(str.isalpha, cls._chars_for_ranges))
@_lazyclassproperty
def nums(cls):
"all numeric digit characters in this range"
return "".join(filter(str.isdigit, cls._chars_for_ranges))
@_lazyclassproperty
def alphanums(cls):
"all alphanumeric characters in this range"
return cls.alphas + cls.nums
@_lazyclassproperty
def identchars(cls):
"all characters in this range that are valid identifier characters, plus underscore '_'"
return "".join(
sorted(
set(
"".join(filter(str.isidentifier, cls._chars_for_ranges))
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
+ "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
+ "_"
)
)
)
@_lazyclassproperty
def identbodychars(cls):
"""
all characters in this range that are valid identifier body characters,
plus the digits 0-9
"""
return "".join(
sorted(
set(
cls.identchars
+ "0123456789"
+ "".join(
[c for c in cls._chars_for_ranges if ("_" + c).isidentifier()]
)
)
)
)
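# Illustrative sketch (not part of the module): a custom unicode_set for the
# printable-ASCII range; derived properties are computed lazily on first use.
class _AsciiDemo(unicode_set):
    _ranges: UnicodeRangeList = [(0x20, 0x7E)]
# _AsciiDemo.nums -> '0123456789'; _AsciiDemo.alphas -> 'A'..'Z' plus 'a'..'z'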
class pyparsing_unicode(unicode_set):
"""
A namespace class for defining common language unicode_sets.
"""
# fmt: off
# define ranges in language character sets
_ranges: UnicodeRangeList = [
(0x0020, sys.maxunicode),
]
class BasicMultilingualPlane(unicode_set):
"Unicode set for the Basic Multilingual Plane"
_ranges: UnicodeRangeList = [
(0x0020, 0xFFFF),
]
class Latin1(unicode_set):
"Unicode set for Latin-1 Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0020, 0x007E),
(0x00A0, 0x00FF),
]
class LatinA(unicode_set):
"Unicode set for Latin-A Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0100, 0x017F),
]
class LatinB(unicode_set):
"Unicode set for Latin-B Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0180, 0x024F),
]
class Greek(unicode_set):
"Unicode set for Greek Unicode Character Ranges"
_ranges: UnicodeRangeList = [
(0x0342, 0x0345),
(0x0370, 0x0377),
(0x037A, 0x037F),
(0x0384, 0x038A),
(0x038C,),
(0x038E, 0x03A1),
(0x03A3, 0x03E1),
(0x03F0, 0x03FF),
(0x1D26, 0x1D2A),
(0x1D5E,),
(0x1D60,),
(0x1D66, 0x1D6A),
(0x1F00, 0x1F15),
(0x1F18, 0x1F1D),
(0x1F20, 0x1F45),
(0x1F48, 0x1F4D),
(0x1F50, 0x1F57),
(0x1F59,),
(0x1F5B,),
(0x1F5D,),
(0x1F5F, 0x1F7D),
(0x1F80, 0x1FB4),
(0x1FB6, 0x1FC4),
(0x1FC6, 0x1FD3),
(0x1FD6, 0x1FDB),
(0x1FDD, 0x1FEF),
(0x1FF2, 0x1FF4),
(0x1FF6, 0x1FFE),
(0x2129,),
(0x2719, 0x271A),
(0xAB65,),
(0x10140, 0x1018D),
(0x101A0,),
(0x1D200, 0x1D245),
(0x1F7A1, 0x1F7A7),
]
class Cyrillic(unicode_set):
"Unicode set for Cyrillic Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0400, 0x052F),
(0x1C80, 0x1C88),
(0x1D2B,),
(0x1D78,),
(0x2DE0, 0x2DFF),
(0xA640, 0xA672),
(0xA674, 0xA69F),
(0xFE2E, 0xFE2F),
]
class Chinese(unicode_set):
"Unicode set for Chinese Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x2E80, 0x2E99),
(0x2E9B, 0x2EF3),
(0x31C0, 0x31E3),
(0x3400, 0x4DB5),
(0x4E00, 0x9FEF),
(0xA700, 0xA707),
(0xF900, 0xFA6D),
(0xFA70, 0xFAD9),
(0x16FE2, 0x16FE3),
(0x1F210, 0x1F212),
(0x1F214, 0x1F23B),
(0x1F240, 0x1F248),
(0x20000, 0x2A6D6),
(0x2A700, 0x2B734),
(0x2B740, 0x2B81D),
(0x2B820, 0x2CEA1),
(0x2CEB0, 0x2EBE0),
(0x2F800, 0x2FA1D),
]
class Japanese(unicode_set):
"Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
_ranges: UnicodeRangeList = []
class Kanji(unicode_set):
"Unicode set for Kanji Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x4E00, 0x9FBF),
(0x3000, 0x303F),
]
class Hiragana(unicode_set):
"Unicode set for Hiragana Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x3041, 0x3096),
(0x3099, 0x30A0),
(0x30FC,),
(0xFF70,),
(0x1B001,),
(0x1B150, 0x1B152),
(0x1F200,),
]
class Katakana(unicode_set):
"Unicode set for Katakana Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x3099, 0x309C),
(0x30A0, 0x30FF),
(0x31F0, 0x31FF),
(0x32D0, 0x32FE),
(0xFF65, 0xFF9F),
(0x1B000,),
(0x1B164, 0x1B167),
(0x1F201, 0x1F202),
(0x1F213,),
]
class Hangul(unicode_set):
"Unicode set for Hangul (Korean) Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x1100, 0x11FF),
(0x302E, 0x302F),
(0x3131, 0x318E),
(0x3200, 0x321C),
(0x3260, 0x327B),
(0x327E,),
(0xA960, 0xA97C),
(0xAC00, 0xD7A3),
(0xD7B0, 0xD7C6),
(0xD7CB, 0xD7FB),
(0xFFA0, 0xFFBE),
(0xFFC2, 0xFFC7),
(0xFFCA, 0xFFCF),
(0xFFD2, 0xFFD7),
(0xFFDA, 0xFFDC),
]
Korean = Hangul
class CJK(Chinese, Japanese, Hangul):
"Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
class Thai(unicode_set):
"Unicode set for Thai Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0E01, 0x0E3A),
(0x0E3F, 0x0E5B)
]
class Arabic(unicode_set):
"Unicode set for Arabic Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0600, 0x061B),
(0x061E, 0x06FF),
(0x0700, 0x077F),
]
class Hebrew(unicode_set):
"Unicode set for Hebrew Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0591, 0x05C7),
(0x05D0, 0x05EA),
(0x05EF, 0x05F4),
(0xFB1D, 0xFB36),
(0xFB38, 0xFB3C),
(0xFB3E,),
(0xFB40, 0xFB41),
(0xFB43, 0xFB44),
(0xFB46, 0xFB4F),
]
class Devanagari(unicode_set):
"Unicode set for Devanagari Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0900, 0x097F),
(0xA8E0, 0xA8FF)
]
# fmt: on
pyparsing_unicode.Japanese._ranges = (
pyparsing_unicode.Japanese.Kanji._ranges
+ pyparsing_unicode.Japanese.Hiragana._ranges
+ pyparsing_unicode.Japanese.Katakana._ranges
)
pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane
# add language identifiers using language Unicode
pyparsing_unicode.العربية = pyparsing_unicode.Arabic
pyparsing_unicode.中文 = pyparsing_unicode.Chinese
pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic
pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek
pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew
pyparsing_unicode.日本語 = pyparsing_unicode.Japanese
pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji
pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana
pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana
pyparsing_unicode.한국어 = pyparsing_unicode.Korean
pyparsing_unicode.ไทย = pyparsing_unicode.Thai
pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari
| castiel248/Convert | Lib/site-packages/pip/_vendor/pyparsing/unicode.py | Python | mit | 10,787 |
# util.py
import warnings
import types
import collections
import itertools
from functools import lru_cache
from typing import List, Union, Iterable
_bslash = chr(92)
class __config_flags:
"""Internal class for defining compatibility and debugging flags"""
_all_names: List[str] = []
_fixed_names: List[str] = []
_type_desc = "configuration"
@classmethod
def _set(cls, dname, value):
if dname in cls._fixed_names:
warnings.warn(
"{}.{} {} is {} and cannot be overridden".format(
cls.__name__,
dname,
cls._type_desc,
str(getattr(cls, dname)).upper(),
)
)
return
if dname in cls._all_names:
setattr(cls, dname, value)
else:
raise ValueError("no such {} {!r}".format(cls._type_desc, dname))
enable = classmethod(lambda cls, name: cls._set(name, True))
disable = classmethod(lambda cls, name: cls._set(name, False))
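# Usage sketch (illustrative): subclasses such as __diag__ expose named boolean
# flags that are toggled with cls.enable("flag_name") / cls.disable("flag_name");
# _set() rejects unknown names and warns when a fixed name is overridden.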
@lru_cache(maxsize=128)
def col(loc: int, strg: str) -> int:
"""
Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See
:class:`ParserElement.parseString` for more
information on parsing strings containing ``<TAB>`` s, and suggested
methods to maintain a consistent view of the parsed string, the parse
location, and line and column positions within the parsed string.
"""
s = strg
return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)
@lru_cache(maxsize=128)
def lineno(loc: int, strg: str) -> int:
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note - the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See :class:`ParserElement.parseString`
for more information on parsing strings containing ``<TAB>`` s, and
suggested methods to maintain a consistent view of the parsed string, the
parse location, and line and column positions within the parsed string.
"""
return strg.count("\n", 0, loc) + 1
@lru_cache(maxsize=128)
def line(loc: int, strg: str) -> str:
"""
Returns the line of text containing loc within a string, counting newlines as line separators.
"""
last_cr = strg.rfind("\n", 0, loc)
next_cr = strg.find("\n", loc)
return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :]
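# Illustrative sketch (not part of the module): the location helpers applied
# to the two-line string "ab\ncd", with loc 3 pointing at the 'c'.
def _demo_location_helpers():
    s = "ab\ncd"
    return col(3, s), lineno(3, s), line(3, s)  # -> (1, 2, 'cd')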
class _UnboundedCache:
def __init__(self):
cache = {}
cache_get = cache.get
self.not_in_cache = not_in_cache = object()
def get(_, key):
return cache_get(key, not_in_cache)
def set_(_, key, value):
cache[key] = value
def clear(_):
cache.clear()
self.size = None
self.get = types.MethodType(get, self)
self.set = types.MethodType(set_, self)
self.clear = types.MethodType(clear, self)
class _FifoCache:
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = collections.OrderedDict()
cache_get = cache.get
def get(_, key):
return cache_get(key, not_in_cache)
def set_(_, key, value):
cache[key] = value
while len(cache) > size:
cache.popitem(last=False)
def clear(_):
cache.clear()
self.size = size
self.get = types.MethodType(get, self)
self.set = types.MethodType(set_, self)
self.clear = types.MethodType(clear, self)
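# Illustrative sketch (not part of the module): FIFO eviction keeps at most
# `size` entries, discarding the oldest insertion first.
def _demo_fifo_cache():
    cache = _FifoCache(size=2)
    cache.set("a", 1)
    cache.set("b", 2)
    cache.set("c", 3)  # "a" is evicted
    return cache.get("a") is cache.not_in_cache  # -> True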
class LRUMemo:
"""
A memoizing mapping that retains `capacity` deleted items
The memo tracks retained items by their access order; once `capacity` items
are retained, the least recently used item is discarded.
"""
def __init__(self, capacity):
self._capacity = capacity
self._active = {}
self._memory = collections.OrderedDict()
def __getitem__(self, key):
try:
return self._active[key]
except KeyError:
self._memory.move_to_end(key)
return self._memory[key]
def __setitem__(self, key, value):
self._memory.pop(key, None)
self._active[key] = value
def __delitem__(self, key):
try:
value = self._active.pop(key)
except KeyError:
pass
else:
while len(self._memory) >= self._capacity:
self._memory.popitem(last=False)
self._memory[key] = value
def clear(self):
self._active.clear()
self._memory.clear()
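# Illustrative sketch (not part of the module): deleted entries stay
# recoverable from the memo until `capacity` retained items force eviction.
def _demo_lru_memo():
    memo = LRUMemo(capacity=1)
    memo["k"] = "v"
    del memo["k"]     # value moves from the active dict into retained memory
    return memo["k"]  # -> 'v'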
class UnboundedMemo(dict):
"""
A memoizing mapping that retains all deleted items
"""
def __delitem__(self, key):
pass
def _escape_regex_range_chars(s: str) -> str:
# escape these chars: ^-[]
for c in r"\^-[]":
s = s.replace(c, _bslash + c)
s = s.replace("\n", r"\n")
s = s.replace("\t", r"\t")
return str(s)
def _collapse_string_to_ranges(
s: Union[str, Iterable[str]], re_escape: bool = True
) -> str:
def is_consecutive(c):
c_int = ord(c)
is_consecutive.prev, prev = c_int, is_consecutive.prev
if c_int - prev > 1:
is_consecutive.value = next(is_consecutive.counter)
return is_consecutive.value
is_consecutive.prev = 0
is_consecutive.counter = itertools.count()
is_consecutive.value = -1
def escape_re_range_char(c):
return "\\" + c if c in r"\^-][" else c
def no_escape_re_range_char(c):
return c
if not re_escape:
escape_re_range_char = no_escape_re_range_char
ret = []
s = "".join(sorted(set(s)))
if len(s) > 3:
for _, chars in itertools.groupby(s, key=is_consecutive):
first = last = next(chars)
last = collections.deque(
itertools.chain(iter([last]), chars), maxlen=1
).pop()
if first == last:
ret.append(escape_re_range_char(first))
else:
sep = "" if ord(last) == ord(first) + 1 else "-"
ret.append(
"{}{}{}".format(
escape_re_range_char(first), sep, escape_re_range_char(last)
)
)
else:
ret = [escape_re_range_char(c) for c in s]
return "".join(ret)
def _flatten(ll: list) -> list:
ret = []
for i in ll:
if isinstance(i, list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
| castiel248/Convert | Lib/site-packages/pip/_vendor/pyparsing/util.py | Python | mit | 6,805 |
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
Requests HTTP Library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings.
Basic GET usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> b'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('https://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key1": "value1",
"key2": "value2"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <https://requests.readthedocs.io>.
:copyright: (c) 2017 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
import warnings
from pip._vendor import urllib3
from .exceptions import RequestsDependencyWarning
charset_normalizer_version = None
try:
from pip._vendor.chardet import __version__ as chardet_version
except ImportError:
chardet_version = None
def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version):
urllib3_version = urllib3_version.split(".")
assert urllib3_version != ["dev"] # Verify urllib3 isn't installed from git.
# Sometimes, urllib3 only reports its version as 16.1.
if len(urllib3_version) == 2:
urllib3_version.append("0")
# Check urllib3 for compatibility.
major, minor, patch = urllib3_version # noqa: F811
major, minor, patch = int(major), int(minor), int(patch)
# urllib3 >= 1.21.1, <= 1.26
assert major == 1
assert minor >= 21
assert minor <= 26
    # Check chardet or charset_normalizer for compatibility.
if chardet_version:
major, minor, patch = chardet_version.split(".")[:3]
major, minor, patch = int(major), int(minor), int(patch)
# chardet_version >= 3.0.2, < 6.0.0
assert (3, 0, 2) <= (major, minor, patch) < (6, 0, 0)
elif charset_normalizer_version:
major, minor, patch = charset_normalizer_version.split(".")[:3]
major, minor, patch = int(major), int(minor), int(patch)
# charset_normalizer >= 2.0.0 < 3.0.0
assert (2, 0, 0) <= (major, minor, patch) < (3, 0, 0)
else:
raise Exception("You need either charset_normalizer or chardet installed")
def _check_cryptography(cryptography_version):
# cryptography < 1.3.4
try:
cryptography_version = list(map(int, cryptography_version.split(".")))
except ValueError:
return
if cryptography_version < [1, 3, 4]:
warning = "Old version of cryptography ({}) may cause slowdown.".format(
cryptography_version
)
warnings.warn(warning, RequestsDependencyWarning)
# Check imported dependencies for compatibility.
try:
check_compatibility(
urllib3.__version__, chardet_version, charset_normalizer_version
)
except (AssertionError, ValueError):
warnings.warn(
"urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported "
"version!".format(
urllib3.__version__, chardet_version, charset_normalizer_version
),
RequestsDependencyWarning,
)
# Attempt to enable urllib3's fallback for SNI support
# if the standard library doesn't support SNI or the
# 'ssl' library isn't available.
try:
# Note: This logic prevents upgrading cryptography on Windows, if imported
# as part of pip.
from pip._internal.utils.compat import WINDOWS
if not WINDOWS:
raise ImportError("pip internals: don't import cryptography on Windows")
try:
import ssl
except ImportError:
ssl = None
if not getattr(ssl, "HAS_SNI", False):
from pip._vendor.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
# Check cryptography version
from cryptography import __version__ as cryptography_version
_check_cryptography(cryptography_version)
except ImportError:
pass
# urllib3's DependencyWarnings should be silenced.
from pip._vendor.urllib3.exceptions import DependencyWarning
warnings.simplefilter("ignore", DependencyWarning)
# Set default logging handler to avoid "No handler found" warnings.
import logging
from logging import NullHandler
from . import packages, utils
from .__version__ import (
__author__,
__author_email__,
__build__,
__cake__,
__copyright__,
__description__,
__license__,
__title__,
__url__,
__version__,
)
from .api import delete, get, head, options, patch, post, put, request
from .exceptions import (
ConnectionError,
ConnectTimeout,
FileModeWarning,
HTTPError,
JSONDecodeError,
ReadTimeout,
RequestException,
Timeout,
TooManyRedirects,
URLRequired,
)
from .models import PreparedRequest, Request, Response
from .sessions import Session, session
from .status_codes import codes
logging.getLogger(__name__).addHandler(NullHandler())
# FileModeWarnings go off per the default.
warnings.simplefilter("default", FileModeWarning, append=True)
| castiel248/Convert | Lib/site-packages/pip/_vendor/requests/__init__.py | Python | mit | 5,178 |
# .-. .-. .-. . . .-. .-. .-. .-.
# |( |- |.| | | |- `-. | `-.
# ' ' `-' `-`.`-' `-' `-' ' `-'
__title__ = "requests"
__description__ = "Python HTTP for Humans."
__url__ = "https://requests.readthedocs.io"
__version__ = "2.28.1"
__build__ = 0x022801
__author__ = "Kenneth Reitz"
__author_email__ = "me@kennethreitz.org"
__license__ = "Apache 2.0"
__copyright__ = "Copyright 2022 Kenneth Reitz"
__cake__ = "\u2728 \U0001f370 \u2728"
| castiel248/Convert | Lib/site-packages/pip/_vendor/requests/__version__.py | Python | mit | 440 |
"""
requests._internal_utils
~~~~~~~~~~~~~~
Provides utility functions that are consumed internally by Requests
which depend on extremely few external helpers (such as compat)
"""
import re
from .compat import builtin_str
_VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$")
_VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$")
_VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$")
_VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$")
HEADER_VALIDATORS = {
bytes: (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE),
str: (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR),
}
def to_native_string(string, encoding="ascii"):
"""Given a string object, regardless of type, returns a representation of
that string in the native string type, encoding and decoding where
necessary. This assumes ASCII unless told otherwise.
"""
if isinstance(string, builtin_str):
out = string
else:
out = string.decode(encoding)
return out
def unicode_is_ascii(u_string):
"""Determine if unicode string only contains ASCII characters.
:param str u_string: unicode string to check. Must be unicode
and not Python 2 `str`.
:rtype: bool
"""
assert isinstance(u_string, str)
try:
u_string.encode("ascii")
return True
except UnicodeEncodeError:
return False
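# Illustrative sketch (not part of the module):
if __name__ == "__main__":
    assert to_native_string(b"abc") == "abc"
    assert unicode_is_ascii("abc") is True
    assert unicode_is_ascii("\u00e9") is False  # 'é' is not ASCII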
| castiel248/Convert | Lib/site-packages/pip/_vendor/requests/_internal_utils.py | Python | mit | 1,397 |
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import os.path
import socket # noqa: F401
from pip._vendor.urllib3.exceptions import ClosedPoolError, ConnectTimeoutError
from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError
from pip._vendor.urllib3.exceptions import InvalidHeader as _InvalidHeader
from pip._vendor.urllib3.exceptions import (
LocationValueError,
MaxRetryError,
NewConnectionError,
ProtocolError,
)
from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError
from pip._vendor.urllib3.exceptions import ReadTimeoutError, ResponseError
from pip._vendor.urllib3.exceptions import SSLError as _SSLError
from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url
from pip._vendor.urllib3.response import HTTPResponse
from pip._vendor.urllib3.util import Timeout as TimeoutSauce
from pip._vendor.urllib3.util import parse_url
from pip._vendor.urllib3.util.retry import Retry
from .auth import _basic_auth_str
from .compat import basestring, urlparse
from .cookies import extract_cookies_to_jar
from .exceptions import (
ConnectionError,
ConnectTimeout,
InvalidHeader,
InvalidProxyURL,
InvalidSchema,
InvalidURL,
ProxyError,
ReadTimeout,
RetryError,
SSLError,
)
from .models import Response
from .structures import CaseInsensitiveDict
from .utils import (
DEFAULT_CA_BUNDLE_PATH,
extract_zipped_paths,
get_auth_from_url,
get_encoding_from_headers,
prepend_scheme_if_needed,
select_proxy,
urldefragauth,
)
try:
from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
def SOCKSProxyManager(*args, **kwargs):
raise InvalidSchema("Missing dependencies for SOCKS support.")
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter:
"""The Base Transport Adapter"""
def __init__(self):
super().__init__()
def send(
self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
raise NotImplementedError
def close(self):
"""Cleans up adapter specific items."""
raise NotImplementedError
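# Illustrative sketch (not part of requests): the smallest useful adapter
# implements send() and close(); Session.mount() attaches it to a URL prefix,
# e.g. session.mount("http://blocked.example/", _DemoNullAdapter()).
class _DemoNullAdapter(BaseAdapter):
    def send(
        self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
    ):
        # Refuse every request routed to this adapter's prefix.
        raise ConnectionError(f"blocked by policy: {request.url}")
    def close(self):
        pass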
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = [
"max_retries",
"config",
"_pool_connections",
"_pool_maxsize",
"_pool_block",
]
def __init__(
self,
pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE,
max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK,
):
if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
self.max_retries = Retry.from_int(max_retries)
self.config = {}
self.proxy_manager = {}
super().__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(
self._pool_connections, self._pool_maxsize, block=self._pool_block
)
def init_poolmanager(
self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs
):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(
num_pools=connections,
maxsize=maxsize,
block=block,
strict=True,
**pool_kwargs,
)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
:rtype: urllib3.ProxyManager
"""
if proxy in self.proxy_manager:
manager = self.proxy_manager[proxy]
elif proxy.lower().startswith("socks"):
username, password = get_auth_from_url(proxy)
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
proxy,
username=username,
password=password,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs,
)
else:
proxy_headers = self.proxy_headers(proxy)
manager = self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs,
)
return manager
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith("https") and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
if not cert_loc or not os.path.exists(cert_loc):
raise OSError(
f"Could not find a suitable TLS CA certificate bundle, "
f"invalid path: {cert_loc}"
)
conn.cert_reqs = "CERT_REQUIRED"
if not os.path.isdir(cert_loc):
conn.ca_certs = cert_loc
else:
conn.ca_cert_dir = cert_loc
else:
conn.cert_reqs = "CERT_NONE"
conn.ca_certs = None
conn.ca_cert_dir = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
conn.key_file = None
if conn.cert_file and not os.path.exists(conn.cert_file):
raise OSError(
f"Could not find the TLS certificate file, "
f"invalid path: {conn.cert_file}"
)
if conn.key_file and not os.path.exists(conn.key_file):
raise OSError(
f"Could not find the TLS key file, invalid path: {conn.key_file}"
)
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
:rtype: requests.Response
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, "status", None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, "headers", {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode("utf-8")
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, "http")
proxy_url = parse_url(proxy)
if not proxy_url.host:
raise InvalidProxyURL(
"Please check proxy URL. It is malformed "
"and could be missing the host."
)
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
            # Only the scheme should be lower-cased.
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this closes the PoolManager and any active ProxyManager,
which closes any pooled connections.
"""
self.poolmanager.clear()
for proxy in self.proxy_manager.values():
proxy.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
        If the message is being sent through an HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
:rtype: str
"""
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
is_proxied_http_request = proxy and scheme != "https"
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
using_socks_proxy = proxy_scheme.startswith("socks")
url = request.path_url
if is_proxied_http_request and not using_socks_proxy:
url = urldefragauth(request.url)
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The url of the proxy being used for this request.
:rtype: dict
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username:
headers["Proxy-Authorization"] = _basic_auth_str(username, password)
return headers
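    # A sketch of what proxy_headers produces for a proxy URL that carries
    # credentials (values below are illustrative; the proxy host is a
    # placeholder and is never contacted):
    #
    #   >>> adapter = HTTPAdapter()
    #   >>> adapter.proxy_headers("http://user:pass@proxy.invalid:3128")
    #   {'Proxy-Authorization': 'Basic dXNlcjpwYXNz'}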
def send(
self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple or urllib3 Timeout object
:param verify: (optional) Either a boolean, in which case it controls whether
we verify the server's TLS certificate, or a string, in which case it
must be a path to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
:rtype: requests.Response
"""
try:
conn = self.get_connection(request.url, proxies)
except LocationValueError as e:
raise InvalidURL(e, request=request)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(
request,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
)
chunked = not (request.body is None or "Content-Length" in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError:
raise ValueError(
f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, "
f"or a single float to set both timeouts to the same value."
)
elif isinstance(timeout, TimeoutSauce):
pass
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
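        # At this point ``timeout`` is always a urllib3 Timeout. For example
        # (illustrative): ``timeout=(3.05, 27)`` becomes
        # ``TimeoutSauce(connect=3.05, read=27)``, while ``timeout=5`` sets
        # both the connect and read timeouts to five seconds.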
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout,
)
            else:
                # Send the request manually, chunk by chunk: the body has no
                # Content-Length, so chunked transfer encoding is used.
                if hasattr(conn, "proxy_pool"):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
try:
skip_host = "Host" in request.headers
low_conn.putrequest(
request.method,
url,
skip_accept_encoding=True,
skip_host=skip_host,
)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode("utf-8"))
low_conn.send(b"\r\n")
low_conn.send(i)
low_conn.send(b"\r\n")
low_conn.send(b"0\r\n\r\n")
# Receive the response from the server
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False,
)
except Exception:
# If we hit any problems here, clean up the connection.
# Then, raise so that we can handle the actual exception.
low_conn.close()
raise
except (ProtocolError, OSError) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
# TODO: Remove this in 3.0.0: see #2811
if not isinstance(e.reason, NewConnectionError):
raise ConnectTimeout(e, request=request)
if isinstance(e.reason, ResponseError):
raise RetryError(e, request=request)
if isinstance(e.reason, _ProxyError):
raise ProxyError(e, request=request)
if isinstance(e.reason, _SSLError):
# This branch is for urllib3 v1.22 and later.
raise SSLError(e, request=request)
raise ConnectionError(e, request=request)
except ClosedPoolError as e:
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
# This branch is for urllib3 versions earlier than v1.22
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
elif isinstance(e, _InvalidHeader):
raise InvalidHeader(e, request=request)
else:
raise
return self.build_response(request, resp)
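# Usage sketch: mounting a configured adapter on a session. The pool size and
# retry count below are illustrative, not defaults, and the import assumes the
# stand-alone ``requests`` distribution (this copy is vendored under
# ``pip._vendor``):
#
#   >>> import requests
#   >>> from requests.adapters import HTTPAdapter
#   >>> s = requests.Session()
#   >>> s.mount("https://", HTTPAdapter(pool_connections=20, max_retries=3))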
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/requests/adapters.py
|
Python
|
mit
| 21,443 |
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
:param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary, list of tuples or bytes to send
in the query string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
to add for the file.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How many seconds to wait for the server to send data
before giving up, as a float, or a :ref:`(connect timeout, read
timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
:return: :class:`Response <Response>` object
:rtype: requests.Response
Usage::
>>> import requests
>>> req = requests.request('GET', 'https://httpbin.org/get')
>>> req
<Response [200]>
"""
# By using the 'with' statement we are sure the session is closed, thus we
# avoid leaving sockets open which can trigger a ResourceWarning in some
# cases, and look like a memory leak in others.
with sessions.Session() as session:
return session.request(method=method, url=url, **kwargs)
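# Note: every call through this module opens and closes a throwaway Session.
# For many requests to one host, reusing a Session keeps connections pooled.
# A sketch, assuming the stand-alone ``requests`` package:
#
#   >>> import requests
#   >>> with requests.Session() as s:
#   ...     for _ in range(3):
#   ...         r = s.get("https://httpbin.org/get")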
def get(url, params=None, **kwargs):
r"""Sends a GET request.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary, list of tuples or bytes to send
in the query string for the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request("get", url, params=params, **kwargs)
def options(url, **kwargs):
r"""Sends an OPTIONS request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request("options", url, **kwargs)
def head(url, **kwargs):
r"""Sends a HEAD request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes. If
`allow_redirects` is not provided, it will be set to `False` (as
opposed to the default :meth:`request` behavior).
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault("allow_redirects", False)
return request("head", url, **kwargs)
def post(url, data=None, json=None, **kwargs):
r"""Sends a POST request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request("post", url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
r"""Sends a PUT request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request("put", url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
r"""Sends a PATCH request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request("patch", url, data=data, **kwargs)
def delete(url, **kwargs):
r"""Sends a DELETE request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request("delete", url, **kwargs)
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/requests/api.py
|
Python
|
mit
| 6,377 |
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import hashlib
import os
import re
import threading
import time
import warnings
from base64 import b64encode
from ._internal_utils import to_native_string
from .compat import basestring, str, urlparse
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header
CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded"
CONTENT_TYPE_MULTI_PART = "multipart/form-data"
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
# "I want us to put a big-ol' comment on top of it that
# says that this behaviour is dumb but we need to preserve
# it because people are relying on it."
# - Lukasa
#
# These are here solely to maintain backwards compatibility
# for things like ints. This will be removed in 3.0.0.
if not isinstance(username, basestring):
warnings.warn(
"Non-string usernames will no longer be supported in Requests "
"3.0.0. Please convert the object you've passed in ({!r}) to "
"a string or bytes object in the near future to avoid "
"problems.".format(username),
category=DeprecationWarning,
)
username = str(username)
if not isinstance(password, basestring):
warnings.warn(
"Non-string passwords will no longer be supported in Requests "
"3.0.0. Please convert the object you've passed in ({!r}) to "
"a string or bytes object in the near future to avoid "
"problems.".format(type(password)),
category=DeprecationWarning,
)
password = str(password)
# -- End Removal --
if isinstance(username, str):
username = username.encode("latin1")
if isinstance(password, str):
password = password.encode("latin1")
authstr = "Basic " + to_native_string(
b64encode(b":".join((username, password))).strip()
)
return authstr
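# For example (RFC 7617 Basic scheme; the value is the base64 of "user:pass"):
#
#   >>> _basic_auth_str("user", "pass")
#   'Basic dXNlcjpwYXNz'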
class AuthBase:
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError("Auth hooks must be callable.")
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __eq__(self, other):
return all(
[
self.username == getattr(other, "username", None),
self.password == getattr(other, "password", None),
]
)
def __ne__(self, other):
return not self == other
def __call__(self, r):
r.headers["Authorization"] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password)
return r
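# Usage sketch for the auth classes above (credentials are illustrative, and
# the import assumes the stand-alone ``requests`` package). A plain
# ``("user", "pass")`` tuple is shorthand for HTTPBasicAuth:
#
#   >>> import requests
#   >>> from requests.auth import HTTPBasicAuth
#   >>> requests.get("https://httpbin.org/basic-auth/user/pass",
#   ...              auth=HTTPBasicAuth("user", "pass"))
#   <Response [200]>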
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
# Keep state in per-thread local storage
self._thread_local = threading.local()
def init_per_thread_state(self):
# Ensure state is initialized just once per-thread
if not hasattr(self._thread_local, "init"):
self._thread_local.init = True
self._thread_local.last_nonce = ""
self._thread_local.nonce_count = 0
self._thread_local.chal = {}
self._thread_local.pos = None
self._thread_local.num_401_calls = None
def build_digest_header(self, method, url):
"""
:rtype: str
"""
realm = self._thread_local.chal["realm"]
nonce = self._thread_local.chal["nonce"]
qop = self._thread_local.chal.get("qop")
algorithm = self._thread_local.chal.get("algorithm")
opaque = self._thread_local.chal.get("opaque")
hash_utf8 = None
if algorithm is None:
_algorithm = "MD5"
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == "MD5" or _algorithm == "MD5-SESS":
def md5_utf8(x):
if isinstance(x, str):
x = x.encode("utf-8")
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == "SHA":
def sha_utf8(x):
if isinstance(x, str):
x = x.encode("utf-8")
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
elif _algorithm == "SHA-256":
def sha256_utf8(x):
if isinstance(x, str):
x = x.encode("utf-8")
return hashlib.sha256(x).hexdigest()
hash_utf8 = sha256_utf8
elif _algorithm == "SHA-512":
def sha512_utf8(x):
if isinstance(x, str):
x = x.encode("utf-8")
return hashlib.sha512(x).hexdigest()
hash_utf8 = sha512_utf8
KD = lambda s, d: hash_utf8(f"{s}:{d}") # noqa:E731
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
#: path is request-uri defined in RFC 2616 which should not be empty
path = p_parsed.path or "/"
if p_parsed.query:
path += f"?{p_parsed.query}"
A1 = f"{self.username}:{realm}:{self.password}"
A2 = f"{method}:{path}"
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self._thread_local.last_nonce:
self._thread_local.nonce_count += 1
else:
self._thread_local.nonce_count = 1
ncvalue = f"{self._thread_local.nonce_count:08x}"
s = str(self._thread_local.nonce_count).encode("utf-8")
s += nonce.encode("utf-8")
s += time.ctime().encode("utf-8")
s += os.urandom(8)
cnonce = hashlib.sha1(s).hexdigest()[:16]
if _algorithm == "MD5-SESS":
HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}")
if not qop:
respdig = KD(HA1, f"{nonce}:{HA2}")
elif qop == "auth" or "auth" in qop.split(","):
noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}"
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self._thread_local.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = (
f'username="{self.username}", realm="{realm}", nonce="{nonce}", '
f'uri="{path}", response="{respdig}"'
)
if opaque:
base += f', opaque="{opaque}"'
if algorithm:
base += f', algorithm="{algorithm}"'
if entdig:
base += f', digest="{entdig}"'
if qop:
base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"'
return f"Digest {base}"
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self._thread_local.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""
Takes the given response and tries digest-auth, if needed.
:rtype: requests.Response
"""
# If response is not 4xx, do not auth
# See https://github.com/psf/requests/issues/3772
if not 400 <= r.status_code < 500:
self._thread_local.num_401_calls = 1
return r
if self._thread_local.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self._thread_local.pos)
s_auth = r.headers.get("www-authenticate", "")
if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2:
self._thread_local.num_401_calls += 1
pat = re.compile(r"digest ", flags=re.IGNORECASE)
self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.close()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers["Authorization"] = self.build_digest_header(
prep.method, prep.url
)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self._thread_local.num_401_calls = 1
return r
def __call__(self, r):
# Initialize per-thread state, if needed
self.init_per_thread_state()
# If we have a saved nonce, skip the 401
if self._thread_local.last_nonce:
r.headers["Authorization"] = self.build_digest_header(r.method, r.url)
try:
self._thread_local.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self._thread_local.pos = None
r.register_hook("response", self.handle_401)
r.register_hook("response", self.handle_redirect)
self._thread_local.num_401_calls = 1
return r
def __eq__(self, other):
return all(
[
self.username == getattr(other, "username", None),
self.password == getattr(other, "password", None),
]
)
def __ne__(self, other):
return not self == other
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/requests/auth.py
|
Python
|
mit
| 10,187 |
#!/usr/bin/env python
"""
requests.certs
~~~~~~~~~~~~~~
This module returns the preferred default CA certificate bundle. There is
only one — the one from the certifi package.
If you are packaging Requests, e.g., for a Linux distribution or a managed
environment, you can change the definition of where() to return a separately
packaged CA bundle.
"""
import os
if "_PIP_STANDALONE_CERT" not in os.environ:
from pip._vendor.certifi import where
else:
def where():
return os.environ["_PIP_STANDALONE_CERT"]
if __name__ == "__main__":
print(where())
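# A sketch of typical use: resolve the bundle path once and hand it to an SSL
# context or a ``verify=`` argument (the path shown is illustrative):
#
#   >>> from pip._vendor.requests.certs import where
#   >>> where()
#   '/.../pip/_vendor/certifi/cacert.pem'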
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/requests/certs.py
|
Python
|
mit
| 575 |
"""
requests.compat
~~~~~~~~~~~~~~~
This module previously handled import compatibility issues
between Python 2 and Python 3. It remains for backwards
compatibility until the next major version.
"""
from pip._vendor import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = _ver[0] == 2
#: Python 3.x?
is_py3 = _ver[0] == 3
# Note: We've patched out simplejson support in pip because it prevents
# upgrading simplejson on Windows.
import json
from json import JSONDecodeError
# Keep OrderedDict for backwards compatibility.
from collections import OrderedDict
from collections.abc import Callable, Mapping, MutableMapping
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
# --------------
# Legacy Imports
# --------------
from urllib.parse import (
quote,
quote_plus,
unquote,
unquote_plus,
urldefrag,
urlencode,
urljoin,
urlparse,
urlsplit,
urlunparse,
)
from urllib.request import (
getproxies,
getproxies_environment,
parse_http_list,
proxy_bypass,
proxy_bypass_environment,
)
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
integer_types = (int,)
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/requests/compat.py
|
Python
|
mit
| 1,286 |
"""
requests.cookies
~~~~~~~~~~~~~~~~
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import calendar
import copy
import time
from ._internal_utils import to_native_string
from .compat import Morsel, MutableMapping, cookielib, urlparse, urlunparse
try:
import threading
except ImportError:
import dummy_threading as threading
class MockRequest:
"""Wraps a `requests.Request` to mimic a `urllib2.Request`.
The code in `cookielib.CookieJar` expects this interface in order to correctly
manage cookie policies, i.e., determine whether a cookie can be set, given the
domains of the request and the cookie.
The original request object is read-only. The client is responsible for collecting
the new headers via `get_new_headers()` and interpreting them appropriately. You
probably want `get_cookie_header`, defined below.
"""
def __init__(self, request):
self._r = request
self._new_headers = {}
self.type = urlparse(self._r.url).scheme
def get_type(self):
return self.type
def get_host(self):
return urlparse(self._r.url).netloc
def get_origin_req_host(self):
return self.get_host()
def get_full_url(self):
        # Only return the response's URL if the user hasn't set the Host
# header
if not self._r.headers.get("Host"):
return self._r.url
# If they did set it, retrieve it and reconstruct the expected domain
host = to_native_string(self._r.headers["Host"], encoding="utf-8")
parsed = urlparse(self._r.url)
# Reconstruct the URL as we expect it
return urlunparse(
[
parsed.scheme,
host,
parsed.path,
parsed.params,
parsed.query,
parsed.fragment,
]
)
def is_unverifiable(self):
return True
def has_header(self, name):
return name in self._r.headers or name in self._new_headers
def get_header(self, name, default=None):
return self._r.headers.get(name, self._new_headers.get(name, default))
def add_header(self, key, val):
"""cookielib has no legitimate use for this method; add it back if you find one."""
raise NotImplementedError(
"Cookie headers should be added with add_unredirected_header()"
)
def add_unredirected_header(self, name, value):
self._new_headers[name] = value
def get_new_headers(self):
return self._new_headers
@property
def unverifiable(self):
return self.is_unverifiable()
@property
def origin_req_host(self):
return self.get_origin_req_host()
@property
def host(self):
return self.get_host()
class MockResponse:
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
...what? Basically, expose the parsed HTTP headers from the server response
the way `cookielib` expects to see them.
"""
def __init__(self, headers):
"""Make a MockResponse for `cookielib` to read.
:param headers: a httplib.HTTPMessage or analogous carrying the headers
"""
self._headers = headers
def info(self):
return self._headers
    def getheaders(self, name):
        return self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
"""Extract the cookies from the response into a CookieJar.
:param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
if not (hasattr(response, "_original_response") and response._original_response):
return
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
res = MockResponse(response._original_response.msg)
jar.extract_cookies(res, req)
def get_cookie_header(jar, request):
"""
Produce an appropriate Cookie header string to be sent with `request`, or None.
:rtype: str
"""
r = MockRequest(request)
jar.add_cookie_header(r)
return r.get_new_headers().get("Cookie")
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
"""Unsets a cookie by name, by default over all domains and paths.
    Wraps CookieJar.clear() and is O(n).
"""
clearables = []
for cookie in cookiejar:
if cookie.name != name:
continue
if domain is not None and domain != cookie.domain:
continue
if path is not None and path != cookie.path:
continue
clearables.append((cookie.domain, cookie.path, cookie.name))
for domain, path, name in clearables:
cookiejar.clear(domain, path, name)
class CookieConflictError(RuntimeError):
"""There are two cookies that meet the criteria specified in the cookie jar.
Use .get and .set and include domain and path args in order to be more specific.
"""
class RequestsCookieJar(cookielib.CookieJar, MutableMapping):
"""Compatibility class; is a cookielib.CookieJar, but exposes a dict
interface.
This is the CookieJar we create by default for requests and sessions that
don't specify one, since some clients may expect response.cookies and
session.cookies to support dict operations.
Requests does not use the dict interface internally; it's just for
compatibility with external client code. All requests code should work
out of the box with externally provided instances of ``CookieJar``, e.g.
``LWPCookieJar`` and ``FileCookieJar``.
Unlike a regular CookieJar, this class is pickleable.
.. warning:: dictionary operations that are normally O(1) may be O(n).
"""
def get(self, name, default=None, domain=None, path=None):
"""Dict-like get() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
.. warning:: operation is O(n), not O(1).
"""
try:
return self._find_no_duplicates(name, domain, path)
except KeyError:
return default
def set(self, name, value, **kwargs):
"""Dict-like set() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
"""
# support client code that unsets cookies by assignment of a None value:
if value is None:
remove_cookie_by_name(
self, name, domain=kwargs.get("domain"), path=kwargs.get("path")
)
return
if isinstance(value, Morsel):
c = morsel_to_cookie(value)
else:
c = create_cookie(name, value, **kwargs)
self.set_cookie(c)
return c
def iterkeys(self):
"""Dict-like iterkeys() that returns an iterator of names of cookies
from the jar.
.. seealso:: itervalues() and iteritems().
"""
for cookie in iter(self):
yield cookie.name
def keys(self):
"""Dict-like keys() that returns a list of names of cookies from the
jar.
.. seealso:: values() and items().
"""
return list(self.iterkeys())
def itervalues(self):
"""Dict-like itervalues() that returns an iterator of values of cookies
from the jar.
.. seealso:: iterkeys() and iteritems().
"""
for cookie in iter(self):
yield cookie.value
def values(self):
"""Dict-like values() that returns a list of values of cookies from the
jar.
.. seealso:: keys() and items().
"""
return list(self.itervalues())
def iteritems(self):
"""Dict-like iteritems() that returns an iterator of name-value tuples
from the jar.
.. seealso:: iterkeys() and itervalues().
"""
for cookie in iter(self):
yield cookie.name, cookie.value
def items(self):
"""Dict-like items() that returns a list of name-value tuples from the
jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a
vanilla python dict of key value pairs.
.. seealso:: keys() and values().
"""
return list(self.iteritems())
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
def list_paths(self):
"""Utility method to list all the paths in the jar."""
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths
def multiple_domains(self):
"""Returns True if there are multiple domains in the jar.
Returns False otherwise.
:rtype: bool
"""
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False # there is only one domain in jar
def get_dict(self, domain=None, path=None):
"""Takes as an argument an optional domain and path and returns a plain
old Python dict of name-value pairs of cookies that meet the
requirements.
:rtype: dict
"""
dictionary = {}
for cookie in iter(self):
if (domain is None or cookie.domain == domain) and (
path is None or cookie.path == path
):
dictionary[cookie.name] = cookie.value
return dictionary
def __contains__(self, name):
try:
return super().__contains__(name)
except CookieConflictError:
return True
def __getitem__(self, name):
"""Dict-like __getitem__() for compatibility with client code. Throws
exception if there are more than one cookie with name. In that case,
use the more explicit get() method instead.
.. warning:: operation is O(n), not O(1).
"""
return self._find_no_duplicates(name)
def __setitem__(self, name, value):
"""Dict-like __setitem__ for compatibility with client code. Throws
exception if there is already a cookie of that name in the jar. In that
case, use the more explicit set() method instead.
"""
self.set(name, value)
def __delitem__(self, name):
"""Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
``remove_cookie_by_name()``.
"""
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
if (
hasattr(cookie.value, "startswith")
and cookie.value.startswith('"')
and cookie.value.endswith('"')
):
cookie.value = cookie.value.replace('\\"', "")
return super().set_cookie(cookie, *args, **kwargs)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(copy.copy(cookie))
else:
super().update(other)
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values.
If there are conflicting cookies, _find arbitrarily chooses one.
See _find_no_duplicates if you want an exception thrown if there are
conflicting cookies.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:return: cookie.value
"""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")
def _find_no_duplicates(self, name, domain=None, path=None):
"""Both ``__get_item__`` and ``get`` call this function: it's never
used elsewhere in Requests.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:raises KeyError: if cookie is not found
:raises CookieConflictError: if there are multiple cookies
that match name and optionally domain and path
:return: cookie.value
"""
toReturn = None
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
if toReturn is not None:
# if there are multiple cookies that meet passed in criteria
raise CookieConflictError(
f"There are multiple cookies with name, {name!r}"
)
# we will eventually return this as long as no cookie conflict
toReturn = cookie.value
if toReturn:
return toReturn
raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")
def __getstate__(self):
"""Unlike a normal CookieJar, this class is pickleable."""
state = self.__dict__.copy()
# remove the unpickleable RLock object
state.pop("_cookies_lock")
return state
def __setstate__(self, state):
"""Unlike a normal CookieJar, this class is pickleable."""
self.__dict__.update(state)
if "_cookies_lock" not in self.__dict__:
self._cookies_lock = threading.RLock()
def copy(self):
"""Return a copy of this RequestsCookieJar."""
new_cj = RequestsCookieJar()
new_cj.set_policy(self.get_policy())
new_cj.update(self)
return new_cj
def get_policy(self):
"""Return the CookiePolicy instance used."""
return self._policy
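# Dict-style access on the jar, as promised above (a minimal sketch; the
# Cookie repr is abbreviated here):
#
#   >>> jar = RequestsCookieJar()
#   >>> jar.set("token", "abc123", domain="example.com", path="/")
#   Cookie(version=0, name='token', value='abc123', ...)
#   >>> jar["token"]
#   'abc123'
#   >>> dict(jar)
#   {'token': 'abc123'}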
def _copy_cookie_jar(jar):
if jar is None:
return None
if hasattr(jar, "copy"):
# We're dealing with an instance of RequestsCookieJar
return jar.copy()
# We're dealing with a generic CookieJar instance
new_jar = copy.copy(jar)
new_jar.clear()
for cookie in jar:
new_jar.set_cookie(copy.copy(cookie))
return new_jar
def create_cookie(name, value, **kwargs):
"""Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
"""
result = {
"version": 0,
"name": name,
"value": value,
"port": None,
"domain": "",
"path": "/",
"secure": False,
"expires": None,
"discard": True,
"comment": None,
"comment_url": None,
"rest": {"HttpOnly": None},
"rfc2109": False,
}
badargs = set(kwargs) - set(result)
if badargs:
raise TypeError(
f"create_cookie() got unexpected keyword arguments: {list(badargs)}"
)
result.update(kwargs)
result["port_specified"] = bool(result["port"])
result["domain_specified"] = bool(result["domain"])
result["domain_initial_dot"] = result["domain"].startswith(".")
result["path_specified"] = bool(result["path"])
return cookielib.Cookie(**result)
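# For example, a minimally specified cookie: every other attribute takes the
# defaults listed above, including the empty "supercookie" domain:
#
#   >>> c = create_cookie("session", "xyz")
#   >>> c.domain, c.path, c.secure
#   ('', '/', False)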
def morsel_to_cookie(morsel):
"""Convert a Morsel object into a Cookie containing the one k/v pair."""
expires = None
if morsel["max-age"]:
try:
expires = int(time.time() + int(morsel["max-age"]))
except ValueError:
raise TypeError(f"max-age: {morsel['max-age']} must be integer")
elif morsel["expires"]:
time_template = "%a, %d-%b-%Y %H:%M:%S GMT"
expires = calendar.timegm(time.strptime(morsel["expires"], time_template))
return create_cookie(
comment=morsel["comment"],
comment_url=bool(morsel["comment"]),
discard=False,
domain=morsel["domain"],
expires=expires,
name=morsel.key,
path=morsel["path"],
port=None,
rest={"HttpOnly": morsel["httponly"]},
rfc2109=False,
secure=bool(morsel["secure"]),
value=morsel.value,
version=morsel["version"] or 0,
)
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:param cookiejar: (optional) A cookiejar to add the cookies to.
:param overwrite: (optional) If False, will not replace cookies
already in the jar with new ones.
:rtype: CookieJar
"""
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookie_dict is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookie_dict:
if overwrite or (name not in names_from_jar):
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
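# A sketch of round-tripping a plain dict through a jar; with
# ``overwrite=False`` the existing cookie wins:
#
#   >>> jar = cookiejar_from_dict({"a": "1"})
#   >>> jar = cookiejar_from_dict({"a": "2"}, cookiejar=jar, overwrite=False)
#   >>> jar.get_dict()
#   {'a': '1'}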
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
:rtype: CookieJar
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError("You can only merge into CookieJar")
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/requests/cookies.py
|
Python
|
mit
| 18,560 |
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from pip._vendor.urllib3.exceptions import HTTPError as BaseHTTPError
from .compat import JSONDecodeError as CompatJSONDecodeError
class RequestException(IOError):
"""There was an ambiguous exception that occurred while handling your
request.
"""
def __init__(self, *args, **kwargs):
"""Initialize RequestException with `request` and `response` objects."""
response = kwargs.pop("response", None)
self.response = response
self.request = kwargs.pop("request", None)
if response is not None and not self.request and hasattr(response, "request"):
self.request = self.response.request
super().__init__(*args, **kwargs)
class InvalidJSONError(RequestException):
"""A JSON error occurred."""
class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError):
"""Couldn't decode the text into json"""
def __init__(self, *args, **kwargs):
"""
Construct the JSONDecodeError instance first with all
        args. Then use its args to construct the IOError so that
        the JSON-specific args aren't used as IOError-specific args
and the error message from JSONDecodeError is preserved.
"""
CompatJSONDecodeError.__init__(self, *args)
InvalidJSONError.__init__(self, *self.args, **kwargs)
class HTTPError(RequestException):
"""An HTTP error occurred."""
class ConnectionError(RequestException):
"""A Connection error occurred."""
class ProxyError(ConnectionError):
"""A proxy error occurred."""
class SSLError(ConnectionError):
"""An SSL error occurred."""
class Timeout(RequestException):
"""The request timed out.
Catching this error will catch both
:exc:`~requests.exceptions.ConnectTimeout` and
:exc:`~requests.exceptions.ReadTimeout` errors.
"""
class ConnectTimeout(ConnectionError, Timeout):
"""The request timed out while trying to connect to the remote server.
Requests that produced this error are safe to retry.
"""
class ReadTimeout(Timeout):
"""The server did not send any data in the allotted amount of time."""
class URLRequired(RequestException):
"""A valid URL is required to make a request."""
class TooManyRedirects(RequestException):
"""Too many redirects."""
class MissingSchema(RequestException, ValueError):
"""The URL scheme (e.g. http or https) is missing."""
class InvalidSchema(RequestException, ValueError):
"""The URL scheme provided is either invalid or unsupported."""
class InvalidURL(RequestException, ValueError):
"""The URL provided was somehow invalid."""
class InvalidHeader(RequestException, ValueError):
"""The header value provided was somehow invalid."""
class InvalidProxyURL(InvalidURL):
"""The proxy URL provided is invalid."""
class ChunkedEncodingError(RequestException):
"""The server declared chunked encoding but sent an invalid chunk."""
class ContentDecodingError(RequestException, BaseHTTPError):
"""Failed to decode response content."""
class StreamConsumedError(RequestException, TypeError):
"""The content for this response was already consumed."""
class RetryError(RequestException):
"""Custom retries logic failed"""
class UnrewindableBodyError(RequestException):
"""Requests encountered an error when trying to rewind a body."""
# Warnings
class RequestsWarning(Warning):
"""Base warning for Requests."""
class FileModeWarning(RequestsWarning, DeprecationWarning):
"""A file was opened in text mode, but Requests determined its binary length."""
class RequestsDependencyWarning(RequestsWarning):
"""An imported dependency doesn't match the expected version range."""
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/requests/exceptions.py
|
Python
|
mit
| 3,823 |
"""Module containing bug report helper(s)."""
import json
import platform
import ssl
import sys
from pip._vendor import idna
from pip._vendor import urllib3
from . import __version__ as requests_version
charset_normalizer = None
try:
from pip._vendor import chardet
except ImportError:
chardet = None
try:
from pip._vendor.urllib3.contrib import pyopenssl
except ImportError:
pyopenssl = None
OpenSSL = None
cryptography = None
else:
import cryptography
import OpenSSL
def _implementation():
"""Return a dict with the Python implementation and version.
Provide both the name and the version of the Python implementation
currently running. For example, on CPython 3.10.3 it will return
{'name': 'CPython', 'version': '3.10.3'}.
This function works best on CPython and PyPy: in particular, it probably
doesn't work for Jython or IronPython. Future investigation should be done
to work out the correct shape of the code for those platforms.
"""
implementation = platform.python_implementation()
if implementation == "CPython":
implementation_version = platform.python_version()
elif implementation == "PyPy":
implementation_version = "{}.{}.{}".format(
sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro,
)
if sys.pypy_version_info.releaselevel != "final":
implementation_version = "".join(
[implementation_version, sys.pypy_version_info.releaselevel]
)
elif implementation == "Jython":
implementation_version = platform.python_version() # Complete Guess
elif implementation == "IronPython":
implementation_version = platform.python_version() # Complete Guess
else:
implementation_version = "Unknown"
return {"name": implementation, "version": implementation_version}
def info():
"""Generate information for a bug report."""
try:
platform_info = {
"system": platform.system(),
"release": platform.release(),
}
except OSError:
platform_info = {
"system": "Unknown",
"release": "Unknown",
}
implementation_info = _implementation()
urllib3_info = {"version": urllib3.__version__}
charset_normalizer_info = {"version": None}
chardet_info = {"version": None}
if charset_normalizer:
charset_normalizer_info = {"version": charset_normalizer.__version__}
if chardet:
chardet_info = {"version": chardet.__version__}
pyopenssl_info = {
"version": None,
"openssl_version": "",
}
if OpenSSL:
pyopenssl_info = {
"version": OpenSSL.__version__,
"openssl_version": f"{OpenSSL.SSL.OPENSSL_VERSION_NUMBER:x}",
}
cryptography_info = {
"version": getattr(cryptography, "__version__", ""),
}
idna_info = {
"version": getattr(idna, "__version__", ""),
}
system_ssl = ssl.OPENSSL_VERSION_NUMBER
system_ssl_info = {"version": f"{system_ssl:x}" if system_ssl is not None else ""}
return {
"platform": platform_info,
"implementation": implementation_info,
"system_ssl": system_ssl_info,
"using_pyopenssl": pyopenssl is not None,
"using_charset_normalizer": chardet is None,
"pyOpenSSL": pyopenssl_info,
"urllib3": urllib3_info,
"chardet": chardet_info,
"charset_normalizer": charset_normalizer_info,
"cryptography": cryptography_info,
"idna": idna_info,
"requests": {
"version": requests_version,
},
}
def main():
"""Pretty-print the bug information as JSON."""
print(json.dumps(info(), sort_keys=True, indent=2))
if __name__ == "__main__":
main()
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/requests/help.py
|
Python
|
mit
| 3,879 |
"""
requests.hooks
~~~~~~~~~~~~~~
This module provides the capabilities for the Requests hooks system.
Available hooks:
``response``:
The response generated from a Request.
"""
HOOKS = ["response"]
def default_hooks():
return {event: [] for event in HOOKS}
# TODO: response is the only one
def dispatch_hook(key, hooks, hook_data, **kwargs):
"""Dispatches a hook dictionary on a given piece of data."""
hooks = hooks or {}
hooks = hooks.get(key)
if hooks:
if hasattr(hooks, "__call__"):
hooks = [hooks]
for hook in hooks:
_hook_data = hook(hook_data, **kwargs)
if _hook_data is not None:
hook_data = _hook_data
return hook_data
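# A response hook in practice (a sketch, assuming the stand-alone ``requests``
# package and the live httpbin.org service). A hook may return a replacement
# object, or None to keep ``hook_data`` unchanged:
#
#   >>> import requests
#   >>> def log_status(response, **kwargs):
#   ...     print(response.status_code)
#   >>> requests.get("https://httpbin.org/get",
#   ...              hooks={"response": log_status})
#   200
#   <Response [200]>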
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/requests/hooks.py
|
Python
|
mit
| 733 |
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import datetime
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
import encodings.idna # noqa: F401
from io import UnsupportedOperation
from pip._vendor.urllib3.exceptions import (
DecodeError,
LocationParseError,
ProtocolError,
ReadTimeoutError,
SSLError,
)
from pip._vendor.urllib3.fields import RequestField
from pip._vendor.urllib3.filepost import encode_multipart_formdata
from pip._vendor.urllib3.util import parse_url
from ._internal_utils import to_native_string, unicode_is_ascii
from .auth import HTTPBasicAuth
from .compat import (
Callable,
JSONDecodeError,
Mapping,
basestring,
builtin_str,
chardet,
cookielib,
)
from .compat import json as complexjson
from .compat import urlencode, urlsplit, urlunparse
from .cookies import _copy_cookie_jar, cookiejar_from_dict, get_cookie_header
from .exceptions import (
ChunkedEncodingError,
ConnectionError,
ContentDecodingError,
HTTPError,
InvalidJSONError,
InvalidURL,
)
from .exceptions import JSONDecodeError as RequestsJSONDecodeError
from .exceptions import MissingSchema
from .exceptions import SSLError as RequestsSSLError
from .exceptions import StreamConsumedError
from .hooks import default_hooks
from .status_codes import codes
from .structures import CaseInsensitiveDict
from .utils import (
check_header_validity,
get_auth_from_url,
guess_filename,
guess_json_utf,
iter_slices,
parse_header_links,
requote_uri,
stream_decode_response_unicode,
super_len,
to_key_val_list,
)
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin:
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = "/"
url.append(path)
query = p.query
if query:
url.append("?")
url.append(query)
return "".join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, "read"):
return data
elif hasattr(data, "__iter__"):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, "__iter__"):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(
k.encode("utf-8") if isinstance(k, str) else k,
v.encode("utf-8") if isinstance(v, str) else v,
)
)
return urlencode(result, doseq=True)
else:
return data
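    # For example (doseq urlencoding; order is kept for a list of 2-tuples):
    #
    #   >>> RequestEncodingMixin._encode_params([("k", "v1"), ("k", "v2")])
    #   'k=v1&k=v2'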
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
        or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if not files:
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, "__iter__"):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(
field.decode("utf-8")
if isinstance(field, bytes)
else field,
v.encode("utf-8") if isinstance(v, str) else v,
)
)
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
elif hasattr(fp, "read"):
fdata = fp.read()
elif fp is None:
continue
else:
fdata = fp
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin:
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError(f'Unsupported event specified, with event name "{event}"')
if isinstance(hook, Callable):
self.hooks[event].append(hook)
elif hasattr(hook, "__iter__"):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: URL parameters to append to the URL. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(
self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None,
json=None,
):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return f"<Request [{self.method}]>"
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(
self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None,
json=None,
):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return f"<PreparedRequest [{self.method}]>"
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
from pip._vendor import idna
try:
host = idna.encode(host, uts46=True).decode("utf-8")
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
url = url.decode("utf8")
else:
url = str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data`, etc., to work around exceptions from `parse_url`, which
# handles RFC 3986 only.
if ":" in url and not url.lower().startswith("http"):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
raise MissingSchema(
f"Invalid URL {url!r}: No scheme supplied. "
f"Perhaps you meant http://{url}?"
)
if not host:
raise InvalidURL(f"Invalid URL {url!r}: No host supplied")
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL("URL has an invalid label.")
elif host.startswith(("*", ".")):
raise InvalidURL("URL has an invalid label.")
# Carefully reconstruct the network location
netloc = auth or ""
if netloc:
netloc += "@"
netloc += host
if port:
netloc += f":{port}"
# Bare domains aren't valid URLs.
if not path:
path = "/"
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = f"{query}&{enc_params}"
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
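# A minimal sketch of the behavior above (not from the vendored source;
# the URL and params are hypothetical): a root path is added to bare
# domains, and params are merged into the query string.
#
#     >>> p = PreparedRequest()
#     >>> p.prepare_url("https://example.com", {"q": "test"})
#     >>> p.url
#     'https://example.com/?q=test'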
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = "application/json"
try:
body = complexjson.dumps(json, allow_nan=False)
except ValueError as ve:
raise InvalidJSONError(ve, request=self)
if not isinstance(body, bytes):
body = body.encode("utf-8")
is_stream = all(
[
hasattr(data, "__iter__"),
not isinstance(data, (basestring, list, tuple, Mapping)),
]
)
if is_stream:
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
body = data
if getattr(body, "tell", None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except OSError:
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError(
"Streamed bodies and files are mutually exclusive."
)
if length:
self.headers["Content-Length"] = builtin_str(length)
else:
self.headers["Transfer-Encoding"] = "chunked"
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, "read"):
content_type = None
else:
content_type = "application/x-www-form-urlencoded"
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ("content-type" not in self.headers):
self.headers["Content-Type"] = content_type
self.body = body
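# A minimal sketch of the common form-encoded path through prepare_body
# (not from the vendored source; the data is hypothetical, and headers
# are initialized by hand since prepare() normally does that first):
#
#     >>> p = PreparedRequest()
#     >>> p.headers = CaseInsensitiveDict()
#     >>> p.prepare_body(data={"k": "v"}, files=None)
#     >>> p.body, p.headers["Content-Type"]
#     ('k=v', 'application/x-www-form-urlencoded')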
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fall back
# to Transfer-Encoding: chunked.
self.headers["Content-Length"] = builtin_str(length)
elif (
self.method not in ("GET", "HEAD")
and self.headers.get("Content-Length") is None
):
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers["Content-Length"] = "0"
def prepare_auth(self, auth, url=""):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers["Cookie"] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is falsy.
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response:
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
"_content",
"status_code",
"headers",
"url",
"history",
"encoding",
"reason",
"cookies",
"elapsed",
"request",
]
def __init__(self):
self._content = False
self._content_consumed = False
self._next = None
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
#: This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, "_content_consumed", True)
setattr(self, "raw", None)
def __repr__(self):
return f"<Response [{self.status_code}]>"
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return "location" in self.headers and self.status_code in REDIRECT_STATI
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
return "location" in self.headers and self.status_code in (
codes.moved_permanently,
codes.permanent_redirect,
)
@property
def next(self):
"""Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
return self._next
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the charset_normalizer or chardet libraries."""
return chardet.detect(self.content)["encoding"]
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives, in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, "stream"):
try:
yield from self.raw.stream(chunk_size, decode_content=True)
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
except SSLError as e:
raise RequestsSSLError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError(
f"chunk_size must be an int, it is instead a {type(chunk_size)}."
)
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
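# A minimal sketch of the usual streaming download loop built on
# iter_content (not from the vendored source; URL and filename are
# hypothetical):
#
#     with requests.get("https://example.com/big.bin", stream=True) as r:
#         with open("big.bin", "wb") as fd:
#             for chunk in r.iter_content(chunk_size=8192):
#                 fd.write(chunk)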
def iter_lines(
self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None
):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(
chunk_size=chunk_size, decode_unicode=decode_unicode
):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
yield from lines
if pending is not None:
yield pending
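# A minimal sketch of line-by-line consumption of a streamed response
# (not from the vendored source; the URL is hypothetical):
#
#     r = requests.get("https://example.com/stream", stream=True)
#     for line in r.iter_lines(decode_unicode=True):
#         if line:  # filter out keep-alive newlines
#             print(line)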
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError("The content for this response was already consumed")
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = b"".join(self.iter_content(CONTENT_CHUNK_SIZE)) or b""
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``charset_normalizer`` or ``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return ""
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors="replace")
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors="replace")
return content
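# A minimal sketch of overriding the header-derived encoding before
# reading r.text (not from the vendored source; the URL is hypothetical):
#
#     r = requests.get("https://example.com")
#     r.encoding            # e.g. 'ISO-8859-1', taken from the headers
#     r.encoding = "utf-8"  # set this first if you know better
#     body = r.text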
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises requests.exceptions.JSONDecodeError: If the response body does not
contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using charset_normalizer to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(self.content.decode(encoding), **kwargs)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
except JSONDecodeError as e:
raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
try:
return complexjson.loads(self.text, **kwargs)
except JSONDecodeError as e:
# Catch JSON-related errors and raise as requests.JSONDecodeError
# This aliases json.JSONDecodeError and simplejson.JSONDecodeError
raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
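# A minimal sketch of handling a body that may not be valid JSON (not
# from the vendored source; the URL is hypothetical):
#
#     r = requests.get("https://httpbin.org/get")
#     try:
#         payload = r.json()
#     except requests.exceptions.JSONDecodeError:
#         payload = None  # body was not valid JSON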
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get("link")
resolved_links = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get("rel") or link.get("url")
resolved_links[key] = link
return resolved_links
def raise_for_status(self):
"""Raises :class:`HTTPError`, if one occurred."""
http_error_msg = ""
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode("utf-8")
except UnicodeDecodeError:
reason = self.reason.decode("iso-8859-1")
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = (
f"{self.status_code} Client Error: {reason} for url: {self.url}"
)
elif 500 <= self.status_code < 600:
http_error_msg = (
f"{self.status_code} Server Error: {reason} for url: {self.url}"
)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
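# A minimal sketch of the usual raise_for_status pattern (not from the
# vendored source; the URL is hypothetical):
#
#     r = requests.get("https://httpbin.org/status/404")
#     try:
#         r.raise_for_status()
#     except requests.exceptions.HTTPError as err:
#         print(err)  # 404 Client Error: NOT FOUND for url: ...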
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, "release_conn", None)
if release_conn is not None:
release_conn()
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/requests/models.py
|
Python
|
mit
| 35,287 |
import sys
# This code exists for backwards compatibility reasons.
# I don't like it either. Just look the other way. :)
for package in ('urllib3', 'idna', 'chardet'):
vendored_package = "pip._vendor." + package
locals()[package] = __import__(vendored_package)
# This traversal is apparently necessary such that the identities are
# preserved (requests.packages.urllib3.* is urllib3.*)
for mod in list(sys.modules):
if mod == vendored_package or mod.startswith(vendored_package + '.'):
unprefixed_mod = mod[len("pip._vendor."):]
sys.modules['pip._vendor.requests.packages.' + unprefixed_mod] = sys.modules[mod]
# Kinda cool, though, right?
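# A minimal check of the aliasing above (not from the vendored source;
# assumes pip's vendored tree is importable): both module paths resolve
# to the same object.
#
#     >>> import sys
#     >>> from pip._vendor import urllib3
#     >>> import pip._vendor.requests.packages.urllib3
#     >>> sys.modules["pip._vendor.requests.packages.urllib3"] is urllib3
#     True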
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/requests/packages.py
|
Python
|
mit
| 695 |
"""
requests.sessions
~~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
import sys
import time
from collections import OrderedDict
from datetime import timedelta
from ._internal_utils import to_native_string
from .adapters import HTTPAdapter
from .auth import _basic_auth_str
from .compat import Mapping, cookielib, urljoin, urlparse
from .cookies import (
RequestsCookieJar,
cookiejar_from_dict,
extract_cookies_to_jar,
merge_cookies,
)
from .exceptions import (
ChunkedEncodingError,
ContentDecodingError,
InvalidSchema,
TooManyRedirects,
)
from .hooks import default_hooks, dispatch_hook
# formerly defined here, reexposed here for backward compatibility
from .models import ( # noqa: F401
DEFAULT_REDIRECT_LIMIT,
REDIRECT_STATI,
PreparedRequest,
Request,
)
from .status_codes import codes
from .structures import CaseInsensitiveDict
from .utils import ( # noqa: F401
DEFAULT_PORTS,
default_headers,
get_auth_from_url,
get_environ_proxies,
get_netrc_auth,
requote_uri,
resolve_proxies,
rewind_body,
should_bypass_proxies,
to_key_val_list,
)
# Preferred clock, based on which one is more accurate on a given system.
if sys.platform == "win32":
preferred_clock = time.perf_counter
else:
preferred_clock = time.time
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
"""Determines appropriate setting for a given request, taking into account
the explicit setting on that request, and the setting in the session. If a
setting is a dictionary, they will be merged together using `dict_class`
"""
if session_setting is None:
return request_setting
if request_setting is None:
return session_setting
# Bypass if not a dictionary (e.g. verify)
if not (
isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping)
):
return request_setting
merged_setting = dict_class(to_key_val_list(session_setting))
merged_setting.update(to_key_val_list(request_setting))
# Remove keys that are set to None. Extract keys first to avoid altering
# the dictionary during iteration.
none_keys = [k for (k, v) in merged_setting.items() if v is None]
for key in none_keys:
del merged_setting[key]
return merged_setting
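# A minimal sketch (not from the vendored source; values hypothetical):
# request values win over session values, and keys set to None on the
# request are removed from the merge.
#
#     >>> dict(merge_setting({"b": "2", "c": None}, {"a": "1", "b": "0"}))
#     {'a': '1', 'b': '2'}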
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
"""Properly merges both requests and session hooks.
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
"""
if session_hooks is None or session_hooks.get("response") == []:
return request_hooks
if request_hooks is None or request_hooks.get("response") == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin:
def get_redirect_target(self, resp):
"""Receives a Response. Returns a redirect URI or ``None``"""
# Due to the nature of how requests processes redirects this method will
# be called at least once upon the original response and at least twice
# on each subsequent redirect response (if any).
# If a custom mixin is used to handle this logic, it may be advantageous
# to cache the redirect location onto the response object as a private
# attribute.
if resp.is_redirect:
location = resp.headers["location"]
# Currently the underlying http module on py3 decodes headers
# in latin1, but empirical evidence suggests that latin1 is very
# rarely used with non-ASCII characters in HTTP headers.
# A UTF-8 encoded header is far more likely than a latin1 one.
# This causes incorrect handling of UTF8 encoded location headers.
# To solve this, we re-encode the location in latin1.
location = location.encode("latin1")
return to_native_string(location, "utf8")
return None
def should_strip_auth(self, old_url, new_url):
"""Decide whether Authorization header should be removed when redirecting"""
old_parsed = urlparse(old_url)
new_parsed = urlparse(new_url)
if old_parsed.hostname != new_parsed.hostname:
return True
# Special case: allow http -> https redirect when using the standard
# ports. This isn't specified by RFC 7235, but is kept to avoid
# breaking backwards compatibility with older versions of requests
# that allowed any redirects on the same host.
if (
old_parsed.scheme == "http"
and old_parsed.port in (80, None)
and new_parsed.scheme == "https"
and new_parsed.port in (443, None)
):
return False
# Handle default port usage corresponding to scheme.
changed_port = old_parsed.port != new_parsed.port
changed_scheme = old_parsed.scheme != new_parsed.scheme
default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
if (
not changed_scheme
and old_parsed.port in default_port
and new_parsed.port in default_port
):
return False
# Standard case: root URI must match
return changed_port or changed_scheme
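# A minimal sketch of how the policy above plays out (not from the
# vendored source; hosts are hypothetical):
#
#     m = SessionRedirectMixin()
#     m.should_strip_auth("http://acme.io/", "https://acme.io/")        # False: http->https upgrade
#     m.should_strip_auth("https://acme.io/", "https://other.io/")      # True: host changed
#     m.should_strip_auth("https://acme.io/", "https://acme.io:8443/")  # True: port changed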
def resolve_redirects(
self,
resp,
req,
stream=False,
timeout=None,
verify=True,
cert=None,
proxies=None,
yield_requests=False,
**adapter_kwargs,
):
"""Receives a Response. Returns a generator of Responses or Requests."""
hist = [] # keep track of history
url = self.get_redirect_target(resp)
previous_fragment = urlparse(req.url).fragment
while url:
prepared_request = req.copy()
# Update history and keep track of redirects.
# resp.history must ignore the original request in this loop
hist.append(resp)
resp.history = hist[1:]
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if len(resp.history) >= self.max_redirects:
raise TooManyRedirects(
f"Exceeded {self.max_redirects} redirects.", response=resp
)
# Release the connection back into the pool.
resp.close()
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith("//"):
parsed_rurl = urlparse(resp.url)
url = ":".join([to_native_string(parsed_rurl.scheme), url])
# Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
parsed = urlparse(url)
if parsed.fragment == "" and previous_fragment:
parsed = parsed._replace(fragment=previous_fragment)
elif parsed.fragment:
previous_fragment = parsed.fragment
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
self.rebuild_method(prepared_request, resp)
# https://github.com/psf/requests/issues/1084
if resp.status_code not in (
codes.temporary_redirect,
codes.permanent_redirect,
):
# https://github.com/psf/requests/issues/3490
purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding")
for header in purged_headers:
prepared_request.headers.pop(header, None)
prepared_request.body = None
headers = prepared_request.headers
headers.pop("Cookie", None)
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
merge_cookies(prepared_request._cookies, self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# A failed tell() sets `_body_position` to `object()`. This non-None
# value ensures `rewindable` will be True, allowing us to raise an
# UnrewindableBodyError, instead of hanging the connection.
rewindable = prepared_request._body_position is not None and (
"Content-Length" in headers or "Transfer-Encoding" in headers
)
# Attempt to rewind consumed file-like object.
if rewindable:
rewind_body(prepared_request)
# Override the original request.
req = prepared_request
if yield_requests:
yield req
else:
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs,
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
# extract redirect url, if any, for the next loop
url = self.get_redirect_target(resp)
yield resp
def rebuild_auth(self, prepared_request, response):
"""When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if "Authorization" in headers and self.should_strip_auth(
response.request.url, url
):
# If we get redirected to a new host, we should strip out any
# authentication headers.
del headers["Authorization"]
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
def rebuild_proxies(self, prepared_request, proxies):
"""This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
:rtype: dict
"""
headers = prepared_request.headers
scheme = urlparse(prepared_request.url).scheme
new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env)
if "Proxy-Authorization" in headers:
del headers["Proxy-Authorization"]
try:
username, password = get_auth_from_url(new_proxies[scheme])
except KeyError:
username, password = None, None
if username and password:
headers["Proxy-Authorization"] = _basic_auth_str(username, password)
return new_proxies
def rebuild_method(self, prepared_request, response):
"""When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = prepared_request.method
# https://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.see_other and method != "HEAD":
method = "GET"
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if response.status_code == codes.found and method != "HEAD":
method = "GET"
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if response.status_code == codes.moved and method == "POST":
method = "GET"
prepared_request.method = method
class Session(SessionRedirectMixin):
"""A Requests session.
Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
>>> import requests
>>> s = requests.Session()
>>> s.get('https://httpbin.org/get')
<Response [200]>
Or as a context manager::
>>> with requests.Session() as s:
... s.get('https://httpbin.org/get')
<Response [200]>
"""
__attrs__ = [
"headers",
"cookies",
"auth",
"proxies",
"hooks",
"params",
"verify",
"cert",
"adapters",
"stream",
"trust_env",
"max_redirects",
]
def __init__(self):
#: A case-insensitive dictionary of headers to be sent on each
#: :class:`Request <Request>` sent from this
#: :class:`Session <Session>`.
self.headers = default_headers()
#: Default Authentication tuple or object to attach to
#: :class:`Request <Request>`.
self.auth = None
#: Dictionary mapping protocol or protocol and host to the URL of the proxy
#: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
#: be used on each :class:`Request <Request>`.
self.proxies = {}
#: Event-handling hooks.
self.hooks = default_hooks()
#: Dictionary of querystring data to attach to each
#: :class:`Request <Request>`. The dictionary values may be lists for
#: representing multivalued query parameters.
self.params = {}
#: Stream response content default.
self.stream = False
#: SSL Verification default.
#: Defaults to `True`, requiring requests to verify the TLS certificate at the
#: remote end.
#: If verify is set to `False`, requests will accept any TLS certificate
#: presented by the server, and will ignore hostname mismatches and/or
#: expired certificates, which will make your application vulnerable to
#: man-in-the-middle (MitM) attacks.
#: Only set this to `False` for testing.
self.verify = True
#: SSL client certificate default, if String, path to ssl client
#: cert file (.pem). If Tuple, ('cert', 'key') pair.
self.cert = None
#: Maximum number of redirects allowed. If the request exceeds this
#: limit, a :class:`TooManyRedirects` exception is raised.
#: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
#: 30.
self.max_redirects = DEFAULT_REDIRECT_LIMIT
#: Trust environment settings for proxy configuration, default
#: authentication and similar.
self.trust_env = True
#: A CookieJar containing all currently outstanding cookies set on this
#: session. By default it is a
#: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
#: may be any other ``cookielib.CookieJar`` compatible object.
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
self.adapters = OrderedDict()
self.mount("https://", HTTPAdapter())
self.mount("http://", HTTPAdapter())
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
:rtype: requests.PreparedRequest
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies
)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(
request.headers, self.headers, dict_class=CaseInsensitiveDict
),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p
def request(
self,
method,
url,
params=None,
data=None,
headers=None,
cookies=None,
files=None,
auth=None,
timeout=None,
allow_redirects=True,
proxies=None,
hooks=None,
stream=None,
verify=None,
cert=None,
json=None,
):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Whether to follow redirects. Defaults to ``True``.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol or protocol and
hostname to the URL of the proxy.
:param stream: (optional) if ``False``, the response content will be
immediately downloaded. Defaults to ``False``.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``. When set to
``False``, requests will accept any TLS certificate presented by
the server, and will ignore hostname mismatches and/or expired
certificates, which will make your application vulnerable to
man-in-the-middle (MitM) attacks. Setting verify to ``False``
may be useful during local development or testing.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
:rtype: requests.Response
"""
# Create the Request.
req = Request(
method=method.upper(),
url=url,
headers=headers,
files=files,
data=data or {},
json=json,
params=params or {},
auth=auth,
cookies=cookies,
hooks=hooks,
)
prep = self.prepare_request(req)
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Send the request.
send_kwargs = {
"timeout": timeout,
"allow_redirects": allow_redirects,
}
send_kwargs.update(settings)
resp = self.send(prep, **send_kwargs)
return resp
def get(self, url, **kwargs):
r"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault("allow_redirects", True)
return self.request("GET", url, **kwargs)
def options(self, url, **kwargs):
r"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault("allow_redirects", True)
return self.request("OPTIONS", url, **kwargs)
def head(self, url, **kwargs):
r"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault("allow_redirects", False)
return self.request("HEAD", url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
r"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request("POST", url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
r"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request("PUT", url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
r"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request("PATCH", url, data=data, **kwargs)
def delete(self, url, **kwargs):
r"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request("DELETE", url, **kwargs)
def send(self, request, **kwargs):
"""Send a given PreparedRequest.
:rtype: requests.Response
"""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault("stream", self.stream)
kwargs.setdefault("verify", self.verify)
kwargs.setdefault("cert", self.cert)
if "proxies" not in kwargs:
kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if isinstance(request, Request):
raise ValueError("You can only send PreparedRequests.")
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop("allow_redirects", True)
stream = kwargs.get("stream")
hooks = request.hooks
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = preferred_clock()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
elapsed = preferred_clock() - start
r.elapsed = timedelta(seconds=elapsed)
# Response manipulation hooks
r = dispatch_hook("response", hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Resolve redirects if allowed.
if allow_redirects:
# Redirect resolving generator.
gen = self.resolve_redirects(r, request, **kwargs)
history = [resp for resp in gen]
else:
history = []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
# If redirects aren't being followed, store the next request on the Response so Response.next can return it.
if not allow_redirects:
try:
r._next = next(
self.resolve_redirects(r, request, yield_requests=True, **kwargs)
)
except StopIteration:
pass
if not stream:
r.content
return r
def merge_environment_settings(self, url, proxies, stream, verify, cert):
"""
Check the environment and merge it with some settings.
:rtype: dict
"""
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
no_proxy = proxies.get("no_proxy") if proxies is not None else None
env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Look for requests environment configuration
# and be compatible with cURL.
if verify is True or verify is None:
verify = (
os.environ.get("REQUESTS_CA_BUNDLE")
or os.environ.get("CURL_CA_BUNDLE")
or verify
)
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert}
def get_adapter(self, url):
"""
Returns the appropriate connection adapter for the given URL.
:rtype: requests.adapters.BaseAdapter
"""
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix.lower()):
return adapter
# Nothing matches :-/
raise InvalidSchema(f"No connection adapters were found for {url!r}")
def close(self):
"""Closes all adapters and as such the session"""
for v in self.adapters.values():
v.close()
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix.
Adapters are sorted in descending order by prefix length.
"""
self.adapters[prefix] = adapter
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
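# A minimal usage sketch (not from the vendored source; the host and
# retry count are hypothetical): mounting a custom adapter for one
# host prefix.
#
#     s = Session()
#     s.mount("https://api.example.com", HTTPAdapter(max_retries=3))
#     # Longer prefixes are matched first, so this adapter wins over
#     # the default "https://" mount for that host.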
def __getstate__(self):
state = {attr: getattr(self, attr, None) for attr in self.__attrs__}
return state
def __setstate__(self, state):
for attr, value in state.items():
setattr(self, attr, value)
def session():
"""
Returns a :class:`Session` for context-management.
.. deprecated:: 1.0.0
This method has been deprecated since version 1.0.0 and is only kept for
backwards compatibility. New code should use :class:`~requests.sessions.Session`
to create a session. This may be removed at a future date.
:rtype: Session
"""
return Session()
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/requests/sessions.py
|
Python
|
mit
| 30,180 |
r"""
The ``codes`` object defines a mapping from common names for HTTP statuses
to their numerical codes, accessible either as attributes or as dictionary
items.
Example::
>>> import requests
>>> requests.codes['temporary_redirect']
307
>>> requests.codes.teapot
418
>>> requests.codes['\o/']
200
Some codes have multiple names, and both upper- and lower-case versions of
the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
``codes.okay`` all correspond to the HTTP status code 200.
"""
from .structures import LookupDict
_codes = {
# Informational.
100: ("continue",),
101: ("switching_protocols",),
102: ("processing",),
103: ("checkpoint",),
122: ("uri_too_long", "request_uri_too_long"),
200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"),
201: ("created",),
202: ("accepted",),
203: ("non_authoritative_info", "non_authoritative_information"),
204: ("no_content",),
205: ("reset_content", "reset"),
206: ("partial_content", "partial"),
207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"),
208: ("already_reported",),
226: ("im_used",),
# Redirection.
300: ("multiple_choices",),
301: ("moved_permanently", "moved", "\\o-"),
302: ("found",),
303: ("see_other", "other"),
304: ("not_modified",),
305: ("use_proxy",),
306: ("switch_proxy",),
307: ("temporary_redirect", "temporary_moved", "temporary"),
308: (
"permanent_redirect",
"resume_incomplete",
"resume",
), # "resume" and "resume_incomplete" to be removed in 3.0
# Client Error.
400: ("bad_request", "bad"),
401: ("unauthorized",),
402: ("payment_required", "payment"),
403: ("forbidden",),
404: ("not_found", "-o-"),
405: ("method_not_allowed", "not_allowed"),
406: ("not_acceptable",),
407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"),
408: ("request_timeout", "timeout"),
409: ("conflict",),
410: ("gone",),
411: ("length_required",),
412: ("precondition_failed", "precondition"),
413: ("request_entity_too_large",),
414: ("request_uri_too_large",),
415: ("unsupported_media_type", "unsupported_media", "media_type"),
416: (
"requested_range_not_satisfiable",
"requested_range",
"range_not_satisfiable",
),
417: ("expectation_failed",),
418: ("im_a_teapot", "teapot", "i_am_a_teapot"),
421: ("misdirected_request",),
422: ("unprocessable_entity", "unprocessable"),
423: ("locked",),
424: ("failed_dependency", "dependency"),
425: ("unordered_collection", "unordered"),
426: ("upgrade_required", "upgrade"),
428: ("precondition_required", "precondition"),
429: ("too_many_requests", "too_many"),
431: ("header_fields_too_large", "fields_too_large"),
444: ("no_response", "none"),
449: ("retry_with", "retry"),
450: ("blocked_by_windows_parental_controls", "parental_controls"),
451: ("unavailable_for_legal_reasons", "legal_reasons"),
499: ("client_closed_request",),
# Server Error.
500: ("internal_server_error", "server_error", "/o\\", "✗"),
501: ("not_implemented",),
502: ("bad_gateway",),
503: ("service_unavailable", "unavailable"),
504: ("gateway_timeout",),
505: ("http_version_not_supported", "http_version"),
506: ("variant_also_negotiates",),
507: ("insufficient_storage",),
509: ("bandwidth_limit_exceeded", "bandwidth"),
510: ("not_extended",),
511: ("network_authentication_required", "network_auth", "network_authentication"),
}
codes = LookupDict(name="status_codes")
def _init():
for code, titles in _codes.items():
for title in titles:
setattr(codes, title, code)
if not title.startswith(("\\", "/")):
setattr(codes, title.upper(), code)
def doc(code):
names = ", ".join(f"``{n}``" for n in _codes[code])
return "* %d: %s" % (code, names)
global __doc__
__doc__ = (
__doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes))
if __doc__ is not None
else None
)
_init()
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/requests/status_codes.py
|
Python
|
mit
| 4,235 |
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
from collections import OrderedDict
from .compat import Mapping, MutableMapping
class CaseInsensitiveDict(MutableMapping):
"""A case-insensitive ``dict``-like object.
Implements all methods and operations of
``MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = OrderedDict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())
def __eq__(self, other):
if isinstance(other, Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
class LookupDict(dict):
"""Dictionary lookup object."""
def __init__(self, name=None):
self.name = name
super().__init__()
def __repr__(self):
return f"<lookup '{self.name}'>"
def __getitem__(self, key):
# We allow fall-through here, so values default to None
return self.__dict__.get(key, None)
def get(self, key, default=None):
return self.__dict__.get(key, default)
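# A minimal sketch of the fall-through behavior (not from the vendored
# source): missing keys yield None instead of raising KeyError.
#
#     >>> from pip._vendor.requests.status_codes import codes
#     >>> codes["ok"]
#     200
#     >>> codes["not_a_status"] is None
#     True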
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/requests/structures.py
|
Python
|
mit
| 2,912 |
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import codecs
import contextlib
import io
import os
import re
import socket
import struct
import sys
import tempfile
import warnings
import zipfile
from collections import OrderedDict
from pip._vendor.urllib3.util import make_headers, parse_url
from . import certs
from .__version__ import __version__
# to_native_string is unused here, but imported here for backwards compatibility
from ._internal_utils import HEADER_VALIDATORS, to_native_string # noqa: F401
from .compat import (
Mapping,
basestring,
bytes,
getproxies,
getproxies_environment,
integer_types,
)
from .compat import parse_http_list as _parse_list_header
from .compat import (
proxy_bypass,
proxy_bypass_environment,
quote,
str,
unquote,
urlparse,
urlunparse,
)
from .cookies import cookiejar_from_dict
from .exceptions import (
FileModeWarning,
InvalidHeader,
InvalidURL,
UnrewindableBodyError,
)
from .structures import CaseInsensitiveDict
NETRC_FILES = (".netrc", "_netrc")
DEFAULT_CA_BUNDLE_PATH = certs.where()
DEFAULT_PORTS = {"http": 80, "https": 443}
# Ensure that ', ' is used to preserve previous delimiter behavior.
DEFAULT_ACCEPT_ENCODING = ", ".join(
re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"])
)
if sys.platform == "win32":
# provide a proxy_bypass version on Windows without DNS lookups
def proxy_bypass_registry(host):
try:
import winreg
except ImportError:
return False
try:
internetSettings = winreg.OpenKey(
winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Internet Settings",
)
# ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0])
# ProxyOverride is almost always a string
proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0]
except (OSError, ValueError):
return False
if not proxyEnable or not proxyOverride:
return False
# make a check value list from the registry entry: replace the
# '<local>' string by the localhost entry and the corresponding
# canonical entry.
proxyOverride = proxyOverride.split(";")
# now check if we match one of the registry values.
for test in proxyOverride:
if test == "<local>":
if "." not in host:
return True
test = test.replace(".", r"\.") # mask dots
test = test.replace("*", r".*") # change glob sequence
test = test.replace("?", r".") # change glob char
if re.match(test, host, re.I):
return True
return False
def proxy_bypass(host): # noqa
"""Return True, if the host should be bypassed.
Checks proxy settings gathered from the environment, if specified,
or the registry.
"""
if getproxies_environment():
return proxy_bypass_environment(host)
else:
return proxy_bypass_registry(host)
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, "items"):
d = d.items()
return d
def super_len(o):
total_length = None
current_position = 0
if hasattr(o, "__len__"):
total_length = len(o)
elif hasattr(o, "len"):
total_length = o.len
elif hasattr(o, "fileno"):
try:
fileno = o.fileno()
except (io.UnsupportedOperation, AttributeError):
# AttributeError is a surprising exception, seeing as how we've just checked
# that `hasattr(o, 'fileno')`. It happens for objects obtained via
# `Tarfile.extractfile()`, per issue 5229.
pass
else:
total_length = os.fstat(fileno).st_size
# Having used fstat to determine the file length, we need to
# confirm that this file was opened up in binary mode.
if "b" not in o.mode:
warnings.warn(
(
"Requests has determined the content-length for this "
"request using the binary size of the file: however, the "
"file has been opened in text mode (i.e. without the 'b' "
"flag in the mode). This may lead to an incorrect "
"content-length. In Requests 3.0, support will be removed "
"for files in text mode."
),
FileModeWarning,
)
if hasattr(o, "tell"):
try:
current_position = o.tell()
except OSError:
# This can happen in some weird situations, such as when the file
# is actually a special file descriptor like stdin. In this
# instance, we don't know what the length is, so set it to zero and
# let requests chunk it instead.
if total_length is not None:
current_position = total_length
else:
if hasattr(o, "seek") and total_length is None:
# StringIO and BytesIO have seek but no usable fileno
try:
# seek to end of file
o.seek(0, 2)
total_length = o.tell()
# seek back to current position to support
# partially read file-like objects
o.seek(current_position or 0)
except OSError:
total_length = 0
if total_length is None:
total_length = 0
return max(0, total_length - current_position)
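# Illustrative sketch (not part of requests): how super_len() accounts for the
# current read position of a seekable object. The demo helper and its values
# are hypothetical.
def _demo_super_len():
    import io
    buf = io.BytesIO(b"hello world")  # 11 bytes, cursor at 0
    assert super_len(buf) == 11
    buf.read(6)  # advance the cursor past b"hello "
    assert super_len(buf) == 5  # only the unread tail is counted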
def get_netrc_auth(url, raise_errors=False):
"""Returns the Requests tuple auth for a given url from netrc."""
netrc_file = os.environ.get("NETRC")
if netrc_file is not None:
netrc_locations = (netrc_file,)
else:
netrc_locations = (f"~/{f}" for f in NETRC_FILES)
try:
from netrc import NetrcParseError, netrc
netrc_path = None
for f in netrc_locations:
try:
loc = os.path.expanduser(f)
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See https://bugs.python.org/issue20164 &
# https://github.com/psf/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
        # Strip port numbers from netloc. This weird `if...encode` dance is
        # used for Python 3.2, which doesn't support unicode literals.
splitstr = b":"
if isinstance(url, str):
splitstr = splitstr.decode("ascii")
host = ri.netloc.split(splitstr)[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = 0 if _netrc[0] else 1
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, OSError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth unless explicitly asked to raise errors.
if raise_errors:
raise
# App Engine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, "name", None)
if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">":
return os.path.basename(name)
def extract_zipped_paths(path):
"""Replace nonexistent paths that look like they refer to a member of a zip
archive with the location of an extracted copy of the target, or else
just return the provided path unchanged.
"""
if os.path.exists(path):
# this is already a valid path, no need to do anything further
return path
# find the first valid part of the provided path and treat that as a zip archive
# assume the rest of the path is the name of a member in the archive
archive, member = os.path.split(path)
while archive and not os.path.exists(archive):
archive, prefix = os.path.split(archive)
if not prefix:
            # If prefix is empty after the split (in other words, archive was
            # unchanged by the split), stop here; otherwise we _can_ loop
            # forever on a rare corner case affecting a small number of users.
break
member = "/".join([prefix, member])
if not zipfile.is_zipfile(archive):
return path
zip_file = zipfile.ZipFile(archive)
if member not in zip_file.namelist():
return path
# we have a valid zip archive and a valid member of that archive
tmp = tempfile.gettempdir()
extracted_path = os.path.join(tmp, member.split("/")[-1])
if not os.path.exists(extracted_path):
        # read + write instead of extracting, so we get just the file without
        # creating nested folders; this also avoids a mkdir race condition
with atomic_open(extracted_path) as file_handler:
file_handler.write(zip_file.read(member))
return extracted_path
@contextlib.contextmanager
def atomic_open(filename):
"""Write a file to the disk in an atomic fashion"""
tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename))
try:
with os.fdopen(tmp_descriptor, "wb") as tmp_handler:
yield tmp_handler
os.replace(tmp_name, filename)
except BaseException:
os.remove(tmp_name)
raise
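# Illustrative sketch (not part of requests): atomic_open() stages the write
# in a temporary file in the same directory and os.replace()s it into place
# only on success, so readers never observe a half-written file. The path
# below is made up for demonstration.
def _demo_atomic_open():
    import os
    import tempfile
    target = os.path.join(tempfile.gettempdir(), "atomic_open_demo.bin")
    with atomic_open(target) as fh:
        fh.write(b"payload")  # staged in the temp file, not yet visible
    with open(target, "rb") as fh:
        assert fh.read() == b"payload"
    os.remove(target)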
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
    dictionary. If it can be, return an OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
Traceback (most recent call last):
...
ValueError: cannot encode objects that are not 2-tuples
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
:rtype: OrderedDict
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError("cannot encode objects that are not 2-tuples")
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
Traceback (most recent call last):
...
ValueError: cannot encode objects that are not 2-tuples
:rtype: list
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError("cannot encode objects that are not 2-tuples")
if isinstance(value, Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
:rtype: list
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
"""
result = {}
for item in _parse_list_header(value):
if "=" not in item:
result[item] = None
continue
name, value = item.split("=", 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
:rtype: str
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != "\\\\":
return value.replace("\\\\", "\\").replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
:rtype: dict
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:rtype: CookieJar
"""
return cookiejar_from_dict(cookie_dict, cj)
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
warnings.warn(
(
"In requests 3.0, get_encodings_from_content will be removed. For "
"more information, please see the discussion on issue #2266. (This"
" warning should only appear once.)"
),
DeprecationWarning,
)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (
charset_re.findall(content)
+ pragma_re.findall(content)
+ xml_re.findall(content)
)
def _parse_content_type_header(header):
"""Returns content type and parameters from given header
:param header: string
:return: tuple containing content type and dictionary of
parameters
"""
tokens = header.split(";")
content_type, params = tokens[0].strip(), tokens[1:]
params_dict = {}
items_to_strip = "\"' "
for param in params:
param = param.strip()
if param:
key, value = param, True
index_of_equals = param.find("=")
if index_of_equals != -1:
key = param[:index_of_equals].strip(items_to_strip)
value = param[index_of_equals + 1 :].strip(items_to_strip)
params_dict[key.lower()] = value
return content_type, params_dict
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
:rtype: str
"""
content_type = headers.get("content-type")
if not content_type:
return None
content_type, params = _parse_content_type_header(content_type)
if "charset" in params:
return params["charset"].strip("'\"")
if "text" in content_type:
return "ISO-8859-1"
if "application/json" in content_type:
# Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
return "utf-8"
def stream_decode_response_unicode(iterator, r):
"""Stream decodes an iterator."""
if r.encoding is None:
yield from iterator
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace")
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b"", final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
if slice_length is None or slice_length <= 0:
slice_length = len(string)
while pos < len(string):
yield string[pos : pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. fall back and replace all unicode characters
:rtype: str
"""
warnings.warn(
(
"In requests 3.0, get_unicode_from_response will be removed. For "
"more information, please see the discussion on issue #2266. (This"
" warning should only appear once.)"
),
DeprecationWarning,
)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors="replace")
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~"
)
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
:rtype: str
"""
parts = uri.split("%")
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL(f"Invalid percent-escape sequence: '{h}'")
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = f"%{parts[i]}"
else:
parts[i] = f"%{parts[i]}"
return "".join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
:rtype: str
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
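# Illustrative sketch (not part of requests): requote_uri() normalizes mixed
# quoting without double-encoding escapes that are already correct.
def _demo_requote_uri():
    # %7E is the unreserved "~", so it is unquoted; the space gets quoted.
    assert requote_uri("http://example.com/%7Euser/a b") == "http://example.com/~user/a%20b"
    # An already-correct escape survives the round trip unchanged.
    assert requote_uri("http://example.com/a%20b") == "http://example.com/a%20b"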
def address_in_network(ip, net):
"""This function allows you to check if an IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
:rtype: bool
"""
ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0]
netaddr, bits = net.split("/")
netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
:rtype: str
"""
bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack(">I", bits))
def is_ipv4_address(string_ip):
"""
:rtype: bool
"""
try:
socket.inet_aton(string_ip)
except OSError:
return False
return True
def is_valid_cidr(string_network):
"""
    Very simple check of the CIDR format in the no_proxy variable.
:rtype: bool
"""
if string_network.count("/") == 1:
try:
mask = int(string_network.split("/")[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split("/")[0])
except OSError:
return False
else:
return False
return True
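# Illustrative sketch (not part of requests): the IPv4/CIDR helpers that back
# the no_proxy matching, with made-up addresses.
def _demo_cidr_helpers():
    assert dotted_netmask(24) == "255.255.255.0"
    assert is_valid_cidr("192.168.1.0/24")
    assert not is_valid_cidr("192.168.1.0")  # missing mask
    assert address_in_network("192.168.1.1", "192.168.1.0/24")
    assert not address_in_network("10.0.0.1", "192.168.1.0/24")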
@contextlib.contextmanager
def set_environ(env_name, value):
"""Set the environment variable 'env_name' to 'value'
Save previous value, yield, and then restore the previous value stored in
the environment variable 'env_name'.
If 'value' is None, do nothing"""
value_changed = value is not None
if value_changed:
old_value = os.environ.get(env_name)
os.environ[env_name] = value
try:
yield
finally:
if value_changed:
if old_value is None:
del os.environ[env_name]
else:
os.environ[env_name] = old_value
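# Illustrative sketch (not part of requests): set_environ() scopes an override
# to the with-block and restores the prior state afterwards. DEMO_VAR is a
# hypothetical variable name.
def _demo_set_environ():
    import os
    os.environ.pop("DEMO_VAR", None)
    with set_environ("DEMO_VAR", "1"):
        assert os.environ["DEMO_VAR"] == "1"
    assert "DEMO_VAR" not in os.environ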
def should_bypass_proxies(url, no_proxy):
"""
Returns whether we should bypass proxies or not.
:rtype: bool
"""
# Prioritize lowercase environment variables over uppercase
# to keep a consistent behaviour with other http projects (curl, wget).
def get_proxy(key):
return os.environ.get(key) or os.environ.get(key.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy_arg = no_proxy
if no_proxy is None:
no_proxy = get_proxy("no_proxy")
parsed = urlparse(url)
if parsed.hostname is None:
# URLs don't always have hostnames, e.g. file:/// urls.
return True
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the hostname, both with and without the port.
no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host)
if is_ipv4_address(parsed.hostname):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(parsed.hostname, proxy_ip):
return True
elif parsed.hostname == proxy_ip:
                    # The no_proxy entry was defined in plain IP notation
                    # (not CIDR) and matches the host's IP exactly
return True
else:
host_with_port = parsed.hostname
if parsed.port:
host_with_port += f":{parsed.port}"
for host in no_proxy:
if parsed.hostname.endswith(host) or host_with_port.endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
with set_environ("no_proxy", no_proxy_arg):
# parsed.hostname can be `None` in cases such as a file URI.
try:
bypass = proxy_bypass(parsed.hostname)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
def get_environ_proxies(url, no_proxy=None):
"""
Return a dict of environment proxies.
:rtype: dict
"""
if should_bypass_proxies(url, no_proxy=no_proxy):
return {}
else:
return getproxies()
def select_proxy(url, proxies):
"""Select a proxy for the url, if applicable.
    :param url: The URL for the request
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
"""
proxies = proxies or {}
urlparts = urlparse(url)
if urlparts.hostname is None:
return proxies.get(urlparts.scheme, proxies.get("all"))
proxy_keys = [
urlparts.scheme + "://" + urlparts.hostname,
urlparts.scheme,
"all://" + urlparts.hostname,
"all",
]
proxy = None
for proxy_key in proxy_keys:
if proxy_key in proxies:
proxy = proxies[proxy_key]
break
return proxy
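# Illustrative sketch (not part of requests): select_proxy() precedence, from
# the most specific key (scheme://host) down to the "all" fallback. The proxy
# URLs are made up for demonstration.
def _demo_select_proxy():
    proxies = {
        "http://example.com": "http://proxy-a:8080",
        "http": "http://proxy-b:8080",
        "all": "http://proxy-c:8080",
    }
    assert select_proxy("http://example.com/x", proxies) == "http://proxy-a:8080"
    assert select_proxy("http://other.com/x", proxies) == "http://proxy-b:8080"
    assert select_proxy("ftp://other.com/x", proxies) == "http://proxy-c:8080"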
def resolve_proxies(request, proxies, trust_env=True):
"""This method takes proxy information from a request and configuration
input to resolve a mapping of target proxies. This will consider settings
    such as NO_PROXY to strip proxy configurations.
:param request: Request or PreparedRequest
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
:param trust_env: Boolean declaring whether to trust environment configs
:rtype: dict
"""
proxies = proxies if proxies is not None else {}
url = request.url
scheme = urlparse(url).scheme
no_proxy = proxies.get("no_proxy")
new_proxies = proxies.copy()
if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy):
environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
proxy = environ_proxies.get(scheme, environ_proxies.get("all"))
if proxy:
new_proxies.setdefault(scheme, proxy)
return new_proxies
def default_user_agent(name="python-requests"):
"""
Return a string representing the default user agent.
:rtype: str
"""
return f"{name}/{__version__}"
def default_headers():
"""
:rtype: requests.structures.CaseInsensitiveDict
"""
return CaseInsensitiveDict(
{
"User-Agent": default_user_agent(),
"Accept-Encoding": DEFAULT_ACCEPT_ENCODING,
"Accept": "*/*",
"Connection": "keep-alive",
}
)
def parse_header_links(value):
"""Return a list of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
:rtype: list
"""
links = []
replace_chars = " '\""
value = value.strip(replace_chars)
if not value:
return links
for val in re.split(", *<", value):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ""
link = {"url": url.strip("<> '\"")}
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
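# Illustrative sketch (not part of requests): parsing a Link header with two
# entries into a list of url/parameter dicts. The URLs are made up.
def _demo_parse_header_links():
    header = '<http://example.com/2>; rel="next", <http://example.com/9>; rel="last"'
    assert parse_header_links(header) == [
        {"url": "http://example.com/2", "rel": "next"},
        {"url": "http://example.com/9", "rel": "last"},
    ]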
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = "\x00".encode("ascii") # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
"""
:rtype: str
"""
    # JSON always starts with two ASCII characters, so detection is as easy
    # as counting the nulls; their location and count determine the encoding.
    # Also detect a BOM, if present.
sample = data[:4]
if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return "utf-32" # BOM included
if sample[:3] == codecs.BOM_UTF8:
return "utf-8-sig" # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return "utf-16" # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return "utf-8"
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return "utf-16-be"
if sample[1::2] == _null2: # 2nd and 4th are null
return "utf-16-le"
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return "utf-32-be"
if sample[1:] == _null3:
return "utf-32-le"
# Did not detect a valid UTF-32 ascii-range character
return None
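# Illustrative sketch (not part of requests): guess_json_utf() infers the
# encoding of a JSON document from BOMs and null-byte positions in the first
# four bytes.
def _demo_guess_json_utf():
    assert guess_json_utf('{"a": 1}'.encode("utf-8")) == "utf-8"
    assert guess_json_utf('{"a": 1}'.encode("utf-16-le")) == "utf-16-le"
    assert guess_json_utf('{"a": 1}'.encode("utf-16")) == "utf-16"  # BOM present
    assert guess_json_utf('{"a": 1}'.encode("utf-32-be")) == "utf-32-be"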
def prepend_scheme_if_needed(url, new_scheme):
"""Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.
:rtype: str
"""
parsed = parse_url(url)
scheme, auth, host, port, path, query, fragment = parsed
# A defect in urlparse determines that there isn't a netloc present in some
# urls. We previously assumed parsing was overly cautious, and swapped the
# netloc and path. Due to a lack of tests on the original defect, this is
# maintained with parse_url for backwards compatibility.
netloc = parsed.netloc
if not netloc:
netloc, path = path, netloc
if auth:
# parse_url doesn't provide the netloc with auth
# so we'll add it ourselves.
netloc = "@".join([auth, netloc])
if scheme is None:
scheme = new_scheme
if path is None:
path = ""
return urlunparse((scheme, netloc, path, "", query, fragment))
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password.
:rtype: (str,str)
"""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ("", "")
return auth
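# Illustrative sketch (not part of requests): scheme defaulting and credential
# extraction, with made-up URLs.
def _demo_url_helpers():
    assert prepend_scheme_if_needed("example.com/path", "http") == "http://example.com/path"
    # An existing scheme is left alone.
    assert prepend_scheme_if_needed("https://example.com", "http") == "https://example.com"
    assert get_auth_from_url("http://user:pass@example.com/") == ("user", "pass")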
def check_header_validity(header):
"""Verifies that header parts don't contain leading whitespace
reserved characters, or return characters.
:param header: tuple, in the format (name, value).
"""
name, value = header
for part in header:
if type(part) not in HEADER_VALIDATORS:
raise InvalidHeader(
f"Header part ({part!r}) from {{{name!r}: {value!r}}} must be "
f"of type str or bytes, not {type(part)}"
)
_validate_header_part(name, "name", HEADER_VALIDATORS[type(name)][0])
_validate_header_part(value, "value", HEADER_VALIDATORS[type(value)][1])
def _validate_header_part(header_part, header_kind, validator):
if not validator.match(header_part):
raise InvalidHeader(
f"Invalid leading whitespace, reserved character(s), or return"
f"character(s) in header {header_kind}: {header_part!r}"
)
def urldefragauth(url):
"""
Given a url remove the fragment and the authentication part.
:rtype: str
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit("@", 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ""))
def rewind_body(prepared_request):
"""Move file pointer back to its recorded starting position
so it can be read again on redirect.
"""
body_seek = getattr(prepared_request.body, "seek", None)
if body_seek is not None and isinstance(
prepared_request._body_position, integer_types
):
try:
body_seek(prepared_request._body_position)
except OSError:
raise UnrewindableBodyError(
"An error occurred when rewinding request body for redirect."
)
else:
raise UnrewindableBodyError("Unable to rewind request body for redirect.")
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/requests/utils.py
|
Python
|
mit
| 33,240 |
__all__ = [
"__version__",
"AbstractProvider",
"AbstractResolver",
"BaseReporter",
"InconsistentCandidate",
"Resolver",
"RequirementsConflicted",
"ResolutionError",
"ResolutionImpossible",
"ResolutionTooDeep",
]
__version__ = "0.8.1"
from .providers import AbstractProvider, AbstractResolver
from .reporters import BaseReporter
from .resolvers import (
InconsistentCandidate,
RequirementsConflicted,
ResolutionError,
ResolutionImpossible,
ResolutionTooDeep,
Resolver,
)
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/resolvelib/__init__.py
|
Python
|
mit
| 537 |
castiel248/Convert
|
Lib/site-packages/pip/_vendor/resolvelib/compat/__init__.py
|
Python
|
mit
| 0 |
|
__all__ = ["Mapping", "Sequence"]
try:
from collections.abc import Mapping, Sequence
except ImportError:
from collections import Mapping, Sequence
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/resolvelib/compat/collections_abc.py
|
Python
|
mit
| 156 |
class AbstractProvider(object):
"""Delegate class to provide requirement interface for the resolver."""
def identify(self, requirement_or_candidate):
"""Given a requirement, return an identifier for it.
This is used to identify a requirement, e.g. whether two requirements
should have their specifier parts merged.
"""
raise NotImplementedError
def get_preference(
self,
identifier,
resolutions,
candidates,
information,
backtrack_causes,
):
"""Produce a sort key for given requirement based on preference.
The preference is defined as "I think this requirement should be
resolved first". The lower the return value is, the more preferred
this group of arguments is.
:param identifier: An identifier as returned by ``identify()``. This
identifies the dependency matches of which should be returned.
:param resolutions: Mapping of candidates currently pinned by the
resolver. Each key is an identifier, and the value a candidate.
The candidate may conflict with requirements from ``information``.
:param candidates: Mapping of each dependency's possible candidates.
Each value is an iterator of candidates.
:param information: Mapping of requirement information of each package.
Each value is an iterator of *requirement information*.
:param backtrack_causes: Sequence of requirement information that were
the requirements that caused the resolver to most recently backtrack.
A *requirement information* instance is a named tuple with two members:
* ``requirement`` specifies a requirement contributing to the current
list of candidates.
        * ``parent`` specifies the candidate that provides (i.e. depends on) the
requirement, or ``None`` to indicate a root requirement.
        The preference could depend on various issues, including (not
necessarily in this order):
* Is this package pinned in the current resolution result?
* How relaxed is the requirement? Stricter ones should probably be
worked on first? (I don't know, actually.)
* How many possibilities are there to satisfy this requirement? Those
with few left should likely be worked on first, I guess?
* Are there any known conflicts for this requirement? We should
probably work on those with the most known conflicts.
A sortable value should be returned (this will be used as the ``key``
parameter of the built-in sorting function). The smaller the value is,
the more preferred this requirement is (i.e. the sorting function
is called with ``reverse=False``).
"""
raise NotImplementedError
def find_matches(self, identifier, requirements, incompatibilities):
"""Find all possible candidates that satisfy given constraints.
:param identifier: An identifier as returned by ``identify()``. This
identifies the dependency matches of which should be returned.
:param requirements: A mapping of requirements that all returned
candidates must satisfy. Each key is an identifier, and the value
an iterator of requirements for that dependency.
:param incompatibilities: A mapping of known incompatibilities of
each dependency. Each key is an identifier, and the value an
iterator of incompatibilities known to the resolver. All
incompatibilities *must* be excluded from the return value.
This should try to get candidates based on the requirements' types.
For VCS, local, and archive requirements, the one-and-only match is
returned, and for a "named" requirement, the index(es) should be
consulted to find concrete candidates for this requirement.
The return value should produce candidates ordered by preference; the
most preferred candidate should come first. The return type may be one
of the following:
* A callable that returns an iterator that yields candidates.
        * A collection of candidates.
* An iterable of candidates. This will be consumed immediately into a
list of candidates.
"""
raise NotImplementedError
def is_satisfied_by(self, requirement, candidate):
"""Whether the given requirement can be satisfied by a candidate.
        The candidate is guaranteed to have been generated from the
requirement.
A boolean should be returned to indicate whether ``candidate`` is a
viable solution to the requirement.
"""
raise NotImplementedError
def get_dependencies(self, candidate):
"""Get dependencies of a candidate.
This should return a collection of requirements that `candidate`
specifies as its dependencies.
"""
raise NotImplementedError
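# Illustrative sketch (not part of resolvelib): a minimal concrete provider
# over a static dependency table, showing which hooks an implementation must
# fill in. Requirements and candidates are hypothetical (name, version)
# tuples; a requirement here is satisfied by any candidate of the same name.
class _ToyProvider(AbstractProvider):
    def __init__(self, versions, dependencies):
        self._versions = versions  # {name: [version, ...]}, preferred first
        self._dependencies = dependencies  # {(name, version): [name, ...]}
    def identify(self, requirement_or_candidate):
        return requirement_or_candidate[0]
    def get_preference(
        self, identifier, resolutions, candidates, information, backtrack_causes
    ):
        # Work on the dependency with the fewest remaining candidates first.
        return sum(1 for _ in candidates[identifier])
    def find_matches(self, identifier, requirements, incompatibilities):
        excluded = set(incompatibilities[identifier])
        return [
            (identifier, version)
            for version in self._versions.get(identifier, [])
            if (identifier, version) not in excluded
        ]
    def is_satisfied_by(self, requirement, candidate):
        return requirement[0] == candidate[0]
    def get_dependencies(self, candidate):
        return [(name, None) for name in self._dependencies.get(candidate, [])]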
class AbstractResolver(object):
"""The thing that performs the actual resolution work."""
base_exception = Exception
def __init__(self, provider, reporter):
self.provider = provider
self.reporter = reporter
def resolve(self, requirements, **kwargs):
"""Take a collection of constraints, spit out the resolution result.
This returns a representation of the final resolution state, with one
        guaranteed attribute ``mapping`` that contains resolved candidates as
values. The keys are their respective identifiers.
:param requirements: A collection of constraints.
:param kwargs: Additional keyword arguments that subclasses may accept.
:raises: ``self.base_exception`` or its subclass.
"""
raise NotImplementedError
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/resolvelib/providers.py
|
Python
|
mit
| 5,872 |
class BaseReporter(object):
"""Delegate class to provider progress reporting for the resolver."""
def starting(self):
"""Called before the resolution actually starts."""
def starting_round(self, index):
"""Called before each round of resolution starts.
The index is zero-based.
"""
def ending_round(self, index, state):
"""Called before each round of resolution ends.
This is NOT called if the resolution ends at this round. Use `ending`
if you want to report finalization. The index is zero-based.
"""
def ending(self, state):
"""Called before the resolution ends successfully."""
def adding_requirement(self, requirement, parent):
"""Called when adding a new requirement into the resolve criteria.
:param requirement: The additional requirement to be applied to filter
            the available candidates.
:param parent: The candidate that requires ``requirement`` as a
dependency, or None if ``requirement`` is one of the root
requirements passed in from ``Resolver.resolve()``.
"""
def resolving_conflicts(self, causes):
"""Called when starting to attempt requirement conflict resolution.
:param causes: The information on the collision that caused the backtracking.
"""
def backtracking(self, candidate):
"""Called when rejecting a candidate during backtracking."""
def pinning(self, candidate):
"""Called when adding a candidate to the potential solution."""
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/resolvelib/reporters.py
|
Python
|
mit
| 1,583 |
import collections
import operator
from .providers import AbstractResolver
from .structs import DirectedGraph, IteratorMapping, build_iter_view
RequirementInformation = collections.namedtuple(
"RequirementInformation", ["requirement", "parent"]
)
class ResolverException(Exception):
"""A base class for all exceptions raised by this module.
    Exceptions derived from this class should all be handled in this module. Any
    that bubble past the resolver should be treated as a bug.
"""
class RequirementsConflicted(ResolverException):
def __init__(self, criterion):
super(RequirementsConflicted, self).__init__(criterion)
self.criterion = criterion
def __str__(self):
return "Requirements conflict: {}".format(
", ".join(repr(r) for r in self.criterion.iter_requirement()),
)
class InconsistentCandidate(ResolverException):
def __init__(self, candidate, criterion):
super(InconsistentCandidate, self).__init__(candidate, criterion)
self.candidate = candidate
self.criterion = criterion
def __str__(self):
return "Provided candidate {!r} does not satisfy {}".format(
self.candidate,
", ".join(repr(r) for r in self.criterion.iter_requirement()),
)
class Criterion(object):
"""Representation of possible resolution results of a package.
This holds three attributes:
* `information` is a collection of `RequirementInformation` pairs.
Each pair is a requirement contributing to this criterion, and the
candidate that provides the requirement.
* `incompatibilities` is a collection of all known not-to-work candidates
to exclude from consideration.
    * `candidates` is a collection containing all possible candidates deduced
from the union of contributing requirements and known incompatibilities.
It should never be empty, except when the criterion is an attribute of a
raised `RequirementsConflicted` (in which case it is always empty).
.. note::
This class is intended to be externally immutable. **Do not** mutate
any of its attribute containers.
"""
def __init__(self, candidates, information, incompatibilities):
self.candidates = candidates
self.information = information
self.incompatibilities = incompatibilities
def __repr__(self):
requirements = ", ".join(
"({!r}, via={!r})".format(req, parent)
for req, parent in self.information
)
return "Criterion({})".format(requirements)
def iter_requirement(self):
return (i.requirement for i in self.information)
def iter_parent(self):
return (i.parent for i in self.information)
class ResolutionError(ResolverException):
pass
class ResolutionImpossible(ResolutionError):
def __init__(self, causes):
super(ResolutionImpossible, self).__init__(causes)
# causes is a list of RequirementInformation objects
self.causes = causes
class ResolutionTooDeep(ResolutionError):
def __init__(self, round_count):
super(ResolutionTooDeep, self).__init__(round_count)
self.round_count = round_count
# Resolution state in a round.
State = collections.namedtuple("State", "mapping criteria backtrack_causes")
class Resolution(object):
"""Stateful resolution object.
This is designed as a one-off object that holds information to kick start
the resolution process, and holds the results afterwards.
"""
def __init__(self, provider, reporter):
self._p = provider
self._r = reporter
self._states = []
@property
def state(self):
try:
return self._states[-1]
except IndexError:
raise AttributeError("state")
def _push_new_state(self):
"""Push a new state into history.
This new state will be used to hold resolution results of the next
coming round.
"""
base = self._states[-1]
state = State(
mapping=base.mapping.copy(),
criteria=base.criteria.copy(),
backtrack_causes=base.backtrack_causes[:],
)
self._states.append(state)
def _add_to_criteria(self, criteria, requirement, parent):
self._r.adding_requirement(requirement=requirement, parent=parent)
identifier = self._p.identify(requirement_or_candidate=requirement)
criterion = criteria.get(identifier)
if criterion:
incompatibilities = list(criterion.incompatibilities)
else:
incompatibilities = []
matches = self._p.find_matches(
identifier=identifier,
requirements=IteratorMapping(
criteria,
operator.methodcaller("iter_requirement"),
{identifier: [requirement]},
),
incompatibilities=IteratorMapping(
criteria,
operator.attrgetter("incompatibilities"),
{identifier: incompatibilities},
),
)
if criterion:
information = list(criterion.information)
information.append(RequirementInformation(requirement, parent))
else:
information = [RequirementInformation(requirement, parent)]
criterion = Criterion(
candidates=build_iter_view(matches),
information=information,
incompatibilities=incompatibilities,
)
if not criterion.candidates:
raise RequirementsConflicted(criterion)
criteria[identifier] = criterion
def _get_preference(self, name):
return self._p.get_preference(
identifier=name,
resolutions=self.state.mapping,
candidates=IteratorMapping(
self.state.criteria,
operator.attrgetter("candidates"),
),
information=IteratorMapping(
self.state.criteria,
operator.attrgetter("information"),
),
backtrack_causes=self.state.backtrack_causes,
)
def _is_current_pin_satisfying(self, name, criterion):
try:
current_pin = self.state.mapping[name]
except KeyError:
return False
return all(
self._p.is_satisfied_by(requirement=r, candidate=current_pin)
for r in criterion.iter_requirement()
)
def _get_updated_criteria(self, candidate):
criteria = self.state.criteria.copy()
for requirement in self._p.get_dependencies(candidate=candidate):
self._add_to_criteria(criteria, requirement, parent=candidate)
return criteria
def _attempt_to_pin_criterion(self, name):
criterion = self.state.criteria[name]
causes = []
for candidate in criterion.candidates:
try:
criteria = self._get_updated_criteria(candidate)
except RequirementsConflicted as e:
causes.append(e.criterion)
continue
# Check the newly-pinned candidate actually works. This should
# always pass under normal circumstances, but in the case of a
# faulty provider, we will raise an error to notify the implementer
# to fix find_matches() and/or is_satisfied_by().
satisfied = all(
self._p.is_satisfied_by(requirement=r, candidate=candidate)
for r in criterion.iter_requirement()
)
if not satisfied:
raise InconsistentCandidate(candidate, criterion)
self._r.pinning(candidate=candidate)
self.state.criteria.update(criteria)
# Put newly-pinned candidate at the end. This is essential because
# backtracking looks at this mapping to get the last pin.
self.state.mapping.pop(name, None)
self.state.mapping[name] = candidate
return []
# All candidates tried, nothing works. This criterion is a dead
# end, signal for backtracking.
return causes
def _backtrack(self):
"""Perform backtracking.
When we enter here, the stack is like this::
[ state Z ]
[ state Y ]
[ state X ]
.... earlier states are irrelevant.
1. No pins worked for Z, so it does not have a pin.
2. We want to reset state Y to unpinned, and pin another candidate.
3. State X holds what state Y was before the pin, but does not
have the incompatibility information gathered in state Y.
Each iteration of the loop will:
1. Discard Z.
2. Discard Y but remember its incompatibility information gathered
previously, and the failure we're dealing with right now.
3. Push a new state Y' based on X, and apply the incompatibility
information from Y to Y'.
4a. If this causes Y' to conflict, we need to backtrack again. Make Y'
the new Z and go back to step 2.
4b. If the incompatibilities apply cleanly, end backtracking.
"""
while len(self._states) >= 3:
# Remove the state that triggered backtracking.
del self._states[-1]
# Retrieve the last candidate pin and known incompatibilities.
broken_state = self._states.pop()
name, candidate = broken_state.mapping.popitem()
incompatibilities_from_broken = [
(k, list(v.incompatibilities))
for k, v in broken_state.criteria.items()
]
# Also mark the newly known incompatibility.
incompatibilities_from_broken.append((name, [candidate]))
self._r.backtracking(candidate=candidate)
# Create a new state from the last known-to-work one, and apply
# the previously gathered incompatibility information.
def _patch_criteria():
for k, incompatibilities in incompatibilities_from_broken:
if not incompatibilities:
continue
try:
criterion = self.state.criteria[k]
except KeyError:
continue
matches = self._p.find_matches(
identifier=k,
requirements=IteratorMapping(
self.state.criteria,
operator.methodcaller("iter_requirement"),
),
incompatibilities=IteratorMapping(
self.state.criteria,
operator.attrgetter("incompatibilities"),
{k: incompatibilities},
),
)
candidates = build_iter_view(matches)
if not candidates:
return False
incompatibilities.extend(criterion.incompatibilities)
self.state.criteria[k] = Criterion(
candidates=candidates,
information=list(criterion.information),
incompatibilities=incompatibilities,
)
return True
self._push_new_state()
success = _patch_criteria()
# It works! Let's work on this new state.
if success:
return True
# State does not work after applying known incompatibilities.
            # Try the state before that one.
# No way to backtrack anymore.
return False
def resolve(self, requirements, max_rounds):
if self._states:
raise RuntimeError("already resolved")
self._r.starting()
# Initialize the root state.
self._states = [
State(
mapping=collections.OrderedDict(),
criteria={},
backtrack_causes=[],
)
]
for r in requirements:
try:
self._add_to_criteria(self.state.criteria, r, parent=None)
except RequirementsConflicted as e:
raise ResolutionImpossible(e.criterion.information)
# The root state is saved as a sentinel so the first ever pin can have
# something to backtrack to if it fails. The root state is basically
# pinning the virtual "root" package in the graph.
self._push_new_state()
for round_index in range(max_rounds):
self._r.starting_round(index=round_index)
unsatisfied_names = [
key
for key, criterion in self.state.criteria.items()
if not self._is_current_pin_satisfying(key, criterion)
]
# All criteria are accounted for. Nothing more to pin, we are done!
if not unsatisfied_names:
self._r.ending(state=self.state)
return self.state
# Choose the most preferred unpinned criterion to try.
name = min(unsatisfied_names, key=self._get_preference)
failure_causes = self._attempt_to_pin_criterion(name)
if failure_causes:
causes = [i for c in failure_causes for i in c.information]
# Backtrack if pinning fails. The backtrack process puts us in
# an unpinned state, so we can work on it in the next round.
self._r.resolving_conflicts(causes=causes)
success = self._backtrack()
self.state.backtrack_causes[:] = causes
# Dead ends everywhere. Give up.
if not success:
raise ResolutionImpossible(self.state.backtrack_causes)
else:
# Pinning was successful. Push a new state to do another pin.
self._push_new_state()
self._r.ending_round(index=round_index, state=self.state)
raise ResolutionTooDeep(max_rounds)
def _has_route_to_root(criteria, key, all_keys, connected):
if key in connected:
return True
if key not in criteria:
return False
for p in criteria[key].iter_parent():
try:
pkey = all_keys[id(p)]
except KeyError:
continue
if pkey in connected:
connected.add(key)
return True
if _has_route_to_root(criteria, pkey, all_keys, connected):
connected.add(key)
return True
return False
Result = collections.namedtuple("Result", "mapping graph criteria")
def _build_result(state):
mapping = state.mapping
all_keys = {id(v): k for k, v in mapping.items()}
all_keys[id(None)] = None
graph = DirectedGraph()
graph.add(None) # Sentinel as root dependencies' parent.
connected = {None}
for key, criterion in state.criteria.items():
if not _has_route_to_root(state.criteria, key, all_keys, connected):
continue
if key not in graph:
graph.add(key)
for p in criterion.iter_parent():
try:
pkey = all_keys[id(p)]
except KeyError:
continue
if pkey not in graph:
graph.add(pkey)
graph.connect(pkey, key)
return Result(
mapping={k: v for k, v in mapping.items() if k in connected},
graph=graph,
criteria=state.criteria,
)
class Resolver(AbstractResolver):
"""The thing that performs the actual resolution work."""
base_exception = ResolverException
def resolve(self, requirements, max_rounds=100):
"""Take a collection of constraints, spit out the resolution result.
        The return value is a representation of the final resolution result. It
is a tuple subclass with three public members:
* `mapping`: A dict of resolved candidates. Each key is an identifier
of a requirement (as returned by the provider's `identify` method),
and the value is the resolved candidate.
* `graph`: A `DirectedGraph` instance representing the dependency tree.
The vertices are keys of `mapping`, and each edge represents *why*
a particular package is included. A special vertex `None` is
included to represent parents of user-supplied requirements.
* `criteria`: A dict of "criteria" that hold detailed information on
how edges in the graph are derived. Each key is an identifier of a
requirement, and the value is a `Criterion` instance.
The following exceptions may be raised if a resolution cannot be found:
* `ResolutionImpossible`: A resolution cannot be found for the given
combination of requirements. The `causes` attribute of the
exception is a list of (requirement, parent), giving the
requirements that could not be satisfied.
* `ResolutionTooDeep`: The dependency tree is too deeply nested and
the resolver gave up. This is usually caused by a circular
dependency, but you can try to resolve this by increasing the
`max_rounds` argument.
"""
resolution = Resolution(self.provider, self.reporter)
state = resolution.resolve(requirements, max_rounds=max_rounds)
return _build_result(state)
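# Illustrative sketch (not part of resolvelib): driving a resolution end to
# end with the hypothetical _ToyProvider sketched under AbstractProvider in
# providers.py. "a" prefers version 2, which depends on "b".
def _demo_resolver():
    from pip._vendor.resolvelib.reporters import BaseReporter
    provider = _ToyProvider(  # hypothetical provider from the earlier sketch
        versions={"a": [2, 1], "b": [1]},
        dependencies={("a", 2): ["b"], ("a", 1): []},
    )
    result = Resolver(provider, BaseReporter()).resolve([("a", None)])
    assert result.mapping == {"a": ("a", 2), "b": ("b", 1)}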
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/resolvelib/resolvers.py
|
Python
|
mit
| 17,592 |
import itertools
from .compat import collections_abc
class DirectedGraph(object):
"""A graph structure with directed edges."""
def __init__(self):
self._vertices = set()
self._forwards = {} # <key> -> Set[<key>]
self._backwards = {} # <key> -> Set[<key>]
def __iter__(self):
return iter(self._vertices)
def __len__(self):
return len(self._vertices)
def __contains__(self, key):
return key in self._vertices
def copy(self):
"""Return a shallow copy of this graph."""
other = DirectedGraph()
other._vertices = set(self._vertices)
other._forwards = {k: set(v) for k, v in self._forwards.items()}
other._backwards = {k: set(v) for k, v in self._backwards.items()}
return other
def add(self, key):
"""Add a new vertex to the graph."""
if key in self._vertices:
raise ValueError("vertex exists")
self._vertices.add(key)
self._forwards[key] = set()
self._backwards[key] = set()
def remove(self, key):
"""Remove a vertex from the graph, disconnecting all edges from/to it."""
self._vertices.remove(key)
for f in self._forwards.pop(key):
self._backwards[f].remove(key)
for t in self._backwards.pop(key):
self._forwards[t].remove(key)
def connected(self, f, t):
return f in self._backwards[t] and t in self._forwards[f]
def connect(self, f, t):
"""Connect two existing vertices.
Nothing happens if the vertices are already connected.
"""
if t not in self._vertices:
raise KeyError(t)
self._forwards[f].add(t)
self._backwards[t].add(f)
def iter_edges(self):
for f, children in self._forwards.items():
for t in children:
yield f, t
def iter_children(self, key):
return iter(self._forwards[key])
def iter_parents(self, key):
return iter(self._backwards[key])
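# Illustrative sketch (not part of resolvelib): basic DirectedGraph usage with
# made-up vertex names.
def _demo_directed_graph():
    graph = DirectedGraph()
    graph.add("root")
    graph.add("child")
    graph.connect("root", "child")
    assert graph.connected("root", "child")
    assert list(graph.iter_children("root")) == ["child"]
    assert list(graph.iter_parents("child")) == ["root"]
    assert list(graph.iter_edges()) == [("root", "child")]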
class IteratorMapping(collections_abc.Mapping):
def __init__(self, mapping, accessor, appends=None):
self._mapping = mapping
self._accessor = accessor
self._appends = appends or {}
def __repr__(self):
return "IteratorMapping({!r}, {!r}, {!r})".format(
self._mapping,
self._accessor,
self._appends,
)
def __bool__(self):
return bool(self._mapping or self._appends)
__nonzero__ = __bool__ # XXX: Python 2.
def __contains__(self, key):
return key in self._mapping or key in self._appends
def __getitem__(self, k):
try:
v = self._mapping[k]
except KeyError:
return iter(self._appends[k])
return itertools.chain(self._accessor(v), self._appends.get(k, ()))
def __iter__(self):
more = (k for k in self._appends if k not in self._mapping)
return itertools.chain(self._mapping, more)
def __len__(self):
more = sum(1 for k in self._appends if k not in self._mapping)
return len(self._mapping) + more
class _FactoryIterableView(object):
"""Wrap an iterator factory returned by `find_matches()`.
Calling `iter()` on this class would invoke the underlying iterator
factory, making it a "collection with ordering" that can be iterated
through multiple times, but lacks random access methods presented in
built-in Python sequence types.
"""
def __init__(self, factory):
self._factory = factory
def __repr__(self):
return "{}({})".format(type(self).__name__, list(self._factory()))
def __bool__(self):
try:
next(self._factory())
except StopIteration:
return False
return True
__nonzero__ = __bool__ # XXX: Python 2.
def __iter__(self):
return self._factory()
class _SequenceIterableView(object):
"""Wrap an iterable returned by find_matches().
This is essentially just a proxy to the underlying sequence that provides
the same interface as `_FactoryIterableView`.
"""
def __init__(self, sequence):
self._sequence = sequence
def __repr__(self):
return "{}({})".format(type(self).__name__, self._sequence)
def __bool__(self):
return bool(self._sequence)
__nonzero__ = __bool__ # XXX: Python 2.
def __iter__(self):
return iter(self._sequence)
def build_iter_view(matches):
"""Build an iterable view from the value returned by `find_matches()`."""
if callable(matches):
return _FactoryIterableView(matches)
if not isinstance(matches, collections_abc.Sequence):
matches = list(matches)
return _SequenceIterableView(matches)
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/resolvelib/structs.py
|
Python
|
mit
| 4,794 |
"""Rich text and beautiful formatting in the terminal."""
import os
from typing import IO, TYPE_CHECKING, Any, Callable, Optional, Union
from ._extension import load_ipython_extension # noqa: F401
__all__ = ["get_console", "reconfigure", "print", "inspect"]
if TYPE_CHECKING:
from .console import Console
# Global console used by alternative print
_console: Optional["Console"] = None
try:
_IMPORT_CWD = os.path.abspath(os.getcwd())
except FileNotFoundError:
# Can happen if the cwd has been deleted
_IMPORT_CWD = ""
def get_console() -> "Console":
"""Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,
and hasn't been explicitly given one.
Returns:
Console: A console instance.
"""
global _console
if _console is None:
from .console import Console
_console = Console()
return _console
def reconfigure(*args: Any, **kwargs: Any) -> None:
"""Reconfigures the global console by replacing it with another.
Args:
console (Console): Replacement console instance.
"""
from pip._vendor.rich.console import Console
new_console = Console(*args, **kwargs)
_console = get_console()
_console.__dict__ = new_console.__dict__
def print(
*objects: Any,
sep: str = " ",
end: str = "\n",
file: Optional[IO[str]] = None,
flush: bool = False,
) -> None:
r"""Print object(s) supplied via positional arguments.
This function has an identical signature to the built-in print.
For more advanced features, see the :class:`~rich.console.Console` class.
Args:
sep (str, optional): Separator between printed objects. Defaults to " ".
end (str, optional): Character to write at end of output. Defaults to "\\n".
file (IO[str], optional): File to write to, or None for stdout. Defaults to None.
flush (bool, optional): Has no effect as Rich always flushes output. Defaults to False.
"""
from .console import Console
write_console = get_console() if file is None else Console(file=file)
return write_console.print(*objects, sep=sep, end=end)
def print_json(
json: Optional[str] = None,
*,
data: Any = None,
indent: Union[None, int, str] = 2,
highlight: bool = True,
skip_keys: bool = False,
ensure_ascii: bool = True,
check_circular: bool = True,
allow_nan: bool = True,
default: Optional[Callable[[Any], Any]] = None,
sort_keys: bool = False,
) -> None:
"""Pretty prints JSON. Output will be valid JSON.
Args:
json (str): A string containing JSON.
data (Any): If json is not supplied, then encode this data.
indent (int, optional): Number of spaces to indent. Defaults to 2.
highlight (bool, optional): Enable highlighting of output: Defaults to True.
skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
        ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to True.
check_circular (bool, optional): Check for circular references. Defaults to True.
allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
default (Callable, optional): A callable that converts values that can not be encoded
in to something that can be JSON encoded. Defaults to None.
sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
"""
get_console().print_json(
json,
data=data,
indent=indent,
highlight=highlight,
skip_keys=skip_keys,
ensure_ascii=ensure_ascii,
check_circular=check_circular,
allow_nan=allow_nan,
default=default,
sort_keys=sort_keys,
)
def inspect(
obj: Any,
*,
console: Optional["Console"] = None,
title: Optional[str] = None,
help: bool = False,
methods: bool = False,
docs: bool = True,
private: bool = False,
dunder: bool = False,
sort: bool = True,
all: bool = False,
value: bool = True,
) -> None:
"""Inspect any Python object.
* inspect(<OBJECT>) to see summarized info.
* inspect(<OBJECT>, methods=True) to see methods.
* inspect(<OBJECT>, help=True) to see full (non-abbreviated) help.
* inspect(<OBJECT>, private=True) to see private attributes (single underscore).
* inspect(<OBJECT>, dunder=True) to see attributes beginning with double underscore.
* inspect(<OBJECT>, all=True) to see all attributes.
Args:
obj (Any): An object to inspect.
        title (str, optional): Title to display over inspect result, or None to use the type name. Defaults to None.
help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.
methods (bool, optional): Enable inspection of callables. Defaults to False.
docs (bool, optional): Also render doc strings. Defaults to True.
private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.
dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.
sort (bool, optional): Sort attributes alphabetically. Defaults to True.
all (bool, optional): Show all attributes. Defaults to False.
value (bool, optional): Pretty print value. Defaults to True.
"""
_console = console or get_console()
from pip._vendor.rich._inspect import Inspect
# Special case for inspect(inspect)
is_inspect = obj is inspect
_inspect = Inspect(
obj,
title=title,
help=is_inspect or help,
methods=is_inspect or methods,
docs=is_inspect or docs,
private=private,
dunder=dunder,
sort=sort,
all=all,
value=value,
)
_console.print(_inspect)
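# Illustrative sketch (not part of rich): the module-level conveniences above
# all route through the shared global console returned by get_console().
def _demo_module_api() -> None:
    console = get_console()
    console.print("[bold]markup[/] goes through the global console")
    print_json(data={"numbers": [1, 2, 3]})  # pretty-printed via the same console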
if __name__ == "__main__": # pragma: no cover
print("Hello, **World**")
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/__init__.py
|
Python
|
mit
| 5,944 |
import colorsys
import io
from time import process_time
from pip._vendor.rich import box
from pip._vendor.rich.color import Color
from pip._vendor.rich.console import Console, ConsoleOptions, Group, RenderableType, RenderResult
from pip._vendor.rich.markdown import Markdown
from pip._vendor.rich.measure import Measurement
from pip._vendor.rich.pretty import Pretty
from pip._vendor.rich.segment import Segment
from pip._vendor.rich.style import Style
from pip._vendor.rich.syntax import Syntax
from pip._vendor.rich.table import Table
from pip._vendor.rich.text import Text
class ColorBox:
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
for y in range(0, 5):
for x in range(options.max_width):
h = x / options.max_width
l = 0.1 + ((y / 5) * 0.7)
r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0)
r2, g2, b2 = colorsys.hls_to_rgb(h, l + 0.7 / 10, 1.0)
bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255)
color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
yield Segment("▄", Style(color=color, bgcolor=bgcolor))
yield Segment.line()
def __rich_measure__(
self, console: "Console", options: ConsoleOptions
) -> Measurement:
return Measurement(1, options.max_width)
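# Illustrative sketch (not part of the original demo): the same
# __rich_console__ / __rich_measure__ protocol that ColorBox uses, reduced to
# a minimal renderable. Any object implementing these methods can be passed
# to Console.print().
class _DimRule:
    """Render a full-width dimmed line of dashes."""
    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> RenderResult:
        yield Segment("-" * options.max_width, Style(dim=True))
        yield Segment.line()
    def __rich_measure__(
        self, console: "Console", options: ConsoleOptions
    ) -> Measurement:
        return Measurement(1, options.max_width)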
def make_test_card() -> Table:
"""Get a renderable that demonstrates a number of features."""
table = Table.grid(padding=1, pad_edge=True)
table.title = "Rich features"
table.add_column("Feature", no_wrap=True, justify="center", style="bold red")
table.add_column("Demonstration")
color_table = Table(
box=None,
expand=False,
show_header=False,
show_edge=False,
pad_edge=False,
)
color_table.add_row(
(
"✓ [bold green]4-bit color[/]\n"
"✓ [bold blue]8-bit color[/]\n"
"✓ [bold magenta]Truecolor (16.7 million)[/]\n"
"✓ [bold yellow]Dumb terminals[/]\n"
"✓ [bold cyan]Automatic color conversion"
),
ColorBox(),
)
table.add_row("Colors", color_table)
table.add_row(
"Styles",
"All ansi styles: [bold]bold[/], [dim]dim[/], [italic]italic[/italic], [underline]underline[/], [strike]strikethrough[/], [reverse]reverse[/], and even [blink]blink[/].",
)
lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque in metus sed sapien ultricies pretium a at justo. Maecenas luctus velit et auctor maximus."
lorem_table = Table.grid(padding=1, collapse_padding=True)
lorem_table.pad_edge = False
lorem_table.add_row(
Text(lorem, justify="left", style="green"),
Text(lorem, justify="center", style="yellow"),
Text(lorem, justify="right", style="blue"),
Text(lorem, justify="full", style="red"),
)
table.add_row(
"Text",
Group(
Text.from_markup(
"""Word wrap text. Justify [green]left[/], [yellow]center[/], [blue]right[/] or [red]full[/].\n"""
),
lorem_table,
),
)
def comparison(renderable1: RenderableType, renderable2: RenderableType) -> Table:
table = Table(show_header=False, pad_edge=False, box=None, expand=True)
table.add_column("1", ratio=1)
table.add_column("2", ratio=1)
table.add_row(renderable1, renderable2)
return table
table.add_row(
"Asian\nlanguage\nsupport",
":flag_for_china: 该库支持中文,日文和韩文文本!\n:flag_for_japan: ライブラリは中国語、日本語、韓国語のテキストをサポートしています\n:flag_for_south_korea: 이 라이브러리는 중국어, 일본어 및 한국어 텍스트를 지원합니다",
)
markup_example = (
"[bold magenta]Rich[/] supports a simple [i]bbcode[/i]-like [b]markup[/b] for [yellow]color[/], [underline]style[/], and emoji! "
":+1: :apple: :ant: :bear: :baguette_bread: :bus: "
)
table.add_row("Markup", markup_example)
example_table = Table(
show_edge=False,
show_header=True,
expand=False,
row_styles=["none", "dim"],
box=box.SIMPLE,
)
example_table.add_column("[green]Date", style="green", no_wrap=True)
example_table.add_column("[blue]Title", style="blue")
example_table.add_column(
"[cyan]Production Budget",
style="cyan",
justify="right",
no_wrap=True,
)
example_table.add_column(
"[magenta]Box Office",
style="magenta",
justify="right",
no_wrap=True,
)
example_table.add_row(
"Dec 20, 2019",
"Star Wars: The Rise of Skywalker",
"$275,000,000",
"$375,126,118",
)
example_table.add_row(
"May 25, 2018",
"[b]Solo[/]: A Star Wars Story",
"$275,000,000",
"$393,151,347",
)
example_table.add_row(
"Dec 15, 2017",
"Star Wars Ep. VIII: The Last Jedi",
"$262,000,000",
"[bold]$1,332,539,889[/bold]",
)
example_table.add_row(
"May 19, 1999",
"Star Wars Ep. [b]I[/b]: [i]The phantom Menace",
"$115,000,000",
"$1,027,044,677",
)
table.add_row("Tables", example_table)
code = '''\
def iter_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
"""Iterate and generate a tuple with a flag for last value."""
iter_values = iter(values)
try:
previous_value = next(iter_values)
except StopIteration:
return
for value in iter_values:
yield False, previous_value
previous_value = value
yield True, previous_value'''
pretty_data = {
"foo": [
3.1427,
(
"Paul Atreides",
"Vladimir Harkonnen",
"Thufir Hawat",
),
],
"atomic": (False, True, None),
}
table.add_row(
"Syntax\nhighlighting\n&\npretty\nprinting",
comparison(
Syntax(code, "python3", line_numbers=True, indent_guides=True),
Pretty(pretty_data, indent_guides=True),
),
)
markdown_example = """\
# Markdown
Supports much of the *markdown* __syntax__!
- Headers
- Basic formatting: **bold**, *italic*, `code`
- Block quotes
- Lists, and more...
"""
table.add_row(
"Markdown", comparison("[cyan]" + markdown_example, Markdown(markdown_example))
)
table.add_row(
"+more!",
"""Progress bars, columns, styled logging handler, tracebacks, etc...""",
)
return table
if __name__ == "__main__": # pragma: no cover
console = Console(
file=io.StringIO(),
force_terminal=True,
)
test_card = make_test_card()
# Print once to warm cache
start = process_time()
console.print(test_card)
pre_cache_taken = round((process_time() - start) * 1000.0, 1)
console.file = io.StringIO()
start = process_time()
console.print(test_card)
taken = round((process_time() - start) * 1000.0, 1)
c = Console(record=True)
c.print(test_card)
print(f"rendered in {pre_cache_taken}ms (cold cache)")
print(f"rendered in {taken}ms (warm cache)")
from pip._vendor.rich.panel import Panel
console = Console()
sponsor_message = Table.grid(padding=1)
sponsor_message.add_column(style="green", justify="right")
sponsor_message.add_column(no_wrap=True)
sponsor_message.add_row(
"Textualize",
"[u blue link=https://github.com/textualize]https://github.com/textualize",
)
sponsor_message.add_row(
"Buy devs a :coffee:",
"[u blue link=https://ko-fi.com/textualize]https://ko-fi.com/textualize",
)
sponsor_message.add_row(
"Twitter",
"[u blue link=https://twitter.com/willmcgugan]https://twitter.com/willmcgugan",
)
intro_message = Text.from_markup(
"""\
We hope you enjoy using Rich!
Rich is maintained with [red]:heart:[/] by [link=https://www.textualize.io]Textualize.io[/]
- Will McGugan"""
)
message = Table.grid(padding=2)
message.add_column()
message.add_column(no_wrap=True)
message.add_row(intro_message, sponsor_message)
console.print(
Panel.fit(
message,
box=box.ROUNDED,
padding=(1, 2),
title="[b red]Thanks for trying out Rich!",
border_style="bright_blue",
),
justify="center",
)
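# Editor's note: as a package __main__ module this demo is runnable directly,
# e.g. `python -m pip._vendor.rich`, which renders the feature table above
# followed by the sponsor panel.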
| castiel248/Convert | Lib/site-packages/pip/_vendor/rich/__main__.py | Python | mit | 8,808 |
# Auto generated by make_terminal_widths.py
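# Each triple is (codepoint_lo, codepoint_hi, cell_width): width -1 marks
# non-printable controls, 0 marks zero-width/combining characters, and 2 marks
# wide (East Asian full-width) characters. Codepoints outside every range
# occupy a single cell.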
CELL_WIDTHS = [
(0, 0, 0),
(1, 31, -1),
(127, 159, -1),
(768, 879, 0),
(1155, 1161, 0),
(1425, 1469, 0),
(1471, 1471, 0),
(1473, 1474, 0),
(1476, 1477, 0),
(1479, 1479, 0),
(1552, 1562, 0),
(1611, 1631, 0),
(1648, 1648, 0),
(1750, 1756, 0),
(1759, 1764, 0),
(1767, 1768, 0),
(1770, 1773, 0),
(1809, 1809, 0),
(1840, 1866, 0),
(1958, 1968, 0),
(2027, 2035, 0),
(2045, 2045, 0),
(2070, 2073, 0),
(2075, 2083, 0),
(2085, 2087, 0),
(2089, 2093, 0),
(2137, 2139, 0),
(2259, 2273, 0),
(2275, 2306, 0),
(2362, 2362, 0),
(2364, 2364, 0),
(2369, 2376, 0),
(2381, 2381, 0),
(2385, 2391, 0),
(2402, 2403, 0),
(2433, 2433, 0),
(2492, 2492, 0),
(2497, 2500, 0),
(2509, 2509, 0),
(2530, 2531, 0),
(2558, 2558, 0),
(2561, 2562, 0),
(2620, 2620, 0),
(2625, 2626, 0),
(2631, 2632, 0),
(2635, 2637, 0),
(2641, 2641, 0),
(2672, 2673, 0),
(2677, 2677, 0),
(2689, 2690, 0),
(2748, 2748, 0),
(2753, 2757, 0),
(2759, 2760, 0),
(2765, 2765, 0),
(2786, 2787, 0),
(2810, 2815, 0),
(2817, 2817, 0),
(2876, 2876, 0),
(2879, 2879, 0),
(2881, 2884, 0),
(2893, 2893, 0),
(2901, 2902, 0),
(2914, 2915, 0),
(2946, 2946, 0),
(3008, 3008, 0),
(3021, 3021, 0),
(3072, 3072, 0),
(3076, 3076, 0),
(3134, 3136, 0),
(3142, 3144, 0),
(3146, 3149, 0),
(3157, 3158, 0),
(3170, 3171, 0),
(3201, 3201, 0),
(3260, 3260, 0),
(3263, 3263, 0),
(3270, 3270, 0),
(3276, 3277, 0),
(3298, 3299, 0),
(3328, 3329, 0),
(3387, 3388, 0),
(3393, 3396, 0),
(3405, 3405, 0),
(3426, 3427, 0),
(3457, 3457, 0),
(3530, 3530, 0),
(3538, 3540, 0),
(3542, 3542, 0),
(3633, 3633, 0),
(3636, 3642, 0),
(3655, 3662, 0),
(3761, 3761, 0),
(3764, 3772, 0),
(3784, 3789, 0),
(3864, 3865, 0),
(3893, 3893, 0),
(3895, 3895, 0),
(3897, 3897, 0),
(3953, 3966, 0),
(3968, 3972, 0),
(3974, 3975, 0),
(3981, 3991, 0),
(3993, 4028, 0),
(4038, 4038, 0),
(4141, 4144, 0),
(4146, 4151, 0),
(4153, 4154, 0),
(4157, 4158, 0),
(4184, 4185, 0),
(4190, 4192, 0),
(4209, 4212, 0),
(4226, 4226, 0),
(4229, 4230, 0),
(4237, 4237, 0),
(4253, 4253, 0),
(4352, 4447, 2),
(4957, 4959, 0),
(5906, 5908, 0),
(5938, 5940, 0),
(5970, 5971, 0),
(6002, 6003, 0),
(6068, 6069, 0),
(6071, 6077, 0),
(6086, 6086, 0),
(6089, 6099, 0),
(6109, 6109, 0),
(6155, 6157, 0),
(6277, 6278, 0),
(6313, 6313, 0),
(6432, 6434, 0),
(6439, 6440, 0),
(6450, 6450, 0),
(6457, 6459, 0),
(6679, 6680, 0),
(6683, 6683, 0),
(6742, 6742, 0),
(6744, 6750, 0),
(6752, 6752, 0),
(6754, 6754, 0),
(6757, 6764, 0),
(6771, 6780, 0),
(6783, 6783, 0),
(6832, 6848, 0),
(6912, 6915, 0),
(6964, 6964, 0),
(6966, 6970, 0),
(6972, 6972, 0),
(6978, 6978, 0),
(7019, 7027, 0),
(7040, 7041, 0),
(7074, 7077, 0),
(7080, 7081, 0),
(7083, 7085, 0),
(7142, 7142, 0),
(7144, 7145, 0),
(7149, 7149, 0),
(7151, 7153, 0),
(7212, 7219, 0),
(7222, 7223, 0),
(7376, 7378, 0),
(7380, 7392, 0),
(7394, 7400, 0),
(7405, 7405, 0),
(7412, 7412, 0),
(7416, 7417, 0),
(7616, 7673, 0),
(7675, 7679, 0),
(8203, 8207, 0),
(8232, 8238, 0),
(8288, 8291, 0),
(8400, 8432, 0),
(8986, 8987, 2),
(9001, 9002, 2),
(9193, 9196, 2),
(9200, 9200, 2),
(9203, 9203, 2),
(9725, 9726, 2),
(9748, 9749, 2),
(9800, 9811, 2),
(9855, 9855, 2),
(9875, 9875, 2),
(9889, 9889, 2),
(9898, 9899, 2),
(9917, 9918, 2),
(9924, 9925, 2),
(9934, 9934, 2),
(9940, 9940, 2),
(9962, 9962, 2),
(9970, 9971, 2),
(9973, 9973, 2),
(9978, 9978, 2),
(9981, 9981, 2),
(9989, 9989, 2),
(9994, 9995, 2),
(10024, 10024, 2),
(10060, 10060, 2),
(10062, 10062, 2),
(10067, 10069, 2),
(10071, 10071, 2),
(10133, 10135, 2),
(10160, 10160, 2),
(10175, 10175, 2),
(11035, 11036, 2),
(11088, 11088, 2),
(11093, 11093, 2),
(11503, 11505, 0),
(11647, 11647, 0),
(11744, 11775, 0),
(11904, 11929, 2),
(11931, 12019, 2),
(12032, 12245, 2),
(12272, 12283, 2),
(12288, 12329, 2),
(12330, 12333, 0),
(12334, 12350, 2),
(12353, 12438, 2),
(12441, 12442, 0),
(12443, 12543, 2),
(12549, 12591, 2),
(12593, 12686, 2),
(12688, 12771, 2),
(12784, 12830, 2),
(12832, 12871, 2),
(12880, 19903, 2),
(19968, 42124, 2),
(42128, 42182, 2),
(42607, 42610, 0),
(42612, 42621, 0),
(42654, 42655, 0),
(42736, 42737, 0),
(43010, 43010, 0),
(43014, 43014, 0),
(43019, 43019, 0),
(43045, 43046, 0),
(43052, 43052, 0),
(43204, 43205, 0),
(43232, 43249, 0),
(43263, 43263, 0),
(43302, 43309, 0),
(43335, 43345, 0),
(43360, 43388, 2),
(43392, 43394, 0),
(43443, 43443, 0),
(43446, 43449, 0),
(43452, 43453, 0),
(43493, 43493, 0),
(43561, 43566, 0),
(43569, 43570, 0),
(43573, 43574, 0),
(43587, 43587, 0),
(43596, 43596, 0),
(43644, 43644, 0),
(43696, 43696, 0),
(43698, 43700, 0),
(43703, 43704, 0),
(43710, 43711, 0),
(43713, 43713, 0),
(43756, 43757, 0),
(43766, 43766, 0),
(44005, 44005, 0),
(44008, 44008, 0),
(44013, 44013, 0),
(44032, 55203, 2),
(63744, 64255, 2),
(64286, 64286, 0),
(65024, 65039, 0),
(65040, 65049, 2),
(65056, 65071, 0),
(65072, 65106, 2),
(65108, 65126, 2),
(65128, 65131, 2),
(65281, 65376, 2),
(65504, 65510, 2),
(66045, 66045, 0),
(66272, 66272, 0),
(66422, 66426, 0),
(68097, 68099, 0),
(68101, 68102, 0),
(68108, 68111, 0),
(68152, 68154, 0),
(68159, 68159, 0),
(68325, 68326, 0),
(68900, 68903, 0),
(69291, 69292, 0),
(69446, 69456, 0),
(69633, 69633, 0),
(69688, 69702, 0),
(69759, 69761, 0),
(69811, 69814, 0),
(69817, 69818, 0),
(69888, 69890, 0),
(69927, 69931, 0),
(69933, 69940, 0),
(70003, 70003, 0),
(70016, 70017, 0),
(70070, 70078, 0),
(70089, 70092, 0),
(70095, 70095, 0),
(70191, 70193, 0),
(70196, 70196, 0),
(70198, 70199, 0),
(70206, 70206, 0),
(70367, 70367, 0),
(70371, 70378, 0),
(70400, 70401, 0),
(70459, 70460, 0),
(70464, 70464, 0),
(70502, 70508, 0),
(70512, 70516, 0),
(70712, 70719, 0),
(70722, 70724, 0),
(70726, 70726, 0),
(70750, 70750, 0),
(70835, 70840, 0),
(70842, 70842, 0),
(70847, 70848, 0),
(70850, 70851, 0),
(71090, 71093, 0),
(71100, 71101, 0),
(71103, 71104, 0),
(71132, 71133, 0),
(71219, 71226, 0),
(71229, 71229, 0),
(71231, 71232, 0),
(71339, 71339, 0),
(71341, 71341, 0),
(71344, 71349, 0),
(71351, 71351, 0),
(71453, 71455, 0),
(71458, 71461, 0),
(71463, 71467, 0),
(71727, 71735, 0),
(71737, 71738, 0),
(71995, 71996, 0),
(71998, 71998, 0),
(72003, 72003, 0),
(72148, 72151, 0),
(72154, 72155, 0),
(72160, 72160, 0),
(72193, 72202, 0),
(72243, 72248, 0),
(72251, 72254, 0),
(72263, 72263, 0),
(72273, 72278, 0),
(72281, 72283, 0),
(72330, 72342, 0),
(72344, 72345, 0),
(72752, 72758, 0),
(72760, 72765, 0),
(72767, 72767, 0),
(72850, 72871, 0),
(72874, 72880, 0),
(72882, 72883, 0),
(72885, 72886, 0),
(73009, 73014, 0),
(73018, 73018, 0),
(73020, 73021, 0),
(73023, 73029, 0),
(73031, 73031, 0),
(73104, 73105, 0),
(73109, 73109, 0),
(73111, 73111, 0),
(73459, 73460, 0),
(92912, 92916, 0),
(92976, 92982, 0),
(94031, 94031, 0),
(94095, 94098, 0),
(94176, 94179, 2),
(94180, 94180, 0),
(94192, 94193, 2),
(94208, 100343, 2),
(100352, 101589, 2),
(101632, 101640, 2),
(110592, 110878, 2),
(110928, 110930, 2),
(110948, 110951, 2),
(110960, 111355, 2),
(113821, 113822, 0),
(119143, 119145, 0),
(119163, 119170, 0),
(119173, 119179, 0),
(119210, 119213, 0),
(119362, 119364, 0),
(121344, 121398, 0),
(121403, 121452, 0),
(121461, 121461, 0),
(121476, 121476, 0),
(121499, 121503, 0),
(121505, 121519, 0),
(122880, 122886, 0),
(122888, 122904, 0),
(122907, 122913, 0),
(122915, 122916, 0),
(122918, 122922, 0),
(123184, 123190, 0),
(123628, 123631, 0),
(125136, 125142, 0),
(125252, 125258, 0),
(126980, 126980, 2),
(127183, 127183, 2),
(127374, 127374, 2),
(127377, 127386, 2),
(127488, 127490, 2),
(127504, 127547, 2),
(127552, 127560, 2),
(127568, 127569, 2),
(127584, 127589, 2),
(127744, 127776, 2),
(127789, 127797, 2),
(127799, 127868, 2),
(127870, 127891, 2),
(127904, 127946, 2),
(127951, 127955, 2),
(127968, 127984, 2),
(127988, 127988, 2),
(127992, 128062, 2),
(128064, 128064, 2),
(128066, 128252, 2),
(128255, 128317, 2),
(128331, 128334, 2),
(128336, 128359, 2),
(128378, 128378, 2),
(128405, 128406, 2),
(128420, 128420, 2),
(128507, 128591, 2),
(128640, 128709, 2),
(128716, 128716, 2),
(128720, 128722, 2),
(128725, 128727, 2),
(128747, 128748, 2),
(128756, 128764, 2),
(128992, 129003, 2),
(129292, 129338, 2),
(129340, 129349, 2),
(129351, 129400, 2),
(129402, 129483, 2),
(129485, 129535, 2),
(129648, 129652, 2),
(129656, 129658, 2),
(129664, 129670, 2),
(129680, 129704, 2),
(129712, 129718, 2),
(129728, 129730, 2),
(129744, 129750, 2),
(131072, 196605, 2),
(196608, 262141, 2),
(917760, 917999, 0),
]
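# Editor's sketch (an assumption, not part of the generated module): a sorted
# range table like CELL_WIDTHS is typically queried with a binary search.
from bisect import bisect_right

def cell_width(codepoint: int, default: int = 1) -> int:
    """Return the terminal cell width recorded for a codepoint."""
    # Find the last range whose lower bound is <= codepoint; the infinity
    # sentinel sorts the probe after every real (lo, hi, width) triple that
    # shares the same lower bound.
    index = bisect_right(CELL_WIDTHS, (codepoint, float("inf"), 0)) - 1
    if index >= 0:
        lo, hi, width = CELL_WIDTHS[index]
        if lo <= codepoint <= hi:
            return width
    return default

# Example: cell_width(ord("界")) -> 2 (wide CJK); cell_width(ord("a")) -> 1.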
| castiel248/Convert | Lib/site-packages/pip/_vendor/rich/_cell_widths.py | Python | mit | 10,096 |
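# Mapping of emoji short-codes (the names used in rich console markup such as
# ":fire:") to their Unicode characters. Flag entries combine regional
# indicator symbols; skin-tone and compound entries use modifier codepoints
# and U+200D zero-width joiners.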
EMOJI = {
"1st_place_medal": "🥇",
"2nd_place_medal": "🥈",
"3rd_place_medal": "🥉",
"ab_button_(blood_type)": "🆎",
"atm_sign": "🏧",
"a_button_(blood_type)": "🅰",
"afghanistan": "🇦🇫",
"albania": "🇦🇱",
"algeria": "🇩🇿",
"american_samoa": "🇦🇸",
"andorra": "🇦🇩",
"angola": "🇦🇴",
"anguilla": "🇦🇮",
"antarctica": "🇦🇶",
"antigua_&_barbuda": "🇦🇬",
"aquarius": "♒",
"argentina": "🇦🇷",
"aries": "♈",
"armenia": "🇦🇲",
"aruba": "🇦🇼",
"ascension_island": "🇦🇨",
"australia": "🇦🇺",
"austria": "🇦🇹",
"azerbaijan": "🇦🇿",
"back_arrow": "🔙",
"b_button_(blood_type)": "🅱",
"bahamas": "🇧🇸",
"bahrain": "🇧🇭",
"bangladesh": "🇧🇩",
"barbados": "🇧🇧",
"belarus": "🇧🇾",
"belgium": "🇧🇪",
"belize": "🇧🇿",
"benin": "🇧🇯",
"bermuda": "🇧🇲",
"bhutan": "🇧🇹",
"bolivia": "🇧🇴",
"bosnia_&_herzegovina": "🇧🇦",
"botswana": "🇧🇼",
"bouvet_island": "🇧🇻",
"brazil": "🇧🇷",
"british_indian_ocean_territory": "🇮🇴",
"british_virgin_islands": "🇻🇬",
"brunei": "🇧🇳",
"bulgaria": "🇧🇬",
"burkina_faso": "🇧🇫",
"burundi": "🇧🇮",
"cl_button": "🆑",
"cool_button": "🆒",
"cambodia": "🇰🇭",
"cameroon": "🇨🇲",
"canada": "🇨🇦",
"canary_islands": "🇮🇨",
"cancer": "♋",
"cape_verde": "🇨🇻",
"capricorn": "♑",
"caribbean_netherlands": "🇧🇶",
"cayman_islands": "🇰🇾",
"central_african_republic": "🇨🇫",
"ceuta_&_melilla": "🇪🇦",
"chad": "🇹🇩",
"chile": "🇨🇱",
"china": "🇨🇳",
"christmas_island": "🇨🇽",
"christmas_tree": "🎄",
"clipperton_island": "🇨🇵",
"cocos_(keeling)_islands": "🇨🇨",
"colombia": "🇨🇴",
"comoros": "🇰🇲",
"congo_-_brazzaville": "🇨🇬",
"congo_-_kinshasa": "🇨🇩",
"cook_islands": "🇨🇰",
"costa_rica": "🇨🇷",
"croatia": "🇭🇷",
"cuba": "🇨🇺",
"curaçao": "🇨🇼",
"cyprus": "🇨🇾",
"czechia": "🇨🇿",
"côte_d’ivoire": "🇨🇮",
"denmark": "🇩🇰",
"diego_garcia": "🇩🇬",
"djibouti": "🇩🇯",
"dominica": "🇩🇲",
"dominican_republic": "🇩🇴",
"end_arrow": "🔚",
"ecuador": "🇪🇨",
"egypt": "🇪🇬",
"el_salvador": "🇸🇻",
"england": "🏴\U000e0067\U000e0062\U000e0065\U000e006e\U000e0067\U000e007f",
"equatorial_guinea": "🇬🇶",
"eritrea": "🇪🇷",
"estonia": "🇪🇪",
"ethiopia": "🇪🇹",
"european_union": "🇪🇺",
"free_button": "🆓",
"falkland_islands": "🇫🇰",
"faroe_islands": "🇫🇴",
"fiji": "🇫🇯",
"finland": "🇫🇮",
"france": "🇫🇷",
"french_guiana": "🇬🇫",
"french_polynesia": "🇵🇫",
"french_southern_territories": "🇹🇫",
"gabon": "🇬🇦",
"gambia": "🇬🇲",
"gemini": "♊",
"georgia": "🇬🇪",
"germany": "🇩🇪",
"ghana": "🇬🇭",
"gibraltar": "🇬🇮",
"greece": "🇬🇷",
"greenland": "🇬🇱",
"grenada": "🇬🇩",
"guadeloupe": "🇬🇵",
"guam": "🇬🇺",
"guatemala": "🇬🇹",
"guernsey": "🇬🇬",
"guinea": "🇬🇳",
"guinea-bissau": "🇬🇼",
"guyana": "🇬🇾",
"haiti": "🇭🇹",
"heard_&_mcdonald_islands": "🇭🇲",
"honduras": "🇭🇳",
"hong_kong_sar_china": "🇭🇰",
"hungary": "🇭🇺",
"id_button": "🆔",
"iceland": "🇮🇸",
"india": "🇮🇳",
"indonesia": "🇮🇩",
"iran": "🇮🇷",
"iraq": "🇮🇶",
"ireland": "🇮🇪",
"isle_of_man": "🇮🇲",
"israel": "🇮🇱",
"italy": "🇮🇹",
"jamaica": "🇯🇲",
"japan": "🗾",
"japanese_acceptable_button": "🉑",
"japanese_application_button": "🈸",
"japanese_bargain_button": "🉐",
"japanese_castle": "🏯",
"japanese_congratulations_button": "㊗",
"japanese_discount_button": "🈹",
"japanese_dolls": "🎎",
"japanese_free_of_charge_button": "🈚",
"japanese_here_button": "🈁",
"japanese_monthly_amount_button": "🈷",
"japanese_no_vacancy_button": "🈵",
"japanese_not_free_of_charge_button": "🈶",
"japanese_open_for_business_button": "🈺",
"japanese_passing_grade_button": "🈴",
"japanese_post_office": "🏣",
"japanese_prohibited_button": "🈲",
"japanese_reserved_button": "🈯",
"japanese_secret_button": "㊙",
"japanese_service_charge_button": "🈂",
"japanese_symbol_for_beginner": "🔰",
"japanese_vacancy_button": "🈳",
"jersey": "🇯🇪",
"jordan": "🇯🇴",
"kazakhstan": "🇰🇿",
"kenya": "🇰🇪",
"kiribati": "🇰🇮",
"kosovo": "🇽🇰",
"kuwait": "🇰🇼",
"kyrgyzstan": "🇰🇬",
"laos": "🇱🇦",
"latvia": "🇱🇻",
"lebanon": "🇱🇧",
"leo": "♌",
"lesotho": "🇱🇸",
"liberia": "🇱🇷",
"libra": "♎",
"libya": "🇱🇾",
"liechtenstein": "🇱🇮",
"lithuania": "🇱🇹",
"luxembourg": "🇱🇺",
"macau_sar_china": "🇲🇴",
"macedonia": "🇲🇰",
"madagascar": "🇲🇬",
"malawi": "🇲🇼",
"malaysia": "🇲🇾",
"maldives": "🇲🇻",
"mali": "🇲🇱",
"malta": "🇲🇹",
"marshall_islands": "🇲🇭",
"martinique": "🇲🇶",
"mauritania": "🇲🇷",
"mauritius": "🇲🇺",
"mayotte": "🇾🇹",
"mexico": "🇲🇽",
"micronesia": "🇫🇲",
"moldova": "🇲🇩",
"monaco": "🇲🇨",
"mongolia": "🇲🇳",
"montenegro": "🇲🇪",
"montserrat": "🇲🇸",
"morocco": "🇲🇦",
"mozambique": "🇲🇿",
"mrs._claus": "🤶",
"mrs._claus_dark_skin_tone": "🤶🏿",
"mrs._claus_light_skin_tone": "🤶🏻",
"mrs._claus_medium-dark_skin_tone": "🤶🏾",
"mrs._claus_medium-light_skin_tone": "🤶🏼",
"mrs._claus_medium_skin_tone": "🤶🏽",
"myanmar_(burma)": "🇲🇲",
"new_button": "🆕",
"ng_button": "🆖",
"namibia": "🇳🇦",
"nauru": "🇳🇷",
"nepal": "🇳🇵",
"netherlands": "🇳🇱",
"new_caledonia": "🇳🇨",
"new_zealand": "🇳🇿",
"nicaragua": "🇳🇮",
"niger": "🇳🇪",
"nigeria": "🇳🇬",
"niue": "🇳🇺",
"norfolk_island": "🇳🇫",
"north_korea": "🇰🇵",
"northern_mariana_islands": "🇲🇵",
"norway": "🇳🇴",
"ok_button": "🆗",
"ok_hand": "👌",
"ok_hand_dark_skin_tone": "👌🏿",
"ok_hand_light_skin_tone": "👌🏻",
"ok_hand_medium-dark_skin_tone": "👌🏾",
"ok_hand_medium-light_skin_tone": "👌🏼",
"ok_hand_medium_skin_tone": "👌🏽",
"on!_arrow": "🔛",
"o_button_(blood_type)": "🅾",
"oman": "🇴🇲",
"ophiuchus": "⛎",
"p_button": "🅿",
"pakistan": "🇵🇰",
"palau": "🇵🇼",
"palestinian_territories": "🇵🇸",
"panama": "🇵🇦",
"papua_new_guinea": "🇵🇬",
"paraguay": "🇵🇾",
"peru": "🇵🇪",
"philippines": "🇵🇭",
"pisces": "♓",
"pitcairn_islands": "🇵🇳",
"poland": "🇵🇱",
"portugal": "🇵🇹",
"puerto_rico": "🇵🇷",
"qatar": "🇶🇦",
"romania": "🇷🇴",
"russia": "🇷🇺",
"rwanda": "🇷🇼",
"réunion": "🇷🇪",
"soon_arrow": "🔜",
"sos_button": "🆘",
"sagittarius": "♐",
"samoa": "🇼🇸",
"san_marino": "🇸🇲",
"santa_claus": "🎅",
"santa_claus_dark_skin_tone": "🎅🏿",
"santa_claus_light_skin_tone": "🎅🏻",
"santa_claus_medium-dark_skin_tone": "🎅🏾",
"santa_claus_medium-light_skin_tone": "🎅🏼",
"santa_claus_medium_skin_tone": "🎅🏽",
"saudi_arabia": "🇸🇦",
"scorpio": "♏",
"scotland": "🏴\U000e0067\U000e0062\U000e0073\U000e0063\U000e0074\U000e007f",
"senegal": "🇸🇳",
"serbia": "🇷🇸",
"seychelles": "🇸🇨",
"sierra_leone": "🇸🇱",
"singapore": "🇸🇬",
"sint_maarten": "🇸🇽",
"slovakia": "🇸🇰",
"slovenia": "🇸🇮",
"solomon_islands": "🇸🇧",
"somalia": "🇸🇴",
"south_africa": "🇿🇦",
"south_georgia_&_south_sandwich_islands": "🇬🇸",
"south_korea": "🇰🇷",
"south_sudan": "🇸🇸",
"spain": "🇪🇸",
"sri_lanka": "🇱🇰",
"st._barthélemy": "🇧🇱",
"st._helena": "🇸🇭",
"st._kitts_&_nevis": "🇰🇳",
"st._lucia": "🇱🇨",
"st._martin": "🇲🇫",
"st._pierre_&_miquelon": "🇵🇲",
"st._vincent_&_grenadines": "🇻🇨",
"statue_of_liberty": "🗽",
"sudan": "🇸🇩",
"suriname": "🇸🇷",
"svalbard_&_jan_mayen": "🇸🇯",
"swaziland": "🇸🇿",
"sweden": "🇸🇪",
"switzerland": "🇨🇭",
"syria": "🇸🇾",
"são_tomé_&_príncipe": "🇸🇹",
"t-rex": "🦖",
"top_arrow": "🔝",
"taiwan": "🇹🇼",
"tajikistan": "🇹🇯",
"tanzania": "🇹🇿",
"taurus": "♉",
"thailand": "🇹🇭",
"timor-leste": "🇹🇱",
"togo": "🇹🇬",
"tokelau": "🇹🇰",
"tokyo_tower": "🗼",
"tonga": "🇹🇴",
"trinidad_&_tobago": "🇹🇹",
"tristan_da_cunha": "🇹🇦",
"tunisia": "🇹🇳",
"turkey": "🦃",
"turkmenistan": "🇹🇲",
"turks_&_caicos_islands": "🇹🇨",
"tuvalu": "🇹🇻",
"u.s._outlying_islands": "🇺🇲",
"u.s._virgin_islands": "🇻🇮",
"up!_button": "🆙",
"uganda": "🇺🇬",
"ukraine": "🇺🇦",
"united_arab_emirates": "🇦🇪",
"united_kingdom": "🇬🇧",
"united_nations": "🇺🇳",
"united_states": "🇺🇸",
"uruguay": "🇺🇾",
"uzbekistan": "🇺🇿",
"vs_button": "🆚",
"vanuatu": "🇻🇺",
"vatican_city": "🇻🇦",
"venezuela": "🇻🇪",
"vietnam": "🇻🇳",
"virgo": "♍",
"wales": "🏴\U000e0067\U000e0062\U000e0077\U000e006c\U000e0073\U000e007f",
"wallis_&_futuna": "🇼🇫",
"western_sahara": "🇪🇭",
"yemen": "🇾🇪",
"zambia": "🇿🇲",
"zimbabwe": "🇿🇼",
"abacus": "🧮",
"adhesive_bandage": "🩹",
"admission_tickets": "🎟",
"adult": "🧑",
"adult_dark_skin_tone": "🧑🏿",
"adult_light_skin_tone": "🧑🏻",
"adult_medium-dark_skin_tone": "🧑🏾",
"adult_medium-light_skin_tone": "🧑🏼",
"adult_medium_skin_tone": "🧑🏽",
"aerial_tramway": "🚡",
"airplane": "✈",
"airplane_arrival": "🛬",
"airplane_departure": "🛫",
"alarm_clock": "⏰",
"alembic": "⚗",
"alien": "👽",
"alien_monster": "👾",
"ambulance": "🚑",
"american_football": "🏈",
"amphora": "🏺",
"anchor": "⚓",
"anger_symbol": "💢",
"angry_face": "😠",
"angry_face_with_horns": "👿",
"anguished_face": "😧",
"ant": "🐜",
"antenna_bars": "📶",
"anxious_face_with_sweat": "😰",
"articulated_lorry": "🚛",
"artist_palette": "🎨",
"astonished_face": "😲",
"atom_symbol": "⚛",
"auto_rickshaw": "🛺",
"automobile": "🚗",
"avocado": "🥑",
"axe": "🪓",
"baby": "👶",
"baby_angel": "👼",
"baby_angel_dark_skin_tone": "👼🏿",
"baby_angel_light_skin_tone": "👼🏻",
"baby_angel_medium-dark_skin_tone": "👼🏾",
"baby_angel_medium-light_skin_tone": "👼🏼",
"baby_angel_medium_skin_tone": "👼🏽",
"baby_bottle": "🍼",
"baby_chick": "🐤",
"baby_dark_skin_tone": "👶🏿",
"baby_light_skin_tone": "👶🏻",
"baby_medium-dark_skin_tone": "👶🏾",
"baby_medium-light_skin_tone": "👶🏼",
"baby_medium_skin_tone": "👶🏽",
"baby_symbol": "🚼",
"backhand_index_pointing_down": "👇",
"backhand_index_pointing_down_dark_skin_tone": "👇🏿",
"backhand_index_pointing_down_light_skin_tone": "👇🏻",
"backhand_index_pointing_down_medium-dark_skin_tone": "👇🏾",
"backhand_index_pointing_down_medium-light_skin_tone": "👇🏼",
"backhand_index_pointing_down_medium_skin_tone": "👇🏽",
"backhand_index_pointing_left": "👈",
"backhand_index_pointing_left_dark_skin_tone": "👈🏿",
"backhand_index_pointing_left_light_skin_tone": "👈🏻",
"backhand_index_pointing_left_medium-dark_skin_tone": "👈🏾",
"backhand_index_pointing_left_medium-light_skin_tone": "👈🏼",
"backhand_index_pointing_left_medium_skin_tone": "👈🏽",
"backhand_index_pointing_right": "👉",
"backhand_index_pointing_right_dark_skin_tone": "👉🏿",
"backhand_index_pointing_right_light_skin_tone": "👉🏻",
"backhand_index_pointing_right_medium-dark_skin_tone": "👉🏾",
"backhand_index_pointing_right_medium-light_skin_tone": "👉🏼",
"backhand_index_pointing_right_medium_skin_tone": "👉🏽",
"backhand_index_pointing_up": "👆",
"backhand_index_pointing_up_dark_skin_tone": "👆🏿",
"backhand_index_pointing_up_light_skin_tone": "👆🏻",
"backhand_index_pointing_up_medium-dark_skin_tone": "👆🏾",
"backhand_index_pointing_up_medium-light_skin_tone": "👆🏼",
"backhand_index_pointing_up_medium_skin_tone": "👆🏽",
"bacon": "🥓",
"badger": "🦡",
"badminton": "🏸",
"bagel": "🥯",
"baggage_claim": "🛄",
"baguette_bread": "🥖",
"balance_scale": "⚖",
"bald": "🦲",
"bald_man": "👨\u200d🦲",
"bald_woman": "👩\u200d🦲",
"ballet_shoes": "🩰",
"balloon": "🎈",
"ballot_box_with_ballot": "🗳",
"ballot_box_with_check": "☑",
"banana": "🍌",
"banjo": "🪕",
"bank": "🏦",
"bar_chart": "📊",
"barber_pole": "💈",
"baseball": "⚾",
"basket": "🧺",
"basketball": "🏀",
"bat": "🦇",
"bathtub": "🛁",
"battery": "🔋",
"beach_with_umbrella": "🏖",
"beaming_face_with_smiling_eyes": "😁",
"bear_face": "🐻",
"bearded_person": "🧔",
"bearded_person_dark_skin_tone": "🧔🏿",
"bearded_person_light_skin_tone": "🧔🏻",
"bearded_person_medium-dark_skin_tone": "🧔🏾",
"bearded_person_medium-light_skin_tone": "🧔🏼",
"bearded_person_medium_skin_tone": "🧔🏽",
"beating_heart": "💓",
"bed": "🛏",
"beer_mug": "🍺",
"bell": "🔔",
"bell_with_slash": "🔕",
"bellhop_bell": "🛎",
"bento_box": "🍱",
"beverage_box": "🧃",
"bicycle": "🚲",
"bikini": "👙",
"billed_cap": "🧢",
"biohazard": "☣",
"bird": "🐦",
"birthday_cake": "🎂",
"black_circle": "⚫",
"black_flag": "🏴",
"black_heart": "🖤",
"black_large_square": "⬛",
"black_medium-small_square": "◾",
"black_medium_square": "◼",
"black_nib": "✒",
"black_small_square": "▪",
"black_square_button": "🔲",
"blond-haired_man": "👱\u200d♂️",
"blond-haired_man_dark_skin_tone": "👱🏿\u200d♂️",
"blond-haired_man_light_skin_tone": "👱🏻\u200d♂️",
"blond-haired_man_medium-dark_skin_tone": "👱🏾\u200d♂️",
"blond-haired_man_medium-light_skin_tone": "👱🏼\u200d♂️",
"blond-haired_man_medium_skin_tone": "👱🏽\u200d♂️",
"blond-haired_person": "👱",
"blond-haired_person_dark_skin_tone": "👱🏿",
"blond-haired_person_light_skin_tone": "👱🏻",
"blond-haired_person_medium-dark_skin_tone": "👱🏾",
"blond-haired_person_medium-light_skin_tone": "👱🏼",
"blond-haired_person_medium_skin_tone": "👱🏽",
"blond-haired_woman": "👱\u200d♀️",
"blond-haired_woman_dark_skin_tone": "👱🏿\u200d♀️",
"blond-haired_woman_light_skin_tone": "👱🏻\u200d♀️",
"blond-haired_woman_medium-dark_skin_tone": "👱🏾\u200d♀️",
"blond-haired_woman_medium-light_skin_tone": "👱🏼\u200d♀️",
"blond-haired_woman_medium_skin_tone": "👱🏽\u200d♀️",
"blossom": "🌼",
"blowfish": "🐡",
"blue_book": "📘",
"blue_circle": "🔵",
"blue_heart": "💙",
"blue_square": "🟦",
"boar": "🐗",
"bomb": "💣",
"bone": "🦴",
"bookmark": "🔖",
"bookmark_tabs": "📑",
"books": "📚",
"bottle_with_popping_cork": "🍾",
"bouquet": "💐",
"bow_and_arrow": "🏹",
"bowl_with_spoon": "🥣",
"bowling": "🎳",
"boxing_glove": "🥊",
"boy": "👦",
"boy_dark_skin_tone": "👦🏿",
"boy_light_skin_tone": "👦🏻",
"boy_medium-dark_skin_tone": "👦🏾",
"boy_medium-light_skin_tone": "👦🏼",
"boy_medium_skin_tone": "👦🏽",
"brain": "🧠",
"bread": "🍞",
"breast-feeding": "🤱",
"breast-feeding_dark_skin_tone": "🤱🏿",
"breast-feeding_light_skin_tone": "🤱🏻",
"breast-feeding_medium-dark_skin_tone": "🤱🏾",
"breast-feeding_medium-light_skin_tone": "🤱🏼",
"breast-feeding_medium_skin_tone": "🤱🏽",
"brick": "🧱",
"bride_with_veil": "👰",
"bride_with_veil_dark_skin_tone": "👰🏿",
"bride_with_veil_light_skin_tone": "👰🏻",
"bride_with_veil_medium-dark_skin_tone": "👰🏾",
"bride_with_veil_medium-light_skin_tone": "👰🏼",
"bride_with_veil_medium_skin_tone": "👰🏽",
"bridge_at_night": "🌉",
"briefcase": "💼",
"briefs": "🩲",
"bright_button": "🔆",
"broccoli": "🥦",
"broken_heart": "💔",
"broom": "🧹",
"brown_circle": "🟤",
"brown_heart": "🤎",
"brown_square": "🟫",
"bug": "🐛",
"building_construction": "🏗",
"bullet_train": "🚅",
"burrito": "🌯",
"bus": "🚌",
"bus_stop": "🚏",
"bust_in_silhouette": "👤",
"busts_in_silhouette": "👥",
"butter": "🧈",
"butterfly": "🦋",
"cactus": "🌵",
"calendar": "📆",
"call_me_hand": "🤙",
"call_me_hand_dark_skin_tone": "🤙🏿",
"call_me_hand_light_skin_tone": "🤙🏻",
"call_me_hand_medium-dark_skin_tone": "🤙🏾",
"call_me_hand_medium-light_skin_tone": "🤙🏼",
"call_me_hand_medium_skin_tone": "🤙🏽",
"camel": "🐫",
"camera": "📷",
"camera_with_flash": "📸",
"camping": "🏕",
"candle": "🕯",
"candy": "🍬",
"canned_food": "🥫",
"canoe": "🛶",
"card_file_box": "🗃",
"card_index": "📇",
"card_index_dividers": "🗂",
"carousel_horse": "🎠",
"carp_streamer": "🎏",
"carrot": "🥕",
"castle": "🏰",
"cat": "🐱",
"cat_face": "🐱",
"cat_face_with_tears_of_joy": "😹",
"cat_face_with_wry_smile": "😼",
"chains": "⛓",
"chair": "🪑",
"chart_decreasing": "📉",
"chart_increasing": "📈",
"chart_increasing_with_yen": "💹",
"cheese_wedge": "🧀",
"chequered_flag": "🏁",
"cherries": "🍒",
"cherry_blossom": "🌸",
"chess_pawn": "♟",
"chestnut": "🌰",
"chicken": "🐔",
"child": "🧒",
"child_dark_skin_tone": "🧒🏿",
"child_light_skin_tone": "🧒🏻",
"child_medium-dark_skin_tone": "🧒🏾",
"child_medium-light_skin_tone": "🧒🏼",
"child_medium_skin_tone": "🧒🏽",
"children_crossing": "🚸",
"chipmunk": "🐿",
"chocolate_bar": "🍫",
"chopsticks": "🥢",
"church": "⛪",
"cigarette": "🚬",
"cinema": "🎦",
"circled_m": "Ⓜ",
"circus_tent": "🎪",
"cityscape": "🏙",
"cityscape_at_dusk": "🌆",
"clamp": "🗜",
"clapper_board": "🎬",
"clapping_hands": "👏",
"clapping_hands_dark_skin_tone": "👏🏿",
"clapping_hands_light_skin_tone": "👏🏻",
"clapping_hands_medium-dark_skin_tone": "👏🏾",
"clapping_hands_medium-light_skin_tone": "👏🏼",
"clapping_hands_medium_skin_tone": "👏🏽",
"classical_building": "🏛",
"clinking_beer_mugs": "🍻",
"clinking_glasses": "🥂",
"clipboard": "📋",
"clockwise_vertical_arrows": "🔃",
"closed_book": "📕",
"closed_mailbox_with_lowered_flag": "📪",
"closed_mailbox_with_raised_flag": "📫",
"closed_umbrella": "🌂",
"cloud": "☁",
"cloud_with_lightning": "🌩",
"cloud_with_lightning_and_rain": "⛈",
"cloud_with_rain": "🌧",
"cloud_with_snow": "🌨",
"clown_face": "🤡",
"club_suit": "♣",
"clutch_bag": "👝",
"coat": "🧥",
"cocktail_glass": "🍸",
"coconut": "🥥",
"coffin": "⚰",
"cold_face": "🥶",
"collision": "💥",
"comet": "☄",
"compass": "🧭",
"computer_disk": "💽",
"computer_mouse": "🖱",
"confetti_ball": "🎊",
"confounded_face": "😖",
"confused_face": "😕",
"construction": "🚧",
"construction_worker": "👷",
"construction_worker_dark_skin_tone": "👷🏿",
"construction_worker_light_skin_tone": "👷🏻",
"construction_worker_medium-dark_skin_tone": "👷🏾",
"construction_worker_medium-light_skin_tone": "👷🏼",
"construction_worker_medium_skin_tone": "👷🏽",
"control_knobs": "🎛",
"convenience_store": "🏪",
"cooked_rice": "🍚",
"cookie": "🍪",
"cooking": "🍳",
"copyright": "©",
"couch_and_lamp": "🛋",
"counterclockwise_arrows_button": "🔄",
"couple_with_heart": "💑",
"couple_with_heart_man_man": "👨\u200d❤️\u200d👨",
"couple_with_heart_woman_man": "👩\u200d❤️\u200d👨",
"couple_with_heart_woman_woman": "👩\u200d❤️\u200d👩",
"cow": "🐮",
"cow_face": "🐮",
"cowboy_hat_face": "🤠",
"crab": "🦀",
"crayon": "🖍",
"credit_card": "💳",
"crescent_moon": "🌙",
"cricket": "🦗",
"cricket_game": "🏏",
"crocodile": "🐊",
"croissant": "🥐",
"cross_mark": "❌",
"cross_mark_button": "❎",
"crossed_fingers": "🤞",
"crossed_fingers_dark_skin_tone": "🤞🏿",
"crossed_fingers_light_skin_tone": "🤞🏻",
"crossed_fingers_medium-dark_skin_tone": "🤞🏾",
"crossed_fingers_medium-light_skin_tone": "🤞🏼",
"crossed_fingers_medium_skin_tone": "🤞🏽",
"crossed_flags": "🎌",
"crossed_swords": "⚔",
"crown": "👑",
"crying_cat_face": "😿",
"crying_face": "😢",
"crystal_ball": "🔮",
"cucumber": "🥒",
"cupcake": "🧁",
"cup_with_straw": "🥤",
"curling_stone": "🥌",
"curly_hair": "🦱",
"curly-haired_man": "👨\u200d🦱",
"curly-haired_woman": "👩\u200d🦱",
"curly_loop": "➰",
"currency_exchange": "💱",
"curry_rice": "🍛",
"custard": "🍮",
"customs": "🛃",
"cut_of_meat": "🥩",
"cyclone": "🌀",
"dagger": "🗡",
"dango": "🍡",
"dashing_away": "💨",
"deaf_person": "🧏",
"deciduous_tree": "🌳",
"deer": "🦌",
"delivery_truck": "🚚",
"department_store": "🏬",
"derelict_house": "🏚",
"desert": "🏜",
"desert_island": "🏝",
"desktop_computer": "🖥",
"detective": "🕵",
"detective_dark_skin_tone": "🕵🏿",
"detective_light_skin_tone": "🕵🏻",
"detective_medium-dark_skin_tone": "🕵🏾",
"detective_medium-light_skin_tone": "🕵🏼",
"detective_medium_skin_tone": "🕵🏽",
"diamond_suit": "♦",
"diamond_with_a_dot": "💠",
"dim_button": "🔅",
"direct_hit": "🎯",
"disappointed_face": "😞",
"diving_mask": "🤿",
"diya_lamp": "🪔",
"dizzy": "💫",
"dizzy_face": "😵",
"dna": "🧬",
"dog": "🐶",
"dog_face": "🐶",
"dollar_banknote": "💵",
"dolphin": "🐬",
"door": "🚪",
"dotted_six-pointed_star": "🔯",
"double_curly_loop": "➿",
"double_exclamation_mark": "‼",
"doughnut": "🍩",
"dove": "🕊",
"down-left_arrow": "↙",
"down-right_arrow": "↘",
"down_arrow": "⬇",
"downcast_face_with_sweat": "😓",
"downwards_button": "🔽",
"dragon": "🐉",
"dragon_face": "🐲",
"dress": "👗",
"drooling_face": "🤤",
"drop_of_blood": "🩸",
"droplet": "💧",
"drum": "🥁",
"duck": "🦆",
"dumpling": "🥟",
"dvd": "📀",
"e-mail": "📧",
"eagle": "🦅",
"ear": "👂",
"ear_dark_skin_tone": "👂🏿",
"ear_light_skin_tone": "👂🏻",
"ear_medium-dark_skin_tone": "👂🏾",
"ear_medium-light_skin_tone": "👂🏼",
"ear_medium_skin_tone": "👂🏽",
"ear_of_corn": "🌽",
"ear_with_hearing_aid": "🦻",
"egg": "🍳",
"eggplant": "🍆",
"eight-pointed_star": "✴",
"eight-spoked_asterisk": "✳",
"eight-thirty": "🕣",
"eight_o’clock": "🕗",
"eject_button": "⏏",
"electric_plug": "🔌",
"elephant": "🐘",
"eleven-thirty": "🕦",
"eleven_o’clock": "🕚",
"elf": "🧝",
"elf_dark_skin_tone": "🧝🏿",
"elf_light_skin_tone": "🧝🏻",
"elf_medium-dark_skin_tone": "🧝🏾",
"elf_medium-light_skin_tone": "🧝🏼",
"elf_medium_skin_tone": "🧝🏽",
"envelope": "✉",
"envelope_with_arrow": "📩",
"euro_banknote": "💶",
"evergreen_tree": "🌲",
"ewe": "🐑",
"exclamation_mark": "❗",
"exclamation_question_mark": "⁉",
"exploding_head": "🤯",
"expressionless_face": "😑",
"eye": "👁",
"eye_in_speech_bubble": "👁️\u200d🗨️",
"eyes": "👀",
"face_blowing_a_kiss": "😘",
"face_savoring_food": "😋",
"face_screaming_in_fear": "😱",
"face_vomiting": "🤮",
"face_with_hand_over_mouth": "🤭",
"face_with_head-bandage": "🤕",
"face_with_medical_mask": "😷",
"face_with_monocle": "🧐",
"face_with_open_mouth": "😮",
"face_with_raised_eyebrow": "🤨",
"face_with_rolling_eyes": "🙄",
"face_with_steam_from_nose": "😤",
"face_with_symbols_on_mouth": "🤬",
"face_with_tears_of_joy": "😂",
"face_with_thermometer": "🤒",
"face_with_tongue": "😛",
"face_without_mouth": "😶",
"factory": "🏭",
"fairy": "🧚",
"fairy_dark_skin_tone": "🧚🏿",
"fairy_light_skin_tone": "🧚🏻",
"fairy_medium-dark_skin_tone": "🧚🏾",
"fairy_medium-light_skin_tone": "🧚🏼",
"fairy_medium_skin_tone": "🧚🏽",
"falafel": "🧆",
"fallen_leaf": "🍂",
"family": "👪",
"family_man_boy": "👨\u200d👦",
"family_man_boy_boy": "👨\u200d👦\u200d👦",
"family_man_girl": "👨\u200d👧",
"family_man_girl_boy": "👨\u200d👧\u200d👦",
"family_man_girl_girl": "👨\u200d👧\u200d👧",
"family_man_man_boy": "👨\u200d👨\u200d👦",
"family_man_man_boy_boy": "👨\u200d👨\u200d👦\u200d👦",
"family_man_man_girl": "👨\u200d👨\u200d👧",
"family_man_man_girl_boy": "👨\u200d👨\u200d👧\u200d👦",
"family_man_man_girl_girl": "👨\u200d👨\u200d👧\u200d👧",
"family_man_woman_boy": "👨\u200d👩\u200d👦",
"family_man_woman_boy_boy": "👨\u200d👩\u200d👦\u200d👦",
"family_man_woman_girl": "👨\u200d👩\u200d👧",
"family_man_woman_girl_boy": "👨\u200d👩\u200d👧\u200d👦",
"family_man_woman_girl_girl": "👨\u200d👩\u200d👧\u200d👧",
"family_woman_boy": "👩\u200d👦",
"family_woman_boy_boy": "👩\u200d👦\u200d👦",
"family_woman_girl": "👩\u200d👧",
"family_woman_girl_boy": "👩\u200d👧\u200d👦",
"family_woman_girl_girl": "👩\u200d👧\u200d👧",
"family_woman_woman_boy": "👩\u200d👩\u200d👦",
"family_woman_woman_boy_boy": "👩\u200d👩\u200d👦\u200d👦",
"family_woman_woman_girl": "👩\u200d👩\u200d👧",
"family_woman_woman_girl_boy": "👩\u200d👩\u200d👧\u200d👦",
"family_woman_woman_girl_girl": "👩\u200d👩\u200d👧\u200d👧",
"fast-forward_button": "⏩",
"fast_down_button": "⏬",
"fast_reverse_button": "⏪",
"fast_up_button": "⏫",
"fax_machine": "📠",
"fearful_face": "😨",
"female_sign": "♀",
"ferris_wheel": "🎡",
"ferry": "⛴",
"field_hockey": "🏑",
"file_cabinet": "🗄",
"file_folder": "📁",
"film_frames": "🎞",
"film_projector": "📽",
"fire": "🔥",
"fire_extinguisher": "🧯",
"firecracker": "🧨",
"fire_engine": "🚒",
"fireworks": "🎆",
"first_quarter_moon": "🌓",
"first_quarter_moon_face": "🌛",
"fish": "🐟",
"fish_cake_with_swirl": "🍥",
"fishing_pole": "🎣",
"five-thirty": "🕠",
"five_o’clock": "🕔",
"flag_in_hole": "⛳",
"flamingo": "🦩",
"flashlight": "🔦",
"flat_shoe": "🥿",
"fleur-de-lis": "⚜",
"flexed_biceps": "💪",
"flexed_biceps_dark_skin_tone": "💪🏿",
"flexed_biceps_light_skin_tone": "💪🏻",
"flexed_biceps_medium-dark_skin_tone": "💪🏾",
"flexed_biceps_medium-light_skin_tone": "💪🏼",
"flexed_biceps_medium_skin_tone": "💪🏽",
"floppy_disk": "💾",
"flower_playing_cards": "🎴",
"flushed_face": "😳",
"flying_disc": "🥏",
"flying_saucer": "🛸",
"fog": "🌫",
"foggy": "🌁",
"folded_hands": "🙏",
"folded_hands_dark_skin_tone": "🙏🏿",
"folded_hands_light_skin_tone": "🙏🏻",
"folded_hands_medium-dark_skin_tone": "🙏🏾",
"folded_hands_medium-light_skin_tone": "🙏🏼",
"folded_hands_medium_skin_tone": "🙏🏽",
"foot": "🦶",
"footprints": "👣",
"fork_and_knife": "🍴",
"fork_and_knife_with_plate": "🍽",
"fortune_cookie": "🥠",
"fountain": "⛲",
"fountain_pen": "🖋",
"four-thirty": "🕟",
"four_leaf_clover": "🍀",
"four_o’clock": "🕓",
"fox_face": "🦊",
"framed_picture": "🖼",
"french_fries": "🍟",
"fried_shrimp": "🍤",
"frog_face": "🐸",
"front-facing_baby_chick": "🐥",
"frowning_face": "☹",
"frowning_face_with_open_mouth": "😦",
"fuel_pump": "⛽",
"full_moon": "🌕",
"full_moon_face": "🌝",
"funeral_urn": "⚱",
"game_die": "🎲",
"garlic": "🧄",
"gear": "⚙",
"gem_stone": "💎",
"genie": "🧞",
"ghost": "👻",
"giraffe": "🦒",
"girl": "👧",
"girl_dark_skin_tone": "👧🏿",
"girl_light_skin_tone": "👧🏻",
"girl_medium-dark_skin_tone": "👧🏾",
"girl_medium-light_skin_tone": "👧🏼",
"girl_medium_skin_tone": "👧🏽",
"glass_of_milk": "🥛",
"glasses": "👓",
"globe_showing_americas": "🌎",
"globe_showing_asia-australia": "🌏",
"globe_showing_europe-africa": "🌍",
"globe_with_meridians": "🌐",
"gloves": "🧤",
"glowing_star": "🌟",
"goal_net": "🥅",
"goat": "🐐",
"goblin": "👺",
"goggles": "🥽",
"gorilla": "🦍",
"graduation_cap": "🎓",
"grapes": "🍇",
"green_apple": "🍏",
"green_book": "📗",
"green_circle": "🟢",
"green_heart": "💚",
"green_salad": "🥗",
"green_square": "🟩",
"grimacing_face": "😬",
"grinning_cat_face": "😺",
"grinning_cat_face_with_smiling_eyes": "😸",
"grinning_face": "😀",
"grinning_face_with_big_eyes": "😃",
"grinning_face_with_smiling_eyes": "😄",
"grinning_face_with_sweat": "😅",
"grinning_squinting_face": "😆",
"growing_heart": "💗",
"guard": "💂",
"guard_dark_skin_tone": "💂🏿",
"guard_light_skin_tone": "💂🏻",
"guard_medium-dark_skin_tone": "💂🏾",
"guard_medium-light_skin_tone": "💂🏼",
"guard_medium_skin_tone": "💂🏽",
"guide_dog": "🦮",
"guitar": "🎸",
"hamburger": "🍔",
"hammer": "🔨",
"hammer_and_pick": "⚒",
"hammer_and_wrench": "🛠",
"hamster_face": "🐹",
"hand_with_fingers_splayed": "🖐",
"hand_with_fingers_splayed_dark_skin_tone": "🖐🏿",
"hand_with_fingers_splayed_light_skin_tone": "🖐🏻",
"hand_with_fingers_splayed_medium-dark_skin_tone": "🖐🏾",
"hand_with_fingers_splayed_medium-light_skin_tone": "🖐🏼",
"hand_with_fingers_splayed_medium_skin_tone": "🖐🏽",
"handbag": "👜",
"handshake": "🤝",
"hatching_chick": "🐣",
"headphone": "🎧",
"hear-no-evil_monkey": "🙉",
"heart_decoration": "💟",
"heart_suit": "♥",
"heart_with_arrow": "💘",
"heart_with_ribbon": "💝",
"heavy_check_mark": "✔",
"heavy_division_sign": "➗",
"heavy_dollar_sign": "💲",
"heavy_heart_exclamation": "❣",
"heavy_large_circle": "⭕",
"heavy_minus_sign": "➖",
"heavy_multiplication_x": "✖",
"heavy_plus_sign": "➕",
"hedgehog": "🦔",
"helicopter": "🚁",
"herb": "🌿",
"hibiscus": "🌺",
"high-heeled_shoe": "👠",
"high-speed_train": "🚄",
"high_voltage": "⚡",
"hiking_boot": "🥾",
"hindu_temple": "🛕",
"hippopotamus": "🦛",
"hole": "🕳",
"honey_pot": "🍯",
"honeybee": "🐝",
"horizontal_traffic_light": "🚥",
"horse": "🐴",
"horse_face": "🐴",
"horse_racing": "🏇",
"horse_racing_dark_skin_tone": "🏇🏿",
"horse_racing_light_skin_tone": "🏇🏻",
"horse_racing_medium-dark_skin_tone": "🏇🏾",
"horse_racing_medium-light_skin_tone": "🏇🏼",
"horse_racing_medium_skin_tone": "🏇🏽",
"hospital": "🏥",
"hot_beverage": "☕",
"hot_dog": "🌭",
"hot_face": "🥵",
"hot_pepper": "🌶",
"hot_springs": "♨",
"hotel": "🏨",
"hourglass_done": "⌛",
"hourglass_not_done": "⏳",
"house": "🏠",
"house_with_garden": "🏡",
"houses": "🏘",
"hugging_face": "🤗",
"hundred_points": "💯",
"hushed_face": "😯",
"ice": "🧊",
"ice_cream": "🍨",
"ice_hockey": "🏒",
"ice_skate": "⛸",
"inbox_tray": "📥",
"incoming_envelope": "📨",
"index_pointing_up": "☝",
"index_pointing_up_dark_skin_tone": "☝🏿",
"index_pointing_up_light_skin_tone": "☝🏻",
"index_pointing_up_medium-dark_skin_tone": "☝🏾",
"index_pointing_up_medium-light_skin_tone": "☝🏼",
"index_pointing_up_medium_skin_tone": "☝🏽",
"infinity": "♾",
"information": "ℹ",
"input_latin_letters": "🔤",
"input_latin_lowercase": "🔡",
"input_latin_uppercase": "🔠",
"input_numbers": "🔢",
"input_symbols": "🔣",
"jack-o-lantern": "🎃",
"jeans": "👖",
"jigsaw": "🧩",
"joker": "🃏",
"joystick": "🕹",
"kaaba": "🕋",
"kangaroo": "🦘",
"key": "🔑",
"keyboard": "⌨",
"keycap_#": "#️⃣",
"keycap_*": "*️⃣",
"keycap_0": "0️⃣",
"keycap_1": "1️⃣",
"keycap_10": "🔟",
"keycap_2": "2️⃣",
"keycap_3": "3️⃣",
"keycap_4": "4️⃣",
"keycap_5": "5️⃣",
"keycap_6": "6️⃣",
"keycap_7": "7️⃣",
"keycap_8": "8️⃣",
"keycap_9": "9️⃣",
"kick_scooter": "🛴",
"kimono": "👘",
"kiss": "💋",
"kiss_man_man": "👨\u200d❤️\u200d💋\u200d👨",
"kiss_mark": "💋",
"kiss_woman_man": "👩\u200d❤️\u200d💋\u200d👨",
"kiss_woman_woman": "👩\u200d❤️\u200d💋\u200d👩",
"kissing_cat_face": "😽",
"kissing_face": "😗",
"kissing_face_with_closed_eyes": "😚",
"kissing_face_with_smiling_eyes": "😙",
"kitchen_knife": "🔪",
"kite": "🪁",
"kiwi_fruit": "🥝",
"koala": "🐨",
"lab_coat": "🥼",
"label": "🏷",
"lacrosse": "🥍",
"lady_beetle": "🐞",
"laptop_computer": "💻",
"large_blue_diamond": "🔷",
"large_orange_diamond": "🔶",
"last_quarter_moon": "🌗",
"last_quarter_moon_face": "🌜",
"last_track_button": "⏮",
"latin_cross": "✝",
"leaf_fluttering_in_wind": "🍃",
"leafy_green": "🥬",
"ledger": "📒",
"left-facing_fist": "🤛",
"left-facing_fist_dark_skin_tone": "🤛🏿",
"left-facing_fist_light_skin_tone": "🤛🏻",
"left-facing_fist_medium-dark_skin_tone": "🤛🏾",
"left-facing_fist_medium-light_skin_tone": "🤛🏼",
"left-facing_fist_medium_skin_tone": "🤛🏽",
"left-right_arrow": "↔",
"left_arrow": "⬅",
"left_arrow_curving_right": "↪",
"left_luggage": "🛅",
"left_speech_bubble": "🗨",
"leg": "🦵",
"lemon": "🍋",
"leopard": "🐆",
"level_slider": "🎚",
"light_bulb": "💡",
"light_rail": "🚈",
"link": "🔗",
"linked_paperclips": "🖇",
"lion_face": "🦁",
"lipstick": "💄",
"litter_in_bin_sign": "🚮",
"lizard": "🦎",
"llama": "🦙",
"lobster": "🦞",
"locked": "🔒",
"locked_with_key": "🔐",
"locked_with_pen": "🔏",
"locomotive": "🚂",
"lollipop": "🍭",
"lotion_bottle": "🧴",
"loudly_crying_face": "😭",
"loudspeaker": "📢",
"love-you_gesture": "🤟",
"love-you_gesture_dark_skin_tone": "🤟🏿",
"love-you_gesture_light_skin_tone": "🤟🏻",
"love-you_gesture_medium-dark_skin_tone": "🤟🏾",
"love-you_gesture_medium-light_skin_tone": "🤟🏼",
"love-you_gesture_medium_skin_tone": "🤟🏽",
"love_hotel": "🏩",
"love_letter": "💌",
"luggage": "🧳",
"lying_face": "🤥",
"mage": "🧙",
"mage_dark_skin_tone": "🧙🏿",
"mage_light_skin_tone": "🧙🏻",
"mage_medium-dark_skin_tone": "🧙🏾",
"mage_medium-light_skin_tone": "🧙🏼",
"mage_medium_skin_tone": "🧙🏽",
"magnet": "🧲",
"magnifying_glass_tilted_left": "🔍",
"magnifying_glass_tilted_right": "🔎",
"mahjong_red_dragon": "🀄",
"male_sign": "♂",
"man": "👨",
"man_and_woman_holding_hands": "👫",
"man_artist": "👨\u200d🎨",
"man_artist_dark_skin_tone": "👨🏿\u200d🎨",
"man_artist_light_skin_tone": "👨🏻\u200d🎨",
"man_artist_medium-dark_skin_tone": "👨🏾\u200d🎨",
"man_artist_medium-light_skin_tone": "👨🏼\u200d🎨",
"man_artist_medium_skin_tone": "👨🏽\u200d🎨",
"man_astronaut": "👨\u200d🚀",
"man_astronaut_dark_skin_tone": "👨🏿\u200d🚀",
"man_astronaut_light_skin_tone": "👨🏻\u200d🚀",
"man_astronaut_medium-dark_skin_tone": "👨🏾\u200d🚀",
"man_astronaut_medium-light_skin_tone": "👨🏼\u200d🚀",
"man_astronaut_medium_skin_tone": "👨🏽\u200d🚀",
"man_biking": "🚴\u200d♂️",
"man_biking_dark_skin_tone": "🚴🏿\u200d♂️",
"man_biking_light_skin_tone": "🚴🏻\u200d♂️",
"man_biking_medium-dark_skin_tone": "🚴🏾\u200d♂️",
"man_biking_medium-light_skin_tone": "🚴🏼\u200d♂️",
"man_biking_medium_skin_tone": "🚴🏽\u200d♂️",
"man_bouncing_ball": "⛹️\u200d♂️",
"man_bouncing_ball_dark_skin_tone": "⛹🏿\u200d♂️",
"man_bouncing_ball_light_skin_tone": "⛹🏻\u200d♂️",
"man_bouncing_ball_medium-dark_skin_tone": "⛹🏾\u200d♂️",
"man_bouncing_ball_medium-light_skin_tone": "⛹🏼\u200d♂️",
"man_bouncing_ball_medium_skin_tone": "⛹🏽\u200d♂️",
"man_bowing": "🙇\u200d♂️",
"man_bowing_dark_skin_tone": "🙇🏿\u200d♂️",
"man_bowing_light_skin_tone": "🙇🏻\u200d♂️",
"man_bowing_medium-dark_skin_tone": "🙇🏾\u200d♂️",
"man_bowing_medium-light_skin_tone": "🙇🏼\u200d♂️",
"man_bowing_medium_skin_tone": "🙇🏽\u200d♂️",
"man_cartwheeling": "🤸\u200d♂️",
"man_cartwheeling_dark_skin_tone": "🤸🏿\u200d♂️",
"man_cartwheeling_light_skin_tone": "🤸🏻\u200d♂️",
"man_cartwheeling_medium-dark_skin_tone": "🤸🏾\u200d♂️",
"man_cartwheeling_medium-light_skin_tone": "🤸🏼\u200d♂️",
"man_cartwheeling_medium_skin_tone": "🤸🏽\u200d♂️",
"man_climbing": "🧗\u200d♂️",
"man_climbing_dark_skin_tone": "🧗🏿\u200d♂️",
"man_climbing_light_skin_tone": "🧗🏻\u200d♂️",
"man_climbing_medium-dark_skin_tone": "🧗🏾\u200d♂️",
"man_climbing_medium-light_skin_tone": "🧗🏼\u200d♂️",
"man_climbing_medium_skin_tone": "🧗🏽\u200d♂️",
"man_construction_worker": "👷\u200d♂️",
"man_construction_worker_dark_skin_tone": "👷🏿\u200d♂️",
"man_construction_worker_light_skin_tone": "👷🏻\u200d♂️",
"man_construction_worker_medium-dark_skin_tone": "👷🏾\u200d♂️",
"man_construction_worker_medium-light_skin_tone": "👷🏼\u200d♂️",
"man_construction_worker_medium_skin_tone": "👷🏽\u200d♂️",
"man_cook": "👨\u200d🍳",
"man_cook_dark_skin_tone": "👨🏿\u200d🍳",
"man_cook_light_skin_tone": "👨🏻\u200d🍳",
"man_cook_medium-dark_skin_tone": "👨🏾\u200d🍳",
"man_cook_medium-light_skin_tone": "👨🏼\u200d🍳",
"man_cook_medium_skin_tone": "👨🏽\u200d🍳",
"man_dancing": "🕺",
"man_dancing_dark_skin_tone": "🕺🏿",
"man_dancing_light_skin_tone": "🕺🏻",
"man_dancing_medium-dark_skin_tone": "🕺🏾",
"man_dancing_medium-light_skin_tone": "🕺🏼",
"man_dancing_medium_skin_tone": "🕺🏽",
"man_dark_skin_tone": "👨🏿",
"man_detective": "🕵️\u200d♂️",
"man_detective_dark_skin_tone": "🕵🏿\u200d♂️",
"man_detective_light_skin_tone": "🕵🏻\u200d♂️",
"man_detective_medium-dark_skin_tone": "🕵🏾\u200d♂️",
"man_detective_medium-light_skin_tone": "🕵🏼\u200d♂️",
"man_detective_medium_skin_tone": "🕵🏽\u200d♂️",
"man_elf": "🧝\u200d♂️",
"man_elf_dark_skin_tone": "🧝🏿\u200d♂️",
"man_elf_light_skin_tone": "🧝🏻\u200d♂️",
"man_elf_medium-dark_skin_tone": "🧝🏾\u200d♂️",
"man_elf_medium-light_skin_tone": "🧝🏼\u200d♂️",
"man_elf_medium_skin_tone": "🧝🏽\u200d♂️",
"man_facepalming": "🤦\u200d♂️",
"man_facepalming_dark_skin_tone": "🤦🏿\u200d♂️",
"man_facepalming_light_skin_tone": "🤦🏻\u200d♂️",
"man_facepalming_medium-dark_skin_tone": "🤦🏾\u200d♂️",
"man_facepalming_medium-light_skin_tone": "🤦🏼\u200d♂️",
"man_facepalming_medium_skin_tone": "🤦🏽\u200d♂️",
"man_factory_worker": "👨\u200d🏭",
"man_factory_worker_dark_skin_tone": "👨🏿\u200d🏭",
"man_factory_worker_light_skin_tone": "👨🏻\u200d🏭",
"man_factory_worker_medium-dark_skin_tone": "👨🏾\u200d🏭",
"man_factory_worker_medium-light_skin_tone": "👨🏼\u200d🏭",
"man_factory_worker_medium_skin_tone": "👨🏽\u200d🏭",
"man_fairy": "🧚\u200d♂️",
"man_fairy_dark_skin_tone": "🧚🏿\u200d♂️",
"man_fairy_light_skin_tone": "🧚🏻\u200d♂️",
"man_fairy_medium-dark_skin_tone": "🧚🏾\u200d♂️",
"man_fairy_medium-light_skin_tone": "🧚🏼\u200d♂️",
"man_fairy_medium_skin_tone": "🧚🏽\u200d♂️",
"man_farmer": "👨\u200d🌾",
"man_farmer_dark_skin_tone": "👨🏿\u200d🌾",
"man_farmer_light_skin_tone": "👨🏻\u200d🌾",
"man_farmer_medium-dark_skin_tone": "👨🏾\u200d🌾",
"man_farmer_medium-light_skin_tone": "👨🏼\u200d🌾",
"man_farmer_medium_skin_tone": "👨🏽\u200d🌾",
"man_firefighter": "👨\u200d🚒",
"man_firefighter_dark_skin_tone": "👨🏿\u200d🚒",
"man_firefighter_light_skin_tone": "👨🏻\u200d🚒",
"man_firefighter_medium-dark_skin_tone": "👨🏾\u200d🚒",
"man_firefighter_medium-light_skin_tone": "👨🏼\u200d🚒",
"man_firefighter_medium_skin_tone": "👨🏽\u200d🚒",
"man_frowning": "🙍\u200d♂️",
"man_frowning_dark_skin_tone": "🙍🏿\u200d♂️",
"man_frowning_light_skin_tone": "🙍🏻\u200d♂️",
"man_frowning_medium-dark_skin_tone": "🙍🏾\u200d♂️",
"man_frowning_medium-light_skin_tone": "🙍🏼\u200d♂️",
"man_frowning_medium_skin_tone": "🙍🏽\u200d♂️",
"man_genie": "🧞\u200d♂️",
"man_gesturing_no": "🙅\u200d♂️",
"man_gesturing_no_dark_skin_tone": "🙅🏿\u200d♂️",
"man_gesturing_no_light_skin_tone": "🙅🏻\u200d♂️",
"man_gesturing_no_medium-dark_skin_tone": "🙅🏾\u200d♂️",
"man_gesturing_no_medium-light_skin_tone": "🙅🏼\u200d♂️",
"man_gesturing_no_medium_skin_tone": "🙅🏽\u200d♂️",
"man_gesturing_ok": "🙆\u200d♂️",
"man_gesturing_ok_dark_skin_tone": "🙆🏿\u200d♂️",
"man_gesturing_ok_light_skin_tone": "🙆🏻\u200d♂️",
"man_gesturing_ok_medium-dark_skin_tone": "🙆🏾\u200d♂️",
"man_gesturing_ok_medium-light_skin_tone": "🙆🏼\u200d♂️",
"man_gesturing_ok_medium_skin_tone": "🙆🏽\u200d♂️",
"man_getting_haircut": "💇\u200d♂️",
"man_getting_haircut_dark_skin_tone": "💇🏿\u200d♂️",
"man_getting_haircut_light_skin_tone": "💇🏻\u200d♂️",
"man_getting_haircut_medium-dark_skin_tone": "💇🏾\u200d♂️",
"man_getting_haircut_medium-light_skin_tone": "💇🏼\u200d♂️",
"man_getting_haircut_medium_skin_tone": "💇🏽\u200d♂️",
"man_getting_massage": "💆\u200d♂️",
"man_getting_massage_dark_skin_tone": "💆🏿\u200d♂️",
"man_getting_massage_light_skin_tone": "💆🏻\u200d♂️",
"man_getting_massage_medium-dark_skin_tone": "💆🏾\u200d♂️",
"man_getting_massage_medium-light_skin_tone": "💆🏼\u200d♂️",
"man_getting_massage_medium_skin_tone": "💆🏽\u200d♂️",
"man_golfing": "🏌️\u200d♂️",
"man_golfing_dark_skin_tone": "🏌🏿\u200d♂️",
"man_golfing_light_skin_tone": "🏌🏻\u200d♂️",
"man_golfing_medium-dark_skin_tone": "🏌🏾\u200d♂️",
"man_golfing_medium-light_skin_tone": "🏌🏼\u200d♂️",
"man_golfing_medium_skin_tone": "🏌🏽\u200d♂️",
"man_guard": "💂\u200d♂️",
"man_guard_dark_skin_tone": "💂🏿\u200d♂️",
"man_guard_light_skin_tone": "💂🏻\u200d♂️",
"man_guard_medium-dark_skin_tone": "💂🏾\u200d♂️",
"man_guard_medium-light_skin_tone": "💂🏼\u200d♂️",
"man_guard_medium_skin_tone": "💂🏽\u200d♂️",
"man_health_worker": "👨\u200d⚕️",
"man_health_worker_dark_skin_tone": "👨🏿\u200d⚕️",
"man_health_worker_light_skin_tone": "👨🏻\u200d⚕️",
"man_health_worker_medium-dark_skin_tone": "👨🏾\u200d⚕️",
"man_health_worker_medium-light_skin_tone": "👨🏼\u200d⚕️",
"man_health_worker_medium_skin_tone": "👨🏽\u200d⚕️",
"man_in_lotus_position": "🧘\u200d♂️",
"man_in_lotus_position_dark_skin_tone": "🧘🏿\u200d♂️",
"man_in_lotus_position_light_skin_tone": "🧘🏻\u200d♂️",
"man_in_lotus_position_medium-dark_skin_tone": "🧘🏾\u200d♂️",
"man_in_lotus_position_medium-light_skin_tone": "🧘🏼\u200d♂️",
"man_in_lotus_position_medium_skin_tone": "🧘🏽\u200d♂️",
"man_in_manual_wheelchair": "👨\u200d🦽",
"man_in_motorized_wheelchair": "👨\u200d🦼",
"man_in_steamy_room": "🧖\u200d♂️",
"man_in_steamy_room_dark_skin_tone": "🧖🏿\u200d♂️",
"man_in_steamy_room_light_skin_tone": "🧖🏻\u200d♂️",
"man_in_steamy_room_medium-dark_skin_tone": "🧖🏾\u200d♂️",
"man_in_steamy_room_medium-light_skin_tone": "🧖🏼\u200d♂️",
"man_in_steamy_room_medium_skin_tone": "🧖🏽\u200d♂️",
"man_in_suit_levitating": "🕴",
"man_in_suit_levitating_dark_skin_tone": "🕴🏿",
"man_in_suit_levitating_light_skin_tone": "🕴🏻",
"man_in_suit_levitating_medium-dark_skin_tone": "🕴🏾",
"man_in_suit_levitating_medium-light_skin_tone": "🕴🏼",
"man_in_suit_levitating_medium_skin_tone": "🕴🏽",
"man_in_tuxedo": "🤵",
"man_in_tuxedo_dark_skin_tone": "🤵🏿",
"man_in_tuxedo_light_skin_tone": "🤵🏻",
"man_in_tuxedo_medium-dark_skin_tone": "🤵🏾",
"man_in_tuxedo_medium-light_skin_tone": "🤵🏼",
"man_in_tuxedo_medium_skin_tone": "🤵🏽",
"man_judge": "👨\u200d⚖️",
"man_judge_dark_skin_tone": "👨🏿\u200d⚖️",
"man_judge_light_skin_tone": "👨🏻\u200d⚖️",
"man_judge_medium-dark_skin_tone": "👨🏾\u200d⚖️",
"man_judge_medium-light_skin_tone": "👨🏼\u200d⚖️",
"man_judge_medium_skin_tone": "👨🏽\u200d⚖️",
"man_juggling": "🤹\u200d♂️",
"man_juggling_dark_skin_tone": "🤹🏿\u200d♂️",
"man_juggling_light_skin_tone": "🤹🏻\u200d♂️",
"man_juggling_medium-dark_skin_tone": "🤹🏾\u200d♂️",
"man_juggling_medium-light_skin_tone": "🤹🏼\u200d♂️",
"man_juggling_medium_skin_tone": "🤹🏽\u200d♂️",
"man_lifting_weights": "🏋️\u200d♂️",
"man_lifting_weights_dark_skin_tone": "🏋🏿\u200d♂️",
"man_lifting_weights_light_skin_tone": "🏋🏻\u200d♂️",
"man_lifting_weights_medium-dark_skin_tone": "🏋🏾\u200d♂️",
"man_lifting_weights_medium-light_skin_tone": "🏋🏼\u200d♂️",
"man_lifting_weights_medium_skin_tone": "🏋🏽\u200d♂️",
"man_light_skin_tone": "👨🏻",
"man_mage": "🧙\u200d♂️",
"man_mage_dark_skin_tone": "🧙🏿\u200d♂️",
"man_mage_light_skin_tone": "🧙🏻\u200d♂️",
"man_mage_medium-dark_skin_tone": "🧙🏾\u200d♂️",
"man_mage_medium-light_skin_tone": "🧙🏼\u200d♂️",
"man_mage_medium_skin_tone": "🧙🏽\u200d♂️",
"man_mechanic": "👨\u200d🔧",
"man_mechanic_dark_skin_tone": "👨🏿\u200d🔧",
"man_mechanic_light_skin_tone": "👨🏻\u200d🔧",
"man_mechanic_medium-dark_skin_tone": "👨🏾\u200d🔧",
"man_mechanic_medium-light_skin_tone": "👨🏼\u200d🔧",
"man_mechanic_medium_skin_tone": "👨🏽\u200d🔧",
"man_medium-dark_skin_tone": "👨🏾",
"man_medium-light_skin_tone": "👨🏼",
"man_medium_skin_tone": "👨🏽",
"man_mountain_biking": "🚵\u200d♂️",
"man_mountain_biking_dark_skin_tone": "🚵🏿\u200d♂️",
"man_mountain_biking_light_skin_tone": "🚵🏻\u200d♂️",
"man_mountain_biking_medium-dark_skin_tone": "🚵🏾\u200d♂️",
"man_mountain_biking_medium-light_skin_tone": "🚵🏼\u200d♂️",
"man_mountain_biking_medium_skin_tone": "🚵🏽\u200d♂️",
"man_office_worker": "👨\u200d💼",
"man_office_worker_dark_skin_tone": "👨🏿\u200d💼",
"man_office_worker_light_skin_tone": "👨🏻\u200d💼",
"man_office_worker_medium-dark_skin_tone": "👨🏾\u200d💼",
"man_office_worker_medium-light_skin_tone": "👨🏼\u200d💼",
"man_office_worker_medium_skin_tone": "👨🏽\u200d💼",
"man_pilot": "👨\u200d✈️",
"man_pilot_dark_skin_tone": "👨🏿\u200d✈️",
"man_pilot_light_skin_tone": "👨🏻\u200d✈️",
"man_pilot_medium-dark_skin_tone": "👨🏾\u200d✈️",
"man_pilot_medium-light_skin_tone": "👨🏼\u200d✈️",
"man_pilot_medium_skin_tone": "👨🏽\u200d✈️",
"man_playing_handball": "🤾\u200d♂️",
"man_playing_handball_dark_skin_tone": "🤾🏿\u200d♂️",
"man_playing_handball_light_skin_tone": "🤾🏻\u200d♂️",
"man_playing_handball_medium-dark_skin_tone": "🤾🏾\u200d♂️",
"man_playing_handball_medium-light_skin_tone": "🤾🏼\u200d♂️",
"man_playing_handball_medium_skin_tone": "🤾🏽\u200d♂️",
"man_playing_water_polo": "🤽\u200d♂️",
"man_playing_water_polo_dark_skin_tone": "🤽🏿\u200d♂️",
"man_playing_water_polo_light_skin_tone": "🤽🏻\u200d♂️",
"man_playing_water_polo_medium-dark_skin_tone": "🤽🏾\u200d♂️",
"man_playing_water_polo_medium-light_skin_tone": "🤽🏼\u200d♂️",
"man_playing_water_polo_medium_skin_tone": "🤽🏽\u200d♂️",
"man_police_officer": "👮\u200d♂️",
"man_police_officer_dark_skin_tone": "👮🏿\u200d♂️",
"man_police_officer_light_skin_tone": "👮🏻\u200d♂️",
"man_police_officer_medium-dark_skin_tone": "👮🏾\u200d♂️",
"man_police_officer_medium-light_skin_tone": "👮🏼\u200d♂️",
"man_police_officer_medium_skin_tone": "👮🏽\u200d♂️",
"man_pouting": "🙎\u200d♂️",
"man_pouting_dark_skin_tone": "🙎🏿\u200d♂️",
"man_pouting_light_skin_tone": "🙎🏻\u200d♂️",
"man_pouting_medium-dark_skin_tone": "🙎🏾\u200d♂️",
"man_pouting_medium-light_skin_tone": "🙎🏼\u200d♂️",
"man_pouting_medium_skin_tone": "🙎🏽\u200d♂️",
"man_raising_hand": "🙋\u200d♂️",
"man_raising_hand_dark_skin_tone": "🙋🏿\u200d♂️",
"man_raising_hand_light_skin_tone": "🙋🏻\u200d♂️",
"man_raising_hand_medium-dark_skin_tone": "🙋🏾\u200d♂️",
"man_raising_hand_medium-light_skin_tone": "🙋🏼\u200d♂️",
"man_raising_hand_medium_skin_tone": "🙋🏽\u200d♂️",
"man_rowing_boat": "🚣\u200d♂️",
"man_rowing_boat_dark_skin_tone": "🚣🏿\u200d♂️",
"man_rowing_boat_light_skin_tone": "🚣🏻\u200d♂️",
"man_rowing_boat_medium-dark_skin_tone": "🚣🏾\u200d♂️",
"man_rowing_boat_medium-light_skin_tone": "🚣🏼\u200d♂️",
"man_rowing_boat_medium_skin_tone": "🚣🏽\u200d♂️",
"man_running": "🏃\u200d♂️",
"man_running_dark_skin_tone": "🏃🏿\u200d♂️",
"man_running_light_skin_tone": "🏃🏻\u200d♂️",
"man_running_medium-dark_skin_tone": "🏃🏾\u200d♂️",
"man_running_medium-light_skin_tone": "🏃🏼\u200d♂️",
"man_running_medium_skin_tone": "🏃🏽\u200d♂️",
"man_scientist": "👨\u200d🔬",
"man_scientist_dark_skin_tone": "👨🏿\u200d🔬",
"man_scientist_light_skin_tone": "👨🏻\u200d🔬",
"man_scientist_medium-dark_skin_tone": "👨🏾\u200d🔬",
"man_scientist_medium-light_skin_tone": "👨🏼\u200d🔬",
"man_scientist_medium_skin_tone": "👨🏽\u200d🔬",
"man_shrugging": "🤷\u200d♂️",
"man_shrugging_dark_skin_tone": "🤷🏿\u200d♂️",
"man_shrugging_light_skin_tone": "🤷🏻\u200d♂️",
"man_shrugging_medium-dark_skin_tone": "🤷🏾\u200d♂️",
"man_shrugging_medium-light_skin_tone": "🤷🏼\u200d♂️",
"man_shrugging_medium_skin_tone": "🤷🏽\u200d♂️",
"man_singer": "👨\u200d🎤",
"man_singer_dark_skin_tone": "👨🏿\u200d🎤",
"man_singer_light_skin_tone": "👨🏻\u200d🎤",
"man_singer_medium-dark_skin_tone": "👨🏾\u200d🎤",
"man_singer_medium-light_skin_tone": "👨🏼\u200d🎤",
"man_singer_medium_skin_tone": "👨🏽\u200d🎤",
"man_student": "👨\u200d🎓",
"man_student_dark_skin_tone": "👨🏿\u200d🎓",
"man_student_light_skin_tone": "👨🏻\u200d🎓",
"man_student_medium-dark_skin_tone": "👨🏾\u200d🎓",
"man_student_medium-light_skin_tone": "👨🏼\u200d🎓",
"man_student_medium_skin_tone": "👨🏽\u200d🎓",
"man_surfing": "🏄\u200d♂️",
"man_surfing_dark_skin_tone": "🏄🏿\u200d♂️",
"man_surfing_light_skin_tone": "🏄🏻\u200d♂️",
"man_surfing_medium-dark_skin_tone": "🏄🏾\u200d♂️",
"man_surfing_medium-light_skin_tone": "🏄🏼\u200d♂️",
"man_surfing_medium_skin_tone": "🏄🏽\u200d♂️",
"man_swimming": "🏊\u200d♂️",
"man_swimming_dark_skin_tone": "🏊🏿\u200d♂️",
"man_swimming_light_skin_tone": "🏊🏻\u200d♂️",
"man_swimming_medium-dark_skin_tone": "🏊🏾\u200d♂️",
"man_swimming_medium-light_skin_tone": "🏊🏼\u200d♂️",
"man_swimming_medium_skin_tone": "🏊🏽\u200d♂️",
"man_teacher": "👨\u200d🏫",
"man_teacher_dark_skin_tone": "👨🏿\u200d🏫",
"man_teacher_light_skin_tone": "👨🏻\u200d🏫",
"man_teacher_medium-dark_skin_tone": "👨🏾\u200d🏫",
"man_teacher_medium-light_skin_tone": "👨🏼\u200d🏫",
"man_teacher_medium_skin_tone": "👨🏽\u200d🏫",
"man_technologist": "👨\u200d💻",
"man_technologist_dark_skin_tone": "👨🏿\u200d💻",
"man_technologist_light_skin_tone": "👨🏻\u200d💻",
"man_technologist_medium-dark_skin_tone": "👨🏾\u200d💻",
"man_technologist_medium-light_skin_tone": "👨🏼\u200d💻",
"man_technologist_medium_skin_tone": "👨🏽\u200d💻",
"man_tipping_hand": "💁\u200d♂️",
"man_tipping_hand_dark_skin_tone": "💁🏿\u200d♂️",
"man_tipping_hand_light_skin_tone": "💁🏻\u200d♂️",
"man_tipping_hand_medium-dark_skin_tone": "💁🏾\u200d♂️",
"man_tipping_hand_medium-light_skin_tone": "💁🏼\u200d♂️",
"man_tipping_hand_medium_skin_tone": "💁🏽\u200d♂️",
"man_vampire": "🧛\u200d♂️",
"man_vampire_dark_skin_tone": "🧛🏿\u200d♂️",
"man_vampire_light_skin_tone": "🧛🏻\u200d♂️",
"man_vampire_medium-dark_skin_tone": "🧛🏾\u200d♂️",
"man_vampire_medium-light_skin_tone": "🧛🏼\u200d♂️",
"man_vampire_medium_skin_tone": "🧛🏽\u200d♂️",
"man_walking": "🚶\u200d♂️",
"man_walking_dark_skin_tone": "🚶🏿\u200d♂️",
"man_walking_light_skin_tone": "🚶🏻\u200d♂️",
"man_walking_medium-dark_skin_tone": "🚶🏾\u200d♂️",
"man_walking_medium-light_skin_tone": "🚶🏼\u200d♂️",
"man_walking_medium_skin_tone": "🚶🏽\u200d♂️",
"man_wearing_turban": "👳\u200d♂️",
"man_wearing_turban_dark_skin_tone": "👳🏿\u200d♂️",
"man_wearing_turban_light_skin_tone": "👳🏻\u200d♂️",
"man_wearing_turban_medium-dark_skin_tone": "👳🏾\u200d♂️",
"man_wearing_turban_medium-light_skin_tone": "👳🏼\u200d♂️",
"man_wearing_turban_medium_skin_tone": "👳🏽\u200d♂️",
"man_with_probing_cane": "👨\u200d🦯",
"man_with_chinese_cap": "👲",
"man_with_chinese_cap_dark_skin_tone": "👲🏿",
"man_with_chinese_cap_light_skin_tone": "👲🏻",
"man_with_chinese_cap_medium-dark_skin_tone": "👲🏾",
"man_with_chinese_cap_medium-light_skin_tone": "👲🏼",
"man_with_chinese_cap_medium_skin_tone": "👲🏽",
"man_zombie": "🧟\u200d♂️",
"mango": "🥭",
"mantelpiece_clock": "🕰",
"manual_wheelchair": "🦽",
"man’s_shoe": "👞",
"map_of_japan": "🗾",
"maple_leaf": "🍁",
"martial_arts_uniform": "🥋",
"mate": "🧉",
"meat_on_bone": "🍖",
"mechanical_arm": "🦾",
"mechanical_leg": "🦿",
"medical_symbol": "⚕",
"megaphone": "📣",
"melon": "🍈",
"memo": "📝",
"men_with_bunny_ears": "👯\u200d♂️",
"men_wrestling": "🤼\u200d♂️",
"menorah": "🕎",
"men’s_room": "🚹",
"mermaid": "🧜\u200d♀️",
"mermaid_dark_skin_tone": "🧜🏿\u200d♀️",
"mermaid_light_skin_tone": "🧜🏻\u200d♀️",
"mermaid_medium-dark_skin_tone": "🧜🏾\u200d♀️",
"mermaid_medium-light_skin_tone": "🧜🏼\u200d♀️",
"mermaid_medium_skin_tone": "🧜🏽\u200d♀️",
"merman": "🧜\u200d♂️",
"merman_dark_skin_tone": "🧜🏿\u200d♂️",
"merman_light_skin_tone": "🧜🏻\u200d♂️",
"merman_medium-dark_skin_tone": "🧜🏾\u200d♂️",
"merman_medium-light_skin_tone": "🧜🏼\u200d♂️",
"merman_medium_skin_tone": "🧜🏽\u200d♂️",
"merperson": "🧜",
"merperson_dark_skin_tone": "🧜🏿",
"merperson_light_skin_tone": "🧜🏻",
"merperson_medium-dark_skin_tone": "🧜🏾",
"merperson_medium-light_skin_tone": "🧜🏼",
"merperson_medium_skin_tone": "🧜🏽",
"metro": "🚇",
"microbe": "🦠",
"microphone": "🎤",
"microscope": "🔬",
"middle_finger": "🖕",
"middle_finger_dark_skin_tone": "🖕🏿",
"middle_finger_light_skin_tone": "🖕🏻",
"middle_finger_medium-dark_skin_tone": "🖕🏾",
"middle_finger_medium-light_skin_tone": "🖕🏼",
"middle_finger_medium_skin_tone": "🖕🏽",
"military_medal": "🎖",
"milky_way": "🌌",
"minibus": "🚐",
"moai": "🗿",
"mobile_phone": "📱",
"mobile_phone_off": "📴",
"mobile_phone_with_arrow": "📲",
"money-mouth_face": "🤑",
"money_bag": "💰",
"money_with_wings": "💸",
"monkey": "🐒",
"monkey_face": "🐵",
"monorail": "🚝",
"moon_cake": "🥮",
"moon_viewing_ceremony": "🎑",
"mosque": "🕌",
"mosquito": "🦟",
"motor_boat": "🛥",
"motor_scooter": "🛵",
"motorcycle": "🏍",
"motorized_wheelchair": "🦼",
"motorway": "🛣",
"mount_fuji": "🗻",
"mountain": "⛰",
"mountain_cableway": "🚠",
"mountain_railway": "🚞",
"mouse": "🐭",
"mouse_face": "🐭",
"mouth": "👄",
"movie_camera": "🎥",
"mushroom": "🍄",
"musical_keyboard": "🎹",
"musical_note": "🎵",
"musical_notes": "🎶",
"musical_score": "🎼",
"muted_speaker": "🔇",
"nail_polish": "💅",
"nail_polish_dark_skin_tone": "💅🏿",
"nail_polish_light_skin_tone": "💅🏻",
"nail_polish_medium-dark_skin_tone": "💅🏾",
"nail_polish_medium-light_skin_tone": "💅🏼",
"nail_polish_medium_skin_tone": "💅🏽",
"name_badge": "📛",
"national_park": "🏞",
"nauseated_face": "🤢",
"nazar_amulet": "🧿",
"necktie": "👔",
"nerd_face": "🤓",
"neutral_face": "😐",
"new_moon": "🌑",
"new_moon_face": "🌚",
"newspaper": "📰",
"next_track_button": "⏭",
"night_with_stars": "🌃",
"nine-thirty": "🕤",
"nine_o’clock": "🕘",
"no_bicycles": "🚳",
"no_entry": "⛔",
"no_littering": "🚯",
"no_mobile_phones": "📵",
"no_one_under_eighteen": "🔞",
"no_pedestrians": "🚷",
"no_smoking": "🚭",
"non-potable_water": "🚱",
"nose": "👃",
"nose_dark_skin_tone": "👃🏿",
"nose_light_skin_tone": "👃🏻",
"nose_medium-dark_skin_tone": "👃🏾",
"nose_medium-light_skin_tone": "👃🏼",
"nose_medium_skin_tone": "👃🏽",
"notebook": "📓",
"notebook_with_decorative_cover": "📔",
"nut_and_bolt": "🔩",
"octopus": "🐙",
"oden": "🍢",
"office_building": "🏢",
"ogre": "👹",
"oil_drum": "🛢",
"old_key": "🗝",
"old_man": "👴",
"old_man_dark_skin_tone": "👴🏿",
"old_man_light_skin_tone": "👴🏻",
"old_man_medium-dark_skin_tone": "👴🏾",
"old_man_medium-light_skin_tone": "👴🏼",
"old_man_medium_skin_tone": "👴🏽",
"old_woman": "👵",
"old_woman_dark_skin_tone": "👵🏿",
"old_woman_light_skin_tone": "👵🏻",
"old_woman_medium-dark_skin_tone": "👵🏾",
"old_woman_medium-light_skin_tone": "👵🏼",
"old_woman_medium_skin_tone": "👵🏽",
"older_adult": "🧓",
"older_adult_dark_skin_tone": "🧓🏿",
"older_adult_light_skin_tone": "🧓🏻",
"older_adult_medium-dark_skin_tone": "🧓🏾",
"older_adult_medium-light_skin_tone": "🧓🏼",
"older_adult_medium_skin_tone": "🧓🏽",
"om": "🕉",
"oncoming_automobile": "🚘",
"oncoming_bus": "🚍",
"oncoming_fist": "👊",
"oncoming_fist_dark_skin_tone": "👊🏿",
"oncoming_fist_light_skin_tone": "👊🏻",
"oncoming_fist_medium-dark_skin_tone": "👊🏾",
"oncoming_fist_medium-light_skin_tone": "👊🏼",
"oncoming_fist_medium_skin_tone": "👊🏽",
"oncoming_police_car": "🚔",
"oncoming_taxi": "🚖",
"one-piece_swimsuit": "🩱",
"one-thirty": "🕜",
"one_o’clock": "🕐",
"onion": "🧅",
"open_book": "📖",
"open_file_folder": "📂",
"open_hands": "👐",
"open_hands_dark_skin_tone": "👐🏿",
"open_hands_light_skin_tone": "👐🏻",
"open_hands_medium-dark_skin_tone": "👐🏾",
"open_hands_medium-light_skin_tone": "👐🏼",
"open_hands_medium_skin_tone": "👐🏽",
"open_mailbox_with_lowered_flag": "📭",
"open_mailbox_with_raised_flag": "📬",
"optical_disk": "💿",
"orange_book": "📙",
"orange_circle": "🟠",
"orange_heart": "🧡",
"orange_square": "🟧",
"orangutan": "🦧",
"orthodox_cross": "☦",
"otter": "🦦",
"outbox_tray": "📤",
"owl": "🦉",
"ox": "🐂",
"oyster": "🦪",
"package": "📦",
"page_facing_up": "📄",
"page_with_curl": "📃",
"pager": "📟",
"paintbrush": "🖌",
"palm_tree": "🌴",
"palms_up_together": "🤲",
"palms_up_together_dark_skin_tone": "🤲🏿",
"palms_up_together_light_skin_tone": "🤲🏻",
"palms_up_together_medium-dark_skin_tone": "🤲🏾",
"palms_up_together_medium-light_skin_tone": "🤲🏼",
"palms_up_together_medium_skin_tone": "🤲🏽",
"pancakes": "🥞",
"panda_face": "🐼",
"paperclip": "📎",
"parrot": "🦜",
"part_alternation_mark": "〽",
"party_popper": "🎉",
"partying_face": "🥳",
"passenger_ship": "🛳",
"passport_control": "🛂",
"pause_button": "⏸",
"paw_prints": "🐾",
"peace_symbol": "☮",
"peach": "🍑",
"peacock": "🦚",
"peanuts": "🥜",
"pear": "🍐",
"pen": "🖊",
"pencil": "📝",
"penguin": "🐧",
"pensive_face": "😔",
"people_holding_hands": "🧑\u200d🤝\u200d🧑",
"people_with_bunny_ears": "👯",
"people_wrestling": "🤼",
"performing_arts": "🎭",
"persevering_face": "😣",
"person_biking": "🚴",
"person_biking_dark_skin_tone": "🚴🏿",
"person_biking_light_skin_tone": "🚴🏻",
"person_biking_medium-dark_skin_tone": "🚴🏾",
"person_biking_medium-light_skin_tone": "🚴🏼",
"person_biking_medium_skin_tone": "🚴🏽",
"person_bouncing_ball": "⛹",
"person_bouncing_ball_dark_skin_tone": "⛹🏿",
"person_bouncing_ball_light_skin_tone": "⛹🏻",
"person_bouncing_ball_medium-dark_skin_tone": "⛹🏾",
"person_bouncing_ball_medium-light_skin_tone": "⛹🏼",
"person_bouncing_ball_medium_skin_tone": "⛹🏽",
"person_bowing": "🙇",
"person_bowing_dark_skin_tone": "🙇🏿",
"person_bowing_light_skin_tone": "🙇🏻",
"person_bowing_medium-dark_skin_tone": "🙇🏾",
"person_bowing_medium-light_skin_tone": "🙇🏼",
"person_bowing_medium_skin_tone": "🙇🏽",
"person_cartwheeling": "🤸",
"person_cartwheeling_dark_skin_tone": "🤸🏿",
"person_cartwheeling_light_skin_tone": "🤸🏻",
"person_cartwheeling_medium-dark_skin_tone": "🤸🏾",
"person_cartwheeling_medium-light_skin_tone": "🤸🏼",
"person_cartwheeling_medium_skin_tone": "🤸🏽",
"person_climbing": "🧗",
"person_climbing_dark_skin_tone": "🧗🏿",
"person_climbing_light_skin_tone": "🧗🏻",
"person_climbing_medium-dark_skin_tone": "🧗🏾",
"person_climbing_medium-light_skin_tone": "🧗🏼",
"person_climbing_medium_skin_tone": "🧗🏽",
"person_facepalming": "🤦",
"person_facepalming_dark_skin_tone": "🤦🏿",
"person_facepalming_light_skin_tone": "🤦🏻",
"person_facepalming_medium-dark_skin_tone": "🤦🏾",
"person_facepalming_medium-light_skin_tone": "🤦🏼",
"person_facepalming_medium_skin_tone": "🤦🏽",
"person_fencing": "🤺",
"person_frowning": "🙍",
"person_frowning_dark_skin_tone": "🙍🏿",
"person_frowning_light_skin_tone": "🙍🏻",
"person_frowning_medium-dark_skin_tone": "🙍🏾",
"person_frowning_medium-light_skin_tone": "🙍🏼",
"person_frowning_medium_skin_tone": "🙍🏽",
"person_gesturing_no": "🙅",
"person_gesturing_no_dark_skin_tone": "🙅🏿",
"person_gesturing_no_light_skin_tone": "🙅🏻",
"person_gesturing_no_medium-dark_skin_tone": "🙅🏾",
"person_gesturing_no_medium-light_skin_tone": "🙅🏼",
"person_gesturing_no_medium_skin_tone": "🙅🏽",
"person_gesturing_ok": "🙆",
"person_gesturing_ok_dark_skin_tone": "🙆🏿",
"person_gesturing_ok_light_skin_tone": "🙆🏻",
"person_gesturing_ok_medium-dark_skin_tone": "🙆🏾",
"person_gesturing_ok_medium-light_skin_tone": "🙆🏼",
"person_gesturing_ok_medium_skin_tone": "🙆🏽",
"person_getting_haircut": "💇",
"person_getting_haircut_dark_skin_tone": "💇🏿",
"person_getting_haircut_light_skin_tone": "💇🏻",
"person_getting_haircut_medium-dark_skin_tone": "💇🏾",
"person_getting_haircut_medium-light_skin_tone": "💇🏼",
"person_getting_haircut_medium_skin_tone": "💇🏽",
"person_getting_massage": "💆",
"person_getting_massage_dark_skin_tone": "💆🏿",
"person_getting_massage_light_skin_tone": "💆🏻",
"person_getting_massage_medium-dark_skin_tone": "💆🏾",
"person_getting_massage_medium-light_skin_tone": "💆🏼",
"person_getting_massage_medium_skin_tone": "💆🏽",
"person_golfing": "🏌",
"person_golfing_dark_skin_tone": "🏌🏿",
"person_golfing_light_skin_tone": "🏌🏻",
"person_golfing_medium-dark_skin_tone": "🏌🏾",
"person_golfing_medium-light_skin_tone": "🏌🏼",
"person_golfing_medium_skin_tone": "🏌🏽",
"person_in_bed": "🛌",
"person_in_bed_dark_skin_tone": "🛌🏿",
"person_in_bed_light_skin_tone": "🛌🏻",
"person_in_bed_medium-dark_skin_tone": "🛌🏾",
"person_in_bed_medium-light_skin_tone": "🛌🏼",
"person_in_bed_medium_skin_tone": "🛌🏽",
"person_in_lotus_position": "🧘",
"person_in_lotus_position_dark_skin_tone": "🧘🏿",
"person_in_lotus_position_light_skin_tone": "🧘🏻",
"person_in_lotus_position_medium-dark_skin_tone": "🧘🏾",
"person_in_lotus_position_medium-light_skin_tone": "🧘🏼",
"person_in_lotus_position_medium_skin_tone": "🧘🏽",
"person_in_steamy_room": "🧖",
"person_in_steamy_room_dark_skin_tone": "🧖🏿",
"person_in_steamy_room_light_skin_tone": "🧖🏻",
"person_in_steamy_room_medium-dark_skin_tone": "🧖🏾",
"person_in_steamy_room_medium-light_skin_tone": "🧖🏼",
"person_in_steamy_room_medium_skin_tone": "🧖🏽",
"person_juggling": "🤹",
"person_juggling_dark_skin_tone": "🤹🏿",
"person_juggling_light_skin_tone": "🤹🏻",
"person_juggling_medium-dark_skin_tone": "🤹🏾",
"person_juggling_medium-light_skin_tone": "🤹🏼",
"person_juggling_medium_skin_tone": "🤹🏽",
"person_kneeling": "🧎",
"person_lifting_weights": "🏋",
"person_lifting_weights_dark_skin_tone": "🏋🏿",
"person_lifting_weights_light_skin_tone": "🏋🏻",
"person_lifting_weights_medium-dark_skin_tone": "🏋🏾",
"person_lifting_weights_medium-light_skin_tone": "🏋🏼",
"person_lifting_weights_medium_skin_tone": "🏋🏽",
"person_mountain_biking": "🚵",
"person_mountain_biking_dark_skin_tone": "🚵🏿",
"person_mountain_biking_light_skin_tone": "🚵🏻",
"person_mountain_biking_medium-dark_skin_tone": "🚵🏾",
"person_mountain_biking_medium-light_skin_tone": "🚵🏼",
"person_mountain_biking_medium_skin_tone": "🚵🏽",
"person_playing_handball": "🤾",
"person_playing_handball_dark_skin_tone": "🤾🏿",
"person_playing_handball_light_skin_tone": "🤾🏻",
"person_playing_handball_medium-dark_skin_tone": "🤾🏾",
"person_playing_handball_medium-light_skin_tone": "🤾🏼",
"person_playing_handball_medium_skin_tone": "🤾🏽",
"person_playing_water_polo": "🤽",
"person_playing_water_polo_dark_skin_tone": "🤽🏿",
"person_playing_water_polo_light_skin_tone": "🤽🏻",
"person_playing_water_polo_medium-dark_skin_tone": "🤽🏾",
"person_playing_water_polo_medium-light_skin_tone": "🤽🏼",
"person_playing_water_polo_medium_skin_tone": "🤽🏽",
"person_pouting": "🙎",
"person_pouting_dark_skin_tone": "🙎🏿",
"person_pouting_light_skin_tone": "🙎🏻",
"person_pouting_medium-dark_skin_tone": "🙎🏾",
"person_pouting_medium-light_skin_tone": "🙎🏼",
"person_pouting_medium_skin_tone": "🙎🏽",
"person_raising_hand": "🙋",
"person_raising_hand_dark_skin_tone": "🙋🏿",
"person_raising_hand_light_skin_tone": "🙋🏻",
"person_raising_hand_medium-dark_skin_tone": "🙋🏾",
"person_raising_hand_medium-light_skin_tone": "🙋🏼",
"person_raising_hand_medium_skin_tone": "🙋🏽",
"person_rowing_boat": "🚣",
"person_rowing_boat_dark_skin_tone": "🚣🏿",
"person_rowing_boat_light_skin_tone": "🚣🏻",
"person_rowing_boat_medium-dark_skin_tone": "🚣🏾",
"person_rowing_boat_medium-light_skin_tone": "🚣🏼",
"person_rowing_boat_medium_skin_tone": "🚣🏽",
"person_running": "🏃",
"person_running_dark_skin_tone": "🏃🏿",
"person_running_light_skin_tone": "🏃🏻",
"person_running_medium-dark_skin_tone": "🏃🏾",
"person_running_medium-light_skin_tone": "🏃🏼",
"person_running_medium_skin_tone": "🏃🏽",
"person_shrugging": "🤷",
"person_shrugging_dark_skin_tone": "🤷🏿",
"person_shrugging_light_skin_tone": "🤷🏻",
"person_shrugging_medium-dark_skin_tone": "🤷🏾",
"person_shrugging_medium-light_skin_tone": "🤷🏼",
"person_shrugging_medium_skin_tone": "🤷🏽",
"person_standing": "🧍",
"person_surfing": "🏄",
"person_surfing_dark_skin_tone": "🏄🏿",
"person_surfing_light_skin_tone": "🏄🏻",
"person_surfing_medium-dark_skin_tone": "🏄🏾",
"person_surfing_medium-light_skin_tone": "🏄🏼",
"person_surfing_medium_skin_tone": "🏄🏽",
"person_swimming": "🏊",
"person_swimming_dark_skin_tone": "🏊🏿",
"person_swimming_light_skin_tone": "🏊🏻",
"person_swimming_medium-dark_skin_tone": "🏊🏾",
"person_swimming_medium-light_skin_tone": "🏊🏼",
"person_swimming_medium_skin_tone": "🏊🏽",
"person_taking_bath": "🛀",
"person_taking_bath_dark_skin_tone": "🛀🏿",
"person_taking_bath_light_skin_tone": "🛀🏻",
"person_taking_bath_medium-dark_skin_tone": "🛀🏾",
"person_taking_bath_medium-light_skin_tone": "🛀🏼",
"person_taking_bath_medium_skin_tone": "🛀🏽",
"person_tipping_hand": "💁",
"person_tipping_hand_dark_skin_tone": "💁🏿",
"person_tipping_hand_light_skin_tone": "💁🏻",
"person_tipping_hand_medium-dark_skin_tone": "💁🏾",
"person_tipping_hand_medium-light_skin_tone": "💁🏼",
"person_tipping_hand_medium_skin_tone": "💁🏽",
"person_walking": "🚶",
"person_walking_dark_skin_tone": "🚶🏿",
"person_walking_light_skin_tone": "🚶🏻",
"person_walking_medium-dark_skin_tone": "🚶🏾",
"person_walking_medium-light_skin_tone": "🚶🏼",
"person_walking_medium_skin_tone": "🚶🏽",
"person_wearing_turban": "👳",
"person_wearing_turban_dark_skin_tone": "👳🏿",
"person_wearing_turban_light_skin_tone": "👳🏻",
"person_wearing_turban_medium-dark_skin_tone": "👳🏾",
"person_wearing_turban_medium-light_skin_tone": "👳🏼",
"person_wearing_turban_medium_skin_tone": "👳🏽",
"petri_dish": "🧫",
"pick": "⛏",
"pie": "🥧",
"pig": "🐷",
"pig_face": "🐷",
"pig_nose": "🐽",
"pile_of_poo": "💩",
"pill": "💊",
"pinching_hand": "🤏",
"pine_decoration": "🎍",
"pineapple": "🍍",
"ping_pong": "🏓",
"pirate_flag": "🏴\u200d☠️",
"pistol": "🔫",
"pizza": "🍕",
"place_of_worship": "🛐",
"play_button": "▶",
"play_or_pause_button": "⏯",
"pleading_face": "🥺",
"police_car": "🚓",
"police_car_light": "🚨",
"police_officer": "👮",
"police_officer_dark_skin_tone": "👮🏿",
"police_officer_light_skin_tone": "👮🏻",
"police_officer_medium-dark_skin_tone": "👮🏾",
"police_officer_medium-light_skin_tone": "👮🏼",
"police_officer_medium_skin_tone": "👮🏽",
"poodle": "🐩",
"pool_8_ball": "🎱",
"popcorn": "🍿",
"post_office": "🏣",
"postal_horn": "📯",
"postbox": "📮",
"pot_of_food": "🍲",
"potable_water": "🚰",
"potato": "🥔",
"poultry_leg": "🍗",
"pound_banknote": "💷",
"pouting_cat_face": "😾",
"pouting_face": "😡",
"prayer_beads": "📿",
"pregnant_woman": "🤰",
"pregnant_woman_dark_skin_tone": "🤰🏿",
"pregnant_woman_light_skin_tone": "🤰🏻",
"pregnant_woman_medium-dark_skin_tone": "🤰🏾",
"pregnant_woman_medium-light_skin_tone": "🤰🏼",
"pregnant_woman_medium_skin_tone": "🤰🏽",
"pretzel": "🥨",
"probing_cane": "🦯",
"prince": "🤴",
"prince_dark_skin_tone": "🤴🏿",
"prince_light_skin_tone": "🤴🏻",
"prince_medium-dark_skin_tone": "🤴🏾",
"prince_medium-light_skin_tone": "🤴🏼",
"prince_medium_skin_tone": "🤴🏽",
"princess": "👸",
"princess_dark_skin_tone": "👸🏿",
"princess_light_skin_tone": "👸🏻",
"princess_medium-dark_skin_tone": "👸🏾",
"princess_medium-light_skin_tone": "👸🏼",
"princess_medium_skin_tone": "👸🏽",
"printer": "🖨",
"prohibited": "🚫",
"purple_circle": "🟣",
"purple_heart": "💜",
"purple_square": "🟪",
"purse": "👛",
"pushpin": "📌",
"question_mark": "❓",
"rabbit": "🐰",
"rabbit_face": "🐰",
"raccoon": "🦝",
"racing_car": "🏎",
"radio": "📻",
"radio_button": "🔘",
"radioactive": "☢",
"railway_car": "🚃",
"railway_track": "🛤",
"rainbow": "🌈",
"rainbow_flag": "🏳️\u200d🌈",
"raised_back_of_hand": "🤚",
"raised_back_of_hand_dark_skin_tone": "🤚🏿",
"raised_back_of_hand_light_skin_tone": "🤚🏻",
"raised_back_of_hand_medium-dark_skin_tone": "🤚🏾",
"raised_back_of_hand_medium-light_skin_tone": "🤚🏼",
"raised_back_of_hand_medium_skin_tone": "🤚🏽",
"raised_fist": "✊",
"raised_fist_dark_skin_tone": "✊🏿",
"raised_fist_light_skin_tone": "✊🏻",
"raised_fist_medium-dark_skin_tone": "✊🏾",
"raised_fist_medium-light_skin_tone": "✊🏼",
"raised_fist_medium_skin_tone": "✊🏽",
"raised_hand": "✋",
"raised_hand_dark_skin_tone": "✋🏿",
"raised_hand_light_skin_tone": "✋🏻",
"raised_hand_medium-dark_skin_tone": "✋🏾",
"raised_hand_medium-light_skin_tone": "✋🏼",
"raised_hand_medium_skin_tone": "✋🏽",
"raising_hands": "🙌",
"raising_hands_dark_skin_tone": "🙌🏿",
"raising_hands_light_skin_tone": "🙌🏻",
"raising_hands_medium-dark_skin_tone": "🙌🏾",
"raising_hands_medium-light_skin_tone": "🙌🏼",
"raising_hands_medium_skin_tone": "🙌🏽",
"ram": "🐏",
"rat": "🐀",
"razor": "🪒",
"ringed_planet": "🪐",
"receipt": "🧾",
"record_button": "⏺",
"recycling_symbol": "♻",
"red_apple": "🍎",
"red_circle": "🔴",
"red_envelope": "🧧",
"red_hair": "🦰",
"red-haired_man": "👨\u200d🦰",
"red-haired_woman": "👩\u200d🦰",
"red_heart": "❤",
"red_paper_lantern": "🏮",
"red_square": "🟥",
"red_triangle_pointed_down": "🔻",
"red_triangle_pointed_up": "🔺",
"registered": "®",
"relieved_face": "😌",
"reminder_ribbon": "🎗",
"repeat_button": "🔁",
"repeat_single_button": "🔂",
"rescue_worker’s_helmet": "⛑",
"restroom": "🚻",
"reverse_button": "◀",
"revolving_hearts": "💞",
"rhinoceros": "🦏",
"ribbon": "🎀",
"rice_ball": "🍙",
"rice_cracker": "🍘",
"right-facing_fist": "🤜",
"right-facing_fist_dark_skin_tone": "🤜🏿",
"right-facing_fist_light_skin_tone": "🤜🏻",
"right-facing_fist_medium-dark_skin_tone": "🤜🏾",
"right-facing_fist_medium-light_skin_tone": "🤜🏼",
"right-facing_fist_medium_skin_tone": "🤜🏽",
"right_anger_bubble": "🗯",
"right_arrow": "➡",
"right_arrow_curving_down": "⤵",
"right_arrow_curving_left": "↩",
"right_arrow_curving_up": "⤴",
"ring": "💍",
"roasted_sweet_potato": "🍠",
"robot_face": "🤖",
"rocket": "🚀",
"roll_of_paper": "🧻",
"rolled-up_newspaper": "🗞",
"roller_coaster": "🎢",
"rolling_on_the_floor_laughing": "🤣",
"rooster": "🐓",
"rose": "🌹",
"rosette": "🏵",
"round_pushpin": "📍",
"rugby_football": "🏉",
"running_shirt": "🎽",
"running_shoe": "👟",
"sad_but_relieved_face": "😥",
"safety_pin": "🧷",
"safety_vest": "🦺",
"salt": "🧂",
"sailboat": "⛵",
"sake": "🍶",
"sandwich": "🥪",
"sari": "🥻",
"satellite": "📡",
"satellite_antenna": "📡",
"sauropod": "🦕",
"saxophone": "🎷",
"scarf": "🧣",
"school": "🏫",
"school_backpack": "🎒",
"scissors": "✂",
"scorpion": "🦂",
"scroll": "📜",
"seat": "💺",
"see-no-evil_monkey": "🙈",
"seedling": "🌱",
"selfie": "🤳",
"selfie_dark_skin_tone": "🤳🏿",
"selfie_light_skin_tone": "🤳🏻",
"selfie_medium-dark_skin_tone": "🤳🏾",
"selfie_medium-light_skin_tone": "🤳🏼",
"selfie_medium_skin_tone": "🤳🏽",
"service_dog": "🐕\u200d🦺",
"seven-thirty": "🕢",
"seven_o’clock": "🕖",
"shallow_pan_of_food": "🥘",
"shamrock": "☘",
"shark": "🦈",
"shaved_ice": "🍧",
"sheaf_of_rice": "🌾",
"shield": "🛡",
"shinto_shrine": "⛩",
"ship": "🚢",
"shooting_star": "🌠",
"shopping_bags": "🛍",
"shopping_cart": "🛒",
"shortcake": "🍰",
"shorts": "🩳",
"shower": "🚿",
"shrimp": "🦐",
"shuffle_tracks_button": "🔀",
"shushing_face": "🤫",
"sign_of_the_horns": "🤘",
"sign_of_the_horns_dark_skin_tone": "🤘🏿",
"sign_of_the_horns_light_skin_tone": "🤘🏻",
"sign_of_the_horns_medium-dark_skin_tone": "🤘🏾",
"sign_of_the_horns_medium-light_skin_tone": "🤘🏼",
"sign_of_the_horns_medium_skin_tone": "🤘🏽",
"six-thirty": "🕡",
"six_o’clock": "🕕",
"skateboard": "🛹",
"skier": "⛷",
"skis": "🎿",
"skull": "💀",
"skull_and_crossbones": "☠",
"skunk": "🦨",
"sled": "🛷",
"sleeping_face": "😴",
"sleepy_face": "😪",
"slightly_frowning_face": "🙁",
"slightly_smiling_face": "🙂",
"slot_machine": "🎰",
"sloth": "🦥",
"small_airplane": "🛩",
"small_blue_diamond": "🔹",
"small_orange_diamond": "🔸",
"smiling_cat_face_with_heart-eyes": "😻",
"smiling_face": "☺",
"smiling_face_with_halo": "😇",
"smiling_face_with_3_hearts": "🥰",
"smiling_face_with_heart-eyes": "😍",
"smiling_face_with_horns": "😈",
"smiling_face_with_smiling_eyes": "😊",
"smiling_face_with_sunglasses": "😎",
"smirking_face": "😏",
"snail": "🐌",
"snake": "🐍",
"sneezing_face": "🤧",
"snow-capped_mountain": "🏔",
"snowboarder": "🏂",
"snowboarder_dark_skin_tone": "🏂🏿",
"snowboarder_light_skin_tone": "🏂🏻",
"snowboarder_medium-dark_skin_tone": "🏂🏾",
"snowboarder_medium-light_skin_tone": "🏂🏼",
"snowboarder_medium_skin_tone": "🏂🏽",
"snowflake": "❄",
"snowman": "☃",
"snowman_without_snow": "⛄",
"soap": "🧼",
"soccer_ball": "⚽",
"socks": "🧦",
"softball": "🥎",
"soft_ice_cream": "🍦",
"spade_suit": "♠",
"spaghetti": "🍝",
"sparkle": "❇",
"sparkler": "🎇",
"sparkles": "✨",
"sparkling_heart": "💖",
"speak-no-evil_monkey": "🙊",
"speaker_high_volume": "🔊",
"speaker_low_volume": "🔈",
"speaker_medium_volume": "🔉",
"speaking_head": "🗣",
"speech_balloon": "💬",
"speedboat": "🚤",
"spider": "🕷",
"spider_web": "🕸",
"spiral_calendar": "🗓",
"spiral_notepad": "🗒",
"spiral_shell": "🐚",
"spoon": "🥄",
"sponge": "🧽",
"sport_utility_vehicle": "🚙",
"sports_medal": "🏅",
"spouting_whale": "🐳",
"squid": "🦑",
"squinting_face_with_tongue": "😝",
"stadium": "🏟",
"star-struck": "🤩",
"star_and_crescent": "☪",
"star_of_david": "✡",
"station": "🚉",
"steaming_bowl": "🍜",
"stethoscope": "🩺",
"stop_button": "⏹",
"stop_sign": "🛑",
"stopwatch": "⏱",
"straight_ruler": "📏",
"strawberry": "🍓",
"studio_microphone": "🎙",
"stuffed_flatbread": "🥙",
"sun": "☀",
"sun_behind_cloud": "⛅",
"sun_behind_large_cloud": "🌥",
"sun_behind_rain_cloud": "🌦",
"sun_behind_small_cloud": "🌤",
"sun_with_face": "🌞",
"sunflower": "🌻",
"sunglasses": "😎",
"sunrise": "🌅",
"sunrise_over_mountains": "🌄",
"sunset": "🌇",
"superhero": "🦸",
"supervillain": "🦹",
"sushi": "🍣",
"suspension_railway": "🚟",
"swan": "🦢",
"sweat_droplets": "💦",
"synagogue": "🕍",
"syringe": "💉",
"t-shirt": "👕",
"taco": "🌮",
"takeout_box": "🥡",
"tanabata_tree": "🎋",
"tangerine": "🍊",
"taxi": "🚕",
"teacup_without_handle": "🍵",
"tear-off_calendar": "📆",
"teddy_bear": "🧸",
"telephone": "☎",
"telephone_receiver": "📞",
"telescope": "🔭",
"television": "📺",
"ten-thirty": "🕥",
"ten_o’clock": "🕙",
"tennis": "🎾",
"tent": "⛺",
"test_tube": "🧪",
"thermometer": "🌡",
"thinking_face": "🤔",
"thought_balloon": "💭",
"thread": "🧵",
"three-thirty": "🕞",
"three_o’clock": "🕒",
"thumbs_down": "👎",
"thumbs_down_dark_skin_tone": "👎🏿",
"thumbs_down_light_skin_tone": "👎🏻",
"thumbs_down_medium-dark_skin_tone": "👎🏾",
"thumbs_down_medium-light_skin_tone": "👎🏼",
"thumbs_down_medium_skin_tone": "👎🏽",
"thumbs_up": "👍",
"thumbs_up_dark_skin_tone": "👍🏿",
"thumbs_up_light_skin_tone": "👍🏻",
"thumbs_up_medium-dark_skin_tone": "👍🏾",
"thumbs_up_medium-light_skin_tone": "👍🏼",
"thumbs_up_medium_skin_tone": "👍🏽",
"ticket": "🎫",
"tiger": "🐯",
"tiger_face": "🐯",
"timer_clock": "⏲",
"tired_face": "😫",
"toolbox": "🧰",
"toilet": "🚽",
"tomato": "🍅",
"tongue": "👅",
"tooth": "🦷",
"top_hat": "🎩",
"tornado": "🌪",
"trackball": "🖲",
"tractor": "🚜",
"trade_mark": "™",
"train": "🚋",
"tram": "🚊",
"tram_car": "🚋",
"triangular_flag": "🚩",
"triangular_ruler": "📐",
"trident_emblem": "🔱",
"trolleybus": "🚎",
"trophy": "🏆",
"tropical_drink": "🍹",
"tropical_fish": "🐠",
"trumpet": "🎺",
"tulip": "🌷",
"tumbler_glass": "🥃",
"turtle": "🐢",
"twelve-thirty": "🕧",
"twelve_o’clock": "🕛",
"two-hump_camel": "🐫",
"two-thirty": "🕝",
"two_hearts": "💕",
"two_men_holding_hands": "👬",
"two_o’clock": "🕑",
"two_women_holding_hands": "👭",
"umbrella": "☂",
"umbrella_on_ground": "⛱",
"umbrella_with_rain_drops": "☔",
"unamused_face": "😒",
"unicorn_face": "🦄",
"unlocked": "🔓",
"up-down_arrow": "↕",
"up-left_arrow": "↖",
"up-right_arrow": "↗",
"up_arrow": "⬆",
"upside-down_face": "🙃",
"upwards_button": "🔼",
"vampire": "🧛",
"vampire_dark_skin_tone": "🧛🏿",
"vampire_light_skin_tone": "🧛🏻",
"vampire_medium-dark_skin_tone": "🧛🏾",
"vampire_medium-light_skin_tone": "🧛🏼",
"vampire_medium_skin_tone": "🧛🏽",
"vertical_traffic_light": "🚦",
"vibration_mode": "📳",
"victory_hand": "✌",
"victory_hand_dark_skin_tone": "✌🏿",
"victory_hand_light_skin_tone": "✌🏻",
"victory_hand_medium-dark_skin_tone": "✌🏾",
"victory_hand_medium-light_skin_tone": "✌🏼",
"victory_hand_medium_skin_tone": "✌🏽",
"video_camera": "📹",
"video_game": "🎮",
"videocassette": "📼",
"violin": "🎻",
"volcano": "🌋",
"volleyball": "🏐",
"vulcan_salute": "🖖",
"vulcan_salute_dark_skin_tone": "🖖🏿",
"vulcan_salute_light_skin_tone": "🖖🏻",
"vulcan_salute_medium-dark_skin_tone": "🖖🏾",
"vulcan_salute_medium-light_skin_tone": "🖖🏼",
"vulcan_salute_medium_skin_tone": "🖖🏽",
"waffle": "🧇",
"waning_crescent_moon": "🌘",
"waning_gibbous_moon": "🌖",
"warning": "⚠",
"wastebasket": "🗑",
"watch": "⌚",
"water_buffalo": "🐃",
"water_closet": "🚾",
"water_wave": "🌊",
"watermelon": "🍉",
"waving_hand": "👋",
"waving_hand_dark_skin_tone": "👋🏿",
"waving_hand_light_skin_tone": "👋🏻",
"waving_hand_medium-dark_skin_tone": "👋🏾",
"waving_hand_medium-light_skin_tone": "👋🏼",
"waving_hand_medium_skin_tone": "👋🏽",
"wavy_dash": "〰",
"waxing_crescent_moon": "🌒",
"waxing_gibbous_moon": "🌔",
"weary_cat_face": "🙀",
"weary_face": "😩",
"wedding": "💒",
"whale": "🐳",
"wheel_of_dharma": "☸",
"wheelchair_symbol": "♿",
"white_circle": "⚪",
"white_exclamation_mark": "❕",
"white_flag": "🏳",
"white_flower": "💮",
"white_hair": "🦳",
"white-haired_man": "👨\u200d🦳",
"white-haired_woman": "👩\u200d🦳",
"white_heart": "🤍",
"white_heavy_check_mark": "✅",
"white_large_square": "⬜",
"white_medium-small_square": "◽",
"white_medium_square": "◻",
"white_medium_star": "⭐",
"white_question_mark": "❔",
"white_small_square": "▫",
"white_square_button": "🔳",
"wilted_flower": "🥀",
"wind_chime": "🎐",
"wind_face": "🌬",
"wine_glass": "🍷",
"winking_face": "😉",
"winking_face_with_tongue": "😜",
"wolf_face": "🐺",
"woman": "👩",
"woman_artist": "👩\u200d🎨",
"woman_artist_dark_skin_tone": "👩🏿\u200d🎨",
"woman_artist_light_skin_tone": "👩🏻\u200d🎨",
"woman_artist_medium-dark_skin_tone": "👩🏾\u200d🎨",
"woman_artist_medium-light_skin_tone": "👩🏼\u200d🎨",
"woman_artist_medium_skin_tone": "👩🏽\u200d🎨",
"woman_astronaut": "👩\u200d🚀",
"woman_astronaut_dark_skin_tone": "👩🏿\u200d🚀",
"woman_astronaut_light_skin_tone": "👩🏻\u200d🚀",
"woman_astronaut_medium-dark_skin_tone": "👩🏾\u200d🚀",
"woman_astronaut_medium-light_skin_tone": "👩🏼\u200d🚀",
"woman_astronaut_medium_skin_tone": "👩🏽\u200d🚀",
"woman_biking": "🚴\u200d♀️",
"woman_biking_dark_skin_tone": "🚴🏿\u200d♀️",
"woman_biking_light_skin_tone": "🚴🏻\u200d♀️",
"woman_biking_medium-dark_skin_tone": "🚴🏾\u200d♀️",
"woman_biking_medium-light_skin_tone": "🚴🏼\u200d♀️",
"woman_biking_medium_skin_tone": "🚴🏽\u200d♀️",
"woman_bouncing_ball": "⛹️\u200d♀️",
"woman_bouncing_ball_dark_skin_tone": "⛹🏿\u200d♀️",
"woman_bouncing_ball_light_skin_tone": "⛹🏻\u200d♀️",
"woman_bouncing_ball_medium-dark_skin_tone": "⛹🏾\u200d♀️",
"woman_bouncing_ball_medium-light_skin_tone": "⛹🏼\u200d♀️",
"woman_bouncing_ball_medium_skin_tone": "⛹🏽\u200d♀️",
"woman_bowing": "🙇\u200d♀️",
"woman_bowing_dark_skin_tone": "🙇🏿\u200d♀️",
"woman_bowing_light_skin_tone": "🙇🏻\u200d♀️",
"woman_bowing_medium-dark_skin_tone": "🙇🏾\u200d♀️",
"woman_bowing_medium-light_skin_tone": "🙇🏼\u200d♀️",
"woman_bowing_medium_skin_tone": "🙇🏽\u200d♀️",
"woman_cartwheeling": "🤸\u200d♀️",
"woman_cartwheeling_dark_skin_tone": "🤸🏿\u200d♀️",
"woman_cartwheeling_light_skin_tone": "🤸🏻\u200d♀️",
"woman_cartwheeling_medium-dark_skin_tone": "🤸🏾\u200d♀️",
"woman_cartwheeling_medium-light_skin_tone": "🤸🏼\u200d♀️",
"woman_cartwheeling_medium_skin_tone": "🤸🏽\u200d♀️",
"woman_climbing": "🧗\u200d♀️",
"woman_climbing_dark_skin_tone": "🧗🏿\u200d♀️",
"woman_climbing_light_skin_tone": "🧗🏻\u200d♀️",
"woman_climbing_medium-dark_skin_tone": "🧗🏾\u200d♀️",
"woman_climbing_medium-light_skin_tone": "🧗🏼\u200d♀️",
"woman_climbing_medium_skin_tone": "🧗🏽\u200d♀️",
"woman_construction_worker": "👷\u200d♀️",
"woman_construction_worker_dark_skin_tone": "👷🏿\u200d♀️",
"woman_construction_worker_light_skin_tone": "👷🏻\u200d♀️",
"woman_construction_worker_medium-dark_skin_tone": "👷🏾\u200d♀️",
"woman_construction_worker_medium-light_skin_tone": "👷🏼\u200d♀️",
"woman_construction_worker_medium_skin_tone": "👷🏽\u200d♀️",
"woman_cook": "👩\u200d🍳",
"woman_cook_dark_skin_tone": "👩🏿\u200d🍳",
"woman_cook_light_skin_tone": "👩🏻\u200d🍳",
"woman_cook_medium-dark_skin_tone": "👩🏾\u200d🍳",
"woman_cook_medium-light_skin_tone": "👩🏼\u200d🍳",
"woman_cook_medium_skin_tone": "👩🏽\u200d🍳",
"woman_dancing": "💃",
"woman_dancing_dark_skin_tone": "💃🏿",
"woman_dancing_light_skin_tone": "💃🏻",
"woman_dancing_medium-dark_skin_tone": "💃🏾",
"woman_dancing_medium-light_skin_tone": "💃🏼",
"woman_dancing_medium_skin_tone": "💃🏽",
"woman_dark_skin_tone": "👩🏿",
"woman_detective": "🕵️\u200d♀️",
"woman_detective_dark_skin_tone": "🕵🏿\u200d♀️",
"woman_detective_light_skin_tone": "🕵🏻\u200d♀️",
"woman_detective_medium-dark_skin_tone": "🕵🏾\u200d♀️",
"woman_detective_medium-light_skin_tone": "🕵🏼\u200d♀️",
"woman_detective_medium_skin_tone": "🕵🏽\u200d♀️",
"woman_elf": "🧝\u200d♀️",
"woman_elf_dark_skin_tone": "🧝🏿\u200d♀️",
"woman_elf_light_skin_tone": "🧝🏻\u200d♀️",
"woman_elf_medium-dark_skin_tone": "🧝🏾\u200d♀️",
"woman_elf_medium-light_skin_tone": "🧝🏼\u200d♀️",
"woman_elf_medium_skin_tone": "🧝🏽\u200d♀️",
"woman_facepalming": "🤦\u200d♀️",
"woman_facepalming_dark_skin_tone": "🤦🏿\u200d♀️",
"woman_facepalming_light_skin_tone": "🤦🏻\u200d♀️",
"woman_facepalming_medium-dark_skin_tone": "🤦🏾\u200d♀️",
"woman_facepalming_medium-light_skin_tone": "🤦🏼\u200d♀️",
"woman_facepalming_medium_skin_tone": "🤦🏽\u200d♀️",
"woman_factory_worker": "👩\u200d🏭",
"woman_factory_worker_dark_skin_tone": "👩🏿\u200d🏭",
"woman_factory_worker_light_skin_tone": "👩🏻\u200d🏭",
"woman_factory_worker_medium-dark_skin_tone": "👩🏾\u200d🏭",
"woman_factory_worker_medium-light_skin_tone": "👩🏼\u200d🏭",
"woman_factory_worker_medium_skin_tone": "👩🏽\u200d🏭",
"woman_fairy": "🧚\u200d♀️",
"woman_fairy_dark_skin_tone": "🧚🏿\u200d♀️",
"woman_fairy_light_skin_tone": "🧚🏻\u200d♀️",
"woman_fairy_medium-dark_skin_tone": "🧚🏾\u200d♀️",
"woman_fairy_medium-light_skin_tone": "🧚🏼\u200d♀️",
"woman_fairy_medium_skin_tone": "🧚🏽\u200d♀️",
"woman_farmer": "👩\u200d🌾",
"woman_farmer_dark_skin_tone": "👩🏿\u200d🌾",
"woman_farmer_light_skin_tone": "👩🏻\u200d🌾",
"woman_farmer_medium-dark_skin_tone": "👩🏾\u200d🌾",
"woman_farmer_medium-light_skin_tone": "👩🏼\u200d🌾",
"woman_farmer_medium_skin_tone": "👩🏽\u200d🌾",
"woman_firefighter": "👩\u200d🚒",
"woman_firefighter_dark_skin_tone": "👩🏿\u200d🚒",
"woman_firefighter_light_skin_tone": "👩🏻\u200d🚒",
"woman_firefighter_medium-dark_skin_tone": "👩🏾\u200d🚒",
"woman_firefighter_medium-light_skin_tone": "👩🏼\u200d🚒",
"woman_firefighter_medium_skin_tone": "👩🏽\u200d🚒",
"woman_frowning": "🙍\u200d♀️",
"woman_frowning_dark_skin_tone": "🙍🏿\u200d♀️",
"woman_frowning_light_skin_tone": "🙍🏻\u200d♀️",
"woman_frowning_medium-dark_skin_tone": "🙍🏾\u200d♀️",
"woman_frowning_medium-light_skin_tone": "🙍🏼\u200d♀️",
"woman_frowning_medium_skin_tone": "🙍🏽\u200d♀️",
"woman_genie": "🧞\u200d♀️",
"woman_gesturing_no": "🙅\u200d♀️",
"woman_gesturing_no_dark_skin_tone": "🙅🏿\u200d♀️",
"woman_gesturing_no_light_skin_tone": "🙅🏻\u200d♀️",
"woman_gesturing_no_medium-dark_skin_tone": "🙅🏾\u200d♀️",
"woman_gesturing_no_medium-light_skin_tone": "🙅🏼\u200d♀️",
"woman_gesturing_no_medium_skin_tone": "🙅🏽\u200d♀️",
"woman_gesturing_ok": "🙆\u200d♀️",
"woman_gesturing_ok_dark_skin_tone": "🙆🏿\u200d♀️",
"woman_gesturing_ok_light_skin_tone": "🙆🏻\u200d♀️",
"woman_gesturing_ok_medium-dark_skin_tone": "🙆🏾\u200d♀️",
"woman_gesturing_ok_medium-light_skin_tone": "🙆🏼\u200d♀️",
"woman_gesturing_ok_medium_skin_tone": "🙆🏽\u200d♀️",
"woman_getting_haircut": "💇\u200d♀️",
"woman_getting_haircut_dark_skin_tone": "💇🏿\u200d♀️",
"woman_getting_haircut_light_skin_tone": "💇🏻\u200d♀️",
"woman_getting_haircut_medium-dark_skin_tone": "💇🏾\u200d♀️",
"woman_getting_haircut_medium-light_skin_tone": "💇🏼\u200d♀️",
"woman_getting_haircut_medium_skin_tone": "💇🏽\u200d♀️",
"woman_getting_massage": "💆\u200d♀️",
"woman_getting_massage_dark_skin_tone": "💆🏿\u200d♀️",
"woman_getting_massage_light_skin_tone": "💆🏻\u200d♀️",
"woman_getting_massage_medium-dark_skin_tone": "💆🏾\u200d♀️",
"woman_getting_massage_medium-light_skin_tone": "💆🏼\u200d♀️",
"woman_getting_massage_medium_skin_tone": "💆🏽\u200d♀️",
"woman_golfing": "🏌️\u200d♀️",
"woman_golfing_dark_skin_tone": "🏌🏿\u200d♀️",
"woman_golfing_light_skin_tone": "🏌🏻\u200d♀️",
"woman_golfing_medium-dark_skin_tone": "🏌🏾\u200d♀️",
"woman_golfing_medium-light_skin_tone": "🏌🏼\u200d♀️",
"woman_golfing_medium_skin_tone": "🏌🏽\u200d♀️",
"woman_guard": "💂\u200d♀️",
"woman_guard_dark_skin_tone": "💂🏿\u200d♀️",
"woman_guard_light_skin_tone": "💂🏻\u200d♀️",
"woman_guard_medium-dark_skin_tone": "💂🏾\u200d♀️",
"woman_guard_medium-light_skin_tone": "💂🏼\u200d♀️",
"woman_guard_medium_skin_tone": "💂🏽\u200d♀️",
"woman_health_worker": "👩\u200d⚕️",
"woman_health_worker_dark_skin_tone": "👩🏿\u200d⚕️",
"woman_health_worker_light_skin_tone": "👩🏻\u200d⚕️",
"woman_health_worker_medium-dark_skin_tone": "👩🏾\u200d⚕️",
"woman_health_worker_medium-light_skin_tone": "👩🏼\u200d⚕️",
"woman_health_worker_medium_skin_tone": "👩🏽\u200d⚕️",
"woman_in_lotus_position": "🧘\u200d♀️",
"woman_in_lotus_position_dark_skin_tone": "🧘🏿\u200d♀️",
"woman_in_lotus_position_light_skin_tone": "🧘🏻\u200d♀️",
"woman_in_lotus_position_medium-dark_skin_tone": "🧘🏾\u200d♀️",
"woman_in_lotus_position_medium-light_skin_tone": "🧘🏼\u200d♀️",
"woman_in_lotus_position_medium_skin_tone": "🧘🏽\u200d♀️",
"woman_in_manual_wheelchair": "👩\u200d🦽",
"woman_in_motorized_wheelchair": "👩\u200d🦼",
"woman_in_steamy_room": "🧖\u200d♀️",
"woman_in_steamy_room_dark_skin_tone": "🧖🏿\u200d♀️",
"woman_in_steamy_room_light_skin_tone": "🧖🏻\u200d♀️",
"woman_in_steamy_room_medium-dark_skin_tone": "🧖🏾\u200d♀️",
"woman_in_steamy_room_medium-light_skin_tone": "🧖🏼\u200d♀️",
"woman_in_steamy_room_medium_skin_tone": "🧖🏽\u200d♀️",
"woman_judge": "👩\u200d⚖️",
"woman_judge_dark_skin_tone": "👩🏿\u200d⚖️",
"woman_judge_light_skin_tone": "👩🏻\u200d⚖️",
"woman_judge_medium-dark_skin_tone": "👩🏾\u200d⚖️",
"woman_judge_medium-light_skin_tone": "👩🏼\u200d⚖️",
"woman_judge_medium_skin_tone": "👩🏽\u200d⚖️",
"woman_juggling": "🤹\u200d♀️",
"woman_juggling_dark_skin_tone": "🤹🏿\u200d♀️",
"woman_juggling_light_skin_tone": "🤹🏻\u200d♀️",
"woman_juggling_medium-dark_skin_tone": "🤹🏾\u200d♀️",
"woman_juggling_medium-light_skin_tone": "🤹🏼\u200d♀️",
"woman_juggling_medium_skin_tone": "🤹🏽\u200d♀️",
"woman_lifting_weights": "🏋️\u200d♀️",
"woman_lifting_weights_dark_skin_tone": "🏋🏿\u200d♀️",
"woman_lifting_weights_light_skin_tone": "🏋🏻\u200d♀️",
"woman_lifting_weights_medium-dark_skin_tone": "🏋🏾\u200d♀️",
"woman_lifting_weights_medium-light_skin_tone": "🏋🏼\u200d♀️",
"woman_lifting_weights_medium_skin_tone": "🏋🏽\u200d♀️",
"woman_light_skin_tone": "👩🏻",
"woman_mage": "🧙\u200d♀️",
"woman_mage_dark_skin_tone": "🧙🏿\u200d♀️",
"woman_mage_light_skin_tone": "🧙🏻\u200d♀️",
"woman_mage_medium-dark_skin_tone": "🧙🏾\u200d♀️",
"woman_mage_medium-light_skin_tone": "🧙🏼\u200d♀️",
"woman_mage_medium_skin_tone": "🧙🏽\u200d♀️",
"woman_mechanic": "👩\u200d🔧",
"woman_mechanic_dark_skin_tone": "👩🏿\u200d🔧",
"woman_mechanic_light_skin_tone": "👩🏻\u200d🔧",
"woman_mechanic_medium-dark_skin_tone": "👩🏾\u200d🔧",
"woman_mechanic_medium-light_skin_tone": "👩🏼\u200d🔧",
"woman_mechanic_medium_skin_tone": "👩🏽\u200d🔧",
"woman_medium-dark_skin_tone": "👩🏾",
"woman_medium-light_skin_tone": "👩🏼",
"woman_medium_skin_tone": "👩🏽",
"woman_mountain_biking": "🚵\u200d♀️",
"woman_mountain_biking_dark_skin_tone": "🚵🏿\u200d♀️",
"woman_mountain_biking_light_skin_tone": "🚵🏻\u200d♀️",
"woman_mountain_biking_medium-dark_skin_tone": "🚵🏾\u200d♀️",
"woman_mountain_biking_medium-light_skin_tone": "🚵🏼\u200d♀️",
"woman_mountain_biking_medium_skin_tone": "🚵🏽\u200d♀️",
"woman_office_worker": "👩\u200d💼",
"woman_office_worker_dark_skin_tone": "👩🏿\u200d💼",
"woman_office_worker_light_skin_tone": "👩🏻\u200d💼",
"woman_office_worker_medium-dark_skin_tone": "👩🏾\u200d💼",
"woman_office_worker_medium-light_skin_tone": "👩🏼\u200d💼",
"woman_office_worker_medium_skin_tone": "👩🏽\u200d💼",
"woman_pilot": "👩\u200d✈️",
"woman_pilot_dark_skin_tone": "👩🏿\u200d✈️",
"woman_pilot_light_skin_tone": "👩🏻\u200d✈️",
"woman_pilot_medium-dark_skin_tone": "👩🏾\u200d✈️",
"woman_pilot_medium-light_skin_tone": "👩🏼\u200d✈️",
"woman_pilot_medium_skin_tone": "👩🏽\u200d✈️",
"woman_playing_handball": "🤾\u200d♀️",
"woman_playing_handball_dark_skin_tone": "🤾🏿\u200d♀️",
"woman_playing_handball_light_skin_tone": "🤾🏻\u200d♀️",
"woman_playing_handball_medium-dark_skin_tone": "🤾🏾\u200d♀️",
"woman_playing_handball_medium-light_skin_tone": "🤾🏼\u200d♀️",
"woman_playing_handball_medium_skin_tone": "🤾🏽\u200d♀️",
"woman_playing_water_polo": "🤽\u200d♀️",
"woman_playing_water_polo_dark_skin_tone": "🤽🏿\u200d♀️",
"woman_playing_water_polo_light_skin_tone": "🤽🏻\u200d♀️",
"woman_playing_water_polo_medium-dark_skin_tone": "🤽🏾\u200d♀️",
"woman_playing_water_polo_medium-light_skin_tone": "🤽🏼\u200d♀️",
"woman_playing_water_polo_medium_skin_tone": "🤽🏽\u200d♀️",
"woman_police_officer": "👮\u200d♀️",
"woman_police_officer_dark_skin_tone": "👮🏿\u200d♀️",
"woman_police_officer_light_skin_tone": "👮🏻\u200d♀️",
"woman_police_officer_medium-dark_skin_tone": "👮🏾\u200d♀️",
"woman_police_officer_medium-light_skin_tone": "👮🏼\u200d♀️",
"woman_police_officer_medium_skin_tone": "👮🏽\u200d♀️",
"woman_pouting": "🙎\u200d♀️",
"woman_pouting_dark_skin_tone": "🙎🏿\u200d♀️",
"woman_pouting_light_skin_tone": "🙎🏻\u200d♀️",
"woman_pouting_medium-dark_skin_tone": "🙎🏾\u200d♀️",
"woman_pouting_medium-light_skin_tone": "🙎🏼\u200d♀️",
"woman_pouting_medium_skin_tone": "🙎🏽\u200d♀️",
"woman_raising_hand": "🙋\u200d♀️",
"woman_raising_hand_dark_skin_tone": "🙋🏿\u200d♀️",
"woman_raising_hand_light_skin_tone": "🙋🏻\u200d♀️",
"woman_raising_hand_medium-dark_skin_tone": "🙋🏾\u200d♀️",
"woman_raising_hand_medium-light_skin_tone": "🙋🏼\u200d♀️",
"woman_raising_hand_medium_skin_tone": "🙋🏽\u200d♀️",
"woman_rowing_boat": "🚣\u200d♀️",
"woman_rowing_boat_dark_skin_tone": "🚣🏿\u200d♀️",
"woman_rowing_boat_light_skin_tone": "🚣🏻\u200d♀️",
"woman_rowing_boat_medium-dark_skin_tone": "🚣🏾\u200d♀️",
"woman_rowing_boat_medium-light_skin_tone": "🚣🏼\u200d♀️",
"woman_rowing_boat_medium_skin_tone": "🚣🏽\u200d♀️",
"woman_running": "🏃\u200d♀️",
"woman_running_dark_skin_tone": "🏃🏿\u200d♀️",
"woman_running_light_skin_tone": "🏃🏻\u200d♀️",
"woman_running_medium-dark_skin_tone": "🏃🏾\u200d♀️",
"woman_running_medium-light_skin_tone": "🏃🏼\u200d♀️",
"woman_running_medium_skin_tone": "🏃🏽\u200d♀️",
"woman_scientist": "👩\u200d🔬",
"woman_scientist_dark_skin_tone": "👩🏿\u200d🔬",
"woman_scientist_light_skin_tone": "👩🏻\u200d🔬",
"woman_scientist_medium-dark_skin_tone": "👩🏾\u200d🔬",
"woman_scientist_medium-light_skin_tone": "👩🏼\u200d🔬",
"woman_scientist_medium_skin_tone": "👩🏽\u200d🔬",
"woman_shrugging": "🤷\u200d♀️",
"woman_shrugging_dark_skin_tone": "🤷🏿\u200d♀️",
"woman_shrugging_light_skin_tone": "🤷🏻\u200d♀️",
"woman_shrugging_medium-dark_skin_tone": "🤷🏾\u200d♀️",
"woman_shrugging_medium-light_skin_tone": "🤷🏼\u200d♀️",
"woman_shrugging_medium_skin_tone": "🤷🏽\u200d♀️",
"woman_singer": "👩\u200d🎤",
"woman_singer_dark_skin_tone": "👩🏿\u200d🎤",
"woman_singer_light_skin_tone": "👩🏻\u200d🎤",
"woman_singer_medium-dark_skin_tone": "👩🏾\u200d🎤",
"woman_singer_medium-light_skin_tone": "👩🏼\u200d🎤",
"woman_singer_medium_skin_tone": "👩🏽\u200d🎤",
"woman_student": "👩\u200d🎓",
"woman_student_dark_skin_tone": "👩🏿\u200d🎓",
"woman_student_light_skin_tone": "👩🏻\u200d🎓",
"woman_student_medium-dark_skin_tone": "👩🏾\u200d🎓",
"woman_student_medium-light_skin_tone": "👩🏼\u200d🎓",
"woman_student_medium_skin_tone": "👩🏽\u200d🎓",
"woman_surfing": "🏄\u200d♀️",
"woman_surfing_dark_skin_tone": "🏄🏿\u200d♀️",
"woman_surfing_light_skin_tone": "🏄🏻\u200d♀️",
"woman_surfing_medium-dark_skin_tone": "🏄🏾\u200d♀️",
"woman_surfing_medium-light_skin_tone": "🏄🏼\u200d♀️",
"woman_surfing_medium_skin_tone": "🏄🏽\u200d♀️",
"woman_swimming": "🏊\u200d♀️",
"woman_swimming_dark_skin_tone": "🏊🏿\u200d♀️",
"woman_swimming_light_skin_tone": "🏊🏻\u200d♀️",
"woman_swimming_medium-dark_skin_tone": "🏊🏾\u200d♀️",
"woman_swimming_medium-light_skin_tone": "🏊🏼\u200d♀️",
"woman_swimming_medium_skin_tone": "🏊🏽\u200d♀️",
"woman_teacher": "👩\u200d🏫",
"woman_teacher_dark_skin_tone": "👩🏿\u200d🏫",
"woman_teacher_light_skin_tone": "👩🏻\u200d🏫",
"woman_teacher_medium-dark_skin_tone": "👩🏾\u200d🏫",
"woman_teacher_medium-light_skin_tone": "👩🏼\u200d🏫",
"woman_teacher_medium_skin_tone": "👩🏽\u200d🏫",
"woman_technologist": "👩\u200d💻",
"woman_technologist_dark_skin_tone": "👩🏿\u200d💻",
"woman_technologist_light_skin_tone": "👩🏻\u200d💻",
"woman_technologist_medium-dark_skin_tone": "👩🏾\u200d💻",
"woman_technologist_medium-light_skin_tone": "👩🏼\u200d💻",
"woman_technologist_medium_skin_tone": "👩🏽\u200d💻",
"woman_tipping_hand": "💁\u200d♀️",
"woman_tipping_hand_dark_skin_tone": "💁🏿\u200d♀️",
"woman_tipping_hand_light_skin_tone": "💁🏻\u200d♀️",
"woman_tipping_hand_medium-dark_skin_tone": "💁🏾\u200d♀️",
"woman_tipping_hand_medium-light_skin_tone": "💁🏼\u200d♀️",
"woman_tipping_hand_medium_skin_tone": "💁🏽\u200d♀️",
"woman_vampire": "🧛\u200d♀️",
"woman_vampire_dark_skin_tone": "🧛🏿\u200d♀️",
"woman_vampire_light_skin_tone": "🧛🏻\u200d♀️",
"woman_vampire_medium-dark_skin_tone": "🧛🏾\u200d♀️",
"woman_vampire_medium-light_skin_tone": "🧛🏼\u200d♀️",
"woman_vampire_medium_skin_tone": "🧛🏽\u200d♀️",
"woman_walking": "🚶\u200d♀️",
"woman_walking_dark_skin_tone": "🚶🏿\u200d♀️",
"woman_walking_light_skin_tone": "🚶🏻\u200d♀️",
"woman_walking_medium-dark_skin_tone": "🚶🏾\u200d♀️",
"woman_walking_medium-light_skin_tone": "🚶🏼\u200d♀️",
"woman_walking_medium_skin_tone": "🚶🏽\u200d♀️",
"woman_wearing_turban": "👳\u200d♀️",
"woman_wearing_turban_dark_skin_tone": "👳🏿\u200d♀️",
"woman_wearing_turban_light_skin_tone": "👳🏻\u200d♀️",
"woman_wearing_turban_medium-dark_skin_tone": "👳🏾\u200d♀️",
"woman_wearing_turban_medium-light_skin_tone": "👳🏼\u200d♀️",
"woman_wearing_turban_medium_skin_tone": "👳🏽\u200d♀️",
"woman_with_headscarf": "🧕",
"woman_with_headscarf_dark_skin_tone": "🧕🏿",
"woman_with_headscarf_light_skin_tone": "🧕🏻",
"woman_with_headscarf_medium-dark_skin_tone": "🧕🏾",
"woman_with_headscarf_medium-light_skin_tone": "🧕🏼",
"woman_with_headscarf_medium_skin_tone": "🧕🏽",
"woman_with_probing_cane": "👩\u200d🦯",
"woman_zombie": "🧟\u200d♀️",
"woman’s_boot": "👢",
"woman’s_clothes": "👚",
"woman’s_hat": "👒",
"woman’s_sandal": "👡",
"women_with_bunny_ears": "👯\u200d♀️",
"women_wrestling": "🤼\u200d♀️",
"women’s_room": "🚺",
"woozy_face": "🥴",
"world_map": "🗺",
"worried_face": "😟",
"wrapped_gift": "🎁",
"wrench": "🔧",
"writing_hand": "✍",
"writing_hand_dark_skin_tone": "✍🏿",
"writing_hand_light_skin_tone": "✍🏻",
"writing_hand_medium-dark_skin_tone": "✍🏾",
"writing_hand_medium-light_skin_tone": "✍🏼",
"writing_hand_medium_skin_tone": "✍🏽",
"yarn": "🧶",
"yawning_face": "🥱",
"yellow_circle": "🟡",
"yellow_heart": "💛",
"yellow_square": "🟨",
"yen_banknote": "💴",
"yo-yo": "🪀",
"yin_yang": "☯",
"zany_face": "🤪",
"zebra": "🦓",
"zipper-mouth_face": "🤐",
"zombie": "🧟",
"zzz": "💤",
"åland_islands": "🇦🇽",
"keycap_asterisk": "*⃣",
"keycap_digit_eight": "8⃣",
"keycap_digit_five": "5⃣",
"keycap_digit_four": "4⃣",
"keycap_digit_nine": "9⃣",
"keycap_digit_one": "1⃣",
"keycap_digit_seven": "7⃣",
"keycap_digit_six": "6⃣",
"keycap_digit_three": "3⃣",
"keycap_digit_two": "2⃣",
"keycap_digit_zero": "0⃣",
"keycap_number_sign": "#⃣",
"light_skin_tone": "🏻",
"medium_light_skin_tone": "🏼",
"medium_skin_tone": "🏽",
"medium_dark_skin_tone": "🏾",
"dark_skin_tone": "🏿",
"regional_indicator_symbol_letter_a": "🇦",
"regional_indicator_symbol_letter_b": "🇧",
"regional_indicator_symbol_letter_c": "🇨",
"regional_indicator_symbol_letter_d": "🇩",
"regional_indicator_symbol_letter_e": "🇪",
"regional_indicator_symbol_letter_f": "🇫",
"regional_indicator_symbol_letter_g": "🇬",
"regional_indicator_symbol_letter_h": "🇭",
"regional_indicator_symbol_letter_i": "🇮",
"regional_indicator_symbol_letter_j": "🇯",
"regional_indicator_symbol_letter_k": "🇰",
"regional_indicator_symbol_letter_l": "🇱",
"regional_indicator_symbol_letter_m": "🇲",
"regional_indicator_symbol_letter_n": "🇳",
"regional_indicator_symbol_letter_o": "🇴",
"regional_indicator_symbol_letter_p": "🇵",
"regional_indicator_symbol_letter_q": "🇶",
"regional_indicator_symbol_letter_r": "🇷",
"regional_indicator_symbol_letter_s": "🇸",
"regional_indicator_symbol_letter_t": "🇹",
"regional_indicator_symbol_letter_u": "🇺",
"regional_indicator_symbol_letter_v": "🇻",
"regional_indicator_symbol_letter_w": "🇼",
"regional_indicator_symbol_letter_x": "🇽",
"regional_indicator_symbol_letter_y": "🇾",
"regional_indicator_symbol_letter_z": "🇿",
"airplane_arriving": "🛬",
"space_invader": "👾",
"football": "🏈",
"anger": "💢",
"angry": "😠",
"anguished": "😧",
"signal_strength": "📶",
"arrows_counterclockwise": "🔄",
"arrow_heading_down": "⤵",
"arrow_heading_up": "⤴",
"art": "🎨",
"astonished": "😲",
"athletic_shoe": "👟",
"atm": "🏧",
"car": "🚗",
"red_car": "🚗",
"angel": "👼",
"back": "🔙",
"badminton_racquet_and_shuttlecock": "🏸",
"dollar": "💵",
"euro": "💶",
"pound": "💷",
"yen": "💴",
"barber": "💈",
"bath": "🛀",
"bear": "🐻",
"heartbeat": "💓",
"beer": "🍺",
"no_bell": "🔕",
"bento": "🍱",
"bike": "🚲",
"bicyclist": "🚴",
"8ball": "🎱",
"biohazard_sign": "☣",
"birthday": "🎂",
"black_circle_for_record": "⏺",
"clubs": "♣",
"diamonds": "♦",
"arrow_double_down": "⏬",
"hearts": "♥",
"rewind": "⏪",
"black_left__pointing_double_triangle_with_vertical_bar": "⏮",
"arrow_backward": "◀",
"black_medium_small_square": "◾",
"question": "❓",
"fast_forward": "⏩",
"black_right__pointing_double_triangle_with_vertical_bar": "⏭",
"arrow_forward": "▶",
"black_right__pointing_triangle_with_double_vertical_bar": "⏯",
"arrow_right": "➡",
"spades": "♠",
"black_square_for_stop": "⏹",
"sunny": "☀",
"phone": "☎",
"recycle": "♻",
"arrow_double_up": "⏫",
"busstop": "🚏",
"date": "📅",
"flags": "🎏",
"cat2": "🐈",
"joy_cat": "😹",
"smirk_cat": "😼",
"chart_with_downwards_trend": "📉",
"chart_with_upwards_trend": "📈",
"chart": "💹",
"mega": "📣",
"checkered_flag": "🏁",
"accept": "🉑",
"ideograph_advantage": "🉐",
"congratulations": "㊗",
"secret": "㊙",
"m": "Ⓜ",
"city_sunset": "🌆",
"clapper": "🎬",
"clap": "👏",
"beers": "🍻",
"clock830": "🕣",
"clock8": "🕗",
"clock1130": "🕦",
"clock11": "🕚",
"clock530": "🕠",
"clock5": "🕔",
"clock430": "🕟",
"clock4": "🕓",
"clock930": "🕤",
"clock9": "🕘",
"clock130": "🕜",
"clock1": "🕐",
"clock730": "🕢",
"clock7": "🕖",
"clock630": "🕡",
"clock6": "🕕",
"clock1030": "🕥",
"clock10": "🕙",
"clock330": "🕞",
"clock3": "🕒",
"clock1230": "🕧",
"clock12": "🕛",
"clock230": "🕝",
"clock2": "🕑",
"arrows_clockwise": "🔃",
"repeat": "🔁",
"repeat_one": "🔂",
"closed_lock_with_key": "🔐",
"mailbox_closed": "📪",
"mailbox": "📫",
"cloud_with_tornado": "🌪",
"cocktail": "🍸",
"boom": "💥",
"compression": "🗜",
"confounded": "😖",
"confused": "😕",
"rice": "🍚",
"cow2": "🐄",
"cricket_bat_and_ball": "🏏",
"x": "❌",
"cry": "😢",
"curry": "🍛",
"dagger_knife": "🗡",
"dancer": "💃",
"dark_sunglasses": "🕶",
"dash": "💨",
"truck": "🚚",
"derelict_house_building": "🏚",
"diamond_shape_with_a_dot_inside": "💠",
"dart": "🎯",
"disappointed_relieved": "😥",
"disappointed": "😞",
"do_not_litter": "🚯",
"dog2": "🐕",
"flipper": "🐬",
"loop": "➿",
"bangbang": "‼",
"double_vertical_bar": "⏸",
"dove_of_peace": "🕊",
"small_red_triangle_down": "🔻",
"arrow_down_small": "🔽",
"arrow_down": "⬇",
"dromedary_camel": "🐪",
"e__mail": "📧",
"corn": "🌽",
"ear_of_rice": "🌾",
"earth_americas": "🌎",
"earth_asia": "🌏",
"earth_africa": "🌍",
"eight_pointed_black_star": "✴",
"eight_spoked_asterisk": "✳",
"eject_symbol": "⏏",
"bulb": "💡",
"emoji_modifier_fitzpatrick_type__1__2": "🏻",
"emoji_modifier_fitzpatrick_type__3": "🏼",
"emoji_modifier_fitzpatrick_type__4": "🏽",
"emoji_modifier_fitzpatrick_type__5": "🏾",
"emoji_modifier_fitzpatrick_type__6": "🏿",
"end": "🔚",
"email": "✉",
"european_castle": "🏰",
"european_post_office": "🏤",
"interrobang": "⁉",
"expressionless": "😑",
"eyeglasses": "👓",
"massage": "💆",
"yum": "😋",
"scream": "😱",
"kissing_heart": "😘",
"sweat": "😓",
"face_with_head__bandage": "🤕",
"triumph": "😤",
"mask": "😷",
"no_good": "🙅",
"ok_woman": "🙆",
"open_mouth": "😮",
"cold_sweat": "😰",
"stuck_out_tongue": "😛",
"stuck_out_tongue_closed_eyes": "😝",
"stuck_out_tongue_winking_eye": "😜",
"joy": "😂",
"no_mouth": "😶",
"santa": "🎅",
"fax": "📠",
"fearful": "😨",
"field_hockey_stick_and_ball": "🏑",
"first_quarter_moon_with_face": "🌛",
"fish_cake": "🍥",
"fishing_pole_and_fish": "🎣",
"facepunch": "👊",
"punch": "👊",
"flag_for_afghanistan": "🇦🇫",
"flag_for_albania": "🇦🇱",
"flag_for_algeria": "🇩🇿",
"flag_for_american_samoa": "🇦🇸",
"flag_for_andorra": "🇦🇩",
"flag_for_angola": "🇦🇴",
"flag_for_anguilla": "🇦🇮",
"flag_for_antarctica": "🇦🇶",
"flag_for_antigua_&_barbuda": "🇦🇬",
"flag_for_argentina": "🇦🇷",
"flag_for_armenia": "🇦🇲",
"flag_for_aruba": "🇦🇼",
"flag_for_ascension_island": "🇦🇨",
"flag_for_australia": "🇦🇺",
"flag_for_austria": "🇦🇹",
"flag_for_azerbaijan": "🇦🇿",
"flag_for_bahamas": "🇧🇸",
"flag_for_bahrain": "🇧🇭",
"flag_for_bangladesh": "🇧🇩",
"flag_for_barbados": "🇧🇧",
"flag_for_belarus": "🇧🇾",
"flag_for_belgium": "🇧🇪",
"flag_for_belize": "🇧🇿",
"flag_for_benin": "🇧🇯",
"flag_for_bermuda": "🇧🇲",
"flag_for_bhutan": "🇧🇹",
"flag_for_bolivia": "🇧🇴",
"flag_for_bosnia_&_herzegovina": "🇧🇦",
"flag_for_botswana": "🇧🇼",
"flag_for_bouvet_island": "🇧🇻",
"flag_for_brazil": "🇧🇷",
"flag_for_british_indian_ocean_territory": "🇮🇴",
"flag_for_british_virgin_islands": "🇻🇬",
"flag_for_brunei": "🇧🇳",
"flag_for_bulgaria": "🇧🇬",
"flag_for_burkina_faso": "🇧🇫",
"flag_for_burundi": "🇧🇮",
"flag_for_cambodia": "🇰🇭",
"flag_for_cameroon": "🇨🇲",
"flag_for_canada": "🇨🇦",
"flag_for_canary_islands": "🇮🇨",
"flag_for_cape_verde": "🇨🇻",
"flag_for_caribbean_netherlands": "🇧🇶",
"flag_for_cayman_islands": "🇰🇾",
"flag_for_central_african_republic": "🇨🇫",
"flag_for_ceuta_&_melilla": "🇪🇦",
"flag_for_chad": "🇹🇩",
"flag_for_chile": "🇨🇱",
"flag_for_china": "🇨🇳",
"flag_for_christmas_island": "🇨🇽",
"flag_for_clipperton_island": "🇨🇵",
"flag_for_cocos__islands": "🇨🇨",
"flag_for_colombia": "🇨🇴",
"flag_for_comoros": "🇰🇲",
"flag_for_congo____brazzaville": "🇨🇬",
"flag_for_congo____kinshasa": "🇨🇩",
"flag_for_cook_islands": "🇨🇰",
"flag_for_costa_rica": "🇨🇷",
"flag_for_croatia": "🇭🇷",
"flag_for_cuba": "🇨🇺",
"flag_for_curaçao": "🇨🇼",
"flag_for_cyprus": "🇨🇾",
"flag_for_czech_republic": "🇨🇿",
"flag_for_côte_d’ivoire": "🇨🇮",
"flag_for_denmark": "🇩🇰",
"flag_for_diego_garcia": "🇩🇬",
"flag_for_djibouti": "🇩🇯",
"flag_for_dominica": "🇩🇲",
"flag_for_dominican_republic": "🇩🇴",
"flag_for_ecuador": "🇪🇨",
"flag_for_egypt": "🇪🇬",
"flag_for_el_salvador": "🇸🇻",
"flag_for_equatorial_guinea": "🇬🇶",
"flag_for_eritrea": "🇪🇷",
"flag_for_estonia": "🇪🇪",
"flag_for_ethiopia": "🇪🇹",
"flag_for_european_union": "🇪🇺",
"flag_for_falkland_islands": "🇫🇰",
"flag_for_faroe_islands": "🇫🇴",
"flag_for_fiji": "🇫🇯",
"flag_for_finland": "🇫🇮",
"flag_for_france": "🇫🇷",
"flag_for_french_guiana": "🇬🇫",
"flag_for_french_polynesia": "🇵🇫",
"flag_for_french_southern_territories": "🇹🇫",
"flag_for_gabon": "🇬🇦",
"flag_for_gambia": "🇬🇲",
"flag_for_georgia": "🇬🇪",
"flag_for_germany": "🇩🇪",
"flag_for_ghana": "🇬🇭",
"flag_for_gibraltar": "🇬🇮",
"flag_for_greece": "🇬🇷",
"flag_for_greenland": "🇬🇱",
"flag_for_grenada": "🇬🇩",
"flag_for_guadeloupe": "🇬🇵",
"flag_for_guam": "🇬🇺",
"flag_for_guatemala": "🇬🇹",
"flag_for_guernsey": "🇬🇬",
"flag_for_guinea": "🇬🇳",
"flag_for_guinea__bissau": "🇬🇼",
"flag_for_guyana": "🇬🇾",
"flag_for_haiti": "🇭🇹",
"flag_for_heard_&_mcdonald_islands": "🇭🇲",
"flag_for_honduras": "🇭🇳",
"flag_for_hong_kong": "🇭🇰",
"flag_for_hungary": "🇭🇺",
"flag_for_iceland": "🇮🇸",
"flag_for_india": "🇮🇳",
"flag_for_indonesia": "🇮🇩",
"flag_for_iran": "🇮🇷",
"flag_for_iraq": "🇮🇶",
"flag_for_ireland": "🇮🇪",
"flag_for_isle_of_man": "🇮🇲",
"flag_for_israel": "🇮🇱",
"flag_for_italy": "🇮🇹",
"flag_for_jamaica": "🇯🇲",
"flag_for_japan": "🇯🇵",
"flag_for_jersey": "🇯🇪",
"flag_for_jordan": "🇯🇴",
"flag_for_kazakhstan": "🇰🇿",
"flag_for_kenya": "🇰🇪",
"flag_for_kiribati": "🇰🇮",
"flag_for_kosovo": "🇽🇰",
"flag_for_kuwait": "🇰🇼",
"flag_for_kyrgyzstan": "🇰🇬",
"flag_for_laos": "🇱🇦",
"flag_for_latvia": "🇱🇻",
"flag_for_lebanon": "🇱🇧",
"flag_for_lesotho": "🇱🇸",
"flag_for_liberia": "🇱🇷",
"flag_for_libya": "🇱🇾",
"flag_for_liechtenstein": "🇱🇮",
"flag_for_lithuania": "🇱🇹",
"flag_for_luxembourg": "🇱🇺",
"flag_for_macau": "🇲🇴",
"flag_for_macedonia": "🇲🇰",
"flag_for_madagascar": "🇲🇬",
"flag_for_malawi": "🇲🇼",
"flag_for_malaysia": "🇲🇾",
"flag_for_maldives": "🇲🇻",
"flag_for_mali": "🇲🇱",
"flag_for_malta": "🇲🇹",
"flag_for_marshall_islands": "🇲🇭",
"flag_for_martinique": "🇲🇶",
"flag_for_mauritania": "🇲🇷",
"flag_for_mauritius": "🇲🇺",
"flag_for_mayotte": "🇾🇹",
"flag_for_mexico": "🇲🇽",
"flag_for_micronesia": "🇫🇲",
"flag_for_moldova": "🇲🇩",
"flag_for_monaco": "🇲🇨",
"flag_for_mongolia": "🇲🇳",
"flag_for_montenegro": "🇲🇪",
"flag_for_montserrat": "🇲🇸",
"flag_for_morocco": "🇲🇦",
"flag_for_mozambique": "🇲🇿",
"flag_for_myanmar": "🇲🇲",
"flag_for_namibia": "🇳🇦",
"flag_for_nauru": "🇳🇷",
"flag_for_nepal": "🇳🇵",
"flag_for_netherlands": "🇳🇱",
"flag_for_new_caledonia": "🇳🇨",
"flag_for_new_zealand": "🇳🇿",
"flag_for_nicaragua": "🇳🇮",
"flag_for_niger": "🇳🇪",
"flag_for_nigeria": "🇳🇬",
"flag_for_niue": "🇳🇺",
"flag_for_norfolk_island": "🇳🇫",
"flag_for_north_korea": "🇰🇵",
"flag_for_northern_mariana_islands": "🇲🇵",
"flag_for_norway": "🇳🇴",
"flag_for_oman": "🇴🇲",
"flag_for_pakistan": "🇵🇰",
"flag_for_palau": "🇵🇼",
"flag_for_palestinian_territories": "🇵🇸",
"flag_for_panama": "🇵🇦",
"flag_for_papua_new_guinea": "🇵🇬",
"flag_for_paraguay": "🇵🇾",
"flag_for_peru": "🇵🇪",
"flag_for_philippines": "🇵🇭",
"flag_for_pitcairn_islands": "🇵🇳",
"flag_for_poland": "🇵🇱",
"flag_for_portugal": "🇵🇹",
"flag_for_puerto_rico": "🇵🇷",
"flag_for_qatar": "🇶🇦",
"flag_for_romania": "🇷🇴",
"flag_for_russia": "🇷🇺",
"flag_for_rwanda": "🇷🇼",
"flag_for_réunion": "🇷🇪",
"flag_for_samoa": "🇼🇸",
"flag_for_san_marino": "🇸🇲",
"flag_for_saudi_arabia": "🇸🇦",
"flag_for_senegal": "🇸🇳",
"flag_for_serbia": "🇷🇸",
"flag_for_seychelles": "🇸🇨",
"flag_for_sierra_leone": "🇸🇱",
"flag_for_singapore": "🇸🇬",
"flag_for_sint_maarten": "🇸🇽",
"flag_for_slovakia": "🇸🇰",
"flag_for_slovenia": "🇸🇮",
"flag_for_solomon_islands": "🇸🇧",
"flag_for_somalia": "🇸🇴",
"flag_for_south_africa": "🇿🇦",
"flag_for_south_georgia_&_south_sandwich_islands": "🇬🇸",
"flag_for_south_korea": "🇰🇷",
"flag_for_south_sudan": "🇸🇸",
"flag_for_spain": "🇪🇸",
"flag_for_sri_lanka": "🇱🇰",
"flag_for_st._barthélemy": "🇧🇱",
"flag_for_st._helena": "🇸🇭",
"flag_for_st._kitts_&_nevis": "🇰🇳",
"flag_for_st._lucia": "🇱🇨",
"flag_for_st._martin": "🇲🇫",
"flag_for_st._pierre_&_miquelon": "🇵🇲",
"flag_for_st._vincent_&_grenadines": "🇻🇨",
"flag_for_sudan": "🇸🇩",
"flag_for_suriname": "🇸🇷",
"flag_for_svalbard_&_jan_mayen": "🇸🇯",
"flag_for_swaziland": "🇸🇿",
"flag_for_sweden": "🇸🇪",
"flag_for_switzerland": "🇨🇭",
"flag_for_syria": "🇸🇾",
"flag_for_são_tomé_&_príncipe": "🇸🇹",
"flag_for_taiwan": "🇹🇼",
"flag_for_tajikistan": "🇹🇯",
"flag_for_tanzania": "🇹🇿",
"flag_for_thailand": "🇹🇭",
"flag_for_timor__leste": "🇹🇱",
"flag_for_togo": "🇹🇬",
"flag_for_tokelau": "🇹🇰",
"flag_for_tonga": "🇹🇴",
"flag_for_trinidad_&_tobago": "🇹🇹",
"flag_for_tristan_da_cunha": "🇹🇦",
"flag_for_tunisia": "🇹🇳",
"flag_for_turkey": "🇹🇷",
"flag_for_turkmenistan": "🇹🇲",
"flag_for_turks_&_caicos_islands": "🇹🇨",
"flag_for_tuvalu": "🇹🇻",
"flag_for_u.s._outlying_islands": "🇺🇲",
"flag_for_u.s._virgin_islands": "🇻🇮",
"flag_for_uganda": "🇺🇬",
"flag_for_ukraine": "🇺🇦",
"flag_for_united_arab_emirates": "🇦🇪",
"flag_for_united_kingdom": "🇬🇧",
"flag_for_united_states": "🇺🇸",
"flag_for_uruguay": "🇺🇾",
"flag_for_uzbekistan": "🇺🇿",
"flag_for_vanuatu": "🇻🇺",
"flag_for_vatican_city": "🇻🇦",
"flag_for_venezuela": "🇻🇪",
"flag_for_vietnam": "🇻🇳",
"flag_for_wallis_&_futuna": "🇼🇫",
"flag_for_western_sahara": "🇪🇭",
"flag_for_yemen": "🇾🇪",
"flag_for_zambia": "🇿🇲",
"flag_for_zimbabwe": "🇿🇼",
"flag_for_åland_islands": "🇦🇽",
"golf": "⛳",
"fleur__de__lis": "⚜",
"muscle": "💪",
"flushed": "😳",
"frame_with_picture": "🖼",
"fries": "🍟",
"frog": "🐸",
"hatched_chick": "🐥",
"frowning": "😦",
"fuelpump": "⛽",
"full_moon_with_face": "🌝",
"gem": "💎",
"star2": "🌟",
"golfer": "🏌",
"mortar_board": "🎓",
"grimacing": "😬",
"smile_cat": "😸",
"grinning": "😀",
"grin": "😁",
"heartpulse": "💗",
"guardsman": "💂",
"haircut": "💇",
"hamster": "🐹",
"raising_hand": "🙋",
"headphones": "🎧",
"hear_no_evil": "🙉",
"cupid": "💘",
"gift_heart": "💝",
"heart": "❤",
"exclamation": "❗",
"heavy_exclamation_mark": "❗",
"heavy_heart_exclamation_mark_ornament": "❣",
"o": "⭕",
"helm_symbol": "⎈",
"helmet_with_white_cross": "⛑",
"high_heel": "👠",
"bullettrain_side": "🚄",
"bullettrain_front": "🚅",
"high_brightness": "🔆",
"zap": "⚡",
"hocho": "🔪",
"knife": "🔪",
"bee": "🐝",
"traffic_light": "🚥",
"racehorse": "🐎",
"coffee": "☕",
"hotsprings": "♨",
"hourglass": "⌛",
"hourglass_flowing_sand": "⏳",
"house_buildings": "🏘",
"100": "💯",
"hushed": "😯",
"ice_hockey_stick_and_puck": "🏒",
"imp": "👿",
"information_desk_person": "💁",
"information_source": "ℹ",
"capital_abcd": "🔠",
"abc": "🔤",
"abcd": "🔡",
"1234": "🔢",
"symbols": "🔣",
"izakaya_lantern": "🏮",
"lantern": "🏮",
"jack_o_lantern": "🎃",
"dolls": "🎎",
"japanese_goblin": "👺",
"japanese_ogre": "👹",
"beginner": "🔰",
"zero": "0️⃣",
"one": "1️⃣",
"ten": "🔟",
"two": "2️⃣",
"three": "3️⃣",
"four": "4️⃣",
"five": "5️⃣",
"six": "6️⃣",
"seven": "7️⃣",
"eight": "8️⃣",
"nine": "9️⃣",
"couplekiss": "💏",
"kissing_cat": "😽",
"kissing": "😗",
"kissing_closed_eyes": "😚",
"kissing_smiling_eyes": "😙",
"beetle": "🐞",
"large_blue_circle": "🔵",
"last_quarter_moon_with_face": "🌜",
"leaves": "🍃",
"mag": "🔍",
"left_right_arrow": "↔",
"leftwards_arrow_with_hook": "↩",
"arrow_left": "⬅",
"lock": "🔒",
"lock_with_ink_pen": "🔏",
"sob": "😭",
"low_brightness": "🔅",
"lower_left_ballpoint_pen": "🖊",
"lower_left_crayon": "🖍",
"lower_left_fountain_pen": "🖋",
"lower_left_paintbrush": "🖌",
"mahjong": "🀄",
"couple": "👫",
"man_in_business_suit_levitating": "🕴",
"man_with_gua_pi_mao": "👲",
"man_with_turban": "👳",
"mans_shoe": "👞",
"shoe": "👞",
"menorah_with_nine_branches": "🕎",
"mens": "🚹",
"minidisc": "💽",
"iphone": "📱",
"calling": "📲",
"money__mouth_face": "🤑",
"moneybag": "💰",
"rice_scene": "🎑",
"mountain_bicyclist": "🚵",
"mouse2": "🐁",
"lips": "👄",
"moyai": "🗿",
"notes": "🎶",
"nail_care": "💅",
"ab": "🆎",
"negative_squared_cross_mark": "❎",
"a": "🅰",
"b": "🅱",
"o2": "🅾",
"parking": "🅿",
"new_moon_with_face": "🌚",
"no_entry_sign": "🚫",
"underage": "🔞",
"non__potable_water": "🚱",
"arrow_upper_right": "↗",
"arrow_upper_left": "↖",
"office": "🏢",
"older_man": "👴",
"older_woman": "👵",
"om_symbol": "🕉",
"on": "🔛",
"book": "📖",
"unlock": "🔓",
"mailbox_with_no_mail": "📭",
"mailbox_with_mail": "📬",
"cd": "💿",
"tada": "🎉",
"feet": "🐾",
"walking": "🚶",
"pencil2": "✏",
"pensive": "😔",
"persevere": "😣",
"bow": "🙇",
"raised_hands": "🙌",
"person_with_ball": "⛹",
"person_with_blond_hair": "👱",
"pray": "🙏",
"person_with_pouting_face": "🙎",
"computer": "💻",
"pig2": "🐖",
"hankey": "💩",
"poop": "💩",
"shit": "💩",
"bamboo": "🎍",
"gun": "🔫",
"black_joker": "🃏",
"rotating_light": "🚨",
"cop": "👮",
"stew": "🍲",
"pouch": "👝",
"pouting_cat": "😾",
"rage": "😡",
"put_litter_in_its_place": "🚮",
"rabbit2": "🐇",
"racing_motorcycle": "🏍",
"radioactive_sign": "☢",
"fist": "✊",
"hand": "✋",
"raised_hand_with_fingers_splayed": "🖐",
"raised_hand_with_part_between_middle_and_ring_fingers": "🖖",
"blue_car": "🚙",
"apple": "🍎",
"relieved": "😌",
"reversed_hand_with_middle_finger_extended": "🖕",
"mag_right": "🔎",
"arrow_right_hook": "↪",
"sweet_potato": "🍠",
"robot": "🤖",
"rolled__up_newspaper": "🗞",
"rowboat": "🚣",
"runner": "🏃",
"running": "🏃",
"running_shirt_with_sash": "🎽",
"boat": "⛵",
"scales": "⚖",
"school_satchel": "🎒",
"scorpius": "♏",
"see_no_evil": "🙈",
"sheep": "🐑",
"stars": "🌠",
"cake": "🍰",
"six_pointed_star": "🔯",
"ski": "🎿",
"sleeping_accommodation": "🛌",
"sleeping": "😴",
"sleepy": "😪",
"sleuth_or_spy": "🕵",
"heart_eyes_cat": "😻",
"smiley_cat": "😺",
"innocent": "😇",
"heart_eyes": "😍",
"smiling_imp": "😈",
"smiley": "😃",
"sweat_smile": "😅",
"smile": "😄",
"laughing": "😆",
"satisfied": "😆",
"blush": "😊",
"smirk": "😏",
"smoking": "🚬",
"snow_capped_mountain": "🏔",
"soccer": "⚽",
"icecream": "🍦",
"soon": "🔜",
"arrow_lower_right": "↘",
"arrow_lower_left": "↙",
"speak_no_evil": "🙊",
"speaker": "🔈",
"mute": "🔇",
"sound": "🔉",
"loud_sound": "🔊",
"speaking_head_in_silhouette": "🗣",
"spiral_calendar_pad": "🗓",
"spiral_note_pad": "🗒",
"shell": "🐚",
"sweat_drops": "💦",
"u5272": "🈹",
"u5408": "🈴",
"u55b6": "🈺",
"u6307": "🈯",
"u6708": "🈷",
"u6709": "🈶",
"u6e80": "🈵",
"u7121": "🈚",
"u7533": "🈸",
"u7981": "🈲",
"u7a7a": "🈳",
"cl": "🆑",
"cool": "🆒",
"free": "🆓",
"id": "🆔",
"koko": "🈁",
"sa": "🈂",
"new": "🆕",
"ng": "🆖",
"ok": "🆗",
"sos": "🆘",
"up": "🆙",
"vs": "🆚",
"steam_locomotive": "🚂",
"ramen": "🍜",
"partly_sunny": "⛅",
"city_sunrise": "🌇",
"surfer": "🏄",
"swimmer": "🏊",
"shirt": "👕",
"tshirt": "👕",
"table_tennis_paddle_and_ball": "🏓",
"tea": "🍵",
"tv": "📺",
"three_button_mouse": "🖱",
"+1": "👍",
"thumbsup": "👍",
"__1": "👎",
"-1": "👎",
"thumbsdown": "👎",
"thunder_cloud_and_rain": "⛈",
"tiger2": "🐅",
"tophat": "🎩",
"top": "🔝",
"tm": "™",
"train2": "🚆",
"triangular_flag_on_post": "🚩",
"trident": "🔱",
"twisted_rightwards_arrows": "🔀",
"unamused": "😒",
"small_red_triangle": "🔺",
"arrow_up_small": "🔼",
"arrow_up_down": "↕",
"upside__down_face": "🙃",
"arrow_up": "⬆",
"v": "✌",
"vhs": "📼",
"wc": "🚾",
"ocean": "🌊",
"waving_black_flag": "🏴",
"wave": "👋",
"waving_white_flag": "🏳",
"moon": "🌔",
"scream_cat": "🙀",
"weary": "😩",
"weight_lifter": "🏋",
"whale2": "🐋",
"wheelchair": "♿",
"point_down": "👇",
"grey_exclamation": "❕",
"white_frowning_face": "☹",
"white_check_mark": "✅",
"point_left": "👈",
"white_medium_small_square": "◽",
"star": "⭐",
"grey_question": "❔",
"point_right": "👉",
"relaxed": "☺",
"white_sun_behind_cloud": "🌥",
"white_sun_behind_cloud_with_rain": "🌦",
"white_sun_with_small_cloud": "🌤",
"point_up_2": "👆",
"point_up": "☝",
"wind_blowing_face": "🌬",
"wink": "😉",
"wolf": "🐺",
"dancers": "👯",
"boot": "👢",
"womans_clothes": "👚",
"womans_hat": "👒",
"sandal": "👡",
"womens": "🚺",
"worried": "😟",
"gift": "🎁",
"zipper__mouth_face": "🤐",
"regional_indicator_a": "🇦",
"regional_indicator_b": "🇧",
"regional_indicator_c": "🇨",
"regional_indicator_d": "🇩",
"regional_indicator_e": "🇪",
"regional_indicator_f": "🇫",
"regional_indicator_g": "🇬",
"regional_indicator_h": "🇭",
"regional_indicator_i": "🇮",
"regional_indicator_j": "🇯",
"regional_indicator_k": "🇰",
"regional_indicator_l": "🇱",
"regional_indicator_m": "🇲",
"regional_indicator_n": "🇳",
"regional_indicator_o": "🇴",
"regional_indicator_p": "🇵",
"regional_indicator_q": "🇶",
"regional_indicator_r": "🇷",
"regional_indicator_s": "🇸",
"regional_indicator_t": "🇹",
"regional_indicator_u": "🇺",
"regional_indicator_v": "🇻",
"regional_indicator_w": "🇼",
"regional_indicator_x": "🇽",
"regional_indicator_y": "🇾",
"regional_indicator_z": "🇿",
}
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_emoji_codes.py
|
Python
|
mit
| 140,235 |
from typing import Callable, Match, Optional
import re
from ._emoji_codes import EMOJI
_ReStringMatch = Match[str] # regex match object
_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub
_EmojiSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re
def _emoji_replace(
text: str,
default_variant: Optional[str] = None,
_emoji_sub: _EmojiSubMethod = re.compile(r"(:(\S*?)(?:(?:\-)(emoji|text))?:)").sub,
) -> str:
"""Replace emoji code in text."""
get_emoji = EMOJI.__getitem__
variants = {"text": "\uFE0E", "emoji": "\uFE0F"}
get_variant = variants.get
default_variant_code = variants.get(default_variant, "") if default_variant else ""
def do_replace(match: Match[str]) -> str:
emoji_code, emoji_name, variant = match.groups()
try:
return get_emoji(emoji_name.lower()) + get_variant(
variant, default_variant_code
)
except KeyError:
return emoji_code
return _emoji_sub(do_replace, text)
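# Illustrative usage (a minimal sketch, not part of the vendored module):
# codes like ``:+1:`` are swapped for the emoji, with an optional variant suffix.
if __name__ == "__main__":  # pragma: no cover
    print(_emoji_replace("Nice work :+1:"))  # -> "Nice work 👍"
    print(_emoji_replace(":heart:", default_variant="emoji"))  # appends U+FE0F
    print(_emoji_replace(":no_such_code:"))  # unknown codes pass through unchanged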
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_emoji_replace.py
|
Python
|
mit
| 1,064 |
CONSOLE_HTML_FORMAT = """\
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<style>
{stylesheet}
body {{
    color: {foreground};
    background-color: {background};
}}
</style>
</head>
<body>
<code>
<pre style="font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">{code}</pre>
</code>
</body>
</html>
"""
CONSOLE_SVG_FORMAT = """\
<svg class="rich-terminal" viewBox="0 0 {width} {height}" xmlns="http://www.w3.org/2000/svg">
<!-- Generated with Rich https://www.textualize.io -->
<style>
@font-face {{
font-family: "Fira Code";
src: local("FiraCode-Regular"),
url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff2/FiraCode-Regular.woff2") format("woff2"),
url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff/FiraCode-Regular.woff") format("woff");
font-style: normal;
font-weight: 400;
}}
@font-face {{
font-family: "Fira Code";
src: local("FiraCode-Bold"),
url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff2/FiraCode-Bold.woff2") format("woff2"),
url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff/FiraCode-Bold.woff") format("woff");
font-style: bold;
font-weight: 700;
}}
.{unique_id}-matrix {{
font-family: Fira Code, monospace;
font-size: {char_height}px;
line-height: {line_height}px;
font-variant-east-asian: full-width;
}}
.{unique_id}-title {{
font-size: 18px;
font-weight: bold;
font-family: arial;
}}
{styles}
</style>
<defs>
<clipPath id="{unique_id}-clip-terminal">
<rect x="0" y="0" width="{terminal_width}" height="{terminal_height}" />
</clipPath>
{lines}
</defs>
{chrome}
<g transform="translate({terminal_x}, {terminal_y})" clip-path="url(#{unique_id}-clip-terminal)">
{backgrounds}
<g class="{unique_id}-matrix">
{matrix}
</g>
</g>
</svg>
"""
_SVG_FONT_FAMILY = "Rich Fira Code"
_SVG_CLASSES_PREFIX = "rich-svg"
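# Illustrative usage (a minimal sketch, not part of the vendored module):
# the templates are plain str.format strings; the doubled braces above survive
# substitution as literal CSS braces.
if __name__ == "__main__":  # pragma: no cover
    html = CONSOLE_HTML_FORMAT.format(
        stylesheet="",
        foreground="#ffffff",
        background="#000000",
        code="Hello, world!",
    )
    print(html)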
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_export_format.py
|
Python
|
mit
| 2,114 |
from typing import Any
def load_ipython_extension(ip: Any) -> None: # pragma: no cover
# prevent circular import
from pip._vendor.rich.pretty import install
from pip._vendor.rich.traceback import install as tr_install
install()
tr_install()
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_extension.py
|
Python
|
mit
| 265 |
from __future__ import absolute_import
import inspect
from inspect import cleandoc, getdoc, getfile, isclass, ismodule, signature
from typing import Any, Collection, Iterable, Optional, Tuple, Type, Union
from .console import Group, RenderableType
from .control import escape_control_codes
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin
from .panel import Panel
from .pretty import Pretty
from .table import Table
from .text import Text, TextType
def _first_paragraph(doc: str) -> str:
"""Get the first paragraph from a docstring."""
paragraph, _, _ = doc.partition("\n\n")
return paragraph
class Inspect(JupyterMixin):
"""A renderable to inspect any Python Object.
Args:
obj (Any): An object to inspect.
        title (str, optional): Title to display over inspect result, or None to use the type name. Defaults to None.
help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.
methods (bool, optional): Enable inspection of callables. Defaults to False.
docs (bool, optional): Also render doc strings. Defaults to True.
private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.
dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.
sort (bool, optional): Sort attributes alphabetically. Defaults to True.
all (bool, optional): Show all attributes. Defaults to False.
value (bool, optional): Pretty print value of object. Defaults to True.
"""
def __init__(
self,
obj: Any,
*,
title: Optional[TextType] = None,
help: bool = False,
methods: bool = False,
docs: bool = True,
private: bool = False,
dunder: bool = False,
sort: bool = True,
all: bool = True,
value: bool = True,
) -> None:
self.highlighter = ReprHighlighter()
self.obj = obj
self.title = title or self._make_title(obj)
if all:
methods = private = dunder = True
self.help = help
self.methods = methods
self.docs = docs or help
self.private = private or dunder
self.dunder = dunder
self.sort = sort
self.value = value
def _make_title(self, obj: Any) -> Text:
"""Make a default title."""
title_str = (
str(obj)
if (isclass(obj) or callable(obj) or ismodule(obj))
else str(type(obj))
)
title_text = self.highlighter(title_str)
return title_text
def __rich__(self) -> Panel:
return Panel.fit(
Group(*self._render()),
title=self.title,
border_style="scope.border",
padding=(0, 1),
)
def _get_signature(self, name: str, obj: Any) -> Optional[Text]:
"""Get a signature for a callable."""
try:
_signature = str(signature(obj)) + ":"
except ValueError:
_signature = "(...)"
except TypeError:
return None
source_filename: Optional[str] = None
try:
source_filename = getfile(obj)
except (OSError, TypeError):
# OSError is raised if obj has no source file, e.g. when defined in REPL.
pass
callable_name = Text(name, style="inspect.callable")
if source_filename:
callable_name.stylize(f"link file://{source_filename}")
signature_text = self.highlighter(_signature)
qualname = name or getattr(obj, "__qualname__", name)
# If obj is a module, there may be classes (which are callable) to display
if inspect.isclass(obj):
prefix = "class"
elif inspect.iscoroutinefunction(obj):
prefix = "async def"
else:
prefix = "def"
qual_signature = Text.assemble(
(f"{prefix} ", f"inspect.{prefix.replace(' ', '_')}"),
(qualname, "inspect.callable"),
signature_text,
)
return qual_signature
def _render(self) -> Iterable[RenderableType]:
"""Render object."""
def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
key, (_error, value) = item
return (callable(value), key.strip("_").lower())
def safe_getattr(attr_name: str) -> Tuple[Any, Any]:
"""Get attribute or any exception."""
try:
return (None, getattr(obj, attr_name))
except Exception as error:
return (error, None)
obj = self.obj
keys = dir(obj)
total_items = len(keys)
if not self.dunder:
keys = [key for key in keys if not key.startswith("__")]
if not self.private:
keys = [key for key in keys if not key.startswith("_")]
not_shown_count = total_items - len(keys)
items = [(key, safe_getattr(key)) for key in keys]
if self.sort:
items.sort(key=sort_items)
items_table = Table.grid(padding=(0, 1), expand=False)
items_table.add_column(justify="right")
add_row = items_table.add_row
highlighter = self.highlighter
if callable(obj):
signature = self._get_signature("", obj)
if signature is not None:
yield signature
yield ""
if self.docs:
_doc = self._get_formatted_doc(obj)
if _doc is not None:
doc_text = Text(_doc, style="inspect.help")
doc_text = highlighter(doc_text)
yield doc_text
yield ""
if self.value and not (isclass(obj) or callable(obj) or ismodule(obj)):
yield Panel(
Pretty(obj, indent_guides=True, max_length=10, max_string=60),
border_style="inspect.value.border",
)
yield ""
for key, (error, value) in items:
key_text = Text.assemble(
(
key,
"inspect.attr.dunder" if key.startswith("__") else "inspect.attr",
),
(" =", "inspect.equals"),
)
if error is not None:
warning = key_text.copy()
warning.stylize("inspect.error")
add_row(warning, highlighter(repr(error)))
continue
if callable(value):
if not self.methods:
continue
_signature_text = self._get_signature(key, value)
if _signature_text is None:
add_row(key_text, Pretty(value, highlighter=highlighter))
else:
if self.docs:
docs = self._get_formatted_doc(value)
if docs is not None:
_signature_text.append("\n" if "\n" in docs else " ")
doc = highlighter(docs)
doc.stylize("inspect.doc")
_signature_text.append(doc)
add_row(key_text, _signature_text)
else:
add_row(key_text, Pretty(value, highlighter=highlighter))
if items_table.row_count:
yield items_table
elif not_shown_count:
yield Text.from_markup(
f"[b cyan]{not_shown_count}[/][i] attribute(s) not shown.[/i] "
f"Run [b][magenta]inspect[/]([not b]inspect[/])[/b] for options."
)
def _get_formatted_doc(self, object_: Any) -> Optional[str]:
"""
        Extract the docstring of an object, process it, and return it.
        The processing consists of cleaning up the docstring's indentation,
        taking only its first paragraph if `self.help` is not True,
        and escaping its control codes.
Args:
object_ (Any): the object to get the docstring from.
Returns:
Optional[str]: the processed docstring, or None if no docstring was found.
"""
docs = getdoc(object_)
if docs is None:
return None
docs = cleandoc(docs).strip()
if not self.help:
docs = _first_paragraph(docs)
return escape_control_codes(docs)
def get_object_types_mro(obj: Union[object, Type[Any]]) -> Tuple[type, ...]:
"""Returns the MRO of an object's class, or of the object itself if it's a class."""
if not hasattr(obj, "__mro__"):
# N.B. we cannot use `if type(obj) is type` here because it doesn't work with
# some types of classes, such as the ones that use abc.ABCMeta.
obj = type(obj)
return getattr(obj, "__mro__", ())
def get_object_types_mro_as_strings(obj: object) -> Collection[str]:
"""
    Returns the MRO of an object's class as fully qualified names, or of the object itself if it's a class.
    Examples:
        `get_object_types_mro_as_strings(JSONDecoder)` will return `['json.decoder.JSONDecoder', 'builtins.object']`
"""
return [
f'{getattr(type_, "__module__", "")}.{getattr(type_, "__qualname__", "")}'
for type_ in get_object_types_mro(obj)
]
def is_object_one_of_types(
obj: object, fully_qualified_types_names: Collection[str]
) -> bool:
"""
Returns `True` if the given object's class (or the object itself, if it's a class) has one of the
fully qualified names in its MRO.
"""
for type_name in get_object_types_mro_as_strings(obj):
if type_name in fully_qualified_types_names:
return True
return False
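# Illustrative usage (a minimal sketch, not part of the vendored module):
if __name__ == "__main__":  # pragma: no cover
    from pip._vendor.rich.console import Console
    console = Console()
    console.print(Inspect({"key": "value"}, title="a dict", all=False))
    print(get_object_types_mro_as_strings({}))  # ['builtins.dict', 'builtins.object']
    print(is_object_one_of_types({}, {"builtins.dict"}))  # True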
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_inspect.py
|
Python
|
mit
| 9,695 |
from datetime import datetime
from typing import Iterable, List, Optional, TYPE_CHECKING, Union, Callable
from .text import Text, TextType
if TYPE_CHECKING:
from .console import Console, ConsoleRenderable, RenderableType
from .table import Table
FormatTimeCallable = Callable[[datetime], Text]
class LogRender:
def __init__(
self,
show_time: bool = True,
show_level: bool = False,
show_path: bool = True,
time_format: Union[str, FormatTimeCallable] = "[%x %X]",
omit_repeated_times: bool = True,
level_width: Optional[int] = 8,
) -> None:
self.show_time = show_time
self.show_level = show_level
self.show_path = show_path
self.time_format = time_format
self.omit_repeated_times = omit_repeated_times
self.level_width = level_width
self._last_time: Optional[Text] = None
def __call__(
self,
console: "Console",
renderables: Iterable["ConsoleRenderable"],
log_time: Optional[datetime] = None,
time_format: Optional[Union[str, FormatTimeCallable]] = None,
level: TextType = "",
path: Optional[str] = None,
line_no: Optional[int] = None,
link_path: Optional[str] = None,
) -> "Table":
from .containers import Renderables
from .table import Table
output = Table.grid(padding=(0, 1))
output.expand = True
if self.show_time:
output.add_column(style="log.time")
if self.show_level:
output.add_column(style="log.level", width=self.level_width)
output.add_column(ratio=1, style="log.message", overflow="fold")
if self.show_path and path:
output.add_column(style="log.path")
row: List["RenderableType"] = []
if self.show_time:
log_time = log_time or console.get_datetime()
time_format = time_format or self.time_format
if callable(time_format):
log_time_display = time_format(log_time)
else:
log_time_display = Text(log_time.strftime(time_format))
if log_time_display == self._last_time and self.omit_repeated_times:
row.append(Text(" " * len(log_time_display)))
else:
row.append(log_time_display)
self._last_time = log_time_display
if self.show_level:
row.append(level)
row.append(Renderables(renderables))
if self.show_path and path:
path_text = Text()
path_text.append(
path, style=f"link file://{link_path}" if link_path else ""
)
if line_no:
path_text.append(":")
path_text.append(
f"{line_no}",
style=f"link file://{link_path}#{line_no}" if link_path else "",
)
row.append(path_text)
output.add_row(*row)
return output
if __name__ == "__main__": # pragma: no cover
from pip._vendor.rich.console import Console
c = Console()
c.print("[on blue]Hello", justify="right")
c.log("[on blue]hello", justify="right")
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_log_render.py
|
Python
|
mit
| 3,225 |
from typing import Iterable, Tuple, TypeVar
T = TypeVar("T")
def loop_first(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
"""Iterate and generate a tuple with a flag for first value."""
iter_values = iter(values)
try:
value = next(iter_values)
except StopIteration:
return
yield True, value
for value in iter_values:
yield False, value
def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
"""Iterate and generate a tuple with a flag for last value."""
iter_values = iter(values)
try:
previous_value = next(iter_values)
except StopIteration:
return
for value in iter_values:
yield False, previous_value
previous_value = value
yield True, previous_value
def loop_first_last(values: Iterable[T]) -> Iterable[Tuple[bool, bool, T]]:
"""Iterate and generate a tuple with a flag for first and last value."""
iter_values = iter(values)
try:
previous_value = next(iter_values)
except StopIteration:
return
first = True
for value in iter_values:
yield first, False, previous_value
first = False
previous_value = value
yield first, True, previous_value
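# Illustrative usage (a minimal sketch, not part of the vendored module):
if __name__ == "__main__":  # pragma: no cover
    for first, value in loop_first("abc"):
        print(first, value)  # True a / False b / False c
    for first, last, value in loop_first_last("abc"):
        print(first, last, value)  # True False a / False False b / False True c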
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_loop.py
|
Python
|
mit
| 1,236 |
from .palette import Palette
# Taken from https://en.wikipedia.org/wiki/ANSI_escape_code (Windows 10 column)
WINDOWS_PALETTE = Palette(
[
(12, 12, 12),
(197, 15, 31),
(19, 161, 14),
(193, 156, 0),
(0, 55, 218),
(136, 23, 152),
(58, 150, 221),
(204, 204, 204),
(118, 118, 118),
(231, 72, 86),
(22, 198, 12),
(249, 241, 165),
(59, 120, 255),
(180, 0, 158),
(97, 214, 214),
(242, 242, 242),
]
)
# The standard ansi colors (including bright variants)
STANDARD_PALETTE = Palette(
[
(0, 0, 0),
(170, 0, 0),
(0, 170, 0),
(170, 85, 0),
(0, 0, 170),
(170, 0, 170),
(0, 170, 170),
(170, 170, 170),
(85, 85, 85),
(255, 85, 85),
(85, 255, 85),
(255, 255, 85),
(85, 85, 255),
(255, 85, 255),
(85, 255, 255),
(255, 255, 255),
]
)
# The 256 color palette
EIGHT_BIT_PALETTE = Palette(
[
(0, 0, 0),
(128, 0, 0),
(0, 128, 0),
(128, 128, 0),
(0, 0, 128),
(128, 0, 128),
(0, 128, 128),
(192, 192, 192),
(128, 128, 128),
(255, 0, 0),
(0, 255, 0),
(255, 255, 0),
(0, 0, 255),
(255, 0, 255),
(0, 255, 255),
(255, 255, 255),
(0, 0, 0),
(0, 0, 95),
(0, 0, 135),
(0, 0, 175),
(0, 0, 215),
(0, 0, 255),
(0, 95, 0),
(0, 95, 95),
(0, 95, 135),
(0, 95, 175),
(0, 95, 215),
(0, 95, 255),
(0, 135, 0),
(0, 135, 95),
(0, 135, 135),
(0, 135, 175),
(0, 135, 215),
(0, 135, 255),
(0, 175, 0),
(0, 175, 95),
(0, 175, 135),
(0, 175, 175),
(0, 175, 215),
(0, 175, 255),
(0, 215, 0),
(0, 215, 95),
(0, 215, 135),
(0, 215, 175),
(0, 215, 215),
(0, 215, 255),
(0, 255, 0),
(0, 255, 95),
(0, 255, 135),
(0, 255, 175),
(0, 255, 215),
(0, 255, 255),
(95, 0, 0),
(95, 0, 95),
(95, 0, 135),
(95, 0, 175),
(95, 0, 215),
(95, 0, 255),
(95, 95, 0),
(95, 95, 95),
(95, 95, 135),
(95, 95, 175),
(95, 95, 215),
(95, 95, 255),
(95, 135, 0),
(95, 135, 95),
(95, 135, 135),
(95, 135, 175),
(95, 135, 215),
(95, 135, 255),
(95, 175, 0),
(95, 175, 95),
(95, 175, 135),
(95, 175, 175),
(95, 175, 215),
(95, 175, 255),
(95, 215, 0),
(95, 215, 95),
(95, 215, 135),
(95, 215, 175),
(95, 215, 215),
(95, 215, 255),
(95, 255, 0),
(95, 255, 95),
(95, 255, 135),
(95, 255, 175),
(95, 255, 215),
(95, 255, 255),
(135, 0, 0),
(135, 0, 95),
(135, 0, 135),
(135, 0, 175),
(135, 0, 215),
(135, 0, 255),
(135, 95, 0),
(135, 95, 95),
(135, 95, 135),
(135, 95, 175),
(135, 95, 215),
(135, 95, 255),
(135, 135, 0),
(135, 135, 95),
(135, 135, 135),
(135, 135, 175),
(135, 135, 215),
(135, 135, 255),
(135, 175, 0),
(135, 175, 95),
(135, 175, 135),
(135, 175, 175),
(135, 175, 215),
(135, 175, 255),
(135, 215, 0),
(135, 215, 95),
(135, 215, 135),
(135, 215, 175),
(135, 215, 215),
(135, 215, 255),
(135, 255, 0),
(135, 255, 95),
(135, 255, 135),
(135, 255, 175),
(135, 255, 215),
(135, 255, 255),
(175, 0, 0),
(175, 0, 95),
(175, 0, 135),
(175, 0, 175),
(175, 0, 215),
(175, 0, 255),
(175, 95, 0),
(175, 95, 95),
(175, 95, 135),
(175, 95, 175),
(175, 95, 215),
(175, 95, 255),
(175, 135, 0),
(175, 135, 95),
(175, 135, 135),
(175, 135, 175),
(175, 135, 215),
(175, 135, 255),
(175, 175, 0),
(175, 175, 95),
(175, 175, 135),
(175, 175, 175),
(175, 175, 215),
(175, 175, 255),
(175, 215, 0),
(175, 215, 95),
(175, 215, 135),
(175, 215, 175),
(175, 215, 215),
(175, 215, 255),
(175, 255, 0),
(175, 255, 95),
(175, 255, 135),
(175, 255, 175),
(175, 255, 215),
(175, 255, 255),
(215, 0, 0),
(215, 0, 95),
(215, 0, 135),
(215, 0, 175),
(215, 0, 215),
(215, 0, 255),
(215, 95, 0),
(215, 95, 95),
(215, 95, 135),
(215, 95, 175),
(215, 95, 215),
(215, 95, 255),
(215, 135, 0),
(215, 135, 95),
(215, 135, 135),
(215, 135, 175),
(215, 135, 215),
(215, 135, 255),
(215, 175, 0),
(215, 175, 95),
(215, 175, 135),
(215, 175, 175),
(215, 175, 215),
(215, 175, 255),
(215, 215, 0),
(215, 215, 95),
(215, 215, 135),
(215, 215, 175),
(215, 215, 215),
(215, 215, 255),
(215, 255, 0),
(215, 255, 95),
(215, 255, 135),
(215, 255, 175),
(215, 255, 215),
(215, 255, 255),
(255, 0, 0),
(255, 0, 95),
(255, 0, 135),
(255, 0, 175),
(255, 0, 215),
(255, 0, 255),
(255, 95, 0),
(255, 95, 95),
(255, 95, 135),
(255, 95, 175),
(255, 95, 215),
(255, 95, 255),
(255, 135, 0),
(255, 135, 95),
(255, 135, 135),
(255, 135, 175),
(255, 135, 215),
(255, 135, 255),
(255, 175, 0),
(255, 175, 95),
(255, 175, 135),
(255, 175, 175),
(255, 175, 215),
(255, 175, 255),
(255, 215, 0),
(255, 215, 95),
(255, 215, 135),
(255, 215, 175),
(255, 215, 215),
(255, 215, 255),
(255, 255, 0),
(255, 255, 95),
(255, 255, 135),
(255, 255, 175),
(255, 255, 215),
(255, 255, 255),
(8, 8, 8),
(18, 18, 18),
(28, 28, 28),
(38, 38, 38),
(48, 48, 48),
(58, 58, 58),
(68, 68, 68),
(78, 78, 78),
(88, 88, 88),
(98, 98, 98),
(108, 108, 108),
(118, 118, 118),
(128, 128, 128),
(138, 138, 138),
(148, 148, 148),
(158, 158, 158),
(168, 168, 168),
(178, 178, 178),
(188, 188, 188),
(198, 198, 198),
(208, 208, 208),
(218, 218, 218),
(228, 228, 228),
(238, 238, 238),
]
)
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_palettes.py
|
Python
|
mit
| 7,063 |
from typing import Optional
def pick_bool(*values: Optional[bool]) -> bool:
"""Pick the first non-none bool or return the last value.
Args:
*values (bool): Any number of boolean or None values.
Returns:
bool: First non-none boolean.
"""
assert values, "1 or more values required"
for value in values:
if value is not None:
return value
return bool(value)
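# Illustrative usage (a minimal sketch, not part of the vendored module):
if __name__ == "__main__":  # pragma: no cover
    print(pick_bool(None, None, True))   # True: first non-None value wins
    print(pick_bool(None, False, True))  # False
    print(pick_bool(None, None, None))   # False: falls back to bool(None)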
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_pick.py
|
Python
|
mit
| 423 |
import sys
from fractions import Fraction
from math import ceil
from typing import cast, List, Optional, Sequence
if sys.version_info >= (3, 8):
from typing import Protocol
else:
from pip._vendor.typing_extensions import Protocol # pragma: no cover
class Edge(Protocol):
"""Any object that defines an edge (such as Layout)."""
size: Optional[int] = None
ratio: int = 1
minimum_size: int = 1
def ratio_resolve(total: int, edges: Sequence[Edge]) -> List[int]:
"""Divide total space to satisfy size, ratio, and minimum_size, constraints.
The returned list of integers should add up to total in most cases, unless it is
impossible to satisfy all the constraints. For instance, if there are two edges
with a minimum size of 20 each and `total` is 30 then the returned list will be
greater than total. In practice, this would mean that a Layout object would
clip the rows that would overflow the screen height.
Args:
total (int): Total number of characters.
edges (List[Edge]): Edges within total space.
Returns:
List[int]: Number of characters for each edge.
"""
    # Size of edge, or None if yet to be determined
sizes = [(edge.size or None) for edge in edges]
_Fraction = Fraction
# While any edges haven't been calculated
while None in sizes:
# Get flexible edges and index to map these back on to sizes list
flexible_edges = [
(index, edge)
for index, (size, edge) in enumerate(zip(sizes, edges))
if size is None
]
# Remaining space in total
remaining = total - sum(size or 0 for size in sizes)
if remaining <= 0:
# No room for flexible edges
return [
((edge.minimum_size or 1) if size is None else size)
for size, edge in zip(sizes, edges)
]
# Calculate number of characters in a ratio portion
portion = _Fraction(
remaining, sum((edge.ratio or 1) for _, edge in flexible_edges)
)
# If any edges will be less than their minimum, replace size with the minimum
for index, edge in flexible_edges:
if portion * edge.ratio <= edge.minimum_size:
sizes[index] = edge.minimum_size
# New fixed size will invalidate calculations, so we need to repeat the process
break
else:
            # Distribute flexible space and compensate for rounding error.
            # Since edge sizes can only be integers, carry the fractional
            # remainder over to the next edge.
remainder = _Fraction(0)
for index, edge in flexible_edges:
size, remainder = divmod(portion * edge.ratio + remainder, 1)
sizes[index] = size
break
# Sizes now contains integers only
return cast(List[int], sizes)
def ratio_reduce(
total: int, ratios: List[int], maximums: List[int], values: List[int]
) -> List[int]:
"""Divide an integer total in to parts based on ratios.
Args:
total (int): The total to divide.
ratios (List[int]): A list of integer ratios.
maximums (List[int]): List of maximums values for each slot.
values (List[int]): List of values
Returns:
List[int]: A list of integers guaranteed to sum to total.
"""
ratios = [ratio if _max else 0 for ratio, _max in zip(ratios, maximums)]
total_ratio = sum(ratios)
if not total_ratio:
return values[:]
total_remaining = total
result: List[int] = []
append = result.append
for ratio, maximum, value in zip(ratios, maximums, values):
if ratio and total_ratio > 0:
distributed = min(maximum, round(ratio * total_remaining / total_ratio))
append(value - distributed)
total_remaining -= distributed
total_ratio -= ratio
else:
append(value)
return result
def ratio_distribute(
total: int, ratios: List[int], minimums: Optional[List[int]] = None
) -> List[int]:
"""Distribute an integer total in to parts based on ratios.
Args:
total (int): The total to divide.
ratios (List[int]): A list of integer ratios.
minimums (List[int]): List of minimum values for each slot.
Returns:
List[int]: A list of integers guaranteed to sum to total.
"""
if minimums:
ratios = [ratio if _min else 0 for ratio, _min in zip(ratios, minimums)]
total_ratio = sum(ratios)
assert total_ratio > 0, "Sum of ratios must be > 0"
total_remaining = total
distributed_total: List[int] = []
append = distributed_total.append
if minimums is None:
_minimums = [0] * len(ratios)
else:
_minimums = minimums
for ratio, minimum in zip(ratios, _minimums):
if total_ratio > 0:
distributed = max(minimum, ceil(ratio * total_remaining / total_ratio))
else:
distributed = total_remaining
append(distributed)
total_ratio -= ratio
total_remaining -= distributed
return distributed_total
if __name__ == "__main__":
from dataclasses import dataclass
@dataclass
class E:
size: Optional[int] = None
ratio: int = 1
minimum_size: int = 1
resolved = ratio_resolve(110, [E(None, 1, 1), E(None, 1, 1), E(None, 1, 1)])
print(sum(resolved))
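    # Illustrative use of the other helpers (a minimal sketch, not part of the
    # vendored module): both return lists of integers that sum to `total`.
    print(ratio_distribute(10, [1, 2, 2]))             # [2, 4, 4]
    print(ratio_reduce(10, [1, 1], [5, 5], [10, 10]))  # [5, 5]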
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_ratio.py
|
Python
|
mit
| 5,472 |
"""
Spinners are from:
* cli-spinners:
MIT License
Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (sindresorhus.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
SPINNERS = {
"dots": {
"interval": 80,
"frames": "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏",
},
"dots2": {"interval": 80, "frames": "⣾⣽⣻⢿⡿⣟⣯⣷"},
"dots3": {
"interval": 80,
"frames": "⠋⠙⠚⠞⠖⠦⠴⠲⠳⠓",
},
"dots4": {
"interval": 80,
"frames": "⠄⠆⠇⠋⠙⠸⠰⠠⠰⠸⠙⠋⠇⠆",
},
"dots5": {
"interval": 80,
"frames": "⠋⠙⠚⠒⠂⠂⠒⠲⠴⠦⠖⠒⠐⠐⠒⠓⠋",
},
"dots6": {
"interval": 80,
"frames": "⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠴⠲⠒⠂⠂⠒⠚⠙⠉⠁",
},
"dots7": {
"interval": 80,
"frames": "⠈⠉⠋⠓⠒⠐⠐⠒⠖⠦⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈",
},
"dots8": {
"interval": 80,
"frames": "⠁⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈⠈",
},
"dots9": {"interval": 80, "frames": "⢹⢺⢼⣸⣇⡧⡗⡏"},
"dots10": {"interval": 80, "frames": "⢄⢂⢁⡁⡈⡐⡠"},
"dots11": {"interval": 100, "frames": "⠁⠂⠄⡀⢀⠠⠐⠈"},
"dots12": {
"interval": 80,
"frames": [
"⢀⠀",
"⡀⠀",
"⠄⠀",
"⢂⠀",
"⡂⠀",
"⠅⠀",
"⢃⠀",
"⡃⠀",
"⠍⠀",
"⢋⠀",
"⡋⠀",
"⠍⠁",
"⢋⠁",
"⡋⠁",
"⠍⠉",
"⠋⠉",
"⠋⠉",
"⠉⠙",
"⠉⠙",
"⠉⠩",
"⠈⢙",
"⠈⡙",
"⢈⠩",
"⡀⢙",
"⠄⡙",
"⢂⠩",
"⡂⢘",
"⠅⡘",
"⢃⠨",
"⡃⢐",
"⠍⡐",
"⢋⠠",
"⡋⢀",
"⠍⡁",
"⢋⠁",
"⡋⠁",
"⠍⠉",
"⠋⠉",
"⠋⠉",
"⠉⠙",
"⠉⠙",
"⠉⠩",
"⠈⢙",
"⠈⡙",
"⠈⠩",
"⠀⢙",
"⠀⡙",
"⠀⠩",
"⠀⢘",
"⠀⡘",
"⠀⠨",
"⠀⢐",
"⠀⡐",
"⠀⠠",
"⠀⢀",
"⠀⡀",
],
},
"dots8Bit": {
"interval": 80,
"frames": "⠀⠁⠂⠃⠄⠅⠆⠇⡀⡁⡂⡃⡄⡅⡆⡇⠈⠉⠊⠋⠌⠍⠎⠏⡈⡉⡊⡋⡌⡍⡎⡏⠐⠑⠒⠓⠔⠕⠖⠗⡐⡑⡒⡓⡔⡕⡖⡗⠘⠙⠚⠛⠜⠝⠞⠟⡘⡙"
"⡚⡛⡜⡝⡞⡟⠠⠡⠢⠣⠤⠥⠦⠧⡠⡡⡢⡣⡤⡥⡦⡧⠨⠩⠪⠫⠬⠭⠮⠯⡨⡩⡪⡫⡬⡭⡮⡯⠰⠱⠲⠳⠴⠵⠶⠷⡰⡱⡲⡳⡴⡵⡶⡷⠸⠹⠺⠻"
"⠼⠽⠾⠿⡸⡹⡺⡻⡼⡽⡾⡿⢀⢁⢂⢃⢄⢅⢆⢇⣀⣁⣂⣃⣄⣅⣆⣇⢈⢉⢊⢋⢌⢍⢎⢏⣈⣉⣊⣋⣌⣍⣎⣏⢐⢑⢒⢓⢔⢕⢖⢗⣐⣑⣒⣓⣔⣕"
"⣖⣗⢘⢙⢚⢛⢜⢝⢞⢟⣘⣙⣚⣛⣜⣝⣞⣟⢠⢡⢢⢣⢤⢥⢦⢧⣠⣡⣢⣣⣤⣥⣦⣧⢨⢩⢪⢫⢬⢭⢮⢯⣨⣩⣪⣫⣬⣭⣮⣯⢰⢱⢲⢳⢴⢵⢶⢷"
"⣰⣱⣲⣳⣴⣵⣶⣷⢸⢹⢺⢻⢼⢽⢾⢿⣸⣹⣺⣻⣼⣽⣾⣿",
},
"line": {"interval": 130, "frames": ["-", "\\", "|", "/"]},
"line2": {"interval": 100, "frames": "⠂-–—–-"},
"pipe": {"interval": 100, "frames": "┤┘┴└├┌┬┐"},
"simpleDots": {"interval": 400, "frames": [". ", ".. ", "...", " "]},
"simpleDotsScrolling": {
"interval": 200,
"frames": [". ", ".. ", "...", " ..", " .", " "],
},
"star": {"interval": 70, "frames": "✶✸✹✺✹✷"},
"star2": {"interval": 80, "frames": "+x*"},
"flip": {
"interval": 70,
"frames": "___-``'´-___",
},
"hamburger": {"interval": 100, "frames": "☱☲☴"},
"growVertical": {
"interval": 120,
"frames": "▁▃▄▅▆▇▆▅▄▃",
},
"growHorizontal": {
"interval": 120,
"frames": "▏▎▍▌▋▊▉▊▋▌▍▎",
},
"balloon": {"interval": 140, "frames": " .oO@* "},
"balloon2": {"interval": 120, "frames": ".oO°Oo."},
"noise": {"interval": 100, "frames": "▓▒░"},
"bounce": {"interval": 120, "frames": "⠁⠂⠄⠂"},
"boxBounce": {"interval": 120, "frames": "▖▘▝▗"},
"boxBounce2": {"interval": 100, "frames": "▌▀▐▄"},
"triangle": {"interval": 50, "frames": "◢◣◤◥"},
"arc": {"interval": 100, "frames": "◜◠◝◞◡◟"},
"circle": {"interval": 120, "frames": "◡⊙◠"},
"squareCorners": {"interval": 180, "frames": "◰◳◲◱"},
"circleQuarters": {"interval": 120, "frames": "◴◷◶◵"},
"circleHalves": {"interval": 50, "frames": "◐◓◑◒"},
"squish": {"interval": 100, "frames": "╫╪"},
"toggle": {"interval": 250, "frames": "⊶⊷"},
"toggle2": {"interval": 80, "frames": "▫▪"},
"toggle3": {"interval": 120, "frames": "□■"},
"toggle4": {"interval": 100, "frames": "■□▪▫"},
"toggle5": {"interval": 100, "frames": "▮▯"},
"toggle6": {"interval": 300, "frames": "ဝ၀"},
"toggle7": {"interval": 80, "frames": "⦾⦿"},
"toggle8": {"interval": 100, "frames": "◍◌"},
"toggle9": {"interval": 100, "frames": "◉◎"},
"toggle10": {"interval": 100, "frames": "㊂㊀㊁"},
"toggle11": {"interval": 50, "frames": "⧇⧆"},
"toggle12": {"interval": 120, "frames": "☗☖"},
"toggle13": {"interval": 80, "frames": "=*-"},
"arrow": {"interval": 100, "frames": "←↖↑↗→↘↓↙"},
"arrow2": {
"interval": 80,
"frames": ["⬆️ ", "↗️ ", "➡️ ", "↘️ ", "⬇️ ", "↙️ ", "⬅️ ", "↖️ "],
},
"arrow3": {
"interval": 120,
"frames": ["▹▹▹▹▹", "▸▹▹▹▹", "▹▸▹▹▹", "▹▹▸▹▹", "▹▹▹▸▹", "▹▹▹▹▸"],
},
"bouncingBar": {
"interval": 80,
"frames": [
"[ ]",
"[= ]",
"[== ]",
"[=== ]",
"[ ===]",
"[ ==]",
"[ =]",
"[ ]",
"[ =]",
"[ ==]",
"[ ===]",
"[====]",
"[=== ]",
"[== ]",
"[= ]",
],
},
"bouncingBall": {
"interval": 80,
"frames": [
"( ● )",
"( ● )",
"( ● )",
"( ● )",
"( ●)",
"( ● )",
"( ● )",
"( ● )",
"( ● )",
"(● )",
],
},
"smiley": {"interval": 200, "frames": ["😄 ", "😝 "]},
"monkey": {"interval": 300, "frames": ["🙈 ", "🙈 ", "🙉 ", "🙊 "]},
"hearts": {"interval": 100, "frames": ["💛 ", "💙 ", "💜 ", "💚 ", "❤️ "]},
"clock": {
"interval": 100,
"frames": [
"🕛 ",
"🕐 ",
"🕑 ",
"🕒 ",
"🕓 ",
"🕔 ",
"🕕 ",
"🕖 ",
"🕗 ",
"🕘 ",
"🕙 ",
"🕚 ",
],
},
"earth": {"interval": 180, "frames": ["🌍 ", "🌎 ", "🌏 "]},
"material": {
"interval": 17,
"frames": [
"█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
"██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
"███▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
"████▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
"██████▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
"██████▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
"███████▁▁▁▁▁▁▁▁▁▁▁▁▁",
"████████▁▁▁▁▁▁▁▁▁▁▁▁",
"█████████▁▁▁▁▁▁▁▁▁▁▁",
"█████████▁▁▁▁▁▁▁▁▁▁▁",
"██████████▁▁▁▁▁▁▁▁▁▁",
"███████████▁▁▁▁▁▁▁▁▁",
"█████████████▁▁▁▁▁▁▁",
"██████████████▁▁▁▁▁▁",
"██████████████▁▁▁▁▁▁",
"▁██████████████▁▁▁▁▁",
"▁██████████████▁▁▁▁▁",
"▁██████████████▁▁▁▁▁",
"▁▁██████████████▁▁▁▁",
"▁▁▁██████████████▁▁▁",
"▁▁▁▁█████████████▁▁▁",
"▁▁▁▁██████████████▁▁",
"▁▁▁▁██████████████▁▁",
"▁▁▁▁▁██████████████▁",
"▁▁▁▁▁██████████████▁",
"▁▁▁▁▁██████████████▁",
"▁▁▁▁▁▁██████████████",
"▁▁▁▁▁▁██████████████",
"▁▁▁▁▁▁▁█████████████",
"▁▁▁▁▁▁▁█████████████",
"▁▁▁▁▁▁▁▁████████████",
"▁▁▁▁▁▁▁▁████████████",
"▁▁▁▁▁▁▁▁▁███████████",
"▁▁▁▁▁▁▁▁▁███████████",
"▁▁▁▁▁▁▁▁▁▁██████████",
"▁▁▁▁▁▁▁▁▁▁██████████",
"▁▁▁▁▁▁▁▁▁▁▁▁████████",
"▁▁▁▁▁▁▁▁▁▁▁▁▁███████",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁██████",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█████",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█████",
"█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████",
"██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███",
"██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███",
"███▁▁▁▁▁▁▁▁▁▁▁▁▁▁███",
"████▁▁▁▁▁▁▁▁▁▁▁▁▁▁██",
"█████▁▁▁▁▁▁▁▁▁▁▁▁▁▁█",
"█████▁▁▁▁▁▁▁▁▁▁▁▁▁▁█",
"██████▁▁▁▁▁▁▁▁▁▁▁▁▁█",
"████████▁▁▁▁▁▁▁▁▁▁▁▁",
"█████████▁▁▁▁▁▁▁▁▁▁▁",
"█████████▁▁▁▁▁▁▁▁▁▁▁",
"█████████▁▁▁▁▁▁▁▁▁▁▁",
"█████████▁▁▁▁▁▁▁▁▁▁▁",
"███████████▁▁▁▁▁▁▁▁▁",
"████████████▁▁▁▁▁▁▁▁",
"████████████▁▁▁▁▁▁▁▁",
"██████████████▁▁▁▁▁▁",
"██████████████▁▁▁▁▁▁",
"▁██████████████▁▁▁▁▁",
"▁██████████████▁▁▁▁▁",
"▁▁▁█████████████▁▁▁▁",
"▁▁▁▁▁████████████▁▁▁",
"▁▁▁▁▁████████████▁▁▁",
"▁▁▁▁▁▁███████████▁▁▁",
"▁▁▁▁▁▁▁▁█████████▁▁▁",
"▁▁▁▁▁▁▁▁█████████▁▁▁",
"▁▁▁▁▁▁▁▁▁█████████▁▁",
"▁▁▁▁▁▁▁▁▁█████████▁▁",
"▁▁▁▁▁▁▁▁▁▁█████████▁",
"▁▁▁▁▁▁▁▁▁▁▁████████▁",
"▁▁▁▁▁▁▁▁▁▁▁████████▁",
"▁▁▁▁▁▁▁▁▁▁▁▁███████▁",
"▁▁▁▁▁▁▁▁▁▁▁▁███████▁",
"▁▁▁▁▁▁▁▁▁▁▁▁▁███████",
"▁▁▁▁▁▁▁▁▁▁▁▁▁███████",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█████",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁██",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁██",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁██",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
"▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
],
},
"moon": {
"interval": 80,
"frames": ["🌑 ", "🌒 ", "🌓 ", "🌔 ", "🌕 ", "🌖 ", "🌗 ", "🌘 "],
},
"runner": {"interval": 140, "frames": ["🚶 ", "🏃 "]},
"pong": {
"interval": 80,
"frames": [
"▐⠂ ▌",
"▐⠈ ▌",
"▐ ⠂ ▌",
"▐ ⠠ ▌",
"▐ ⡀ ▌",
"▐ ⠠ ▌",
"▐ ⠂ ▌",
"▐ ⠈ ▌",
"▐ ⠂ ▌",
"▐ ⠠ ▌",
"▐ ⡀ ▌",
"▐ ⠠ ▌",
"▐ ⠂ ▌",
"▐ ⠈ ▌",
"▐ ⠂▌",
"▐ ⠠▌",
"▐ ⡀▌",
"▐ ⠠ ▌",
"▐ ⠂ ▌",
"▐ ⠈ ▌",
"▐ ⠂ ▌",
"▐ ⠠ ▌",
"▐ ⡀ ▌",
"▐ ⠠ ▌",
"▐ ⠂ ▌",
"▐ ⠈ ▌",
"▐ ⠂ ▌",
"▐ ⠠ ▌",
"▐ ⡀ ▌",
"▐⠠ ▌",
],
},
"shark": {
"interval": 120,
"frames": [
"▐|\\____________▌",
"▐_|\\___________▌",
"▐__|\\__________▌",
"▐___|\\_________▌",
"▐____|\\________▌",
"▐_____|\\_______▌",
"▐______|\\______▌",
"▐_______|\\_____▌",
"▐________|\\____▌",
"▐_________|\\___▌",
"▐__________|\\__▌",
"▐___________|\\_▌",
"▐____________|\\▌",
"▐____________/|▌",
"▐___________/|_▌",
"▐__________/|__▌",
"▐_________/|___▌",
"▐________/|____▌",
"▐_______/|_____▌",
"▐______/|______▌",
"▐_____/|_______▌",
"▐____/|________▌",
"▐___/|_________▌",
"▐__/|__________▌",
"▐_/|___________▌",
"▐/|____________▌",
],
},
"dqpb": {"interval": 100, "frames": "dqpb"},
"weather": {
"interval": 100,
"frames": [
"☀️ ",
"☀️ ",
"☀️ ",
"🌤 ",
"⛅️ ",
"🌥 ",
"☁️ ",
"🌧 ",
"🌨 ",
"🌧 ",
"🌨 ",
"🌧 ",
"🌨 ",
"⛈ ",
"🌨 ",
"🌧 ",
"🌨 ",
"☁️ ",
"🌥 ",
"⛅️ ",
"🌤 ",
"☀️ ",
"☀️ ",
],
},
"christmas": {"interval": 400, "frames": "🌲🎄"},
"grenade": {
"interval": 80,
"frames": [
"، ",
"′ ",
" ´ ",
" ‾ ",
" ⸌",
" ⸊",
" |",
" ⁎",
" ⁕",
" ෴ ",
" ⁓",
" ",
" ",
" ",
],
},
"point": {"interval": 125, "frames": ["∙∙∙", "●∙∙", "∙●∙", "∙∙●", "∙∙∙"]},
"layer": {"interval": 150, "frames": "-=≡"},
"betaWave": {
"interval": 80,
"frames": [
"ρββββββ",
"βρβββββ",
"ββρββββ",
"βββρβββ",
"ββββρββ",
"βββββρβ",
"ββββββρ",
],
},
"aesthetic": {
"interval": 80,
"frames": [
"▰▱▱▱▱▱▱",
"▰▰▱▱▱▱▱",
"▰▰▰▱▱▱▱",
"▰▰▰▰▱▱▱",
"▰▰▰▰▰▱▱",
"▰▰▰▰▰▰▱",
"▰▰▰▰▰▰▰",
"▰▱▱▱▱▱▱",
],
},
}
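# Illustrative usage (a minimal sketch, not part of the vendored module):
# each entry pairs an interval in milliseconds with a string or list of frames.
if __name__ == "__main__":  # pragma: no cover
    dots = SPINNERS["dots"]
    for frame in dots["frames"][:4]:
        print(frame, end=" ")
    print(f"(interval={dots['interval']}ms)")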
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_spinners.py
|
Python
|
mit
| 19,919 |
from typing import List, TypeVar
T = TypeVar("T")
class Stack(List[T]):
"""A small shim over builtin list."""
@property
def top(self) -> T:
"""Get top of stack."""
return self[-1]
def push(self, item: T) -> None:
"""Push an item on to the stack (append in stack nomenclature)."""
self.append(item)
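# Illustrative usage (a minimal sketch, not part of the vendored module):
if __name__ == "__main__":  # pragma: no cover
    stack: Stack[int] = Stack()
    stack.push(1)
    stack.push(2)
    print(stack.top)  # 2; the underlying list is [1, 2]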
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_stack.py
|
Python
|
mit
| 351 |
"""
Timer context manager, only used in debug.
"""
from time import time
import contextlib
from typing import Generator
@contextlib.contextmanager
def timer(subject: str = "time") -> Generator[None, None, None]:
"""print the elapsed time. (only used in debugging)"""
start = time()
yield
elapsed = time() - start
elapsed_ms = elapsed * 1000
print(f"{subject} elapsed {elapsed_ms:.1f}ms")
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_timer.py
|
Python
|
mit
| 417 |
"""Light wrapper around the Win32 Console API - this module should only be imported on Windows
The API that this module wraps is documented at https://docs.microsoft.com/en-us/windows/console/console-functions
"""
import ctypes
import sys
from typing import Any
windll: Any = None
if sys.platform == "win32":
windll = ctypes.LibraryLoader(ctypes.WinDLL)
else:
raise ImportError(f"{__name__} can only be imported on Windows")
import time
from ctypes import Structure, byref, wintypes
from typing import IO, NamedTuple, Type, cast
from pip._vendor.rich.color import ColorSystem
from pip._vendor.rich.style import Style
STDOUT = -11
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
COORD = wintypes._COORD
class LegacyWindowsError(Exception):
pass
class WindowsCoordinates(NamedTuple):
"""Coordinates in the Windows Console API are (y, x), not (x, y).
This class is intended to prevent that confusion.
Rows and columns are indexed from 0.
This class can be used in place of wintypes._COORD in arguments and argtypes.
"""
row: int
col: int
@classmethod
def from_param(cls, value: "WindowsCoordinates") -> COORD:
"""Converts a WindowsCoordinates into a wintypes _COORD structure.
This classmethod is internally called by ctypes to perform the conversion.
Args:
value (WindowsCoordinates): The input coordinates to convert.
Returns:
wintypes._COORD: The converted coordinates struct.
"""
return COORD(value.col, value.row)
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", wintypes.WORD),
("srWindow", wintypes.SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
class CONSOLE_CURSOR_INFO(ctypes.Structure):
_fields_ = [("dwSize", wintypes.DWORD), ("bVisible", wintypes.BOOL)]
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
def GetStdHandle(handle: int = STDOUT) -> wintypes.HANDLE:
"""Retrieves a handle to the specified standard device (standard input, standard output, or standard error).
Args:
handle (int): Integer identifier for the handle. Defaults to -11 (stdout).
Returns:
wintypes.HANDLE: The handle
"""
return cast(wintypes.HANDLE, _GetStdHandle(handle))
_GetConsoleMode = windll.kernel32.GetConsoleMode
_GetConsoleMode.argtypes = [wintypes.HANDLE, wintypes.LPDWORD]
_GetConsoleMode.restype = wintypes.BOOL
def GetConsoleMode(std_handle: wintypes.HANDLE) -> int:
"""Retrieves the current input mode of a console's input buffer
or the current output mode of a console screen buffer.
Args:
std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
Raises:
LegacyWindowsError: If any error occurs while calling the Windows console API.
Returns:
int: Value representing the current console mode as documented at
https://docs.microsoft.com/en-us/windows/console/getconsolemode#parameters
"""
console_mode = wintypes.DWORD()
success = bool(_GetConsoleMode(std_handle, console_mode))
if not success:
raise LegacyWindowsError("Unable to get legacy Windows Console Mode")
return console_mode.value
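# Illustrative helper (a minimal sketch with a hypothetical name, not part of
# the vendored module): GetConsoleMode is typically used to test for ANSI
# support by checking the ENABLE_VIRTUAL_TERMINAL_PROCESSING bit.
def _console_supports_vt(std_handle: wintypes.HANDLE) -> bool:
    try:
        return bool(GetConsoleMode(std_handle) & ENABLE_VIRTUAL_TERMINAL_PROCESSING)
    except LegacyWindowsError:
        return False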
_FillConsoleOutputCharacterW = windll.kernel32.FillConsoleOutputCharacterW
_FillConsoleOutputCharacterW.argtypes = [
wintypes.HANDLE,
ctypes.c_char,
wintypes.DWORD,
cast(Type[COORD], WindowsCoordinates),
ctypes.POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterW.restype = wintypes.BOOL
def FillConsoleOutputCharacter(
std_handle: wintypes.HANDLE,
char: str,
length: int,
start: WindowsCoordinates,
) -> int:
"""Writes a character to the console screen buffer a specified number of times, beginning at the specified coordinates.
Args:
std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
char (str): The character to write. Must be a string of length 1.
length (int): The number of times to write the character.
start (WindowsCoordinates): The coordinates to start writing at.
Returns:
int: The number of characters written.
"""
character = ctypes.c_char(char.encode())
num_characters = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
_FillConsoleOutputCharacterW(
std_handle,
character,
num_characters,
start,
byref(num_written),
)
return num_written.value
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
cast(Type[COORD], WindowsCoordinates),
ctypes.POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
def FillConsoleOutputAttribute(
std_handle: wintypes.HANDLE,
attributes: int,
length: int,
start: WindowsCoordinates,
) -> int:
"""Sets the character attributes for a specified number of character cells,
beginning at the specified coordinates in a screen buffer.
Args:
std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
attributes (int): Integer value representing the foreground and background colours of the cells.
length (int): The number of cells to set the output attribute of.
start (WindowsCoordinates): The coordinates of the first cell whose attributes are to be set.
Returns:
int: The number of cells whose attributes were actually set.
"""
num_cells = wintypes.DWORD(length)
style_attrs = wintypes.WORD(attributes)
num_written = wintypes.DWORD(0)
_FillConsoleOutputAttribute(
std_handle, style_attrs, num_cells, start, byref(num_written)
)
return num_written.value
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
def SetConsoleTextAttribute(
std_handle: wintypes.HANDLE, attributes: wintypes.WORD
) -> bool:
"""Set the colour attributes for all text written after this function is called.
Args:
std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
attributes (int): Integer value representing the foreground and background colours.
Returns:
bool: True if the attribute was set successfully, otherwise False.
"""
return bool(_SetConsoleTextAttribute(std_handle, attributes))
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
def GetConsoleScreenBufferInfo(
std_handle: wintypes.HANDLE,
) -> CONSOLE_SCREEN_BUFFER_INFO:
"""Retrieves information about the specified console screen buffer.
Args:
std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
Returns:
        CONSOLE_SCREEN_BUFFER_INFO: A CONSOLE_SCREEN_BUFFER_INFO ctype struct containing information about
        screen size, cursor position, colour attributes, and more."""
console_screen_buffer_info = CONSOLE_SCREEN_BUFFER_INFO()
_GetConsoleScreenBufferInfo(std_handle, byref(console_screen_buffer_info))
return console_screen_buffer_info
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
cast(Type[COORD], WindowsCoordinates),
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
def SetConsoleCursorPosition(
std_handle: wintypes.HANDLE, coords: WindowsCoordinates
) -> bool:
"""Set the position of the cursor in the console screen
Args:
std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
coords (WindowsCoordinates): The coordinates to move the cursor to.
Returns:
bool: True if the function succeeds, otherwise False.
"""
return bool(_SetConsoleCursorPosition(std_handle, coords))
_GetConsoleCursorInfo = windll.kernel32.GetConsoleCursorInfo
_GetConsoleCursorInfo.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(CONSOLE_CURSOR_INFO),
]
_GetConsoleCursorInfo.restype = wintypes.BOOL
def GetConsoleCursorInfo(
std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO
) -> bool:
"""Get the cursor info - used to get cursor visibility and width
Args:
std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct that receives information
about the console's cursor.
Returns:
bool: True if the function succeeds, otherwise False.
"""
return bool(_GetConsoleCursorInfo(std_handle, byref(cursor_info)))
_SetConsoleCursorInfo = windll.kernel32.SetConsoleCursorInfo
_SetConsoleCursorInfo.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(CONSOLE_CURSOR_INFO),
]
_SetConsoleCursorInfo.restype = wintypes.BOOL
def SetConsoleCursorInfo(
std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO
) -> bool:
"""Set the cursor info - used for adjusting cursor visibility and width
Args:
std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct containing the new cursor info.
Returns:
bool: True if the function succeeds, otherwise False.
"""
return bool(_SetConsoleCursorInfo(std_handle, byref(cursor_info)))
_SetConsoleTitle = windll.kernel32.SetConsoleTitleW
_SetConsoleTitle.argtypes = [wintypes.LPCWSTR]
_SetConsoleTitle.restype = wintypes.BOOL
def SetConsoleTitle(title: str) -> bool:
"""Sets the title of the current console window
Args:
title (str): The new title of the console window.
Returns:
bool: True if the function succeeds, otherwise False.
"""
return bool(_SetConsoleTitle(title))
class LegacyWindowsTerm:
"""This class allows interaction with the legacy Windows Console API. It should only be used in the context
of environments where virtual terminal processing is not available. However, if it is used in a Windows environment,
the entire API should work.
Args:
file (IO[str]): The file which the Windows Console API HANDLE is retrieved from, defaults to sys.stdout.
"""
BRIGHT_BIT = 8
# Indices are ANSI color numbers, values are the corresponding Windows Console API color numbers
ANSI_TO_WINDOWS = [
0, # black The Windows colours are defined in wincon.h as follows:
4, # red define FOREGROUND_BLUE 0x0001 -- 0000 0001
2, # green define FOREGROUND_GREEN 0x0002 -- 0000 0010
6, # yellow define FOREGROUND_RED 0x0004 -- 0000 0100
1, # blue define FOREGROUND_INTENSITY 0x0008 -- 0000 1000
5, # magenta define BACKGROUND_BLUE 0x0010 -- 0001 0000
3, # cyan define BACKGROUND_GREEN 0x0020 -- 0010 0000
7, # white define BACKGROUND_RED 0x0040 -- 0100 0000
8, # bright black (grey) define BACKGROUND_INTENSITY 0x0080 -- 1000 0000
12, # bright red
10, # bright green
14, # bright yellow
9, # bright blue
13, # bright magenta
11, # bright cyan
15, # bright white
]
def __init__(self, file: "IO[str]") -> None:
handle = GetStdHandle(STDOUT)
self._handle = handle
default_text = GetConsoleScreenBufferInfo(handle).wAttributes
self._default_text = default_text
self._default_fore = default_text & 7
self._default_back = (default_text >> 4) & 7
self._default_attrs = self._default_fore | (self._default_back << 4)
self._file = file
self.write = file.write
self.flush = file.flush
@property
def cursor_position(self) -> WindowsCoordinates:
"""Returns the current position of the cursor (0-based)
Returns:
WindowsCoordinates: The current cursor position.
"""
coord: COORD = GetConsoleScreenBufferInfo(self._handle).dwCursorPosition
return WindowsCoordinates(row=cast(int, coord.Y), col=cast(int, coord.X))
@property
def screen_size(self) -> WindowsCoordinates:
"""Returns the current size of the console screen buffer, in character columns and rows
Returns:
WindowsCoordinates: The width and height of the screen as WindowsCoordinates.
"""
screen_size: COORD = GetConsoleScreenBufferInfo(self._handle).dwSize
return WindowsCoordinates(
row=cast(int, screen_size.Y), col=cast(int, screen_size.X)
)
def write_text(self, text: str) -> None:
"""Write text directly to the terminal without any modification of styles
Args:
text (str): The text to write to the console
"""
self.write(text)
self.flush()
def write_styled(self, text: str, style: Style) -> None:
"""Write styled text to the terminal.
Args:
text (str): The text to write
style (Style): The style of the text
"""
color = style.color
bgcolor = style.bgcolor
if style.reverse:
color, bgcolor = bgcolor, color
if color:
fore = color.downgrade(ColorSystem.WINDOWS).number
fore = fore if fore is not None else 7 # Default to ANSI 7: White
if style.bold:
fore = fore | self.BRIGHT_BIT
if style.dim:
fore = fore & ~self.BRIGHT_BIT
fore = self.ANSI_TO_WINDOWS[fore]
else:
fore = self._default_fore
if bgcolor:
back = bgcolor.downgrade(ColorSystem.WINDOWS).number
back = back if back is not None else 0 # Default to ANSI 0: Black
back = self.ANSI_TO_WINDOWS[back]
else:
back = self._default_back
assert fore is not None
assert back is not None
SetConsoleTextAttribute(
self._handle, attributes=ctypes.c_ushort(fore | (back << 4))
)
self.write_text(text)
SetConsoleTextAttribute(self._handle, attributes=self._default_text)
def move_cursor_to(self, new_position: WindowsCoordinates) -> None:
"""Set the position of the cursor
Args:
new_position (WindowsCoordinates): The WindowsCoordinates representing the new position of the cursor.
"""
if new_position.col < 0 or new_position.row < 0:
return
SetConsoleCursorPosition(self._handle, coords=new_position)
def erase_line(self) -> None:
"""Erase all content on the line the cursor is currently located at"""
screen_size = self.screen_size
cursor_position = self.cursor_position
cells_to_erase = screen_size.col
start_coordinates = WindowsCoordinates(row=cursor_position.row, col=0)
FillConsoleOutputCharacter(
self._handle, " ", length=cells_to_erase, start=start_coordinates
)
FillConsoleOutputAttribute(
self._handle,
self._default_attrs,
length=cells_to_erase,
start=start_coordinates,
)
def erase_end_of_line(self) -> None:
"""Erase all content from the cursor position to the end of that line"""
cursor_position = self.cursor_position
cells_to_erase = self.screen_size.col - cursor_position.col
FillConsoleOutputCharacter(
self._handle, " ", length=cells_to_erase, start=cursor_position
)
FillConsoleOutputAttribute(
self._handle,
self._default_attrs,
length=cells_to_erase,
start=cursor_position,
)
def erase_start_of_line(self) -> None:
"""Erase all content from the cursor position to the start of that line"""
row, col = self.cursor_position
start = WindowsCoordinates(row, 0)
FillConsoleOutputCharacter(self._handle, " ", length=col, start=start)
FillConsoleOutputAttribute(
self._handle, self._default_attrs, length=col, start=start
)
def move_cursor_up(self) -> None:
"""Move the cursor up a single cell"""
cursor_position = self.cursor_position
SetConsoleCursorPosition(
self._handle,
coords=WindowsCoordinates(
row=cursor_position.row - 1, col=cursor_position.col
),
)
def move_cursor_down(self) -> None:
"""Move the cursor down a single cell"""
cursor_position = self.cursor_position
SetConsoleCursorPosition(
self._handle,
coords=WindowsCoordinates(
row=cursor_position.row + 1,
col=cursor_position.col,
),
)
def move_cursor_forward(self) -> None:
"""Move the cursor forward a single cell. Wrap to the next line if required."""
row, col = self.cursor_position
if col == self.screen_size.col - 1:
row += 1
col = 0
else:
col += 1
SetConsoleCursorPosition(
self._handle, coords=WindowsCoordinates(row=row, col=col)
)
def move_cursor_to_column(self, column: int) -> None:
"""Move cursor to the column specified by the zero-based column index, staying on the same row
Args:
column (int): The zero-based column index to move the cursor to.
"""
row, _ = self.cursor_position
SetConsoleCursorPosition(self._handle, coords=WindowsCoordinates(row, column))
def move_cursor_backward(self) -> None:
"""Move the cursor backward a single cell. Wrap to the previous line if required."""
row, col = self.cursor_position
if col == 0:
row -= 1
col = self.screen_size.col - 1
else:
col -= 1
SetConsoleCursorPosition(
self._handle, coords=WindowsCoordinates(row=row, col=col)
)
def hide_cursor(self) -> None:
"""Hide the cursor"""
current_cursor_size = self._get_cursor_size()
invisible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=0)
SetConsoleCursorInfo(self._handle, cursor_info=invisible_cursor)
def show_cursor(self) -> None:
"""Show the cursor"""
current_cursor_size = self._get_cursor_size()
visible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=1)
SetConsoleCursorInfo(self._handle, cursor_info=visible_cursor)
def set_title(self, title: str) -> None:
"""Set the title of the terminal window
Args:
title (str): The new title of the console window
"""
assert len(title) < 255, "Console title must be less than 255 characters"
SetConsoleTitle(title)
def _get_cursor_size(self) -> int:
"""Get the percentage of the character cell that is filled by the cursor"""
cursor_info = CONSOLE_CURSOR_INFO()
GetConsoleCursorInfo(self._handle, cursor_info=cursor_info)
return int(cursor_info.dwSize)
if __name__ == "__main__":
handle = GetStdHandle()
from pip._vendor.rich.console import Console
console = Console()
term = LegacyWindowsTerm(sys.stdout)
term.set_title("Win32 Console Examples")
style = Style(color="black", bgcolor="red")
heading = Style.parse("black on green")
# Check colour output
console.rule("Checking colour output")
console.print("[on red]on red!")
console.print("[blue]blue!")
console.print("[yellow]yellow!")
console.print("[bold yellow]bold yellow!")
console.print("[bright_yellow]bright_yellow!")
console.print("[dim bright_yellow]dim bright_yellow!")
console.print("[italic cyan]italic cyan!")
console.print("[bold white on blue]bold white on blue!")
console.print("[reverse bold white on blue]reverse bold white on blue!")
console.print("[bold black on cyan]bold black on cyan!")
console.print("[black on green]black on green!")
console.print("[blue on green]blue on green!")
console.print("[white on black]white on black!")
console.print("[black on white]black on white!")
console.print("[#1BB152 on #DA812D]#1BB152 on #DA812D!")
# Check cursor movement
console.rule("Checking cursor movement")
console.print()
term.move_cursor_backward()
term.move_cursor_backward()
term.write_text("went back and wrapped to prev line")
time.sleep(1)
term.move_cursor_up()
term.write_text("we go up")
time.sleep(1)
term.move_cursor_down()
term.write_text("and down")
time.sleep(1)
term.move_cursor_up()
term.move_cursor_backward()
term.move_cursor_backward()
term.write_text("we went up and back 2")
time.sleep(1)
term.move_cursor_down()
term.move_cursor_backward()
term.move_cursor_backward()
term.write_text("we went down and back 2")
time.sleep(1)
# Check erasing of lines
term.hide_cursor()
console.print()
console.rule("Checking line erasing")
console.print("\n...Deleting to the start of the line...")
term.write_text("The red arrow shows the cursor location, and direction of erase")
time.sleep(1)
term.move_cursor_to_column(16)
term.write_styled("<", Style.parse("black on red"))
term.move_cursor_backward()
time.sleep(1)
term.erase_start_of_line()
time.sleep(1)
console.print("\n\n...And to the end of the line...")
term.write_text("The red arrow shows the cursor location, and direction of erase")
time.sleep(1)
term.move_cursor_to_column(16)
term.write_styled(">", Style.parse("black on red"))
time.sleep(1)
term.erase_end_of_line()
time.sleep(1)
console.print("\n\n...Now the whole line will be erased...")
term.write_styled("I'm going to disappear!", style=Style.parse("black on cyan"))
time.sleep(1)
term.erase_line()
term.show_cursor()
print("\n")
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_win32_console.py
|
Python
|
mit
| 22,820 |
import sys
from dataclasses import dataclass
@dataclass
class WindowsConsoleFeatures:
"""Windows features available."""
vt: bool = False
"""The console supports VT codes."""
truecolor: bool = False
"""The console supports truecolor."""
try:
import ctypes
from ctypes import LibraryLoader
if sys.platform == "win32":
windll = LibraryLoader(ctypes.WinDLL)
else:
windll = None
raise ImportError("Not windows")
from pip._vendor.rich._win32_console import (
ENABLE_VIRTUAL_TERMINAL_PROCESSING,
GetConsoleMode,
GetStdHandle,
LegacyWindowsError,
)
except (AttributeError, ImportError, ValueError):
# Fallback if we can't load the Windows DLL
def get_windows_console_features() -> WindowsConsoleFeatures:
features = WindowsConsoleFeatures()
return features
else:
def get_windows_console_features() -> WindowsConsoleFeatures:
"""Get windows console features.
Returns:
WindowsConsoleFeatures: An instance of WindowsConsoleFeatures.
"""
handle = GetStdHandle()
try:
console_mode = GetConsoleMode(handle)
success = True
except LegacyWindowsError:
console_mode = 0
success = False
vt = bool(success and console_mode & ENABLE_VIRTUAL_TERMINAL_PROCESSING)
truecolor = False
if vt:
win_version = sys.getwindowsversion()
truecolor = win_version.major > 10 or (
win_version.major == 10 and win_version.build >= 15063
)
features = WindowsConsoleFeatures(vt=vt, truecolor=truecolor)
return features
if __name__ == "__main__":
import platform
features = get_windows_console_features()
from pip._vendor.rich import print
print(f'platform="{platform.system()}"')
print(repr(features))
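    # Added sketch: these two flags gate Rich's output strategy; with vt=False
    # Rich falls back to the legacy Win32 API path (see _win32_console.py).
    print(f"vt={features.vt}, truecolor={features.truecolor}")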
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_windows.py
|
Python
|
mit
| 1,926 |
from typing import Iterable, Sequence, Tuple, cast
from pip._vendor.rich._win32_console import LegacyWindowsTerm, WindowsCoordinates
from pip._vendor.rich.segment import ControlCode, ControlType, Segment
def legacy_windows_render(buffer: Iterable[Segment], term: LegacyWindowsTerm) -> None:
"""Makes appropriate Windows Console API calls based on the segments in the buffer.
Args:
buffer (Iterable[Segment]): Iterable of Segments to convert to Win32 API calls.
term (LegacyWindowsTerm): Used to call the Windows Console API.
"""
for text, style, control in buffer:
if not control:
if style:
term.write_styled(text, style)
else:
term.write_text(text)
else:
control_codes: Sequence[ControlCode] = control
for control_code in control_codes:
control_type = control_code[0]
if control_type == ControlType.CURSOR_MOVE_TO:
_, x, y = cast(Tuple[ControlType, int, int], control_code)
term.move_cursor_to(WindowsCoordinates(row=y - 1, col=x - 1))
elif control_type == ControlType.CARRIAGE_RETURN:
term.write_text("\r")
elif control_type == ControlType.HOME:
term.move_cursor_to(WindowsCoordinates(0, 0))
elif control_type == ControlType.CURSOR_UP:
term.move_cursor_up()
elif control_type == ControlType.CURSOR_DOWN:
term.move_cursor_down()
elif control_type == ControlType.CURSOR_FORWARD:
term.move_cursor_forward()
elif control_type == ControlType.CURSOR_BACKWARD:
term.move_cursor_backward()
elif control_type == ControlType.CURSOR_MOVE_TO_COLUMN:
_, column = cast(Tuple[ControlType, int], control_code)
term.move_cursor_to_column(column - 1)
elif control_type == ControlType.HIDE_CURSOR:
term.hide_cursor()
elif control_type == ControlType.SHOW_CURSOR:
term.show_cursor()
elif control_type == ControlType.ERASE_IN_LINE:
_, mode = cast(Tuple[ControlType, int], control_code)
if mode == 0:
term.erase_end_of_line()
elif mode == 1:
term.erase_start_of_line()
elif mode == 2:
term.erase_line()
elif control_type == ControlType.SET_WINDOW_TITLE:
_, title = cast(Tuple[ControlType, str], control_code)
term.set_title(title)
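# A minimal usage sketch (added example, not part of the original module).
# It assumes a real Windows console is attached; on other platforms the
# Win32 calls made by LegacyWindowsTerm would fail.
if __name__ == "__main__":  # pragma: no cover
    import sys
    from pip._vendor.rich.style import Style
    buffer = [
        Segment("plain text "),
        Segment("bold red", Style.parse("bold red")),
        # A control segment: text is ignored, control codes drive the terminal.
        Segment("", None, [(ControlType.CARRIAGE_RETURN,)]),
    ]
    legacy_windows_render(buffer, LegacyWindowsTerm(sys.stdout))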
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_windows_renderer.py
|
Python
|
mit
| 2,783 |
import re
from typing import Iterable, List, Tuple
from ._loop import loop_last
from .cells import cell_len, chop_cells
re_word = re.compile(r"\s*\S+\s*")
def words(text: str) -> Iterable[Tuple[int, int, str]]:
position = 0
word_match = re_word.match(text, position)
while word_match is not None:
start, end = word_match.span()
word = word_match.group(0)
yield start, end, word
word_match = re_word.match(text, end)
def divide_line(text: str, width: int, fold: bool = True) -> List[int]:
divides: List[int] = []
append = divides.append
line_position = 0
_cell_len = cell_len
for start, _end, word in words(text):
word_length = _cell_len(word.rstrip())
if line_position + word_length > width:
if word_length > width:
if fold:
chopped_words = chop_cells(word, max_size=width, position=0)
for last, line in loop_last(chopped_words):
if start:
append(start)
if last:
line_position = _cell_len(line)
else:
start += len(line)
else:
if start:
append(start)
line_position = _cell_len(word)
elif line_position and start:
append(start)
line_position = _cell_len(word)
else:
line_position += _cell_len(word)
return divides
if __name__ == "__main__": # pragma: no cover
from .console import Console
console = Console(width=10)
console.print("12345 abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ 12345")
print(chop_cells("abcdefghijklmnopqrstuvwxyz", 10, position=2))
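    # Added sketch: divide_line() returns character offsets at which to break
    # a string; applying them in order recovers the wrapped lines.
    sample = "The quick brown fox jumps over the lazy dog"
    offsets = divide_line(sample, 10)
    start = 0
    for offset in offsets + [len(sample)]:
        print(repr(sample[start:offset]))
        start = offset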
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/_wrap.py
|
Python
|
mit
| 1,840 |
from abc import ABC
class RichRenderable(ABC):
"""An abstract base class for Rich renderables.
    Note that there is no need to extend this class; the intended use is to check if an
object supports the Rich renderable protocol. For example::
if isinstance(my_object, RichRenderable):
console.print(my_object)
"""
@classmethod
def __subclasshook__(cls, other: type) -> bool:
"""Check if this class supports the rich render protocol."""
return hasattr(other, "__rich_console__") or hasattr(other, "__rich__")
if __name__ == "__main__": # pragma: no cover
from pip._vendor.rich.text import Text
t = Text()
print(isinstance(Text, RichRenderable))
print(isinstance(t, RichRenderable))
class Foo:
pass
f = Foo()
print(isinstance(f, RichRenderable))
print(isinstance("", RichRenderable))
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/abc.py
|
Python
|
mit
| 890 |
import sys
from itertools import chain
from typing import TYPE_CHECKING, Iterable, Optional
if sys.version_info >= (3, 8):
from typing import Literal
else:
from pip._vendor.typing_extensions import Literal # pragma: no cover
from .constrain import Constrain
from .jupyter import JupyterMixin
from .measure import Measurement
from .segment import Segment
from .style import StyleType
if TYPE_CHECKING:
from .console import Console, ConsoleOptions, RenderableType, RenderResult
AlignMethod = Literal["left", "center", "right"]
VerticalAlignMethod = Literal["top", "middle", "bottom"]
class Align(JupyterMixin):
"""Align a renderable by adding spaces if necessary.
Args:
renderable (RenderableType): A console renderable.
        align (AlignMethod): One of "left", "center", or "right".
        style (StyleType, optional): An optional style to apply to the background.
        vertical (Optional[VerticalAlignMethod], optional): Optional vertical align, one of "top", "middle", or "bottom". Defaults to None.
pad (bool, optional): Pad the right with spaces. Defaults to True.
width (int, optional): Restrict contents to given width, or None to use default width. Defaults to None.
height (int, optional): Set height of align renderable, or None to fit to contents. Defaults to None.
Raises:
ValueError: if ``align`` is not one of the expected values.
"""
def __init__(
self,
renderable: "RenderableType",
align: AlignMethod = "left",
style: Optional[StyleType] = None,
*,
vertical: Optional[VerticalAlignMethod] = None,
pad: bool = True,
width: Optional[int] = None,
height: Optional[int] = None,
) -> None:
if align not in ("left", "center", "right"):
raise ValueError(
f'invalid value for align, expected "left", "center", or "right" (not {align!r})'
)
if vertical is not None and vertical not in ("top", "middle", "bottom"):
raise ValueError(
f'invalid value for vertical, expected "top", "middle", or "bottom" (not {vertical!r})'
)
self.renderable = renderable
self.align = align
self.style = style
self.vertical = vertical
self.pad = pad
self.width = width
self.height = height
def __repr__(self) -> str:
return f"Align({self.renderable!r}, {self.align!r})"
@classmethod
def left(
cls,
renderable: "RenderableType",
style: Optional[StyleType] = None,
*,
vertical: Optional[VerticalAlignMethod] = None,
pad: bool = True,
width: Optional[int] = None,
height: Optional[int] = None,
) -> "Align":
"""Align a renderable to the left."""
return cls(
renderable,
"left",
style=style,
vertical=vertical,
pad=pad,
width=width,
height=height,
)
@classmethod
def center(
cls,
renderable: "RenderableType",
style: Optional[StyleType] = None,
*,
vertical: Optional[VerticalAlignMethod] = None,
pad: bool = True,
width: Optional[int] = None,
height: Optional[int] = None,
) -> "Align":
"""Align a renderable to the center."""
return cls(
renderable,
"center",
style=style,
vertical=vertical,
pad=pad,
width=width,
height=height,
)
@classmethod
def right(
cls,
renderable: "RenderableType",
style: Optional[StyleType] = None,
*,
vertical: Optional[VerticalAlignMethod] = None,
pad: bool = True,
width: Optional[int] = None,
height: Optional[int] = None,
) -> "Align":
"""Align a renderable to the right."""
return cls(
renderable,
"right",
style=style,
vertical=vertical,
pad=pad,
width=width,
height=height,
)
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
align = self.align
width = console.measure(self.renderable, options=options).maximum
rendered = console.render(
Constrain(
self.renderable, width if self.width is None else min(width, self.width)
),
options.update(height=None),
)
lines = list(Segment.split_lines(rendered))
width, height = Segment.get_shape(lines)
lines = Segment.set_shape(lines, width, height)
new_line = Segment.line()
excess_space = options.max_width - width
style = console.get_style(self.style) if self.style is not None else None
def generate_segments() -> Iterable[Segment]:
if excess_space <= 0:
# Exact fit
for line in lines:
yield from line
yield new_line
elif align == "left":
# Pad on the right
pad = Segment(" " * excess_space, style) if self.pad else None
for line in lines:
yield from line
if pad:
yield pad
yield new_line
elif align == "center":
# Pad left and right
left = excess_space // 2
pad = Segment(" " * left, style)
pad_right = (
Segment(" " * (excess_space - left), style) if self.pad else None
)
for line in lines:
if left:
yield pad
yield from line
if pad_right:
yield pad_right
yield new_line
elif align == "right":
# Padding on left
pad = Segment(" " * excess_space, style)
for line in lines:
yield pad
yield from line
yield new_line
blank_line = (
Segment(f"{' ' * (self.width or options.max_width)}\n", style)
if self.pad
else Segment("\n")
)
def blank_lines(count: int) -> Iterable[Segment]:
if count > 0:
for _ in range(count):
yield blank_line
vertical_height = self.height or options.height
iter_segments: Iterable[Segment]
if self.vertical and vertical_height is not None:
if self.vertical == "top":
bottom_space = vertical_height - height
iter_segments = chain(generate_segments(), blank_lines(bottom_space))
elif self.vertical == "middle":
top_space = (vertical_height - height) // 2
bottom_space = vertical_height - top_space - height
iter_segments = chain(
blank_lines(top_space),
generate_segments(),
blank_lines(bottom_space),
)
else: # self.vertical == "bottom":
top_space = vertical_height - height
iter_segments = chain(blank_lines(top_space), generate_segments())
else:
iter_segments = generate_segments()
if self.style:
style = console.get_style(self.style)
iter_segments = Segment.apply_style(iter_segments, style)
yield from iter_segments
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
measurement = Measurement.get(console, options, self.renderable)
return measurement
class VerticalCenter(JupyterMixin):
"""Vertically aligns a renderable.
Warn:
This class is deprecated and may be removed in a future version. Use Align class with
`vertical="middle"`.
Args:
renderable (RenderableType): A renderable object.
"""
def __init__(
self,
renderable: "RenderableType",
style: Optional[StyleType] = None,
) -> None:
self.renderable = renderable
self.style = style
def __repr__(self) -> str:
return f"VerticalCenter({self.renderable!r})"
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
style = console.get_style(self.style) if self.style is not None else None
lines = console.render_lines(
self.renderable, options.update(height=None), pad=False
)
width, _height = Segment.get_shape(lines)
new_line = Segment.line()
height = options.height or options.size.height
top_space = (height - len(lines)) // 2
bottom_space = height - top_space - len(lines)
blank_line = Segment(f"{' ' * width}", style)
def blank_lines(count: int) -> Iterable[Segment]:
for _ in range(count):
yield blank_line
yield new_line
if top_space > 0:
yield from blank_lines(top_space)
for line in lines:
yield from line
yield new_line
if bottom_space > 0:
yield from blank_lines(bottom_space)
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
measurement = Measurement.get(console, options, self.renderable)
return measurement
if __name__ == "__main__": # pragma: no cover
from pip._vendor.rich.console import Console, Group
from pip._vendor.rich.highlighter import ReprHighlighter
from pip._vendor.rich.panel import Panel
highlighter = ReprHighlighter()
console = Console()
panel = Panel(
Group(
Align.left(highlighter("align='left'")),
Align.center(highlighter("align='center'")),
Align.right(highlighter("align='right'")),
),
width=60,
style="on dark_blue",
title="Algin",
)
console.print(
Align.center(panel, vertical="middle", style="on red", height=console.height)
)
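    # Added sketch: constrain the width and apply a background style.
    console.print(Align.center("centered in 30 cells", width=30, style="on blue"))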
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/align.py
|
Python
|
mit
| 10,368 |
import re
import sys
from contextlib import suppress
from typing import Iterable, NamedTuple, Optional
from .color import Color
from .style import Style
from .text import Text
re_ansi = re.compile(
r"""
(?:\x1b\](.*?)\x1b\\)|
(?:\x1b([(@-Z\\-_]|\[[0-?]*[ -/]*[@-~]))
""",
re.VERBOSE,
)
class _AnsiToken(NamedTuple):
"""Result of ansi tokenized string."""
plain: str = ""
sgr: Optional[str] = ""
osc: Optional[str] = ""
def _ansi_tokenize(ansi_text: str) -> Iterable[_AnsiToken]:
"""Tokenize a string in to plain text and ANSI codes.
Args:
ansi_text (str): A String containing ANSI codes.
Yields:
AnsiToken: A named tuple of (plain, sgr, osc)
"""
position = 0
sgr: Optional[str]
osc: Optional[str]
for match in re_ansi.finditer(ansi_text):
start, end = match.span(0)
osc, sgr = match.groups()
if start > position:
yield _AnsiToken(ansi_text[position:start])
if sgr:
if sgr.endswith("m"):
yield _AnsiToken("", sgr[1:-1], osc)
else:
yield _AnsiToken("", sgr, osc)
position = end
if position < len(ansi_text):
yield _AnsiToken(ansi_text[position:])
SGR_STYLE_MAP = {
1: "bold",
2: "dim",
3: "italic",
4: "underline",
5: "blink",
6: "blink2",
7: "reverse",
8: "conceal",
9: "strike",
21: "underline2",
22: "not dim not bold",
23: "not italic",
24: "not underline",
25: "not blink",
26: "not blink2",
27: "not reverse",
28: "not conceal",
29: "not strike",
30: "color(0)",
31: "color(1)",
32: "color(2)",
33: "color(3)",
34: "color(4)",
35: "color(5)",
36: "color(6)",
37: "color(7)",
39: "default",
40: "on color(0)",
41: "on color(1)",
42: "on color(2)",
43: "on color(3)",
44: "on color(4)",
45: "on color(5)",
46: "on color(6)",
47: "on color(7)",
49: "on default",
51: "frame",
52: "encircle",
53: "overline",
54: "not frame not encircle",
55: "not overline",
90: "color(8)",
91: "color(9)",
92: "color(10)",
93: "color(11)",
94: "color(12)",
95: "color(13)",
96: "color(14)",
97: "color(15)",
100: "on color(8)",
101: "on color(9)",
102: "on color(10)",
103: "on color(11)",
104: "on color(12)",
105: "on color(13)",
106: "on color(14)",
107: "on color(15)",
}
class AnsiDecoder:
"""Translate ANSI code in to styled Text."""
def __init__(self) -> None:
self.style = Style.null()
def decode(self, terminal_text: str) -> Iterable[Text]:
"""Decode ANSI codes in an interable of lines.
Args:
lines (Iterable[str]): An iterable of lines of terminal output.
Yields:
Text: Marked up Text.
"""
for line in terminal_text.splitlines():
yield self.decode_line(line)
def decode_line(self, line: str) -> Text:
"""Decode a line containing ansi codes.
Args:
line (str): A line of terminal output.
Returns:
Text: A Text instance marked up according to ansi codes.
"""
from_ansi = Color.from_ansi
from_rgb = Color.from_rgb
_Style = Style
text = Text()
append = text.append
line = line.rsplit("\r", 1)[-1]
for plain_text, sgr, osc in _ansi_tokenize(line):
if plain_text:
append(plain_text, self.style or None)
elif osc is not None:
if osc.startswith("8;"):
_params, semicolon, link = osc[2:].partition(";")
if semicolon:
self.style = self.style.update_link(link or None)
elif sgr is not None:
                # Translate into semicolon-separated codes
# Ignore invalid codes, because we want to be lenient
codes = [
min(255, int(_code) if _code else 0)
for _code in sgr.split(";")
if _code.isdigit() or _code == ""
]
iter_codes = iter(codes)
for code in iter_codes:
if code == 0:
# reset
self.style = _Style.null()
elif code in SGR_STYLE_MAP:
# styles
self.style += _Style.parse(SGR_STYLE_MAP[code])
elif code == 38:
# Foreground
with suppress(StopIteration):
color_type = next(iter_codes)
if color_type == 5:
self.style += _Style.from_color(
from_ansi(next(iter_codes))
)
elif color_type == 2:
self.style += _Style.from_color(
from_rgb(
next(iter_codes),
next(iter_codes),
next(iter_codes),
)
)
elif code == 48:
# Background
with suppress(StopIteration):
color_type = next(iter_codes)
if color_type == 5:
self.style += _Style.from_color(
None, from_ansi(next(iter_codes))
)
elif color_type == 2:
self.style += _Style.from_color(
None,
from_rgb(
next(iter_codes),
next(iter_codes),
next(iter_codes),
),
)
return text
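def _decode_example() -> None:  # pragma: no cover
    # A minimal sketch (added example): "\x1b[1;31m" is the SGR code for bold
    # red and "\x1b[0m" resets; AnsiDecoder carries style state across tokens.
    decoder = AnsiDecoder()
    text = decoder.decode_line("\x1b[1;31mbold red\x1b[0m plain")
    print(text.plain)
    print(text.spans)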
if sys.platform != "win32" and __name__ == "__main__": # pragma: no cover
import io
import os
import pty
import sys
decoder = AnsiDecoder()
stdout = io.BytesIO()
def read(fd: int) -> bytes:
data = os.read(fd, 1024)
stdout.write(data)
return data
pty.spawn(sys.argv[1:], read)
from .console import Console
console = Console(record=True)
stdout_result = stdout.getvalue().decode("utf-8")
print(stdout_result)
for line in decoder.decode(stdout_result):
console.print(line)
console.save_html("stdout.html")
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/ansi.py
|
Python
|
mit
| 6,820 |
from typing import Optional, Union
from .color import Color
from .console import Console, ConsoleOptions, RenderResult
from .jupyter import JupyterMixin
from .measure import Measurement
from .segment import Segment
from .style import Style
# There are left-aligned characters for 1/8 to 7/8, but
# the right-aligned characters exist only for 1/8 and 4/8.
BEGIN_BLOCK_ELEMENTS = ["█", "█", "█", "▐", "▐", "▐", "▕", "▕"]
END_BLOCK_ELEMENTS = [" ", "▏", "▎", "▍", "▌", "▋", "▊", "▉"]
FULL_BLOCK = "█"
class Bar(JupyterMixin):
"""Renders a solid block bar.
Args:
size (float): Value for the end of the bar.
begin (float): Begin point (between 0 and size, inclusive).
end (float): End point (between 0 and size, inclusive).
width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None.
color (Union[Color, str], optional): Color of the bar. Defaults to "default".
bgcolor (Union[Color, str], optional): Color of bar background. Defaults to "default".
"""
def __init__(
self,
size: float,
begin: float,
end: float,
*,
width: Optional[int] = None,
color: Union[Color, str] = "default",
bgcolor: Union[Color, str] = "default",
):
self.size = size
self.begin = max(begin, 0)
self.end = min(end, size)
self.width = width
self.style = Style(color=color, bgcolor=bgcolor)
def __repr__(self) -> str:
return f"Bar({self.size}, {self.begin}, {self.end})"
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
width = min(
self.width if self.width is not None else options.max_width,
options.max_width,
)
if self.begin >= self.end:
yield Segment(" " * width, self.style)
yield Segment.line()
return
prefix_complete_eights = int(width * 8 * self.begin / self.size)
prefix_bar_count = prefix_complete_eights // 8
prefix_eights_count = prefix_complete_eights % 8
body_complete_eights = int(width * 8 * self.end / self.size)
body_bar_count = body_complete_eights // 8
body_eights_count = body_complete_eights % 8
# When start and end fall into the same cell, we ideally should render
# a symbol that's "center-aligned", but there is no good symbol in Unicode.
# In this case, we fall back to right-aligned block symbol for simplicity.
prefix = " " * prefix_bar_count
if prefix_eights_count:
prefix += BEGIN_BLOCK_ELEMENTS[prefix_eights_count]
body = FULL_BLOCK * body_bar_count
if body_eights_count:
body += END_BLOCK_ELEMENTS[body_eights_count]
suffix = " " * (width - len(body))
yield Segment(prefix + body[len(prefix) :] + suffix, self.style)
yield Segment.line()
def __rich_measure__(
self, console: Console, options: ConsoleOptions
) -> Measurement:
return (
Measurement(self.width, self.width)
if self.width is not None
else Measurement(4, options.max_width)
)
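if __name__ == "__main__":  # pragma: no cover
    # Added usage sketch: highlight the 25-75 portion of a 0-100 scale.
    console = Console()
    console.print(Bar(size=100, begin=25, end=75, width=40, color="green"))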
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/bar.py
|
Python
|
mit
| 3,264 |
import sys
from typing import TYPE_CHECKING, Iterable, List
if sys.version_info >= (3, 8):
from typing import Literal
else:
from pip._vendor.typing_extensions import Literal # pragma: no cover
from ._loop import loop_last
if TYPE_CHECKING:
from pip._vendor.rich.console import ConsoleOptions
class Box:
"""Defines characters to render boxes.
┌─┬┐ top
│ ││ head
├─┼┤ head_row
│ ││ mid
├─┼┤ row
├─┼┤ foot_row
│ ││ foot
└─┴┘ bottom
Args:
box (str): Characters making up box.
ascii (bool, optional): True if this box uses ascii characters only. Default is False.
"""
def __init__(self, box: str, *, ascii: bool = False) -> None:
self._box = box
self.ascii = ascii
line1, line2, line3, line4, line5, line6, line7, line8 = box.splitlines()
# top
self.top_left, self.top, self.top_divider, self.top_right = iter(line1)
# head
self.head_left, _, self.head_vertical, self.head_right = iter(line2)
# head_row
(
self.head_row_left,
self.head_row_horizontal,
self.head_row_cross,
self.head_row_right,
) = iter(line3)
# mid
self.mid_left, _, self.mid_vertical, self.mid_right = iter(line4)
# row
self.row_left, self.row_horizontal, self.row_cross, self.row_right = iter(line5)
# foot_row
(
self.foot_row_left,
self.foot_row_horizontal,
self.foot_row_cross,
self.foot_row_right,
) = iter(line6)
# foot
self.foot_left, _, self.foot_vertical, self.foot_right = iter(line7)
# bottom
self.bottom_left, self.bottom, self.bottom_divider, self.bottom_right = iter(
line8
)
def __repr__(self) -> str:
return "Box(...)"
def __str__(self) -> str:
return self._box
def substitute(self, options: "ConsoleOptions", safe: bool = True) -> "Box":
"""Substitute this box for another if it won't render due to platform issues.
Args:
options (ConsoleOptions): Console options used in rendering.
safe (bool, optional): Substitute this for another Box if there are known problems
displaying on the platform (currently only relevant on Windows). Default is True.
Returns:
Box: A different Box or the same Box.
"""
box = self
if options.legacy_windows and safe:
box = LEGACY_WINDOWS_SUBSTITUTIONS.get(box, box)
if options.ascii_only and not box.ascii:
box = ASCII
return box
def get_plain_headed_box(self) -> "Box":
"""If this box uses special characters for the borders of the header, then
return the equivalent box that does not.
Returns:
Box: The most similar Box that doesn't use header-specific box characters.
If the current Box already satisfies this criterion, then it's returned.
"""
return PLAIN_HEADED_SUBSTITUTIONS.get(self, self)
def get_top(self, widths: Iterable[int]) -> str:
"""Get the top of a simple box.
Args:
widths (List[int]): Widths of columns.
Returns:
str: A string of box characters.
"""
parts: List[str] = []
append = parts.append
append(self.top_left)
for last, width in loop_last(widths):
append(self.top * width)
if not last:
append(self.top_divider)
append(self.top_right)
return "".join(parts)
def get_row(
self,
widths: Iterable[int],
level: Literal["head", "row", "foot", "mid"] = "row",
edge: bool = True,
) -> str:
"""Get the top of a simple box.
Args:
width (List[int]): Widths of columns.
Returns:
str: A string of box characters.
"""
if level == "head":
left = self.head_row_left
horizontal = self.head_row_horizontal
cross = self.head_row_cross
right = self.head_row_right
elif level == "row":
left = self.row_left
horizontal = self.row_horizontal
cross = self.row_cross
right = self.row_right
elif level == "mid":
left = self.mid_left
horizontal = " "
cross = self.mid_vertical
right = self.mid_right
elif level == "foot":
left = self.foot_row_left
horizontal = self.foot_row_horizontal
cross = self.foot_row_cross
right = self.foot_row_right
else:
raise ValueError("level must be 'head', 'row' or 'foot'")
parts: List[str] = []
append = parts.append
if edge:
append(left)
for last, width in loop_last(widths):
append(horizontal * width)
if not last:
append(cross)
if edge:
append(right)
return "".join(parts)
def get_bottom(self, widths: Iterable[int]) -> str:
"""Get the bottom of a simple box.
Args:
widths (List[int]): Widths of columns.
Returns:
str: A string of box characters.
"""
parts: List[str] = []
append = parts.append
append(self.bottom_left)
for last, width in loop_last(widths):
append(self.bottom * width)
if not last:
append(self.bottom_divider)
append(self.bottom_right)
return "".join(parts)
ASCII: Box = Box(
"""\
+--+
| ||
|-+|
| ||
|-+|
|-+|
| ||
+--+
""",
ascii=True,
)
ASCII2: Box = Box(
"""\
+-++
| ||
+-++
| ||
+-++
+-++
| ||
+-++
""",
ascii=True,
)
ASCII_DOUBLE_HEAD: Box = Box(
"""\
+-++
| ||
+=++
| ||
+-++
+-++
| ||
+-++
""",
ascii=True,
)
SQUARE: Box = Box(
"""\
┌─┬┐
│ ││
├─┼┤
│ ││
├─┼┤
├─┼┤
│ ││
└─┴┘
"""
)
SQUARE_DOUBLE_HEAD: Box = Box(
"""\
┌─┬┐
│ ││
╞═╪╡
│ ││
├─┼┤
├─┼┤
│ ││
└─┴┘
"""
)
MINIMAL: Box = Box(
    """\
  ╷ 
  │ 
╶─┼╴
  │ 
╶─┼╴
╶─┼╴
  │ 
  ╵ 
"""
)
MINIMAL_HEAVY_HEAD: Box = Box(
    """\
  ╷ 
  │ 
╺━┿╸
  │ 
╶─┼╴
╶─┼╴
  │ 
  ╵ 
"""
)
MINIMAL_DOUBLE_HEAD: Box = Box(
    """\
  ╷ 
  │ 
 ═╪ 
  │ 
 ─┼ 
 ─┼ 
  │ 
  ╵ 
"""
)
SIMPLE: Box = Box(
    """\
    
    
 ── 
    
    
 ── 
    
    
"""
)
SIMPLE_HEAD: Box = Box(
    """\
    
    
 ── 
    
    
    
    
    
"""
)
SIMPLE_HEAVY: Box = Box(
    """\
    
    
 ━━ 
    
    
 ━━ 
    
    
"""
)
HORIZONTALS: Box = Box(
    """\
 ── 
    
 ── 
    
 ── 
 ── 
    
 ── 
"""
)
ROUNDED: Box = Box(
"""\
╭─┬╮
│ ││
├─┼┤
│ ││
├─┼┤
├─┼┤
│ ││
╰─┴╯
"""
)
HEAVY: Box = Box(
"""\
┏━┳┓
┃ ┃┃
┣━╋┫
┃ ┃┃
┣━╋┫
┣━╋┫
┃ ┃┃
┗━┻┛
"""
)
HEAVY_EDGE: Box = Box(
"""\
┏━┯┓
┃ │┃
┠─┼┨
┃ │┃
┠─┼┨
┠─┼┨
┃ │┃
┗━┷┛
"""
)
HEAVY_HEAD: Box = Box(
"""\
┏━┳┓
┃ ┃┃
┡━╇┩
│ ││
├─┼┤
├─┼┤
│ ││
└─┴┘
"""
)
DOUBLE: Box = Box(
"""\
╔═╦╗
║ ║║
╠═╬╣
║ ║║
╠═╬╣
╠═╬╣
║ ║║
╚═╩╝
"""
)
DOUBLE_EDGE: Box = Box(
"""\
╔═╤╗
║ │║
╟─┼╢
║ │║
╟─┼╢
╟─┼╢
║ │║
╚═╧╝
"""
)
MARKDOWN: Box = Box(
    """\
    
| ||
|-||
| ||
|-||
|-||
| ||
    
""",
    ascii=True,
)
# Map Boxes that don't render with raster fonts on to equivalent that do
LEGACY_WINDOWS_SUBSTITUTIONS = {
ROUNDED: SQUARE,
MINIMAL_HEAVY_HEAD: MINIMAL,
SIMPLE_HEAVY: SIMPLE,
HEAVY: SQUARE,
HEAVY_EDGE: SQUARE,
HEAVY_HEAD: SQUARE,
}
# Map headed boxes to their headerless equivalents
PLAIN_HEADED_SUBSTITUTIONS = {
HEAVY_HEAD: SQUARE,
SQUARE_DOUBLE_HEAD: SQUARE,
MINIMAL_DOUBLE_HEAD: MINIMAL,
MINIMAL_HEAVY_HEAD: MINIMAL,
ASCII_DOUBLE_HEAD: ASCII2,
}
if __name__ == "__main__": # pragma: no cover
from pip._vendor.rich.columns import Columns
from pip._vendor.rich.panel import Panel
from . import box as box
from .console import Console
from .table import Table
from .text import Text
console = Console(record=True)
BOXES = [
"ASCII",
"ASCII2",
"ASCII_DOUBLE_HEAD",
"SQUARE",
"SQUARE_DOUBLE_HEAD",
"MINIMAL",
"MINIMAL_HEAVY_HEAD",
"MINIMAL_DOUBLE_HEAD",
"SIMPLE",
"SIMPLE_HEAD",
"SIMPLE_HEAVY",
"HORIZONTALS",
"ROUNDED",
"HEAVY",
"HEAVY_EDGE",
"HEAVY_HEAD",
"DOUBLE",
"DOUBLE_EDGE",
"MARKDOWN",
]
console.print(Panel("[bold green]Box Constants", style="green"), justify="center")
console.print()
columns = Columns(expand=True, padding=2)
for box_name in sorted(BOXES):
table = Table(
show_footer=True, style="dim", border_style="not dim", expand=True
)
table.add_column("Header 1", "Footer 1")
table.add_column("Header 2", "Footer 2")
table.add_row("Cell", "Cell")
table.add_row("Cell", "Cell")
table.box = getattr(box, box_name)
table.title = Text(f"box.{box_name}", style="magenta")
columns.add_renderable(table)
console.print(columns)
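    # Added sketch: a Box can also be used directly to build border strings
    # from a list of column widths.
    console.print(box.SQUARE.get_top([3, 4]))  # ┌───┬────┐
    console.print(box.SQUARE.get_row([3, 4], "head"))  # ├───┼────┤
    console.print(box.SQUARE.get_bottom([3, 4]))  # └───┴────┘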
# console.save_html("box.html", inline_styles=True)
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/box.py
|
Python
|
mit
| 9,864 |
import re
from functools import lru_cache
from typing import Callable, List
from ._cell_widths import CELL_WIDTHS
# Regex to match sequence of the most common character ranges
_is_single_cell_widths = re.compile("^[\u0020-\u006f\u00a0-\u02ff\u0370-\u0482]*$").match
@lru_cache(4096)
def cached_cell_len(text: str) -> int:
"""Get the number of cells required to display text.
This method always caches, which may use up a lot of memory. It is recommended to use
`cell_len` over this method.
Args:
text (str): Text to display.
Returns:
int: Get the number of cells required to display text.
"""
_get_size = get_character_cell_size
total_size = sum(_get_size(character) for character in text)
return total_size
def cell_len(text: str, _cell_len: Callable[[str], int] = cached_cell_len) -> int:
"""Get the number of cells required to display text.
Args:
text (str): Text to display.
Returns:
int: Get the number of cells required to display text.
"""
if len(text) < 512:
return _cell_len(text)
_get_size = get_character_cell_size
total_size = sum(_get_size(character) for character in text)
return total_size
@lru_cache(maxsize=4096)
def get_character_cell_size(character: str) -> int:
"""Get the cell size of a character.
Args:
character (str): A single character.
Returns:
int: Number of cells (0, 1 or 2) occupied by that character.
"""
return _get_codepoint_cell_size(ord(character))
@lru_cache(maxsize=4096)
def _get_codepoint_cell_size(codepoint: int) -> int:
"""Get the cell size of a character.
Args:
character (str): A single character.
Returns:
int: Number of cells (0, 1 or 2) occupied by that character.
"""
_table = CELL_WIDTHS
lower_bound = 0
upper_bound = len(_table) - 1
index = (lower_bound + upper_bound) // 2
while True:
start, end, width = _table[index]
if codepoint < start:
upper_bound = index - 1
elif codepoint > end:
lower_bound = index + 1
else:
return 0 if width == -1 else width
if upper_bound < lower_bound:
break
index = (lower_bound + upper_bound) // 2
return 1
def set_cell_size(text: str, total: int) -> str:
"""Set the length of a string to fit within given number of cells."""
if _is_single_cell_widths(text):
size = len(text)
if size < total:
return text + " " * (total - size)
return text[:total]
if total <= 0:
return ""
cell_size = cell_len(text)
if cell_size == total:
return text
if cell_size < total:
return text + " " * (total - cell_size)
start = 0
end = len(text)
# Binary search until we find the right size
while True:
pos = (start + end) // 2
before = text[: pos + 1]
before_len = cell_len(before)
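        # Added comment: if the cut lands one cell past the target and the
        # final character is double-width, drop it and pad with a space.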
if before_len == total + 1 and cell_len(before[-1]) == 2:
return before[:-1] + " "
if before_len == total:
return before
if before_len > total:
end = pos
else:
start = pos
# TODO: This is inefficient
# TODO: This might not work with CWJ type characters
def chop_cells(text: str, max_size: int, position: int = 0) -> List[str]:
"""Break text in to equal (cell) length strings, returning the characters in reverse
order"""
_get_character_cell_size = get_character_cell_size
characters = [
(character, _get_character_cell_size(character)) for character in text
]
total_size = position
lines: List[List[str]] = [[]]
append = lines[-1].append
for character, size in reversed(characters):
if total_size + size > max_size:
lines.append([character])
append = lines[-1].append
total_size = size
else:
total_size += size
append(character)
return ["".join(line) for line in lines]
if __name__ == "__main__": # pragma: no cover
print(get_character_cell_size("😽"))
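    # Added sketch: cell_len counts terminal cells, not characters; a CJK
    # glyph occupies two cells.
    print(len("こんにちは"), cell_len("こんにちは"))  # 5 characters, 10 cells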
for line in chop_cells("""这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑。""", 8):
print(line)
for n in range(80, 1, -1):
print(set_cell_size("""这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑。""", n) + "|")
print("x" * n)
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/cells.py
|
Python
|
mit
| 4,503 |
import platform
import re
from colorsys import rgb_to_hls
from enum import IntEnum
from functools import lru_cache
from typing import TYPE_CHECKING, NamedTuple, Optional, Tuple
from ._palettes import EIGHT_BIT_PALETTE, STANDARD_PALETTE, WINDOWS_PALETTE
from .color_triplet import ColorTriplet
from .repr import Result, rich_repr
from .terminal_theme import DEFAULT_TERMINAL_THEME
if TYPE_CHECKING: # pragma: no cover
from .terminal_theme import TerminalTheme
from .text import Text
WINDOWS = platform.system() == "Windows"
class ColorSystem(IntEnum):
"""One of the 3 color system supported by terminals."""
STANDARD = 1
EIGHT_BIT = 2
TRUECOLOR = 3
WINDOWS = 4
def __repr__(self) -> str:
return f"ColorSystem.{self.name}"
class ColorType(IntEnum):
"""Type of color stored in Color class."""
DEFAULT = 0
STANDARD = 1
EIGHT_BIT = 2
TRUECOLOR = 3
WINDOWS = 4
def __repr__(self) -> str:
return f"ColorType.{self.name}"
ANSI_COLOR_NAMES = {
"black": 0,
"red": 1,
"green": 2,
"yellow": 3,
"blue": 4,
"magenta": 5,
"cyan": 6,
"white": 7,
"bright_black": 8,
"bright_red": 9,
"bright_green": 10,
"bright_yellow": 11,
"bright_blue": 12,
"bright_magenta": 13,
"bright_cyan": 14,
"bright_white": 15,
"grey0": 16,
"gray0": 16,
"navy_blue": 17,
"dark_blue": 18,
"blue3": 20,
"blue1": 21,
"dark_green": 22,
"deep_sky_blue4": 25,
"dodger_blue3": 26,
"dodger_blue2": 27,
"green4": 28,
"spring_green4": 29,
"turquoise4": 30,
"deep_sky_blue3": 32,
"dodger_blue1": 33,
"green3": 40,
"spring_green3": 41,
"dark_cyan": 36,
"light_sea_green": 37,
"deep_sky_blue2": 38,
"deep_sky_blue1": 39,
"spring_green2": 47,
"cyan3": 43,
"dark_turquoise": 44,
"turquoise2": 45,
"green1": 46,
"spring_green1": 48,
"medium_spring_green": 49,
"cyan2": 50,
"cyan1": 51,
"dark_red": 88,
"deep_pink4": 125,
"purple4": 55,
"purple3": 56,
"blue_violet": 57,
"orange4": 94,
"grey37": 59,
"gray37": 59,
"medium_purple4": 60,
"slate_blue3": 62,
"royal_blue1": 63,
"chartreuse4": 64,
"dark_sea_green4": 71,
"pale_turquoise4": 66,
"steel_blue": 67,
"steel_blue3": 68,
"cornflower_blue": 69,
"chartreuse3": 76,
"cadet_blue": 73,
"sky_blue3": 74,
"steel_blue1": 81,
"pale_green3": 114,
"sea_green3": 78,
"aquamarine3": 79,
"medium_turquoise": 80,
"chartreuse2": 112,
"sea_green2": 83,
"sea_green1": 85,
"aquamarine1": 122,
"dark_slate_gray2": 87,
"dark_magenta": 91,
"dark_violet": 128,
"purple": 129,
"light_pink4": 95,
"plum4": 96,
"medium_purple3": 98,
"slate_blue1": 99,
"yellow4": 106,
"wheat4": 101,
"grey53": 102,
"gray53": 102,
"light_slate_grey": 103,
"light_slate_gray": 103,
"medium_purple": 104,
"light_slate_blue": 105,
"dark_olive_green3": 149,
"dark_sea_green": 108,
"light_sky_blue3": 110,
"sky_blue2": 111,
"dark_sea_green3": 150,
"dark_slate_gray3": 116,
"sky_blue1": 117,
"chartreuse1": 118,
"light_green": 120,
"pale_green1": 156,
"dark_slate_gray1": 123,
"red3": 160,
"medium_violet_red": 126,
"magenta3": 164,
"dark_orange3": 166,
"indian_red": 167,
"hot_pink3": 168,
"medium_orchid3": 133,
"medium_orchid": 134,
"medium_purple2": 140,
"dark_goldenrod": 136,
"light_salmon3": 173,
"rosy_brown": 138,
"grey63": 139,
"gray63": 139,
"medium_purple1": 141,
"gold3": 178,
"dark_khaki": 143,
"navajo_white3": 144,
"grey69": 145,
"gray69": 145,
"light_steel_blue3": 146,
"light_steel_blue": 147,
"yellow3": 184,
"dark_sea_green2": 157,
"light_cyan3": 152,
"light_sky_blue1": 153,
"green_yellow": 154,
"dark_olive_green2": 155,
"dark_sea_green1": 193,
"pale_turquoise1": 159,
"deep_pink3": 162,
"magenta2": 200,
"hot_pink2": 169,
"orchid": 170,
"medium_orchid1": 207,
"orange3": 172,
"light_pink3": 174,
"pink3": 175,
"plum3": 176,
"violet": 177,
"light_goldenrod3": 179,
"tan": 180,
"misty_rose3": 181,
"thistle3": 182,
"plum2": 183,
"khaki3": 185,
"light_goldenrod2": 222,
"light_yellow3": 187,
"grey84": 188,
"gray84": 188,
"light_steel_blue1": 189,
"yellow2": 190,
"dark_olive_green1": 192,
"honeydew2": 194,
"light_cyan1": 195,
"red1": 196,
"deep_pink2": 197,
"deep_pink1": 199,
"magenta1": 201,
"orange_red1": 202,
"indian_red1": 204,
"hot_pink": 206,
"dark_orange": 208,
"salmon1": 209,
"light_coral": 210,
"pale_violet_red1": 211,
"orchid2": 212,
"orchid1": 213,
"orange1": 214,
"sandy_brown": 215,
"light_salmon1": 216,
"light_pink1": 217,
"pink1": 218,
"plum1": 219,
"gold1": 220,
"navajo_white1": 223,
"misty_rose1": 224,
"thistle1": 225,
"yellow1": 226,
"light_goldenrod1": 227,
"khaki1": 228,
"wheat1": 229,
"cornsilk1": 230,
"grey100": 231,
"gray100": 231,
"grey3": 232,
"gray3": 232,
"grey7": 233,
"gray7": 233,
"grey11": 234,
"gray11": 234,
"grey15": 235,
"gray15": 235,
"grey19": 236,
"gray19": 236,
"grey23": 237,
"gray23": 237,
"grey27": 238,
"gray27": 238,
"grey30": 239,
"gray30": 239,
"grey35": 240,
"gray35": 240,
"grey39": 241,
"gray39": 241,
"grey42": 242,
"gray42": 242,
"grey46": 243,
"gray46": 243,
"grey50": 244,
"gray50": 244,
"grey54": 245,
"gray54": 245,
"grey58": 246,
"gray58": 246,
"grey62": 247,
"gray62": 247,
"grey66": 248,
"gray66": 248,
"grey70": 249,
"gray70": 249,
"grey74": 250,
"gray74": 250,
"grey78": 251,
"gray78": 251,
"grey82": 252,
"gray82": 252,
"grey85": 253,
"gray85": 253,
"grey89": 254,
"gray89": 254,
"grey93": 255,
"gray93": 255,
}
class ColorParseError(Exception):
"""The color could not be parsed."""
RE_COLOR = re.compile(
r"""^
\#([0-9a-f]{6})$|
color\(([0-9]{1,3})\)$|
rgb\(([\d\s,]+)\)$
""",
re.VERBOSE,
)
@rich_repr
class Color(NamedTuple):
"""Terminal color definition."""
name: str
"""The name of the color (typically the input to Color.parse)."""
type: ColorType
"""The type of the color."""
number: Optional[int] = None
"""The color number, if a standard color, or None."""
triplet: Optional[ColorTriplet] = None
"""A triplet of color components, if an RGB color."""
def __rich__(self) -> "Text":
"""Dispays the actual color if Rich printed."""
from .style import Style
from .text import Text
return Text.assemble(
f"<color {self.name!r} ({self.type.name.lower()})",
("⬤", Style(color=self)),
" >",
)
def __rich_repr__(self) -> Result:
yield self.name
yield self.type
yield "number", self.number, None
yield "triplet", self.triplet, None
@property
def system(self) -> ColorSystem:
"""Get the native color system for this color."""
if self.type == ColorType.DEFAULT:
return ColorSystem.STANDARD
return ColorSystem(int(self.type))
@property
def is_system_defined(self) -> bool:
"""Check if the color is ultimately defined by the system."""
return self.system not in (ColorSystem.EIGHT_BIT, ColorSystem.TRUECOLOR)
@property
def is_default(self) -> bool:
"""Check if the color is a default color."""
return self.type == ColorType.DEFAULT
def get_truecolor(
self, theme: Optional["TerminalTheme"] = None, foreground: bool = True
) -> ColorTriplet:
"""Get an equivalent color triplet for this color.
Args:
theme (TerminalTheme, optional): Optional terminal theme, or None to use default. Defaults to None.
foreground (bool, optional): True for a foreground color, or False for background. Defaults to True.
Returns:
ColorTriplet: A color triplet containing RGB components.
"""
if theme is None:
theme = DEFAULT_TERMINAL_THEME
if self.type == ColorType.TRUECOLOR:
assert self.triplet is not None
return self.triplet
elif self.type == ColorType.EIGHT_BIT:
assert self.number is not None
return EIGHT_BIT_PALETTE[self.number]
elif self.type == ColorType.STANDARD:
assert self.number is not None
return theme.ansi_colors[self.number]
elif self.type == ColorType.WINDOWS:
assert self.number is not None
return WINDOWS_PALETTE[self.number]
else: # self.type == ColorType.DEFAULT:
assert self.number is None
return theme.foreground_color if foreground else theme.background_color
@classmethod
def from_ansi(cls, number: int) -> "Color":
"""Create a Color number from it's 8-bit ansi number.
Args:
number (int): A number between 0-255 inclusive.
Returns:
Color: A new Color instance.
"""
return cls(
name=f"color({number})",
type=(ColorType.STANDARD if number < 16 else ColorType.EIGHT_BIT),
number=number,
)
@classmethod
def from_triplet(cls, triplet: "ColorTriplet") -> "Color":
"""Create a truecolor RGB color from a triplet of values.
Args:
triplet (ColorTriplet): A color triplet containing red, green and blue components.
Returns:
Color: A new color object.
"""
return cls(name=triplet.hex, type=ColorType.TRUECOLOR, triplet=triplet)
@classmethod
def from_rgb(cls, red: float, green: float, blue: float) -> "Color":
"""Create a truecolor from three color components in the range(0->255).
Args:
red (float): Red component in range 0-255.
green (float): Green component in range 0-255.
blue (float): Blue component in range 0-255.
Returns:
Color: A new color object.
"""
return cls.from_triplet(ColorTriplet(int(red), int(green), int(blue)))
@classmethod
def default(cls) -> "Color":
"""Get a Color instance representing the default color.
Returns:
Color: Default color.
"""
return cls(name="default", type=ColorType.DEFAULT)
@classmethod
@lru_cache(maxsize=1024)
def parse(cls, color: str) -> "Color":
"""Parse a color definition."""
original_color = color
color = color.lower().strip()
if color == "default":
return cls(color, type=ColorType.DEFAULT)
color_number = ANSI_COLOR_NAMES.get(color)
if color_number is not None:
return cls(
color,
type=(ColorType.STANDARD if color_number < 16 else ColorType.EIGHT_BIT),
number=color_number,
)
color_match = RE_COLOR.match(color)
if color_match is None:
raise ColorParseError(f"{original_color!r} is not a valid color")
color_24, color_8, color_rgb = color_match.groups()
if color_24:
triplet = ColorTriplet(
int(color_24[0:2], 16), int(color_24[2:4], 16), int(color_24[4:6], 16)
)
return cls(color, ColorType.TRUECOLOR, triplet=triplet)
elif color_8:
number = int(color_8)
if number > 255:
raise ColorParseError(f"color number must be <= 255 in {color!r}")
return cls(
color,
type=(ColorType.STANDARD if number < 16 else ColorType.EIGHT_BIT),
number=number,
)
else: # color_rgb:
components = color_rgb.split(",")
if len(components) != 3:
raise ColorParseError(
f"expected three components in {original_color!r}"
)
red, green, blue = components
triplet = ColorTriplet(int(red), int(green), int(blue))
if not all(component <= 255 for component in triplet):
raise ColorParseError(
f"color components must be <= 255 in {original_color!r}"
)
return cls(color, ColorType.TRUECOLOR, triplet=triplet)
@lru_cache(maxsize=1024)
def get_ansi_codes(self, foreground: bool = True) -> Tuple[str, ...]:
"""Get the ANSI escape codes for this color."""
_type = self.type
if _type == ColorType.DEFAULT:
return ("39" if foreground else "49",)
elif _type == ColorType.WINDOWS:
number = self.number
assert number is not None
fore, back = (30, 40) if number < 8 else (82, 92)
return (str(fore + number if foreground else back + number),)
elif _type == ColorType.STANDARD:
number = self.number
assert number is not None
fore, back = (30, 40) if number < 8 else (82, 92)
return (str(fore + number if foreground else back + number),)
elif _type == ColorType.EIGHT_BIT:
assert self.number is not None
return ("38" if foreground else "48", "5", str(self.number))
else: # self.standard == ColorStandard.TRUECOLOR:
assert self.triplet is not None
red, green, blue = self.triplet
return ("38" if foreground else "48", "2", str(red), str(green), str(blue))
@lru_cache(maxsize=1024)
def downgrade(self, system: ColorSystem) -> "Color":
"""Downgrade a color system to a system with fewer colors."""
if self.type in [ColorType.DEFAULT, system]:
return self
# Convert to 8-bit color from truecolor color
if system == ColorSystem.EIGHT_BIT and self.system == ColorSystem.TRUECOLOR:
assert self.triplet is not None
red, green, blue = self.triplet.normalized
_h, l, s = rgb_to_hls(red, green, blue)
# If saturation is under 10% assume it is grayscale
if s < 0.1:
gray = round(l * 25.0)
if gray == 0:
color_number = 16
elif gray == 25:
color_number = 231
else:
color_number = 231 + gray
return Color(self.name, ColorType.EIGHT_BIT, number=color_number)
color_number = (
16 + 36 * round(red * 5.0) + 6 * round(green * 5.0) + round(blue * 5.0)
)
return Color(self.name, ColorType.EIGHT_BIT, number=color_number)
# Convert to standard from truecolor or 8-bit
elif system == ColorSystem.STANDARD:
if self.system == ColorSystem.TRUECOLOR:
assert self.triplet is not None
triplet = self.triplet
else: # self.system == ColorSystem.EIGHT_BIT
assert self.number is not None
triplet = ColorTriplet(*EIGHT_BIT_PALETTE[self.number])
color_number = STANDARD_PALETTE.match(triplet)
return Color(self.name, ColorType.STANDARD, number=color_number)
elif system == ColorSystem.WINDOWS:
if self.system == ColorSystem.TRUECOLOR:
assert self.triplet is not None
triplet = self.triplet
else: # self.system == ColorSystem.EIGHT_BIT
assert self.number is not None
if self.number < 16:
return Color(self.name, ColorType.WINDOWS, number=self.number)
triplet = ColorTriplet(*EIGHT_BIT_PALETTE[self.number])
color_number = WINDOWS_PALETTE.match(triplet)
return Color(self.name, ColorType.WINDOWS, number=color_number)
return self
def parse_rgb_hex(hex_color: str) -> ColorTriplet:
"""Parse six hex characters in to RGB triplet."""
assert len(hex_color) == 6, "must be 6 characters"
color = ColorTriplet(
int(hex_color[0:2], 16), int(hex_color[2:4], 16), int(hex_color[4:6], 16)
)
return color
def blend_rgb(
color1: ColorTriplet, color2: ColorTriplet, cross_fade: float = 0.5
) -> ColorTriplet:
"""Blend one RGB color in to another."""
r1, g1, b1 = color1
r2, g2, b2 = color2
new_color = ColorTriplet(
int(r1 + (r2 - r1) * cross_fade),
int(g1 + (g2 - g1) * cross_fade),
int(b1 + (b2 - b1) * cross_fade),
)
return new_color
if __name__ == "__main__": # pragma: no cover
from .console import Console
from .table import Table
from .text import Text
console = Console()
table = Table(show_footer=False, show_edge=True)
table.add_column("Color", width=10, overflow="ellipsis")
table.add_column("Number", justify="right", style="yellow")
table.add_column("Name", style="green")
table.add_column("Hex", style="blue")
table.add_column("RGB", style="magenta")
colors = sorted((v, k) for k, v in ANSI_COLOR_NAMES.items())
for color_number, name in colors:
if "grey" in name:
continue
color_cell = Text(" " * 10, style=f"on {name}")
if color_number < 16:
table.add_row(color_cell, f"{color_number}", Text(f'"{name}"'))
else:
color = EIGHT_BIT_PALETTE[color_number] # type: ignore[has-type]
table.add_row(
color_cell, str(color_number), Text(f'"{name}"'), color.hex, color.rgb
)
console.print(table)
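    # Added sketch: parse a truecolor value and downgrade it to smaller
    # color systems; blend_rgb interpolates between two triplets.
    color = Color.parse("#ff8800")
    console.print(color)
    console.print(color.downgrade(ColorSystem.EIGHT_BIT))
    console.print(color.downgrade(ColorSystem.STANDARD))
    print(blend_rgb(ColorTriplet(255, 0, 0), ColorTriplet(0, 0, 255)))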
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/color.py
|
Python
|
mit
| 17,957 |
from typing import NamedTuple, Tuple
class ColorTriplet(NamedTuple):
"""The red, green, and blue components of a color."""
red: int
"""Red component in 0 to 255 range."""
green: int
"""Green component in 0 to 255 range."""
blue: int
"""Blue component in 0 to 255 range."""
@property
def hex(self) -> str:
"""get the color triplet in CSS style."""
red, green, blue = self
return f"#{red:02x}{green:02x}{blue:02x}"
@property
def rgb(self) -> str:
"""The color in RGB format.
Returns:
str: An rgb color, e.g. ``"rgb(100,23,255)"``.
"""
red, green, blue = self
return f"rgb({red},{green},{blue})"
@property
def normalized(self) -> Tuple[float, float, float]:
"""Convert components into floats between 0 and 1.
Returns:
Tuple[float, float, float]: A tuple of three normalized colour components.
"""
red, green, blue = self
return red / 255.0, green / 255.0, blue / 255.0
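if __name__ == "__main__":  # pragma: no cover
    # Added usage sketch: the three views of a triplet.
    triplet = ColorTriplet(100, 23, 255)
    print(triplet.hex)  # "#6417ff"
    print(triplet.rgb)  # "rgb(100,23,255)"
    print(triplet.normalized)  # floats in the 0-1 range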
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/color_triplet.py
|
Python
|
mit
| 1,054 |
from collections import defaultdict
from itertools import chain
from operator import itemgetter
from typing import Dict, Iterable, List, Optional, Tuple
from .align import Align, AlignMethod
from .console import Console, ConsoleOptions, RenderableType, RenderResult
from .constrain import Constrain
from .measure import Measurement
from .padding import Padding, PaddingDimensions
from .table import Table
from .text import TextType
from .jupyter import JupyterMixin
class Columns(JupyterMixin):
"""Display renderables in neat columns.
Args:
renderables (Iterable[RenderableType]): Any number of Rich renderables (including str).
width (int, optional): The desired width of the columns, or None to auto detect. Defaults to None.
padding (PaddingDimensions, optional): Optional padding around cells. Defaults to (0, 1).
expand (bool, optional): Expand columns to full width. Defaults to False.
equal (bool, optional): Arrange in to equal sized columns. Defaults to False.
column_first (bool, optional): Align items from top to bottom (rather than left to right). Defaults to False.
right_to_left (bool, optional): Start column from right hand side. Defaults to False.
align (str, optional): Align value ("left", "right", or "center") or None for default. Defaults to None.
title (TextType, optional): Optional title for Columns.
"""
def __init__(
self,
renderables: Optional[Iterable[RenderableType]] = None,
padding: PaddingDimensions = (0, 1),
*,
width: Optional[int] = None,
expand: bool = False,
equal: bool = False,
column_first: bool = False,
right_to_left: bool = False,
align: Optional[AlignMethod] = None,
title: Optional[TextType] = None,
) -> None:
self.renderables = list(renderables or [])
self.width = width
self.padding = padding
self.expand = expand
self.equal = equal
self.column_first = column_first
self.right_to_left = right_to_left
self.align: Optional[AlignMethod] = align
self.title = title
def add_renderable(self, renderable: RenderableType) -> None:
"""Add a renderable to the columns.
Args:
renderable (RenderableType): Any renderable object.
"""
self.renderables.append(renderable)
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
render_str = console.render_str
renderables = [
render_str(renderable) if isinstance(renderable, str) else renderable
for renderable in self.renderables
]
if not renderables:
return
_top, right, _bottom, left = Padding.unpack(self.padding)
width_padding = max(left, right)
max_width = options.max_width
widths: Dict[int, int] = defaultdict(int)
column_count = len(renderables)
get_measurement = Measurement.get
renderable_widths = [
get_measurement(console, options, renderable).maximum
for renderable in renderables
]
if self.equal:
renderable_widths = [max(renderable_widths)] * len(renderable_widths)
def iter_renderables(
column_count: int,
) -> Iterable[Tuple[int, Optional[RenderableType]]]:
item_count = len(renderables)
if self.column_first:
width_renderables = list(zip(renderable_widths, renderables))
column_lengths: List[int] = [item_count // column_count] * column_count
for col_no in range(item_count % column_count):
column_lengths[col_no] += 1
row_count = (item_count + column_count - 1) // column_count
cells = [[-1] * column_count for _ in range(row_count)]
row = col = 0
for index in range(item_count):
cells[row][col] = index
column_lengths[col] -= 1
if column_lengths[col]:
row += 1
else:
col += 1
row = 0
for index in chain.from_iterable(cells):
if index == -1:
break
yield width_renderables[index]
else:
yield from zip(renderable_widths, renderables)
# Pad odd elements with spaces
if item_count % column_count:
for _ in range(column_count - (item_count % column_count)):
yield 0, None
table = Table.grid(padding=self.padding, collapse_padding=True, pad_edge=False)
table.expand = self.expand
table.title = self.title
if self.width is not None:
column_count = (max_width) // (self.width + width_padding)
for _ in range(column_count):
table.add_column(width=self.width)
else:
while column_count > 1:
widths.clear()
column_no = 0
for renderable_width, _ in iter_renderables(column_count):
widths[column_no] = max(widths[column_no], renderable_width)
total_width = sum(widths.values()) + width_padding * (
len(widths) - 1
)
if total_width > max_width:
column_count = len(widths) - 1
break
else:
column_no = (column_no + 1) % column_count
else:
break
get_renderable = itemgetter(1)
_renderables = [
get_renderable(_renderable)
for _renderable in iter_renderables(column_count)
]
if self.equal:
_renderables = [
None
if renderable is None
else Constrain(renderable, renderable_widths[0])
for renderable in _renderables
]
if self.align:
align = self.align
_Align = Align
_renderables = [
None if renderable is None else _Align(renderable, align)
for renderable in _renderables
]
right_to_left = self.right_to_left
add_row = table.add_row
for start in range(0, len(_renderables), column_count):
row = _renderables[start : start + column_count]
if right_to_left:
row = row[::-1]
add_row(*row)
yield table
if __name__ == "__main__": # pragma: no cover
import os
console = Console()
files = [f"{i} {s}" for i, s in enumerate(sorted(os.listdir()))]
columns = Columns(files, padding=(0, 1), expand=False, equal=False)
console.print(columns)
console.rule()
columns.column_first = True
console.print(columns)
columns.right_to_left = True
console.rule()
console.print(columns)
| castiel248/Convert | Lib/site-packages/pip/_vendor/rich/columns.py | Python | mit | 7,131 |
import inspect
import io
import os
import platform
import sys
import threading
import zlib
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from datetime import datetime
from functools import wraps
from getpass import getpass
from html import escape
from inspect import isclass
from itertools import islice
from math import ceil
from time import monotonic
from types import FrameType, ModuleType, TracebackType
from typing import (
IO,
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
NamedTuple,
Optional,
TextIO,
Tuple,
Type,
Union,
cast,
)
if sys.version_info >= (3, 8):
from typing import Literal, Protocol, runtime_checkable
else:
from pip._vendor.typing_extensions import (
Literal,
Protocol,
runtime_checkable,
) # pragma: no cover
from . import errors, themes
from ._emoji_replace import _emoji_replace
from ._export_format import CONSOLE_HTML_FORMAT, CONSOLE_SVG_FORMAT
from ._log_render import FormatTimeCallable, LogRender
from .align import Align, AlignMethod
from .color import ColorSystem, blend_rgb
from .control import Control
from .emoji import EmojiVariant
from .highlighter import NullHighlighter, ReprHighlighter
from .markup import render as render_markup
from .measure import Measurement, measure_renderables
from .pager import Pager, SystemPager
from .pretty import Pretty, is_expandable
from .protocol import rich_cast
from .region import Region
from .scope import render_scope
from .screen import Screen
from .segment import Segment
from .style import Style, StyleType
from .styled import Styled
from .terminal_theme import DEFAULT_TERMINAL_THEME, SVG_EXPORT_THEME, TerminalTheme
from .text import Text, TextType
from .theme import Theme, ThemeStack
if TYPE_CHECKING:
from ._windows import WindowsConsoleFeatures
from .live import Live
from .status import Status
JUPYTER_DEFAULT_COLUMNS = 115
JUPYTER_DEFAULT_LINES = 100
WINDOWS = platform.system() == "Windows"
HighlighterType = Callable[[Union[str, "Text"]], "Text"]
JustifyMethod = Literal["default", "left", "center", "right", "full"]
OverflowMethod = Literal["fold", "crop", "ellipsis", "ignore"]
class NoChange:
pass
NO_CHANGE = NoChange()
try:
_STDIN_FILENO = sys.__stdin__.fileno()
except Exception:
_STDIN_FILENO = 0
try:
_STDOUT_FILENO = sys.__stdout__.fileno()
except Exception:
_STDOUT_FILENO = 1
try:
_STDERR_FILENO = sys.__stderr__.fileno()
except Exception:
_STDERR_FILENO = 2
_STD_STREAMS = (_STDIN_FILENO, _STDOUT_FILENO, _STDERR_FILENO)
_STD_STREAMS_OUTPUT = (_STDOUT_FILENO, _STDERR_FILENO)
_TERM_COLORS = {"256color": ColorSystem.EIGHT_BIT, "16color": ColorSystem.STANDARD}
class ConsoleDimensions(NamedTuple):
"""Size of the terminal."""
width: int
"""The width of the console in 'cells'."""
height: int
"""The height of the console in lines."""
@dataclass
class ConsoleOptions:
"""Options for __rich_console__ method."""
size: ConsoleDimensions
"""Size of console."""
legacy_windows: bool
"""legacy_windows: flag for legacy windows."""
min_width: int
"""Minimum width of renderable."""
max_width: int
"""Maximum width of renderable."""
is_terminal: bool
"""True if the target is a terminal, otherwise False."""
encoding: str
"""Encoding of terminal."""
max_height: int
"""Height of container (starts as terminal)"""
justify: Optional[JustifyMethod] = None
"""Justify value override for renderable."""
overflow: Optional[OverflowMethod] = None
"""Overflow value override for renderable."""
no_wrap: Optional[bool] = False
"""Disable wrapping for text."""
highlight: Optional[bool] = None
"""Highlight override for render_str."""
markup: Optional[bool] = None
"""Enable markup when rendering strings."""
height: Optional[int] = None
@property
def ascii_only(self) -> bool:
"""Check if renderables should use ascii only."""
return not self.encoding.startswith("utf")
def copy(self) -> "ConsoleOptions":
"""Return a copy of the options.
Returns:
ConsoleOptions: a copy of self.
"""
options: ConsoleOptions = ConsoleOptions.__new__(ConsoleOptions)
options.__dict__ = self.__dict__.copy()
return options
def update(
self,
*,
width: Union[int, NoChange] = NO_CHANGE,
min_width: Union[int, NoChange] = NO_CHANGE,
max_width: Union[int, NoChange] = NO_CHANGE,
justify: Union[Optional[JustifyMethod], NoChange] = NO_CHANGE,
overflow: Union[Optional[OverflowMethod], NoChange] = NO_CHANGE,
no_wrap: Union[Optional[bool], NoChange] = NO_CHANGE,
highlight: Union[Optional[bool], NoChange] = NO_CHANGE,
markup: Union[Optional[bool], NoChange] = NO_CHANGE,
height: Union[Optional[int], NoChange] = NO_CHANGE,
) -> "ConsoleOptions":
"""Update values, return a copy."""
options = self.copy()
if not isinstance(width, NoChange):
options.min_width = options.max_width = max(0, width)
if not isinstance(min_width, NoChange):
options.min_width = min_width
if not isinstance(max_width, NoChange):
options.max_width = max_width
if not isinstance(justify, NoChange):
options.justify = justify
if not isinstance(overflow, NoChange):
options.overflow = overflow
if not isinstance(no_wrap, NoChange):
options.no_wrap = no_wrap
if not isinstance(highlight, NoChange):
options.highlight = highlight
if not isinstance(markup, NoChange):
options.markup = markup
if not isinstance(height, NoChange):
if height is not None:
options.max_height = height
options.height = None if height is None else max(0, height)
return options
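    # Minimal usage sketch (assumes an existing ``options`` instance): derive
    # narrower options for a nested renderable without mutating the original.
    #
    #   child_options = options.update(width=40, overflow="ellipsis")
    #   assert child_options.max_width == 40 and child_options is not options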
def update_width(self, width: int) -> "ConsoleOptions":
"""Update just the width, return a copy.
Args:
width (int): New width (sets both min_width and max_width)
Returns:
~ConsoleOptions: New console options instance.
"""
options = self.copy()
options.min_width = options.max_width = max(0, width)
return options
def update_height(self, height: int) -> "ConsoleOptions":
"""Update the height, and return a copy.
Args:
height (int): New height
Returns:
~ConsoleOptions: New Console options instance.
"""
options = self.copy()
options.max_height = options.height = height
return options
def reset_height(self) -> "ConsoleOptions":
"""Return a copy of the options with height set to ``None``.
Returns:
~ConsoleOptions: New console options instance.
"""
options = self.copy()
options.height = None
return options
def update_dimensions(self, width: int, height: int) -> "ConsoleOptions":
"""Update the width and height, and return a copy.
Args:
width (int): New width (sets both min_width and max_width).
height (int): New height.
Returns:
~ConsoleOptions: New console options instance.
"""
options = self.copy()
options.min_width = options.max_width = max(0, width)
options.height = options.max_height = height
return options
@runtime_checkable
class RichCast(Protocol):
"""An object that may be 'cast' to a console renderable."""
def __rich__(
self,
) -> Union["ConsoleRenderable", "RichCast", str]: # pragma: no cover
...
@runtime_checkable
class ConsoleRenderable(Protocol):
"""An object that supports the console protocol."""
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult": # pragma: no cover
...
# A type that may be rendered by Console.
RenderableType = Union[ConsoleRenderable, RichCast, str]
# The result of calling a __rich_console__ method.
RenderResult = Iterable[Union[RenderableType, Segment]]
_null_highlighter = NullHighlighter()
class CaptureError(Exception):
"""An error in the Capture context manager."""
class NewLine:
"""A renderable to generate new line(s)"""
def __init__(self, count: int = 1) -> None:
self.count = count
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> Iterable[Segment]:
yield Segment("\n" * self.count)
class ScreenUpdate:
"""Render a list of lines at a given offset."""
def __init__(self, lines: List[List[Segment]], x: int, y: int) -> None:
self._lines = lines
self.x = x
self.y = y
def __rich_console__(
self, console: "Console", options: ConsoleOptions
) -> RenderResult:
x = self.x
move_to = Control.move_to
for offset, line in enumerate(self._lines, self.y):
yield move_to(x, offset)
yield from line
class Capture:
"""Context manager to capture the result of printing to the console.
See :meth:`~rich.console.Console.capture` for how to use.
Args:
console (Console): A console instance to capture output.
"""
def __init__(self, console: "Console") -> None:
self._console = console
self._result: Optional[str] = None
def __enter__(self) -> "Capture":
self._console.begin_capture()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self._result = self._console.end_capture()
def get(self) -> str:
"""Get the result of the capture."""
if self._result is None:
raise CaptureError(
"Capture result is not available until context manager exits."
)
return self._result
class ThemeContext:
"""A context manager to use a temporary theme. See :meth:`~rich.console.Console.use_theme` for usage."""
def __init__(self, console: "Console", theme: Theme, inherit: bool = True) -> None:
self.console = console
self.theme = theme
self.inherit = inherit
def __enter__(self) -> "ThemeContext":
self.console.push_theme(self.theme)
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.console.pop_theme()
class PagerContext:
"""A context manager that 'pages' content. See :meth:`~rich.console.Console.pager` for usage."""
def __init__(
self,
console: "Console",
pager: Optional[Pager] = None,
styles: bool = False,
links: bool = False,
) -> None:
self._console = console
self.pager = SystemPager() if pager is None else pager
self.styles = styles
self.links = links
def __enter__(self) -> "PagerContext":
self._console._enter_buffer()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
if exc_type is None:
with self._console._lock:
buffer: List[Segment] = self._console._buffer[:]
del self._console._buffer[:]
segments: Iterable[Segment] = buffer
if not self.styles:
segments = Segment.strip_styles(segments)
elif not self.links:
segments = Segment.strip_links(segments)
content = self._console._render_buffer(segments)
self.pager.show(content)
self._console._exit_buffer()
class ScreenContext:
"""A context manager that enables an alternative screen. See :meth:`~rich.console.Console.screen` for usage."""
def __init__(
self, console: "Console", hide_cursor: bool, style: StyleType = ""
) -> None:
self.console = console
self.hide_cursor = hide_cursor
self.screen = Screen(style=style)
self._changed = False
def update(
self, *renderables: RenderableType, style: Optional[StyleType] = None
) -> None:
"""Update the screen.
Args:
            renderables (RenderableType, optional): Zero or more renderables to replace the
                current renderable; omit for no change.
            style (Style, optional): Replacement style, or None for no change. Defaults to None.
"""
if renderables:
self.screen.renderable = (
Group(*renderables) if len(renderables) > 1 else renderables[0]
)
if style is not None:
self.screen.style = style
self.console.print(self.screen, end="")
def __enter__(self) -> "ScreenContext":
self._changed = self.console.set_alt_screen(True)
if self._changed and self.hide_cursor:
self.console.show_cursor(False)
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
if self._changed:
self.console.set_alt_screen(False)
if self.hide_cursor:
self.console.show_cursor(True)
class Group:
"""Takes a group of renderables and returns a renderable object that renders the group.
Args:
renderables (Iterable[RenderableType]): An iterable of renderable objects.
fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True.
"""
def __init__(self, *renderables: "RenderableType", fit: bool = True) -> None:
self._renderables = renderables
self.fit = fit
self._render: Optional[List[RenderableType]] = None
@property
def renderables(self) -> List["RenderableType"]:
if self._render is None:
self._render = list(self._renderables)
return self._render
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
if self.fit:
return measure_renderables(console, options, self.renderables)
else:
return Measurement(options.max_width, options.max_width)
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> RenderResult:
yield from self.renderables
def group(fit: bool = True) -> Callable[..., Callable[..., Group]]:
"""A decorator that turns an iterable of renderables in to a group.
Args:
fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True.
"""
def decorator(
method: Callable[..., Iterable[RenderableType]]
) -> Callable[..., Group]:
"""Convert a method that returns an iterable of renderables in to a Group."""
@wraps(method)
def _replace(*args: Any, **kwargs: Any) -> Group:
renderables = method(*args, **kwargs)
return Group(*renderables, fit=fit)
return _replace
return decorator
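# Minimal usage sketch for the ``group`` decorator (``Panel`` is assumed to be
# imported from rich.panel; any renderables would work):
#
#   @group()
#   def get_content():
#       yield Panel("one")
#       yield Panel("two")
#
# Calling ``get_content()`` now returns a single Group renderable.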
def _is_jupyter() -> bool: # pragma: no cover
"""Check if we're running in a Jupyter notebook."""
try:
get_ipython # type: ignore[name-defined]
except NameError:
return False
ipython = get_ipython() # type: ignore[name-defined]
shell = ipython.__class__.__name__
if "google.colab" in str(ipython.__class__) or shell == "ZMQInteractiveShell":
return True # Jupyter notebook or qtconsole
elif shell == "TerminalInteractiveShell":
return False # Terminal running IPython
else:
return False # Other type (?)
COLOR_SYSTEMS = {
"standard": ColorSystem.STANDARD,
"256": ColorSystem.EIGHT_BIT,
"truecolor": ColorSystem.TRUECOLOR,
"windows": ColorSystem.WINDOWS,
}
_COLOR_SYSTEMS_NAMES = {system: name for name, system in COLOR_SYSTEMS.items()}
@dataclass
class ConsoleThreadLocals(threading.local):
"""Thread local values for Console context."""
theme_stack: ThemeStack
buffer: List[Segment] = field(default_factory=list)
buffer_index: int = 0
class RenderHook(ABC):
"""Provides hooks in to the render process."""
@abstractmethod
def process_renderables(
self, renderables: List[ConsoleRenderable]
) -> List[ConsoleRenderable]:
"""Called with a list of objects to render.
This method can return a new list of renderables, or modify and return the same list.
Args:
renderables (List[ConsoleRenderable]): A number of renderable objects.
Returns:
List[ConsoleRenderable]: A replacement list of renderables.
"""
_windows_console_features: Optional["WindowsConsoleFeatures"] = None
def get_windows_console_features() -> "WindowsConsoleFeatures": # pragma: no cover
global _windows_console_features
if _windows_console_features is not None:
return _windows_console_features
from ._windows import get_windows_console_features
_windows_console_features = get_windows_console_features()
return _windows_console_features
def detect_legacy_windows() -> bool:
"""Detect legacy Windows."""
return WINDOWS and not get_windows_console_features().vt
class Console:
"""A high level console interface.
Args:
color_system (str, optional): The color system supported by your terminal,
either ``"standard"``, ``"256"`` or ``"truecolor"``. Leave as ``"auto"`` to autodetect.
force_terminal (Optional[bool], optional): Enable/disable terminal control codes, or None to auto-detect terminal. Defaults to None.
force_jupyter (Optional[bool], optional): Enable/disable Jupyter rendering, or None to auto-detect Jupyter. Defaults to None.
force_interactive (Optional[bool], optional): Enable/disable interactive mode, or None to auto detect. Defaults to None.
soft_wrap (Optional[bool], optional): Set soft wrap default on print method. Defaults to False.
theme (Theme, optional): An optional style theme object, or ``None`` for default theme.
stderr (bool, optional): Use stderr rather than stdout if ``file`` is not specified. Defaults to False.
file (IO, optional): A file object where the console should write to. Defaults to stdout.
        quiet (bool, optional): Boolean to suppress all output. Defaults to False.
width (int, optional): The width of the terminal. Leave as default to auto-detect width.
height (int, optional): The height of the terminal. Leave as default to auto-detect height.
style (StyleType, optional): Style to apply to all output, or None for no style. Defaults to None.
        no_color (Optional[bool], optional): Enable no color mode, or None to auto detect. Defaults to None.
tab_size (int, optional): Number of spaces used to replace a tab character. Defaults to 8.
record (bool, optional): Boolean to enable recording of terminal output,
required to call :meth:`export_html`, :meth:`export_svg`, and :meth:`export_text`. Defaults to False.
markup (bool, optional): Boolean to enable :ref:`console_markup`. Defaults to True.
emoji (bool, optional): Enable emoji code. Defaults to True.
emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None.
highlight (bool, optional): Enable automatic highlighting. Defaults to True.
log_time (bool, optional): Boolean to enable logging of time by :meth:`log` methods. Defaults to True.
log_path (bool, optional): Boolean to enable the logging of the caller by :meth:`log`. Defaults to True.
log_time_format (Union[str, TimeFormatterCallable], optional): If ``log_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%X] ".
highlighter (HighlighterType, optional): Default highlighter.
legacy_windows (bool, optional): Enable legacy Windows mode, or ``None`` to auto detect. Defaults to ``None``.
safe_box (bool, optional): Restrict box options that don't render on legacy Windows.
get_datetime (Callable[[], datetime], optional): Callable that gets the current time as a datetime.datetime object (used by Console.log),
or None for datetime.now.
get_time (Callable[[], time], optional): Callable that gets the current time in seconds, default uses time.monotonic.
"""
_environ: Mapping[str, str] = os.environ
def __init__(
self,
*,
color_system: Optional[
Literal["auto", "standard", "256", "truecolor", "windows"]
] = "auto",
force_terminal: Optional[bool] = None,
force_jupyter: Optional[bool] = None,
force_interactive: Optional[bool] = None,
soft_wrap: bool = False,
theme: Optional[Theme] = None,
stderr: bool = False,
file: Optional[IO[str]] = None,
quiet: bool = False,
width: Optional[int] = None,
height: Optional[int] = None,
style: Optional[StyleType] = None,
no_color: Optional[bool] = None,
tab_size: int = 8,
record: bool = False,
markup: bool = True,
emoji: bool = True,
emoji_variant: Optional[EmojiVariant] = None,
highlight: bool = True,
log_time: bool = True,
log_path: bool = True,
log_time_format: Union[str, FormatTimeCallable] = "[%X]",
highlighter: Optional["HighlighterType"] = ReprHighlighter(),
legacy_windows: Optional[bool] = None,
safe_box: bool = True,
get_datetime: Optional[Callable[[], datetime]] = None,
get_time: Optional[Callable[[], float]] = None,
_environ: Optional[Mapping[str, str]] = None,
):
# Copy of os.environ allows us to replace it for testing
if _environ is not None:
self._environ = _environ
self.is_jupyter = _is_jupyter() if force_jupyter is None else force_jupyter
if self.is_jupyter:
if width is None:
jupyter_columns = self._environ.get("JUPYTER_COLUMNS")
if jupyter_columns is not None and jupyter_columns.isdigit():
width = int(jupyter_columns)
else:
width = JUPYTER_DEFAULT_COLUMNS
if height is None:
jupyter_lines = self._environ.get("JUPYTER_LINES")
if jupyter_lines is not None and jupyter_lines.isdigit():
height = int(jupyter_lines)
else:
height = JUPYTER_DEFAULT_LINES
self.tab_size = tab_size
self.record = record
self._markup = markup
self._emoji = emoji
self._emoji_variant: Optional[EmojiVariant] = emoji_variant
self._highlight = highlight
self.legacy_windows: bool = (
(detect_legacy_windows() and not self.is_jupyter)
if legacy_windows is None
else legacy_windows
)
if width is None:
columns = self._environ.get("COLUMNS")
if columns is not None and columns.isdigit():
width = int(columns) - self.legacy_windows
if height is None:
lines = self._environ.get("LINES")
if lines is not None and lines.isdigit():
height = int(lines)
self.soft_wrap = soft_wrap
self._width = width
self._height = height
self._color_system: Optional[ColorSystem]
self._force_terminal = force_terminal
self._file = file
self.quiet = quiet
self.stderr = stderr
if color_system is None:
self._color_system = None
elif color_system == "auto":
self._color_system = self._detect_color_system()
else:
self._color_system = COLOR_SYSTEMS[color_system]
self._lock = threading.RLock()
self._log_render = LogRender(
show_time=log_time,
show_path=log_path,
time_format=log_time_format,
)
self.highlighter: HighlighterType = highlighter or _null_highlighter
self.safe_box = safe_box
self.get_datetime = get_datetime or datetime.now
self.get_time = get_time or monotonic
self.style = style
self.no_color = (
no_color if no_color is not None else "NO_COLOR" in self._environ
)
self.is_interactive = (
(self.is_terminal and not self.is_dumb_terminal)
if force_interactive is None
else force_interactive
)
self._record_buffer_lock = threading.RLock()
self._thread_locals = ConsoleThreadLocals(
theme_stack=ThemeStack(themes.DEFAULT if theme is None else theme)
)
self._record_buffer: List[Segment] = []
self._render_hooks: List[RenderHook] = []
self._live: Optional["Live"] = None
self._is_alt_screen = False
def __repr__(self) -> str:
return f"<console width={self.width} {str(self._color_system)}>"
@property
def file(self) -> IO[str]:
"""Get the file object to write to."""
file = self._file or (sys.stderr if self.stderr else sys.stdout)
file = getattr(file, "rich_proxied_file", file)
return file
@file.setter
def file(self, new_file: IO[str]) -> None:
"""Set a new file object."""
self._file = new_file
@property
def _buffer(self) -> List[Segment]:
"""Get a thread local buffer."""
return self._thread_locals.buffer
@property
def _buffer_index(self) -> int:
"""Get a thread local buffer."""
return self._thread_locals.buffer_index
@_buffer_index.setter
def _buffer_index(self, value: int) -> None:
self._thread_locals.buffer_index = value
@property
def _theme_stack(self) -> ThemeStack:
"""Get the thread local theme stack."""
return self._thread_locals.theme_stack
def _detect_color_system(self) -> Optional[ColorSystem]:
"""Detect color system from env vars."""
if self.is_jupyter:
return ColorSystem.TRUECOLOR
if not self.is_terminal or self.is_dumb_terminal:
return None
if WINDOWS: # pragma: no cover
if self.legacy_windows: # pragma: no cover
return ColorSystem.WINDOWS
windows_console_features = get_windows_console_features()
return (
ColorSystem.TRUECOLOR
if windows_console_features.truecolor
else ColorSystem.EIGHT_BIT
)
else:
color_term = self._environ.get("COLORTERM", "").strip().lower()
if color_term in ("truecolor", "24bit"):
return ColorSystem.TRUECOLOR
term = self._environ.get("TERM", "").strip().lower()
_term_name, _hyphen, colors = term.rpartition("-")
color_system = _TERM_COLORS.get(colors, ColorSystem.STANDARD)
return color_system
def _enter_buffer(self) -> None:
"""Enter in to a buffer context, and buffer all output."""
self._buffer_index += 1
def _exit_buffer(self) -> None:
"""Leave buffer context, and render content if required."""
self._buffer_index -= 1
self._check_buffer()
def set_live(self, live: "Live") -> None:
"""Set Live instance. Used by Live context manager.
Args:
live (Live): Live instance using this Console.
Raises:
errors.LiveError: If this Console has a Live context currently active.
"""
with self._lock:
if self._live is not None:
raise errors.LiveError("Only one live display may be active at once")
self._live = live
def clear_live(self) -> None:
"""Clear the Live instance."""
with self._lock:
self._live = None
def push_render_hook(self, hook: RenderHook) -> None:
"""Add a new render hook to the stack.
Args:
hook (RenderHook): Render hook instance.
"""
with self._lock:
self._render_hooks.append(hook)
def pop_render_hook(self) -> None:
"""Pop the last renderhook from the stack."""
with self._lock:
self._render_hooks.pop()
def __enter__(self) -> "Console":
"""Own context manager to enter buffer context."""
self._enter_buffer()
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
"""Exit buffer context."""
self._exit_buffer()
def begin_capture(self) -> None:
"""Begin capturing console output. Call :meth:`end_capture` to exit capture mode and return output."""
self._enter_buffer()
def end_capture(self) -> str:
"""End capture mode and return captured string.
Returns:
str: Console output.
"""
render_result = self._render_buffer(self._buffer)
del self._buffer[:]
self._exit_buffer()
return render_result
def push_theme(self, theme: Theme, *, inherit: bool = True) -> None:
"""Push a new theme on to the top of the stack, replacing the styles from the previous theme.
Generally speaking, you should call :meth:`~rich.console.Console.use_theme` to get a context manager, rather
than calling this method directly.
Args:
theme (Theme): A theme instance.
inherit (bool, optional): Inherit existing styles. Defaults to True.
"""
self._theme_stack.push_theme(theme, inherit=inherit)
def pop_theme(self) -> None:
"""Remove theme from top of stack, restoring previous theme."""
self._theme_stack.pop_theme()
def use_theme(self, theme: Theme, *, inherit: bool = True) -> ThemeContext:
"""Use a different theme for the duration of the context manager.
Args:
            theme (Theme): Theme instance to use.
inherit (bool, optional): Inherit existing console styles. Defaults to True.
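        Example (illustrative sketch, assuming a ``console`` instance):
            >>> with console.use_theme(Theme({"warning": "bold magenta"})):
            ...     console.print("[warning]Careful![/]")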
Returns:
            ThemeContext: A context manager that applies the theme on enter and restores the previous theme on exit.
"""
return ThemeContext(self, theme, inherit)
@property
def color_system(self) -> Optional[str]:
"""Get color system string.
Returns:
            Optional[str]: "standard", "256", "truecolor" or "windows".
"""
if self._color_system is not None:
return _COLOR_SYSTEMS_NAMES[self._color_system]
else:
return None
@property
def encoding(self) -> str:
"""Get the encoding of the console file, e.g. ``"utf-8"``.
Returns:
str: A standard encoding string.
"""
return (getattr(self.file, "encoding", "utf-8") or "utf-8").lower()
@property
def is_terminal(self) -> bool:
"""Check if the console is writing to a terminal.
Returns:
            bool: True if the console is writing to a device capable of
                understanding terminal codes, otherwise False.
"""
if self._force_terminal is not None:
return self._force_terminal
if hasattr(sys.stdin, "__module__") and sys.stdin.__module__.startswith(
"idlelib"
):
# Return False for Idle which claims to be a tty but can't handle ansi codes
return False
isatty: Optional[Callable[[], bool]] = getattr(self.file, "isatty", None)
try:
return False if isatty is None else isatty()
except ValueError:
# in some situation (at the end of a pytest run for example) isatty() can raise
# ValueError: I/O operation on closed file
# return False because we aren't in a terminal anymore
return False
@property
def is_dumb_terminal(self) -> bool:
"""Detect dumb terminal.
Returns:
bool: True if writing to a dumb terminal, otherwise False.
"""
_term = self._environ.get("TERM", "")
is_dumb = _term.lower() in ("dumb", "unknown")
return self.is_terminal and is_dumb
@property
def options(self) -> ConsoleOptions:
"""Get default console options."""
return ConsoleOptions(
max_height=self.size.height,
size=self.size,
legacy_windows=self.legacy_windows,
min_width=1,
max_width=self.width,
encoding=self.encoding,
is_terminal=self.is_terminal,
)
@property
def size(self) -> ConsoleDimensions:
"""Get the size of the console.
Returns:
ConsoleDimensions: A named tuple containing the dimensions.
"""
if self._width is not None and self._height is not None:
return ConsoleDimensions(self._width - self.legacy_windows, self._height)
if self.is_dumb_terminal:
return ConsoleDimensions(80, 25)
width: Optional[int] = None
height: Optional[int] = None
if WINDOWS: # pragma: no cover
try:
width, height = os.get_terminal_size()
except (AttributeError, ValueError, OSError): # Probably not a terminal
pass
else:
for file_descriptor in _STD_STREAMS:
try:
width, height = os.get_terminal_size(file_descriptor)
except (AttributeError, ValueError, OSError):
pass
else:
break
columns = self._environ.get("COLUMNS")
if columns is not None and columns.isdigit():
width = int(columns)
lines = self._environ.get("LINES")
if lines is not None and lines.isdigit():
height = int(lines)
# get_terminal_size can report 0, 0 if run from pseudo-terminal
width = width or 80
height = height or 25
return ConsoleDimensions(
width - self.legacy_windows if self._width is None else self._width,
height if self._height is None else self._height,
)
@size.setter
def size(self, new_size: Tuple[int, int]) -> None:
"""Set a new size for the terminal.
Args:
new_size (Tuple[int, int]): New width and height.
"""
width, height = new_size
self._width = width
self._height = height
@property
def width(self) -> int:
"""Get the width of the console.
Returns:
int: The width (in characters) of the console.
"""
return self.size.width
@width.setter
def width(self, width: int) -> None:
"""Set width.
Args:
width (int): New width.
"""
self._width = width
@property
def height(self) -> int:
"""Get the height of the console.
Returns:
int: The height (in lines) of the console.
"""
return self.size.height
@height.setter
def height(self, height: int) -> None:
"""Set height.
Args:
height (int): new height.
"""
self._height = height
def bell(self) -> None:
"""Play a 'bell' sound (if supported by the terminal)."""
self.control(Control.bell())
def capture(self) -> Capture:
"""A context manager to *capture* the result of print() or log() in a string,
rather than writing it to the console.
Example:
>>> from rich.console import Console
>>> console = Console()
>>> with console.capture() as capture:
... console.print("[bold magenta]Hello World[/]")
>>> print(capture.get())
Returns:
            Capture: Context manager that disables writing to the terminal.
"""
capture = Capture(self)
return capture
def pager(
self, pager: Optional[Pager] = None, styles: bool = False, links: bool = False
) -> PagerContext:
"""A context manager to display anything printed within a "pager". The pager application
is defined by the system and will typically support at least pressing a key to scroll.
Args:
pager (Pager, optional): A pager object, or None to use :class:`~rich.pager.SystemPager`. Defaults to None.
styles (bool, optional): Show styles in pager. Defaults to False.
links (bool, optional): Show links in pager. Defaults to False.
Example:
>>> from rich.console import Console
>>> from rich.__main__ import make_test_card
>>> console = Console()
>>> with console.pager():
console.print(make_test_card())
Returns:
PagerContext: A context manager.
"""
return PagerContext(self, pager=pager, styles=styles, links=links)
def line(self, count: int = 1) -> None:
"""Write new line(s).
Args:
count (int, optional): Number of new lines. Defaults to 1.
"""
assert count >= 0, "count must be >= 0"
self.print(NewLine(count))
def clear(self, home: bool = True) -> None:
"""Clear the screen.
Args:
home (bool, optional): Also move the cursor to 'home' position. Defaults to True.
"""
if home:
self.control(Control.clear(), Control.home())
else:
self.control(Control.clear())
def status(
self,
status: RenderableType,
*,
spinner: str = "dots",
spinner_style: str = "status.spinner",
speed: float = 1.0,
refresh_per_second: float = 12.5,
) -> "Status":
"""Display a status and spinner.
Args:
status (RenderableType): A status renderable (str or Text typically).
spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots".
spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner".
speed (float, optional): Speed factor for spinner animation. Defaults to 1.0.
refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5.
Returns:
Status: A Status object that may be used as a context manager.
"""
from .status import Status
status_renderable = Status(
status,
console=self,
spinner=spinner,
spinner_style=spinner_style,
speed=speed,
refresh_per_second=refresh_per_second,
)
return status_renderable
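    # Minimal usage sketch (``do_work`` is a hypothetical long-running call):
    #
    #   with console.status("Working...", spinner="dots"):
    #       do_work()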
def show_cursor(self, show: bool = True) -> bool:
"""Show or hide the cursor.
Args:
show (bool, optional): Set visibility of the cursor.
"""
if self.is_terminal:
self.control(Control.show_cursor(show))
return True
return False
def set_alt_screen(self, enable: bool = True) -> bool:
"""Enables alternative screen mode.
        Note, if you enable this mode, you should ensure that it is disabled before
the application exits. See :meth:`~rich.Console.screen` for a context manager
that handles this for you.
Args:
enable (bool, optional): Enable (True) or disable (False) alternate screen. Defaults to True.
Returns:
bool: True if the control codes were written.
"""
changed = False
if self.is_terminal and not self.legacy_windows:
self.control(Control.alt_screen(enable))
changed = True
self._is_alt_screen = enable
return changed
@property
def is_alt_screen(self) -> bool:
"""Check if the alt screen was enabled.
Returns:
bool: True if the alt screen was enabled, otherwise False.
"""
return self._is_alt_screen
def set_window_title(self, title: str) -> bool:
"""Set the title of the console terminal window.
Warning: There is no means within Rich of "resetting" the window title to its
previous value, meaning the title you set will persist even after your application
exits.
``fish`` shell resets the window title before and after each command by default,
negating this issue. Windows Terminal and command prompt will also reset the title for you.
Most other shells and terminals, however, do not do this.
Some terminals may require configuration changes before you can set the title.
Some terminals may not support setting the title at all.
Other software (including the terminal itself, the shell, custom prompts, plugins, etc.)
may also set the terminal window title. This could result in whatever value you write
using this method being overwritten.
Args:
title (str): The new title of the terminal window.
Returns:
bool: True if the control code to change the terminal title was
written, otherwise False. Note that a return value of True
does not guarantee that the window title has actually changed,
since the feature may be unsupported/disabled in some terminals.
"""
if self.is_terminal:
self.control(Control.title(title))
return True
return False
def screen(
self, hide_cursor: bool = True, style: Optional[StyleType] = None
) -> "ScreenContext":
"""Context manager to enable and disable 'alternative screen' mode.
Args:
            hide_cursor (bool, optional): Also hide the cursor. Defaults to True.
style (Style, optional): Optional style for screen. Defaults to None.
Returns:
~ScreenContext: Context which enables alternate screen on enter, and disables it on exit.
"""
return ScreenContext(self, hide_cursor=hide_cursor, style=style or "")
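    # Illustrative sketch: render on the alternate screen and restore the
    # terminal on exit (assumes a Console instance and something to block on).
    #
    #   with console.screen(style="bold white on blue") as screen:
    #       screen.update(Align.center("Working...", vertical="middle"))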
def measure(
self, renderable: RenderableType, *, options: Optional[ConsoleOptions] = None
) -> Measurement:
"""Measure a renderable. Returns a :class:`~rich.measure.Measurement` object which contains
information regarding the number of characters required to print the renderable.
Args:
renderable (RenderableType): Any renderable or string.
options (Optional[ConsoleOptions], optional): Options to use when measuring, or None
to use default options. Defaults to None.
Returns:
Measurement: A measurement of the renderable.
"""
measurement = Measurement.get(self, options or self.options, renderable)
return measurement
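    # Minimal sketch: get the minimum and maximum cell widths a renderable
    # needs (Measurement is a named tuple, so it unpacks).
    #
    #   minimum, maximum = console.measure("Hello, World!")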
def render(
self, renderable: RenderableType, options: Optional[ConsoleOptions] = None
) -> Iterable[Segment]:
"""Render an object in to an iterable of `Segment` instances.
This method contains the logic for rendering objects with the console protocol.
You are unlikely to need to use it directly, unless you are extending the library.
Args:
renderable (RenderableType): An object supporting the console protocol, or
an object that may be converted to a string.
options (ConsoleOptions, optional): An options object, or None to use self.options. Defaults to None.
Returns:
Iterable[Segment]: An iterable of segments that may be rendered.
"""
_options = options or self.options
if _options.max_width < 1:
# No space to render anything. This prevents potential recursion errors.
return
render_iterable: RenderResult
renderable = rich_cast(renderable)
if hasattr(renderable, "__rich_console__") and not isclass(renderable):
render_iterable = renderable.__rich_console__(self, _options) # type: ignore[union-attr]
elif isinstance(renderable, str):
text_renderable = self.render_str(
renderable, highlight=_options.highlight, markup=_options.markup
)
render_iterable = text_renderable.__rich_console__(self, _options)
else:
raise errors.NotRenderableError(
f"Unable to render {renderable!r}; "
"A str, Segment or object with __rich_console__ method is required"
)
try:
iter_render = iter(render_iterable)
except TypeError:
raise errors.NotRenderableError(
f"object {render_iterable!r} is not renderable"
)
_Segment = Segment
_options = _options.reset_height()
for render_output in iter_render:
if isinstance(render_output, _Segment):
yield render_output
else:
yield from self.render(render_output, _options)
def render_lines(
self,
renderable: RenderableType,
options: Optional[ConsoleOptions] = None,
*,
style: Optional[Style] = None,
pad: bool = True,
new_lines: bool = False,
) -> List[List[Segment]]:
"""Render objects in to a list of lines.
The output of render_lines is useful when further formatting of rendered console text
is required, such as the Panel class which draws a border around any renderable object.
Args:
renderable (RenderableType): Any object renderable in the console.
options (Optional[ConsoleOptions], optional): Console options, or None to use self.options. Default to ``None``.
style (Style, optional): Optional style to apply to renderables. Defaults to ``None``.
pad (bool, optional): Pad lines shorter than render width. Defaults to ``True``.
new_lines (bool, optional): Include "\n" characters at end of lines.
Returns:
List[List[Segment]]: A list of lines, where a line is a list of Segment objects.
"""
with self._lock:
render_options = options or self.options
_rendered = self.render(renderable, render_options)
if style:
_rendered = Segment.apply_style(_rendered, style)
render_height = render_options.height
if render_height is not None:
render_height = max(0, render_height)
lines = list(
islice(
Segment.split_and_crop_lines(
_rendered,
render_options.max_width,
include_new_lines=new_lines,
pad=pad,
style=style,
),
None,
render_height,
)
)
if render_options.height is not None:
extra_lines = render_options.height - len(lines)
if extra_lines > 0:
pad_line = [
[Segment(" " * render_options.max_width, style), Segment("\n")]
if new_lines
else [Segment(" " * render_options.max_width, style)]
]
lines.extend(pad_line * extra_lines)
return lines
def render_str(
self,
text: str,
*,
style: Union[str, Style] = "",
justify: Optional[JustifyMethod] = None,
overflow: Optional[OverflowMethod] = None,
emoji: Optional[bool] = None,
markup: Optional[bool] = None,
highlight: Optional[bool] = None,
highlighter: Optional[HighlighterType] = None,
) -> "Text":
"""Convert a string to a Text instance. This is called automatically if
you print or log a string.
Args:
text (str): Text to render.
style (Union[str, Style], optional): Style to apply to rendered text.
justify (str, optional): Justify method: "default", "left", "center", "full", or "right". Defaults to ``None``.
overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to ``None``.
emoji (Optional[bool], optional): Enable emoji, or ``None`` to use Console default.
markup (Optional[bool], optional): Enable markup, or ``None`` to use Console default.
highlight (Optional[bool], optional): Enable highlighting, or ``None`` to use Console default.
highlighter (HighlighterType, optional): Optional highlighter to apply.
Returns:
ConsoleRenderable: Renderable object.
"""
emoji_enabled = emoji or (emoji is None and self._emoji)
markup_enabled = markup or (markup is None and self._markup)
highlight_enabled = highlight or (highlight is None and self._highlight)
if markup_enabled:
rich_text = render_markup(
text,
style=style,
emoji=emoji_enabled,
emoji_variant=self._emoji_variant,
)
rich_text.justify = justify
rich_text.overflow = overflow
else:
rich_text = Text(
_emoji_replace(text, default_variant=self._emoji_variant)
if emoji_enabled
else text,
justify=justify,
overflow=overflow,
style=style,
)
_highlighter = (highlighter or self.highlighter) if highlight_enabled else None
if _highlighter is not None:
highlight_text = _highlighter(str(rich_text))
highlight_text.copy_styles(rich_text)
return highlight_text
return rich_text
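    # Minimal sketch (assumes a Console instance): convert console markup to a
    # Text object without printing it.
    #
    #   text = console.render_str("[bold]hello[/] world")
    #   assert text.plain == "hello world"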
def get_style(
self, name: Union[str, Style], *, default: Optional[Union[Style, str]] = None
) -> Style:
"""Get a Style instance by its theme name or parse a definition.
Args:
name (str): The name of a style or a style definition.
Returns:
Style: A Style object.
Raises:
MissingStyle: If no style could be parsed from name.
"""
if isinstance(name, Style):
return name
try:
style = self._theme_stack.get(name)
if style is None:
style = Style.parse(name)
return style.copy() if style.link else style
except errors.StyleSyntaxError as error:
if default is not None:
return self.get_style(default)
raise errors.MissingStyle(
f"Failed to get style {name!r}; {error}"
) from None
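    # Illustrative sketch: a theme name resolves via the theme stack, anything
    # else is parsed as a style definition ("not.a.style" is a hypothetical
    # name that fails both lookups, so the default kicks in).
    #
    #   style = console.get_style("bold red on black")
    #   dim = console.get_style("not.a.style", default="dim")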
def _collect_renderables(
self,
objects: Iterable[Any],
sep: str,
end: str,
*,
justify: Optional[JustifyMethod] = None,
emoji: Optional[bool] = None,
markup: Optional[bool] = None,
highlight: Optional[bool] = None,
) -> List[ConsoleRenderable]:
"""Combine a number of renderables and text into one renderable.
Args:
objects (Iterable[Any]): Anything that Rich can render.
sep (str): String to write between print data.
end (str): String to write at end of print data.
justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``.
emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default.
markup (Optional[bool], optional): Enable markup, or ``None`` to use console default.
highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default.
Returns:
List[ConsoleRenderable]: A list of things to render.
"""
renderables: List[ConsoleRenderable] = []
_append = renderables.append
text: List[Text] = []
append_text = text.append
append = _append
if justify in ("left", "center", "right"):
def align_append(renderable: RenderableType) -> None:
_append(Align(renderable, cast(AlignMethod, justify)))
append = align_append
_highlighter: HighlighterType = _null_highlighter
if highlight or (highlight is None and self._highlight):
_highlighter = self.highlighter
def check_text() -> None:
if text:
sep_text = Text(sep, justify=justify, end=end)
append(sep_text.join(text))
del text[:]
for renderable in objects:
renderable = rich_cast(renderable)
if isinstance(renderable, str):
append_text(
self.render_str(
renderable, emoji=emoji, markup=markup, highlighter=_highlighter
)
)
elif isinstance(renderable, Text):
append_text(renderable)
elif isinstance(renderable, ConsoleRenderable):
check_text()
append(renderable)
elif is_expandable(renderable):
check_text()
append(Pretty(renderable, highlighter=_highlighter))
else:
append_text(_highlighter(str(renderable)))
check_text()
if self.style is not None:
style = self.get_style(self.style)
renderables = [Styled(renderable, style) for renderable in renderables]
return renderables
def rule(
self,
title: TextType = "",
*,
characters: str = "─",
style: Union[str, Style] = "rule.line",
align: AlignMethod = "center",
) -> None:
"""Draw a line with optional centered title.
Args:
title (str, optional): Text to render over the rule. Defaults to "".
characters (str, optional): Character(s) to form the line. Defaults to "─".
style (str, optional): Style of line. Defaults to "rule.line".
align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center".
"""
from .rule import Rule
rule = Rule(title=title, characters=characters, style=style, align=align)
self.print(rule)
def control(self, *control: Control) -> None:
"""Insert non-printing control codes.
Args:
            control (Control): Control codes, such as those that may move the cursor.
"""
if not self.is_dumb_terminal:
with self:
self._buffer.extend(_control.segment for _control in control)
def out(
self,
*objects: Any,
sep: str = " ",
end: str = "\n",
style: Optional[Union[str, Style]] = None,
highlight: Optional[bool] = None,
) -> None:
"""Output to the terminal. This is a low-level way of writing to the terminal which unlike
:meth:`~rich.console.Console.print` won't pretty print, wrap text, or apply markup, but will
optionally apply highlighting and a basic style.
Args:
sep (str, optional): String to write between print data. Defaults to " ".
end (str, optional): String to write at end of print data. Defaults to "\\\\n".
style (Union[str, Style], optional): A style to apply to output. Defaults to None.
highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use
console default. Defaults to ``None``.
"""
raw_output: str = sep.join(str(_object) for _object in objects)
self.print(
raw_output,
style=style,
highlight=highlight,
emoji=False,
markup=False,
no_wrap=True,
overflow="ignore",
crop=False,
end=end,
)
def print(
self,
*objects: Any,
sep: str = " ",
end: str = "\n",
style: Optional[Union[str, Style]] = None,
justify: Optional[JustifyMethod] = None,
overflow: Optional[OverflowMethod] = None,
no_wrap: Optional[bool] = None,
emoji: Optional[bool] = None,
markup: Optional[bool] = None,
highlight: Optional[bool] = None,
width: Optional[int] = None,
height: Optional[int] = None,
crop: bool = True,
soft_wrap: Optional[bool] = None,
new_line_start: bool = False,
) -> None:
"""Print to the console.
Args:
objects (positional args): Objects to log to the terminal.
sep (str, optional): String to write between print data. Defaults to " ".
end (str, optional): String to write at end of print data. Defaults to "\\\\n".
style (Union[str, Style], optional): A style to apply to output. Defaults to None.
justify (str, optional): Justify method: "default", "left", "right", "center", or "full". Defaults to ``None``.
overflow (str, optional): Overflow method: "ignore", "crop", "fold", or "ellipsis". Defaults to None.
no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to None.
emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to ``None``.
markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to ``None``.
highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to ``None``.
            width (Optional[int], optional): Width of output, or ``None`` to auto-detect. Defaults to ``None``.
            height (Optional[int], optional): Height of output, or ``None`` for no height limit. Defaults to ``None``.
            crop (Optional[bool], optional): Crop output to width of terminal. Defaults to True.
soft_wrap (bool, optional): Enable soft wrap mode which disables word wrapping and cropping of text or ``None`` for
Console default. Defaults to ``None``.
            new_line_start (bool, optional): Insert a new line at the start if the output contains more than one line. Defaults to ``False``.
"""
if not objects:
objects = (NewLine(),)
if soft_wrap is None:
soft_wrap = self.soft_wrap
if soft_wrap:
if no_wrap is None:
no_wrap = True
if overflow is None:
overflow = "ignore"
crop = False
render_hooks = self._render_hooks[:]
with self:
renderables = self._collect_renderables(
objects,
sep,
end,
justify=justify,
emoji=emoji,
markup=markup,
highlight=highlight,
)
for hook in render_hooks:
renderables = hook.process_renderables(renderables)
render_options = self.options.update(
justify=justify,
overflow=overflow,
width=min(width, self.width) if width is not None else NO_CHANGE,
height=height,
no_wrap=no_wrap,
markup=markup,
highlight=highlight,
)
new_segments: List[Segment] = []
extend = new_segments.extend
render = self.render
if style is None:
for renderable in renderables:
extend(render(renderable, render_options))
else:
for renderable in renderables:
extend(
Segment.apply_style(
render(renderable, render_options), self.get_style(style)
)
)
if new_line_start:
if (
len("".join(segment.text for segment in new_segments).splitlines())
> 1
):
new_segments.insert(0, Segment.line())
if crop:
buffer_extend = self._buffer.extend
for line in Segment.split_and_crop_lines(
new_segments, self.width, pad=False
):
buffer_extend(line)
else:
self._buffer.extend(new_segments)
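    # Minimal usage sketch (assumes a Console instance):
    #
    #   console.print("Hello, [bold magenta]World[/]!", justify="center")
    #   console.print({"key": "value"})  # containers are pretty-printed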
def print_json(
self,
json: Optional[str] = None,
*,
data: Any = None,
indent: Union[None, int, str] = 2,
highlight: bool = True,
skip_keys: bool = False,
ensure_ascii: bool = True,
check_circular: bool = True,
allow_nan: bool = True,
default: Optional[Callable[[Any], Any]] = None,
sort_keys: bool = False,
) -> None:
"""Pretty prints JSON. Output will be valid JSON.
Args:
json (Optional[str]): A string containing JSON.
data (Any): If json is not supplied, then encode this data.
indent (Union[None, int, str], optional): Number of spaces to indent. Defaults to 2.
            highlight (bool, optional): Enable highlighting of output. Defaults to True.
skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
            ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to True.
check_circular (bool, optional): Check for circular references. Defaults to True.
allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
            default (Callable, optional): A callable that converts values that cannot be encoded
                into something that can be JSON encoded. Defaults to None.
sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
"""
from pip._vendor.rich.json import JSON
if json is None:
json_renderable = JSON.from_data(
data,
indent=indent,
highlight=highlight,
skip_keys=skip_keys,
ensure_ascii=ensure_ascii,
check_circular=check_circular,
allow_nan=allow_nan,
default=default,
sort_keys=sort_keys,
)
else:
if not isinstance(json, str):
raise TypeError(
f"json must be str. Did you mean print_json(data={json!r}) ?"
)
json_renderable = JSON(
json,
indent=indent,
highlight=highlight,
skip_keys=skip_keys,
ensure_ascii=ensure_ascii,
check_circular=check_circular,
allow_nan=allow_nan,
default=default,
sort_keys=sort_keys,
)
self.print(json_renderable, soft_wrap=True)
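    # Minimal sketch: print_json accepts either a JSON string or raw data.
    #
    #   console.print_json('{"name": "apple", "count": 3}')
    #   console.print_json(data={"name": "apple", "count": 3}, indent=4)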
def update_screen(
self,
renderable: RenderableType,
*,
region: Optional[Region] = None,
options: Optional[ConsoleOptions] = None,
) -> None:
"""Update the screen at a given offset.
Args:
renderable (RenderableType): A Rich renderable.
region (Region, optional): Region of screen to update, or None for entire screen. Defaults to None.
            options (Optional[ConsoleOptions], optional): Console options, or None to use self.options. Defaults to None.
Raises:
errors.NoAltScreen: If the Console isn't in alt screen mode.
"""
if not self.is_alt_screen:
raise errors.NoAltScreen("Alt screen must be enabled to call update_screen")
render_options = options or self.options
if region is None:
x = y = 0
render_options = render_options.update_dimensions(
render_options.max_width, render_options.height or self.height
)
else:
x, y, width, height = region
render_options = render_options.update_dimensions(width, height)
lines = self.render_lines(renderable, options=render_options)
self.update_screen_lines(lines, x, y)
def update_screen_lines(
self, lines: List[List[Segment]], x: int = 0, y: int = 0
) -> None:
"""Update lines of the screen at a given offset.
Args:
lines (List[List[Segment]]): Rendered lines (as produced by :meth:`~rich.Console.render_lines`).
x (int, optional): x offset (column no). Defaults to 0.
            y (int, optional): y offset (row no). Defaults to 0.
Raises:
errors.NoAltScreen: If the Console isn't in alt screen mode.
"""
if not self.is_alt_screen:
raise errors.NoAltScreen("Alt screen must be enabled to call update_screen")
screen_update = ScreenUpdate(lines, x, y)
segments = self.render(screen_update)
self._buffer.extend(segments)
self._check_buffer()
def print_exception(
self,
*,
width: Optional[int] = 100,
extra_lines: int = 3,
theme: Optional[str] = None,
word_wrap: bool = False,
show_locals: bool = False,
suppress: Iterable[Union[str, ModuleType]] = (),
max_frames: int = 100,
) -> None:
"""Prints a rich render of the last exception and traceback.
Args:
width (Optional[int], optional): Number of characters used to render code. Defaults to 100.
extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
theme (str, optional): Override pygments theme used in traceback
word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
show_locals (bool, optional): Enable display of local variables. Defaults to False.
suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
"""
from .traceback import Traceback
traceback = Traceback(
width=width,
extra_lines=extra_lines,
theme=theme,
word_wrap=word_wrap,
show_locals=show_locals,
suppress=suppress,
max_frames=max_frames,
)
self.print(traceback)
@staticmethod
def _caller_frame_info(
offset: int,
currentframe: Callable[[], Optional[FrameType]] = inspect.currentframe,
) -> Tuple[str, int, Dict[str, Any]]:
"""Get caller frame information.
Args:
offset (int): the caller offset within the current frame stack.
currentframe (Callable[[], Optional[FrameType]], optional): the callable to use to
retrieve the current frame. Defaults to ``inspect.currentframe``.
Returns:
Tuple[str, int, Dict[str, Any]]: A tuple containing the filename, the line number and
the dictionary of local variables associated with the caller frame.
Raises:
RuntimeError: If the stack offset is invalid.
"""
# Ignore the frame of this local helper
offset += 1
frame = currentframe()
if frame is not None:
# Use the faster currentframe where implemented
while offset and frame is not None:
frame = frame.f_back
offset -= 1
assert frame is not None
return frame.f_code.co_filename, frame.f_lineno, frame.f_locals
else:
# Fallback to the slower stack
frame_info = inspect.stack()[offset]
return frame_info.filename, frame_info.lineno, frame_info.frame.f_locals
def log(
self,
*objects: Any,
sep: str = " ",
end: str = "\n",
style: Optional[Union[str, Style]] = None,
justify: Optional[JustifyMethod] = None,
emoji: Optional[bool] = None,
markup: Optional[bool] = None,
highlight: Optional[bool] = None,
log_locals: bool = False,
_stack_offset: int = 1,
) -> None:
"""Log rich content to the terminal.
Args:
objects (positional args): Objects to log to the terminal.
sep (str, optional): String to write between print data. Defaults to " ".
end (str, optional): String to write at end of print data. Defaults to "\\\\n".
style (Union[str, Style], optional): A style to apply to output. Defaults to None.
justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``.
emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to None.
markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to None.
highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to None.
log_locals (bool, optional): Boolean to enable logging of locals where ``log()``
was called. Defaults to False.
_stack_offset (int, optional): Offset of caller from end of call stack. Defaults to 1.
"""
if not objects:
objects = (NewLine(),)
render_hooks = self._render_hooks[:]
with self:
renderables = self._collect_renderables(
objects,
sep,
end,
justify=justify,
emoji=emoji,
markup=markup,
highlight=highlight,
)
if style is not None:
renderables = [Styled(renderable, style) for renderable in renderables]
filename, line_no, locals = self._caller_frame_info(_stack_offset)
link_path = None if filename.startswith("<") else os.path.abspath(filename)
path = filename.rpartition(os.sep)[-1]
if log_locals:
locals_map = {
key: value
for key, value in locals.items()
if not key.startswith("__")
}
renderables.append(render_scope(locals_map, title="[i]locals"))
renderables = [
self._log_render(
self,
renderables,
log_time=self.get_datetime(),
path=path,
line_no=line_no,
link_path=link_path,
)
]
for hook in render_hooks:
renderables = hook.process_renderables(renderables)
new_segments: List[Segment] = []
extend = new_segments.extend
render = self.render
render_options = self.options
for renderable in renderables:
extend(render(renderable, render_options))
buffer_extend = self._buffer.extend
for line in Segment.split_and_crop_lines(
new_segments, self.width, pad=False
):
buffer_extend(line)
def _check_buffer(self) -> None:
"""Check if the buffer may be rendered. Render it if it can (e.g. Console.quiet is False)
Rendering is supported on Windows, Unix and Jupyter environments. For
legacy Windows consoles, the win32 API is called directly.
This method will also record what it renders if recording is enabled via Console.record.
"""
if self.quiet:
del self._buffer[:]
return
with self._lock:
if self.record:
with self._record_buffer_lock:
self._record_buffer.extend(self._buffer[:])
if self._buffer_index == 0:
if self.is_jupyter: # pragma: no cover
from .jupyter import display
display(self._buffer, self._render_buffer(self._buffer[:]))
del self._buffer[:]
else:
if WINDOWS:
use_legacy_windows_render = False
if self.legacy_windows:
try:
use_legacy_windows_render = (
self.file.fileno() in _STD_STREAMS_OUTPUT
)
except (ValueError, io.UnsupportedOperation):
pass
if use_legacy_windows_render:
from pip._vendor.rich._win32_console import LegacyWindowsTerm
from pip._vendor.rich._windows_renderer import legacy_windows_render
legacy_windows_render(
self._buffer[:], LegacyWindowsTerm(self.file)
)
else:
# Either a non-std stream on legacy Windows, or modern Windows.
text = self._render_buffer(self._buffer[:])
# https://bugs.python.org/issue37871
write = self.file.write
for line in text.splitlines(True):
try:
write(line)
except UnicodeEncodeError as error:
error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***"
raise
else:
text = self._render_buffer(self._buffer[:])
try:
self.file.write(text)
except UnicodeEncodeError as error:
error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***"
raise
self.file.flush()
del self._buffer[:]
def _render_buffer(self, buffer: Iterable[Segment]) -> str:
"""Render buffered output, and clear buffer."""
output: List[str] = []
append = output.append
color_system = self._color_system
legacy_windows = self.legacy_windows
not_terminal = not self.is_terminal
if self.no_color and color_system:
buffer = Segment.remove_color(buffer)
for text, style, control in buffer:
if style:
append(
style.render(
text,
color_system=color_system,
legacy_windows=legacy_windows,
)
)
elif not (not_terminal and control):
append(text)
rendered = "".join(output)
return rendered
def input(
self,
prompt: TextType = "",
*,
markup: bool = True,
emoji: bool = True,
password: bool = False,
stream: Optional[TextIO] = None,
) -> str:
"""Displays a prompt and waits for input from the user. The prompt may contain color / style.
It works in the same way as Python's builtin :func:`input` function and provides elaborate line editing and history features if Python's builtin :mod:`readline` module is previously loaded.
Args:
prompt (Union[str, Text]): Text to render in the prompt.
markup (bool, optional): Enable console markup (requires a str prompt). Defaults to True.
emoji (bool, optional): Enable emoji (requires a str prompt). Defaults to True.
password: (bool, optional): Hide typed text. Defaults to False.
stream: (TextIO, optional): Optional file to read input from (rather than stdin). Defaults to None.
Returns:
str: Text read from stdin.
"""
if prompt:
self.print(prompt, markup=markup, emoji=emoji, end="")
if password:
result = getpass("", stream=stream)
else:
if stream:
result = stream.readline()
else:
result = input()
return result
def export_text(self, *, clear: bool = True, styles: bool = False) -> str:
"""Generate text from console contents (requires record=True argument in constructor).
Args:
clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
            styles (bool, optional): If ``True``, ANSI escape codes will be included. ``False`` for plain text.
Defaults to ``False``.
Returns:
str: String containing console contents.
"""
assert (
self.record
), "To export console contents set record=True in the constructor or instance"
with self._record_buffer_lock:
if styles:
text = "".join(
(style.render(text) if style else text)
for text, style, _ in self._record_buffer
)
else:
text = "".join(
segment.text
for segment in self._record_buffer
if not segment.control
)
if clear:
del self._record_buffer[:]
return text
def save_text(self, path: str, *, clear: bool = True, styles: bool = False) -> None:
"""Generate text from console and save to a given location (requires record=True argument in constructor).
Args:
path (str): Path to write text files.
clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
            styles (bool, optional): If ``True``, ANSI style codes will be included. ``False`` for plain text.
Defaults to ``False``.
"""
text = self.export_text(clear=clear, styles=styles)
with open(path, "wt", encoding="utf-8") as write_file:
write_file.write(text)
def export_html(
self,
*,
theme: Optional[TerminalTheme] = None,
clear: bool = True,
code_format: Optional[str] = None,
inline_styles: bool = False,
) -> str:
"""Generate HTML from console contents (requires record=True argument in constructor).
Args:
theme (TerminalTheme, optional): TerminalTheme object containing console colors.
clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
code_format (str, optional): Format string to render HTML. In addition to '{foreground}',
'{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``.
            inline_styles (bool, optional): If ``True`` styles will be inlined into spans, which makes files
larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
Defaults to False.
Returns:
str: String containing console contents as HTML.
"""
assert (
self.record
), "To export console contents set record=True in the constructor or instance"
fragments: List[str] = []
append = fragments.append
_theme = theme or DEFAULT_TERMINAL_THEME
stylesheet = ""
render_code_format = CONSOLE_HTML_FORMAT if code_format is None else code_format
with self._record_buffer_lock:
if inline_styles:
for text, style, _ in Segment.filter_control(
Segment.simplify(self._record_buffer)
):
text = escape(text)
if style:
rule = style.get_html_style(_theme)
if style.link:
text = f'<a href="{style.link}">{text}</a>'
text = f'<span style="{rule}">{text}</span>' if rule else text
append(text)
else:
styles: Dict[str, int] = {}
for text, style, _ in Segment.filter_control(
Segment.simplify(self._record_buffer)
):
text = escape(text)
if style:
rule = style.get_html_style(_theme)
style_number = styles.setdefault(rule, len(styles) + 1)
if style.link:
text = f'<a class="r{style_number}" href="{style.link}">{text}</a>'
else:
text = f'<span class="r{style_number}">{text}</span>'
append(text)
stylesheet_rules: List[str] = []
stylesheet_append = stylesheet_rules.append
for style_rule, style_number in styles.items():
if style_rule:
stylesheet_append(f".r{style_number} {{{style_rule}}}")
stylesheet = "\n".join(stylesheet_rules)
rendered_code = render_code_format.format(
code="".join(fragments),
stylesheet=stylesheet,
foreground=_theme.foreground_color.hex,
background=_theme.background_color.hex,
)
if clear:
del self._record_buffer[:]
return rendered_code
def save_html(
self,
path: str,
*,
theme: Optional[TerminalTheme] = None,
clear: bool = True,
code_format: str = CONSOLE_HTML_FORMAT,
inline_styles: bool = False,
) -> None:
"""Generate HTML from console contents and write to a file (requires record=True argument in constructor).
Args:
path (str): Path to write html file.
theme (TerminalTheme, optional): TerminalTheme object containing console colors.
clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
code_format (str, optional): Format string to render HTML. In addition to '{foreground}',
'{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``.
            inline_styles (bool, optional): If ``True`` styles will be inlined into spans, which makes files
larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
Defaults to False.
"""
html = self.export_html(
theme=theme,
clear=clear,
code_format=code_format,
inline_styles=inline_styles,
)
with open(path, "wt", encoding="utf-8") as write_file:
write_file.write(html)
def export_svg(
self,
*,
title: str = "Rich",
theme: Optional[TerminalTheme] = None,
clear: bool = True,
code_format: str = CONSOLE_SVG_FORMAT,
) -> str:
"""
Generate an SVG from the console contents (requires record=True in Console constructor).
Args:
path (str): The path to write the SVG to.
title (str): The title of the tab in the output image
theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal
clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``
code_format (str): Format string used to generate the SVG. Rich will inject a number of variables
into the string in order to form the final SVG output. The default template used and the variables
injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable.
"""
from pip._vendor.rich.cells import cell_len
style_cache: Dict[Style, str] = {}
def get_svg_style(style: Style) -> str:
"""Convert a Style to CSS rules for SVG."""
if style in style_cache:
return style_cache[style]
css_rules = []
color = (
_theme.foreground_color
if (style.color is None or style.color.is_default)
else style.color.get_truecolor(_theme)
)
bgcolor = (
_theme.background_color
if (style.bgcolor is None or style.bgcolor.is_default)
else style.bgcolor.get_truecolor(_theme)
)
if style.reverse:
color, bgcolor = bgcolor, color
if style.dim:
color = blend_rgb(color, bgcolor, 0.4)
css_rules.append(f"fill: {color.hex}")
if style.bold:
css_rules.append("font-weight: bold")
if style.italic:
css_rules.append("font-style: italic;")
if style.underline:
css_rules.append("text-decoration: underline;")
if style.strike:
css_rules.append("text-decoration: line-through;")
css = ";".join(css_rules)
style_cache[style] = css
return css
_theme = theme or SVG_EXPORT_THEME
width = self.width
char_height = 20
char_width = char_height * 0.61
line_height = char_height * 1.22
margin_top = 1
margin_right = 1
margin_bottom = 1
margin_left = 1
padding_top = 40
padding_right = 8
padding_bottom = 8
padding_left = 8
padding_width = padding_left + padding_right
padding_height = padding_top + padding_bottom
margin_width = margin_left + margin_right
margin_height = margin_top + margin_bottom
text_backgrounds: List[str] = []
text_group: List[str] = []
classes: Dict[str, int] = {}
style_no = 1
def escape_text(text: str) -> str:
"""HTML escape text and replace spaces with nbsp."""
return escape(text).replace(" ", " ")
def make_tag(
name: str, content: Optional[str] = None, **attribs: object
) -> str:
"""Make a tag from name, content, and attributes."""
def stringify(value: object) -> str:
                if isinstance(value, float):
return format(value, "g")
return str(value)
tag_attribs = " ".join(
f'{k.lstrip("_").replace("_", "-")}="{stringify(v)}"'
for k, v in attribs.items()
)
return (
f"<{name} {tag_attribs}>{content}</{name}>"
if content
else f"<{name} {tag_attribs}/>"
)
with self._record_buffer_lock:
segments = list(Segment.filter_control(self._record_buffer))
if clear:
self._record_buffer.clear()
unique_id = "terminal-" + str(
zlib.adler32(
("".join(segment.text for segment in segments)).encode(
"utf-8", "ignore"
)
+ title.encode("utf-8", "ignore")
)
)
y = 0
for y, line in enumerate(Segment.split_and_crop_lines(segments, length=width)):
x = 0
for text, style, _control in line:
style = style or Style()
rules = get_svg_style(style)
if rules not in classes:
classes[rules] = style_no
style_no += 1
class_name = f"r{classes[rules]}"
if style.reverse:
has_background = True
background = (
_theme.foreground_color.hex
if style.color is None
else style.color.get_truecolor(_theme).hex
)
else:
bgcolor = style.bgcolor
has_background = bgcolor is not None and not bgcolor.is_default
background = (
_theme.background_color.hex
if style.bgcolor is None
else style.bgcolor.get_truecolor(_theme).hex
)
text_length = cell_len(text)
if has_background:
text_backgrounds.append(
make_tag(
"rect",
fill=background,
x=x * char_width,
y=y * line_height + 1.5,
width=char_width * text_length,
height=line_height + 0.25,
shape_rendering="crispEdges",
)
)
if text != " " * len(text):
text_group.append(
make_tag(
"text",
escape_text(text),
_class=f"{unique_id}-{class_name}",
x=x * char_width,
y=y * line_height + char_height,
textLength=char_width * len(text),
clip_path=f"url(#{unique_id}-line-{y})",
)
)
x += cell_len(text)
line_offsets = [line_no * line_height + 1.5 for line_no in range(y)]
lines = "\n".join(
f"""<clipPath id="{unique_id}-line-{line_no}">
{make_tag("rect", x=0, y=offset, width=char_width * width, height=line_height + 0.25)}
</clipPath>"""
for line_no, offset in enumerate(line_offsets)
)
styles = "\n".join(
f".{unique_id}-r{rule_no} {{ {css} }}" for css, rule_no in classes.items()
)
backgrounds = "".join(text_backgrounds)
matrix = "".join(text_group)
terminal_width = ceil(width * char_width + padding_width)
terminal_height = (y + 1) * line_height + padding_height
chrome = make_tag(
"rect",
fill=_theme.background_color.hex,
stroke="rgba(255,255,255,0.35)",
stroke_width="1",
x=margin_left,
y=margin_top,
width=terminal_width,
height=terminal_height,
rx=8,
)
title_color = _theme.foreground_color.hex
if title:
chrome += make_tag(
"text",
escape_text(title),
_class=f"{unique_id}-title",
fill=title_color,
text_anchor="middle",
x=terminal_width // 2,
y=margin_top + char_height + 6,
)
chrome += f"""
<g transform="translate(26,22)">
<circle cx="0" cy="0" r="7" fill="#ff5f57"/>
<circle cx="22" cy="0" r="7" fill="#febc2e"/>
<circle cx="44" cy="0" r="7" fill="#28c840"/>
</g>
"""
svg = code_format.format(
unique_id=unique_id,
char_width=char_width,
char_height=char_height,
line_height=line_height,
terminal_width=char_width * width - 1,
terminal_height=(y + 1) * line_height - 1,
width=terminal_width + margin_width,
height=terminal_height + margin_height,
terminal_x=margin_left + padding_left,
terminal_y=margin_top + padding_top,
styles=styles,
chrome=chrome,
backgrounds=backgrounds,
matrix=matrix,
lines=lines,
)
return svg
def save_svg(
self,
path: str,
*,
title: str = "Rich",
theme: Optional[TerminalTheme] = None,
clear: bool = True,
code_format: str = CONSOLE_SVG_FORMAT,
) -> None:
"""Generate an SVG file from the console contents (requires record=True in Console constructor).
Args:
path (str): The path to write the SVG to.
title (str): The title of the tab in the output image
theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal
clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``
code_format (str): Format string used to generate the SVG. Rich will inject a number of variables
into the string in order to form the final SVG output. The default template used and the variables
injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable.
"""
svg = self.export_svg(
title=title,
theme=theme,
clear=clear,
code_format=code_format,
)
with open(path, "wt", encoding="utf-8") as write_file:
write_file.write(svg)
def _svg_hash(svg_main_code: str) -> str:
"""Returns a unique hash for the given SVG main code.
Args:
svg_main_code (str): The content we're going to inject in the SVG envelope.
Returns:
str: a hash of the given content
"""
return str(zlib.adler32(svg_main_code.encode()))
if __name__ == "__main__": # pragma: no cover
console = Console(record=True)
console.log(
"JSONRPC [i]request[/i]",
5,
1.3,
True,
False,
None,
{
"jsonrpc": "2.0",
"method": "subtract",
"params": {"minuend": 42, "subtrahend": 23},
"id": 3,
},
)
console.log("Hello, World!", "{'a': 1}", repr(console))
console.print(
{
"name": None,
"empty": [],
"quiz": {
"sport": {
"answered": True,
"q1": {
"question": "Which one is correct team name in NBA?",
"options": [
"New York Bulls",
"Los Angeles Kings",
"Golden State Warriors",
"Huston Rocket",
],
"answer": "Huston Rocket",
},
},
"maths": {
"answered": False,
"q1": {
"question": "5 + 7 = ?",
"options": [10, 11, 12, 13],
"answer": 12,
},
"q2": {
"question": "12 - 8 = ?",
"options": [1, 2, 3, 4],
"answer": 4,
},
},
},
}
)
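    # Illustrative addition (not part of the upstream demo): because the
    # console above was created with record=True, everything printed so far
    # can be exported. Passing clear=False keeps the record buffer intact
    # for further exports (e.g. export_html or export_svg).
    exported = console.export_text(clear=False)
    console.log(f"Recorded {len(exported)} characters of output")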
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/console.py
|
Python
|
mit
| 95,885 |
from typing import Optional, TYPE_CHECKING
from .jupyter import JupyterMixin
from .measure import Measurement
if TYPE_CHECKING:
from .console import Console, ConsoleOptions, RenderableType, RenderResult
class Constrain(JupyterMixin):
"""Constrain the width of a renderable to a given number of characters.
Args:
renderable (RenderableType): A renderable object.
width (int, optional): The maximum width (in characters) to render. Defaults to 80.
"""
def __init__(self, renderable: "RenderableType", width: Optional[int] = 80) -> None:
self.renderable = renderable
self.width = width
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
if self.width is None:
yield self.renderable
else:
child_options = options.update_width(min(self.width, options.max_width))
yield from console.render(self.renderable, child_options)
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
if self.width is not None:
options = options.update_width(self.width)
measurement = Measurement.get(console, options, self.renderable)
return measurement
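if __name__ == "__main__":  # pragma: no cover
    # Minimal usage sketch, added for illustration (not in the original
    # module): Constrain caps the width of any renderable. A Panel that
    # would normally expand to the full terminal width is held to 30 cells.
    from pip._vendor.rich.console import Console
    from pip._vendor.rich.panel import Panel
    console = Console()
    console.print(Constrain(Panel("Hello, World!"), width=30))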
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/constrain.py
|
Python
|
mit
| 1,288 |
from itertools import zip_longest
from typing import (
Iterator,
Iterable,
List,
Optional,
Union,
overload,
TypeVar,
TYPE_CHECKING,
)
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
JustifyMethod,
OverflowMethod,
RenderResult,
RenderableType,
)
from .text import Text
from .cells import cell_len
from .measure import Measurement
T = TypeVar("T")
class Renderables:
"""A list subclass which renders its contents to the console."""
def __init__(
self, renderables: Optional[Iterable["RenderableType"]] = None
) -> None:
self._renderables: List["RenderableType"] = (
list(renderables) if renderables is not None else []
)
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
"""Console render method to insert line-breaks."""
yield from self._renderables
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
dimensions = [
Measurement.get(console, options, renderable)
for renderable in self._renderables
]
if not dimensions:
return Measurement(1, 1)
_min = max(dimension.minimum for dimension in dimensions)
_max = max(dimension.maximum for dimension in dimensions)
return Measurement(_min, _max)
def append(self, renderable: "RenderableType") -> None:
self._renderables.append(renderable)
def __iter__(self) -> Iterable["RenderableType"]:
return iter(self._renderables)
class Lines:
"""A list subclass which can render to the console."""
def __init__(self, lines: Iterable["Text"] = ()) -> None:
self._lines: List["Text"] = list(lines)
def __repr__(self) -> str:
return f"Lines({self._lines!r})"
def __iter__(self) -> Iterator["Text"]:
return iter(self._lines)
@overload
def __getitem__(self, index: int) -> "Text":
...
@overload
def __getitem__(self, index: slice) -> List["Text"]:
...
def __getitem__(self, index: Union[slice, int]) -> Union["Text", List["Text"]]:
return self._lines[index]
def __setitem__(self, index: int, value: "Text") -> "Lines":
self._lines[index] = value
return self
def __len__(self) -> int:
return self._lines.__len__()
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
"""Console render method to insert line-breaks."""
yield from self._lines
def append(self, line: "Text") -> None:
self._lines.append(line)
def extend(self, lines: Iterable["Text"]) -> None:
self._lines.extend(lines)
def pop(self, index: int = -1) -> "Text":
return self._lines.pop(index)
def justify(
self,
console: "Console",
width: int,
justify: "JustifyMethod" = "left",
overflow: "OverflowMethod" = "fold",
) -> None:
"""Justify and overflow text to a given width.
Args:
console (Console): Console instance.
width (int): Number of characters per line.
justify (str, optional): Default justify method for text: "left", "center", "full" or "right". Defaults to "left".
overflow (str, optional): Default overflow for text: "crop", "fold", or "ellipsis". Defaults to "fold".
"""
from .text import Text
if justify == "left":
for line in self._lines:
line.truncate(width, overflow=overflow, pad=True)
elif justify == "center":
for line in self._lines:
line.rstrip()
line.truncate(width, overflow=overflow)
line.pad_left((width - cell_len(line.plain)) // 2)
line.pad_right(width - cell_len(line.plain))
elif justify == "right":
for line in self._lines:
line.rstrip()
line.truncate(width, overflow=overflow)
line.pad_left(width - cell_len(line.plain))
elif justify == "full":
for line_index, line in enumerate(self._lines):
if line_index == len(self._lines) - 1:
break
words = line.split(" ")
words_size = sum(cell_len(word.plain) for word in words)
num_spaces = len(words) - 1
spaces = [1 for _ in range(num_spaces)]
index = 0
if spaces:
while words_size + num_spaces < width:
spaces[len(spaces) - index - 1] += 1
num_spaces += 1
index = (index + 1) % len(spaces)
tokens: List[Text] = []
for index, (word, next_word) in enumerate(
zip_longest(words, words[1:])
):
tokens.append(word)
if index < len(spaces):
style = word.get_style_at_offset(console, -1)
next_style = next_word.get_style_at_offset(console, 0)
space_style = style if style == next_style else line.style
tokens.append(Text(" " * spaces[index], style=space_style))
self[line_index] = Text("").join(tokens)
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/containers.py
|
Python
|
mit
| 5,497 |
import sys
import time
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Union
if sys.version_info >= (3, 8):
from typing import Final
else:
from pip._vendor.typing_extensions import Final # pragma: no cover
from .segment import ControlCode, ControlType, Segment
if TYPE_CHECKING:
from .console import Console, ConsoleOptions, RenderResult
STRIP_CONTROL_CODES: Final = [
7, # Bell
8, # Backspace
11, # Vertical tab
12, # Form feed
13, # Carriage return
]
_CONTROL_STRIP_TRANSLATE: Final = {
_codepoint: None for _codepoint in STRIP_CONTROL_CODES
}
CONTROL_ESCAPE: Final = {
7: "\\a",
8: "\\b",
11: "\\v",
12: "\\f",
13: "\\r",
}
CONTROL_CODES_FORMAT: Dict[int, Callable[..., str]] = {
ControlType.BELL: lambda: "\x07",
ControlType.CARRIAGE_RETURN: lambda: "\r",
ControlType.HOME: lambda: "\x1b[H",
ControlType.CLEAR: lambda: "\x1b[2J",
ControlType.ENABLE_ALT_SCREEN: lambda: "\x1b[?1049h",
ControlType.DISABLE_ALT_SCREEN: lambda: "\x1b[?1049l",
ControlType.SHOW_CURSOR: lambda: "\x1b[?25h",
ControlType.HIDE_CURSOR: lambda: "\x1b[?25l",
ControlType.CURSOR_UP: lambda param: f"\x1b[{param}A",
ControlType.CURSOR_DOWN: lambda param: f"\x1b[{param}B",
ControlType.CURSOR_FORWARD: lambda param: f"\x1b[{param}C",
ControlType.CURSOR_BACKWARD: lambda param: f"\x1b[{param}D",
ControlType.CURSOR_MOVE_TO_COLUMN: lambda param: f"\x1b[{param+1}G",
ControlType.ERASE_IN_LINE: lambda param: f"\x1b[{param}K",
ControlType.CURSOR_MOVE_TO: lambda x, y: f"\x1b[{y+1};{x+1}H",
ControlType.SET_WINDOW_TITLE: lambda title: f"\x1b]0;{title}\x07",
}
class Control:
"""A renderable that inserts a control code (non printable but may move cursor).
Args:
*codes (str): Positional arguments are either a :class:`~rich.segment.ControlType` enum or a
tuple of ControlType and an integer parameter
"""
__slots__ = ["segment"]
def __init__(self, *codes: Union[ControlType, ControlCode]) -> None:
control_codes: List[ControlCode] = [
(code,) if isinstance(code, ControlType) else code for code in codes
]
_format_map = CONTROL_CODES_FORMAT
rendered_codes = "".join(
_format_map[code](*parameters) for code, *parameters in control_codes
)
self.segment = Segment(rendered_codes, None, control_codes)
@classmethod
def bell(cls) -> "Control":
"""Ring the 'bell'."""
return cls(ControlType.BELL)
@classmethod
def home(cls) -> "Control":
"""Move cursor to 'home' position."""
return cls(ControlType.HOME)
@classmethod
def move(cls, x: int = 0, y: int = 0) -> "Control":
"""Move cursor relative to current position.
Args:
x (int): X offset.
y (int): Y offset.
Returns:
~Control: Control object.
"""
def get_codes() -> Iterable[ControlCode]:
control = ControlType
if x:
yield (
control.CURSOR_FORWARD if x > 0 else control.CURSOR_BACKWARD,
abs(x),
)
if y:
yield (
control.CURSOR_DOWN if y > 0 else control.CURSOR_UP,
abs(y),
)
control = cls(*get_codes())
return control
@classmethod
def move_to_column(cls, x: int, y: int = 0) -> "Control":
"""Move to the given column, optionally add offset to row.
Returns:
x (int): absolute x (column)
y (int): optional y offset (row)
Returns:
~Control: Control object.
"""
return (
cls(
(ControlType.CURSOR_MOVE_TO_COLUMN, x),
(
ControlType.CURSOR_DOWN if y > 0 else ControlType.CURSOR_UP,
abs(y),
),
)
if y
else cls((ControlType.CURSOR_MOVE_TO_COLUMN, x))
)
@classmethod
def move_to(cls, x: int, y: int) -> "Control":
"""Move cursor to absolute position.
Args:
x (int): x offset (column)
y (int): y offset (row)
Returns:
~Control: Control object.
"""
return cls((ControlType.CURSOR_MOVE_TO, x, y))
@classmethod
def clear(cls) -> "Control":
"""Clear the screen."""
return cls(ControlType.CLEAR)
@classmethod
def show_cursor(cls, show: bool) -> "Control":
"""Show or hide the cursor."""
return cls(ControlType.SHOW_CURSOR if show else ControlType.HIDE_CURSOR)
@classmethod
def alt_screen(cls, enable: bool) -> "Control":
"""Enable or disable alt screen."""
if enable:
return cls(ControlType.ENABLE_ALT_SCREEN, ControlType.HOME)
else:
return cls(ControlType.DISABLE_ALT_SCREEN)
@classmethod
def title(cls, title: str) -> "Control":
"""Set the terminal window title
Args:
title (str): The new terminal window title
"""
return cls((ControlType.SET_WINDOW_TITLE, title))
def __str__(self) -> str:
return self.segment.text
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
if self.segment.text:
yield self.segment
def strip_control_codes(
text: str, _translate_table: Dict[int, None] = _CONTROL_STRIP_TRANSLATE
) -> str:
"""Remove control codes from text.
Args:
        text (str): A string possibly containing control codes.
Returns:
str: String with control codes removed.
"""
return text.translate(_translate_table)
def escape_control_codes(
text: str,
_translate_table: Dict[int, str] = CONTROL_ESCAPE,
) -> str:
"""Replace control codes with their "escaped" equivalent in the given text.
(e.g. "\b" becomes "\\b")
Args:
text (str): A string possibly containing control codes.
Returns:
str: String with control codes replaced with their escaped version.
"""
return text.translate(_translate_table)
if __name__ == "__main__": # pragma: no cover
from pip._vendor.rich.console import Console
console = Console()
console.print("Look at the title of your terminal window ^")
# console.print(Control((ControlType.SET_WINDOW_TITLE, "Hello, world!")))
for i in range(10):
console.set_window_title("🚀 Loading" + "." * i)
time.sleep(0.5)
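    # Illustrative addition (not in the upstream demo): a Control renders to
    # a raw escape sequence, and str() exposes the generated codes. Here
    # CURSOR_MOVE_TO yields "\x1b[1;1H" for the top-left corner.
    print(repr(str(Control.move_to(0, 0))))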
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/control.py
|
Python
|
mit
| 6,630 |
from typing import Dict
from .style import Style
DEFAULT_STYLES: Dict[str, Style] = {
"none": Style.null(),
"reset": Style(
color="default",
bgcolor="default",
dim=False,
bold=False,
italic=False,
underline=False,
blink=False,
blink2=False,
reverse=False,
conceal=False,
strike=False,
),
"dim": Style(dim=True),
"bright": Style(dim=False),
"bold": Style(bold=True),
"strong": Style(bold=True),
"code": Style(reverse=True, bold=True),
"italic": Style(italic=True),
"emphasize": Style(italic=True),
"underline": Style(underline=True),
"blink": Style(blink=True),
"blink2": Style(blink2=True),
"reverse": Style(reverse=True),
"strike": Style(strike=True),
"black": Style(color="black"),
"red": Style(color="red"),
"green": Style(color="green"),
"yellow": Style(color="yellow"),
"magenta": Style(color="magenta"),
"cyan": Style(color="cyan"),
"white": Style(color="white"),
"inspect.attr": Style(color="yellow", italic=True),
"inspect.attr.dunder": Style(color="yellow", italic=True, dim=True),
"inspect.callable": Style(bold=True, color="red"),
"inspect.async_def": Style(italic=True, color="bright_cyan"),
"inspect.def": Style(italic=True, color="bright_cyan"),
"inspect.class": Style(italic=True, color="bright_cyan"),
"inspect.error": Style(bold=True, color="red"),
"inspect.equals": Style(),
"inspect.help": Style(color="cyan"),
"inspect.doc": Style(dim=True),
"inspect.value.border": Style(color="green"),
"live.ellipsis": Style(bold=True, color="red"),
"layout.tree.row": Style(dim=False, color="red"),
"layout.tree.column": Style(dim=False, color="blue"),
"logging.keyword": Style(bold=True, color="yellow"),
"logging.level.notset": Style(dim=True),
"logging.level.debug": Style(color="green"),
"logging.level.info": Style(color="blue"),
"logging.level.warning": Style(color="red"),
"logging.level.error": Style(color="red", bold=True),
"logging.level.critical": Style(color="red", bold=True, reverse=True),
"log.level": Style.null(),
"log.time": Style(color="cyan", dim=True),
"log.message": Style.null(),
"log.path": Style(dim=True),
"repr.ellipsis": Style(color="yellow"),
"repr.indent": Style(color="green", dim=True),
"repr.error": Style(color="red", bold=True),
"repr.str": Style(color="green", italic=False, bold=False),
"repr.brace": Style(bold=True),
"repr.comma": Style(bold=True),
"repr.ipv4": Style(bold=True, color="bright_green"),
"repr.ipv6": Style(bold=True, color="bright_green"),
"repr.eui48": Style(bold=True, color="bright_green"),
"repr.eui64": Style(bold=True, color="bright_green"),
"repr.tag_start": Style(bold=True),
"repr.tag_name": Style(color="bright_magenta", bold=True),
"repr.tag_contents": Style(color="default"),
"repr.tag_end": Style(bold=True),
"repr.attrib_name": Style(color="yellow", italic=False),
"repr.attrib_equal": Style(bold=True),
"repr.attrib_value": Style(color="magenta", italic=False),
"repr.number": Style(color="cyan", bold=True, italic=False),
"repr.number_complex": Style(color="cyan", bold=True, italic=False), # same
"repr.bool_true": Style(color="bright_green", italic=True),
"repr.bool_false": Style(color="bright_red", italic=True),
"repr.none": Style(color="magenta", italic=True),
"repr.url": Style(underline=True, color="bright_blue", italic=False, bold=False),
"repr.uuid": Style(color="bright_yellow", bold=False),
"repr.call": Style(color="magenta", bold=True),
"repr.path": Style(color="magenta"),
"repr.filename": Style(color="bright_magenta"),
"rule.line": Style(color="bright_green"),
"rule.text": Style.null(),
"json.brace": Style(bold=True),
"json.bool_true": Style(color="bright_green", italic=True),
"json.bool_false": Style(color="bright_red", italic=True),
"json.null": Style(color="magenta", italic=True),
"json.number": Style(color="cyan", bold=True, italic=False),
"json.str": Style(color="green", italic=False, bold=False),
"json.key": Style(color="blue", bold=True),
"prompt": Style.null(),
"prompt.choices": Style(color="magenta", bold=True),
"prompt.default": Style(color="cyan", bold=True),
"prompt.invalid": Style(color="red"),
"prompt.invalid.choice": Style(color="red"),
"pretty": Style.null(),
"scope.border": Style(color="blue"),
"scope.key": Style(color="yellow", italic=True),
"scope.key.special": Style(color="yellow", italic=True, dim=True),
"scope.equals": Style(color="red"),
"table.header": Style(bold=True),
"table.footer": Style(bold=True),
"table.cell": Style.null(),
"table.title": Style(italic=True),
"table.caption": Style(italic=True, dim=True),
"traceback.error": Style(color="red", italic=True),
"traceback.border.syntax_error": Style(color="bright_red"),
"traceback.border": Style(color="red"),
"traceback.text": Style.null(),
"traceback.title": Style(color="red", bold=True),
"traceback.exc_type": Style(color="bright_red", bold=True),
"traceback.exc_value": Style.null(),
"traceback.offset": Style(color="bright_red", bold=True),
"bar.back": Style(color="grey23"),
"bar.complete": Style(color="rgb(249,38,114)"),
"bar.finished": Style(color="rgb(114,156,31)"),
"bar.pulse": Style(color="rgb(249,38,114)"),
"progress.description": Style.null(),
"progress.filesize": Style(color="green"),
"progress.filesize.total": Style(color="green"),
"progress.download": Style(color="green"),
"progress.elapsed": Style(color="yellow"),
"progress.percentage": Style(color="magenta"),
"progress.remaining": Style(color="cyan"),
"progress.data.speed": Style(color="red"),
"progress.spinner": Style(color="green"),
"status.spinner": Style(color="green"),
"tree": Style(),
"tree.line": Style(),
"markdown.paragraph": Style(),
"markdown.text": Style(),
"markdown.emph": Style(italic=True),
"markdown.strong": Style(bold=True),
"markdown.code": Style(bgcolor="black", color="bright_white"),
"markdown.code_block": Style(dim=True, color="cyan", bgcolor="black"),
"markdown.block_quote": Style(color="magenta"),
"markdown.list": Style(color="cyan"),
"markdown.item": Style(),
"markdown.item.bullet": Style(color="yellow", bold=True),
"markdown.item.number": Style(color="yellow", bold=True),
"markdown.hr": Style(color="yellow"),
"markdown.h1.border": Style(),
"markdown.h1": Style(bold=True),
"markdown.h2": Style(bold=True, underline=True),
"markdown.h3": Style(bold=True),
"markdown.h4": Style(bold=True, dim=True),
"markdown.h5": Style(underline=True),
"markdown.h6": Style(italic=True),
"markdown.h7": Style(italic=True, dim=True),
"markdown.link": Style(color="bright_blue"),
"markdown.link_url": Style(color="blue"),
"iso8601.date": Style(color="blue"),
"iso8601.time": Style(color="magenta"),
"iso8601.timezone": Style(color="yellow"),
}
if __name__ == "__main__": # pragma: no cover
import argparse
import io
from pip._vendor.rich.console import Console
from pip._vendor.rich.table import Table
from pip._vendor.rich.text import Text
parser = argparse.ArgumentParser()
parser.add_argument("--html", action="store_true", help="Export as HTML table")
args = parser.parse_args()
html: bool = args.html
console = Console(record=True, width=70, file=io.StringIO()) if html else Console()
table = Table("Name", "Styling")
for style_name, style in DEFAULT_STYLES.items():
table.add_row(Text(style_name, style=style), str(style))
console.print(table)
if html:
print(console.export_html(inline_styles=True))
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/default_styles.py
|
Python
|
mit
| 7,954 |
import os
import platform
from pip._vendor.rich import inspect
from pip._vendor.rich.console import Console, get_windows_console_features
from pip._vendor.rich.panel import Panel
from pip._vendor.rich.pretty import Pretty
def report() -> None: # pragma: no cover
"""Print a report to the terminal with debugging information"""
console = Console()
inspect(console)
features = get_windows_console_features()
inspect(features)
env_names = (
"TERM",
"COLORTERM",
"CLICOLOR",
"NO_COLOR",
"TERM_PROGRAM",
"COLUMNS",
"LINES",
"JUPYTER_COLUMNS",
"JUPYTER_LINES",
"JPY_PARENT_PID",
"VSCODE_VERBOSE_LOGGING",
)
env = {name: os.getenv(name) for name in env_names}
console.print(Panel.fit((Pretty(env)), title="[b]Environment Variables"))
console.print(f'platform="{platform.system()}"')
if __name__ == "__main__": # pragma: no cover
report()
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/diagnose.py
|
Python
|
mit
| 972 |
import sys
from typing import TYPE_CHECKING, Optional, Union
from .jupyter import JupyterMixin
from .segment import Segment
from .style import Style
from ._emoji_codes import EMOJI
from ._emoji_replace import _emoji_replace
if sys.version_info >= (3, 8):
from typing import Literal
else:
from pip._vendor.typing_extensions import Literal # pragma: no cover
if TYPE_CHECKING:
from .console import Console, ConsoleOptions, RenderResult
EmojiVariant = Literal["emoji", "text"]
class NoEmoji(Exception):
"""No emoji by that name."""
class Emoji(JupyterMixin):
__slots__ = ["name", "style", "_char", "variant"]
VARIANTS = {"text": "\uFE0E", "emoji": "\uFE0F"}
def __init__(
self,
name: str,
style: Union[str, Style] = "none",
variant: Optional[EmojiVariant] = None,
) -> None:
"""A single emoji character.
Args:
name (str): Name of emoji.
            style (Union[str, Style], optional): Optional style. Defaults to "none".
Raises:
NoEmoji: If the emoji doesn't exist.
"""
self.name = name
self.style = style
self.variant = variant
try:
self._char = EMOJI[name]
except KeyError:
raise NoEmoji(f"No emoji called {name!r}")
if variant is not None:
self._char += self.VARIANTS.get(variant, "")
@classmethod
def replace(cls, text: str) -> str:
"""Replace emoji markup with corresponding unicode characters.
Args:
            text (str): A string with emoji codes, e.g. "Hello :smiley:!"
        Returns:
            str: A string with emoji codes replaced with actual emoji.
"""
return _emoji_replace(text)
def __repr__(self) -> str:
return f"<emoji {self.name!r}>"
def __str__(self) -> str:
return self._char
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
yield Segment(self._char, console.get_style(self.style))
if __name__ == "__main__": # pragma: no cover
import sys
from pip._vendor.rich.columns import Columns
from pip._vendor.rich.console import Console
console = Console(record=True)
columns = Columns(
(f":{name}: {name}" for name in sorted(EMOJI.keys()) if "\u200D" not in name),
column_first=True,
)
console.print(columns)
if len(sys.argv) > 1:
console.save_html(sys.argv[1])
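    # Illustrative addition (not in the upstream demo): Emoji instances can
    # also be constructed directly by name ("smiley" and "heart" are assumed
    # to exist in the EMOJI table); the optional variant selects the text or
    # emoji presentation form.
    console.print(Emoji("smiley"), Emoji("heart", variant="text"))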
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/emoji.py
|
Python
|
mit
| 2,501 |
class ConsoleError(Exception):
"""An error in console operation."""
class StyleError(Exception):
"""An error in styles."""
class StyleSyntaxError(ConsoleError):
"""Style was badly formatted."""
class MissingStyle(StyleError):
"""No such style."""
class StyleStackError(ConsoleError):
"""Style stack is invalid."""
class NotRenderableError(ConsoleError):
"""Object is not renderable."""
class MarkupError(ConsoleError):
"""Markup was badly formatted."""
class LiveError(ConsoleError):
"""Error related to Live display."""
class NoAltScreen(ConsoleError):
"""Alt screen mode was required."""
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/errors.py
|
Python
|
mit
| 642 |
import io
from typing import IO, TYPE_CHECKING, Any, List
from .ansi import AnsiDecoder
from .text import Text
if TYPE_CHECKING:
from .console import Console
class FileProxy(io.TextIOBase):
"""Wraps a file (e.g. sys.stdout) and redirects writes to a console."""
def __init__(self, console: "Console", file: IO[str]) -> None:
self.__console = console
self.__file = file
self.__buffer: List[str] = []
self.__ansi_decoder = AnsiDecoder()
@property
def rich_proxied_file(self) -> IO[str]:
"""Get proxied file."""
return self.__file
def __getattr__(self, name: str) -> Any:
return getattr(self.__file, name)
def write(self, text: str) -> int:
if not isinstance(text, str):
raise TypeError(f"write() argument must be str, not {type(text).__name__}")
buffer = self.__buffer
lines: List[str] = []
while text:
line, new_line, text = text.partition("\n")
if new_line:
lines.append("".join(buffer) + line)
del buffer[:]
else:
buffer.append(line)
break
if lines:
console = self.__console
with console:
output = Text("\n").join(
self.__ansi_decoder.decode_line(line) for line in lines
)
console.print(output)
return len(text)
def flush(self) -> None:
output = "".join(self.__buffer)
if output:
self.__console.print(output)
del self.__buffer[:]
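if __name__ == "__main__":  # pragma: no cover
    # Minimal usage sketch, added for illustration (not in the original
    # module): swap sys.stdout for a FileProxy so plain print() calls are
    # routed through a Console, then restore the real stream. Console
    # unwraps the proxy via rich_proxied_file, so writes do not recurse.
    import sys
    from pip._vendor.rich.console import Console
    console = Console()
    sys.stdout = proxy = FileProxy(console, sys.stdout)
    try:
        print("printed via FileProxy -> Console")
    finally:
        sys.stdout = proxy.rich_proxied_file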
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/file_proxy.py
|
Python
|
mit
| 1,616 |
# coding: utf-8
"""Functions for reporting filesizes. Borrowed from https://github.com/PyFilesystem/pyfilesystem2
The functions declared in this module should cover the different
usecases needed to generate a string representation of a file size
using several different units. Since there are many standards regarding
file size units, three different functions have been implemented.
See Also:
* `Wikipedia: Binary prefix <https://en.wikipedia.org/wiki/Binary_prefix>`_
"""
__all__ = ["decimal"]
from typing import Iterable, List, Optional, Tuple
def _to_str(
size: int,
suffixes: Iterable[str],
base: int,
*,
precision: Optional[int] = 1,
separator: Optional[str] = " ",
) -> str:
if size == 1:
return "1 byte"
elif size < base:
return "{:,} bytes".format(size)
for i, suffix in enumerate(suffixes, 2): # noqa: B007
unit = base**i
if size < unit:
break
return "{:,.{precision}f}{separator}{}".format(
(base * size / unit),
suffix,
precision=precision,
separator=separator,
)
def pick_unit_and_suffix(size: int, suffixes: List[str], base: int) -> Tuple[int, str]:
"""Pick a suffix and base for the given size."""
for i, suffix in enumerate(suffixes):
unit = base**i
if size < unit * base:
break
return unit, suffix
def decimal(
size: int,
*,
precision: Optional[int] = 1,
separator: Optional[str] = " ",
) -> str:
"""Convert a filesize in to a string (powers of 1000, SI prefixes).
In this convention, ``1000 B = 1 kB``.
This is typically the format used to advertise the storage
capacity of USB flash drives and the like (*256 MB* meaning
actually a storage capacity of more than *256 000 000 B*),
or used by **Mac OS X** since v10.6 to report file sizes.
Arguments:
        size (int): A file size.
        precision (int, optional): The number of decimal places to include (default = 1).
        separator (str, optional): The string to separate the value from the units (default = " ").
Returns:
        `str`: A string containing an abbreviated file size and units.
Example:
>>> filesize.decimal(30000)
'30.0 kB'
>>> filesize.decimal(30000, precision=2, separator="")
'30.00kB'
"""
return _to_str(
size,
("kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"),
1000,
precision=precision,
separator=separator,
)
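if __name__ == "__main__":  # pragma: no cover
    # Illustrative check (not part of the original module) mirroring the
    # doctest examples in decimal()'s docstring.
    print(decimal(30000))  # -> 30.0 kB
    print(decimal(30000, precision=2, separator=""))  # -> 30.00kB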
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/filesize.py
|
Python
|
mit
| 2,507 |
import re
from abc import ABC, abstractmethod
from typing import List, Union
from .text import Span, Text
def _combine_regex(*regexes: str) -> str:
"""Combine a number of regexes in to a single regex.
Returns:
str: New regex with all regexes ORed together.
"""
return "|".join(regexes)
class Highlighter(ABC):
"""Abstract base class for highlighters."""
def __call__(self, text: Union[str, Text]) -> Text:
"""Highlight a str or Text instance.
Args:
text (Union[str, ~Text]): Text to highlight.
Raises:
TypeError: If not called with text or str.
Returns:
            Text: A Text instance with highlighting applied.
"""
if isinstance(text, str):
highlight_text = Text(text)
elif isinstance(text, Text):
highlight_text = text.copy()
else:
raise TypeError(f"str or Text instance required, not {text!r}")
self.highlight(highlight_text)
return highlight_text
@abstractmethod
def highlight(self, text: Text) -> None:
"""Apply highlighting in place to text.
Args:
            text (~Text): A text object to highlight.
"""
class NullHighlighter(Highlighter):
"""A highlighter object that doesn't highlight.
May be used to disable highlighting entirely.
"""
def highlight(self, text: Text) -> None:
"""Nothing to do"""
class RegexHighlighter(Highlighter):
"""Applies highlighting from a list of regular expressions."""
highlights: List[str] = []
base_style: str = ""
def highlight(self, text: Text) -> None:
"""Highlight :class:`rich.text.Text` using regular expressions.
Args:
            text (~Text): Text to highlight.
"""
highlight_regex = text.highlight_regex
for re_highlight in self.highlights:
highlight_regex(re_highlight, style_prefix=self.base_style)
class ReprHighlighter(RegexHighlighter):
"""Highlights the text typically produced from ``__repr__`` methods."""
base_style = "repr."
highlights = [
r"(?P<tag_start><)(?P<tag_name>[-\w.:|]*)(?P<tag_contents>[\w\W]*?)(?P<tag_end>>)",
r'(?P<attrib_name>[\w_]{1,50})=(?P<attrib_value>"?[\w_]+"?)?',
r"(?P<brace>[][{}()])",
_combine_regex(
r"(?P<ipv4>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})",
r"(?P<ipv6>([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})",
r"(?P<eui64>(?:[0-9A-Fa-f]{1,2}-){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){3}[0-9A-Fa-f]{4})",
r"(?P<eui48>(?:[0-9A-Fa-f]{1,2}-){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){2}[0-9A-Fa-f]{4})",
r"(?P<uuid>[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12})",
r"(?P<call>[\w.]*?)\(",
r"\b(?P<bool_true>True)\b|\b(?P<bool_false>False)\b|\b(?P<none>None)\b",
r"(?P<ellipsis>\.\.\.)",
r"(?P<number_complex>(?<!\w)(?:\-?[0-9]+\.?[0-9]*(?:e[-+]?\d+?)?)(?:[-+](?:[0-9]+\.?[0-9]*(?:e[-+]?\d+)?))?j)",
r"(?P<number>(?<!\w)\-?[0-9]+\.?[0-9]*(e[-+]?\d+?)?\b|0x[0-9a-fA-F]*)",
r"(?P<path>\B(/[-\w._+]+)*\/)(?P<filename>[-\w._+]*)?",
r"(?<![\\\w])(?P<str>b?'''.*?(?<!\\)'''|b?'.*?(?<!\\)'|b?\"\"\".*?(?<!\\)\"\"\"|b?\".*?(?<!\\)\")",
r"(?P<url>(file|https|http|ws|wss)://[-0-9a-zA-Z$_+!`(),.?/;:&=%#]*)",
),
]
class JSONHighlighter(RegexHighlighter):
"""Highlights JSON"""
# Captures the start and end of JSON strings, handling escaped quotes
JSON_STR = r"(?<![\\\w])(?P<str>b?\".*?(?<!\\)\")"
JSON_WHITESPACE = {" ", "\n", "\r", "\t"}
base_style = "json."
highlights = [
_combine_regex(
r"(?P<brace>[\{\[\(\)\]\}])",
r"\b(?P<bool_true>true)\b|\b(?P<bool_false>false)\b|\b(?P<null>null)\b",
r"(?P<number>(?<!\w)\-?[0-9]+\.?[0-9]*(e[\-\+]?\d+?)?\b|0x[0-9a-fA-F]*)",
JSON_STR,
),
]
def highlight(self, text: Text) -> None:
super().highlight(text)
# Additional work to handle highlighting JSON keys
plain = text.plain
append = text.spans.append
whitespace = self.JSON_WHITESPACE
for match in re.finditer(self.JSON_STR, plain):
start, end = match.span()
cursor = end
while cursor < len(plain):
char = plain[cursor]
cursor += 1
if char == ":":
append(Span(start, end, "json.key"))
elif char in whitespace:
continue
break
class ISO8601Highlighter(RegexHighlighter):
"""Highlights the ISO8601 date time strings.
Regex reference: https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s07.html
"""
base_style = "iso8601."
highlights = [
#
# Dates
#
# Calendar month (e.g. 2008-08). The hyphen is required
r"^(?P<year>[0-9]{4})-(?P<month>1[0-2]|0[1-9])$",
# Calendar date w/o hyphens (e.g. 20080830)
r"^(?P<date>(?P<year>[0-9]{4})(?P<month>1[0-2]|0[1-9])(?P<day>3[01]|0[1-9]|[12][0-9]))$",
# Ordinal date (e.g. 2008-243). The hyphen is optional
r"^(?P<date>(?P<year>[0-9]{4})-?(?P<day>36[0-6]|3[0-5][0-9]|[12][0-9]{2}|0[1-9][0-9]|00[1-9]))$",
#
# Weeks
#
# Week of the year (e.g., 2008-W35). The hyphen is optional
r"^(?P<date>(?P<year>[0-9]{4})-?W(?P<week>5[0-3]|[1-4][0-9]|0[1-9]))$",
# Week date (e.g., 2008-W35-6). The hyphens are optional
r"^(?P<date>(?P<year>[0-9]{4})-?W(?P<week>5[0-3]|[1-4][0-9]|0[1-9])-?(?P<day>[1-7]))$",
#
# Times
#
# Hours and minutes (e.g., 17:21). The colon is optional
r"^(?P<time>(?P<hour>2[0-3]|[01][0-9]):?(?P<minute>[0-5][0-9]))$",
# Hours, minutes, and seconds w/o colons (e.g., 172159)
r"^(?P<time>(?P<hour>2[0-3]|[01][0-9])(?P<minute>[0-5][0-9])(?P<second>[0-5][0-9]))$",
# Time zone designator (e.g., Z, +07 or +07:00). The colons and the minutes are optional
r"^(?P<timezone>(Z|[+-](?:2[0-3]|[01][0-9])(?::?(?:[0-5][0-9]))?))$",
# Hours, minutes, and seconds with time zone designator (e.g., 17:21:59+07:00).
# All the colons are optional. The minutes in the time zone designator are also optional
r"^(?P<time>(?P<hour>2[0-3]|[01][0-9])(?P<minute>[0-5][0-9])(?P<second>[0-5][0-9]))(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9])(?::?(?:[0-5][0-9]))?)$",
#
# Date and Time
#
# Calendar date with hours, minutes, and seconds (e.g., 2008-08-30 17:21:59 or 20080830 172159).
# A space is required between the date and the time. The hyphens and colons are optional.
# This regex matches dates and times that specify some hyphens or colons but omit others.
# This does not follow ISO 8601
r"^(?P<date>(?P<year>[0-9]{4})(?P<hyphen>-)?(?P<month>1[0-2]|0[1-9])(?(hyphen)-)(?P<day>3[01]|0[1-9]|[12][0-9])) (?P<time>(?P<hour>2[0-3]|[01][0-9])(?(hyphen):)(?P<minute>[0-5][0-9])(?(hyphen):)(?P<second>[0-5][0-9]))$",
#
# XML Schema dates and times
#
# Date, with optional time zone (e.g., 2008-08-30 or 2008-08-30+07:00).
# Hyphens are required. This is the XML Schema 'date' type
r"^(?P<date>(?P<year>-?(?:[1-9][0-9]*)?[0-9]{4})-(?P<month>1[0-2]|0[1-9])-(?P<day>3[01]|0[1-9]|[12][0-9]))(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
# Time, with optional fractional seconds and time zone (e.g., 01:45:36 or 01:45:36.123+07:00).
# There is no limit on the number of digits for the fractional seconds. This is the XML Schema 'time' type
r"^(?P<time>(?P<hour>2[0-3]|[01][0-9]):(?P<minute>[0-5][0-9]):(?P<second>[0-5][0-9])(?P<frac>\.[0-9]+)?)(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
# Date and time, with optional fractional seconds and time zone (e.g., 2008-08-30T01:45:36 or 2008-08-30T01:45:36.123Z).
# This is the XML Schema 'dateTime' type
r"^(?P<date>(?P<year>-?(?:[1-9][0-9]*)?[0-9]{4})-(?P<month>1[0-2]|0[1-9])-(?P<day>3[01]|0[1-9]|[12][0-9]))T(?P<time>(?P<hour>2[0-3]|[01][0-9]):(?P<minute>[0-5][0-9]):(?P<second>[0-5][0-9])(?P<ms>\.[0-9]+)?)(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
]
if __name__ == "__main__": # pragma: no cover
from .console import Console
console = Console()
console.print("[bold green]hello world![/bold green]")
console.print("'[bold green]hello world![/bold green]'")
console.print(" /foo")
console.print("/foo/")
console.print("/foo/bar")
console.print("foo/bar/baz")
console.print("/foo/bar/baz?foo=bar+egg&egg=baz")
console.print("/foo/bar/baz/")
console.print("/foo/bar/baz/egg")
console.print("/foo/bar/baz/egg.py")
console.print("/foo/bar/baz/egg.py word")
console.print(" /foo/bar/baz/egg.py word")
console.print("foo /foo/bar/baz/egg.py word")
console.print("foo /foo/bar/ba._++z/egg+.py word")
console.print("https://example.org?foo=bar#header")
console.print(1234567.34)
console.print(1 / 2)
console.print(-1 / 123123123123)
console.print(
"127.0.1.1 bar 192.168.1.4 2001:0db8:85a3:0000:0000:8a2e:0370:7334 foo"
)
import json
console.print_json(json.dumps(obj={"name": "apple", "count": 1}), indent=None)
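    # Illustrative addition (not in the upstream demo): a custom
    # RegexHighlighter. Named groups map to styles prefixed with base_style,
    # so the "email" group resolves to the made-up style name
    # "example.email", supplied here via a Theme.
    from pip._vendor.rich.theme import Theme
    class EmailHighlighter(RegexHighlighter):
        base_style = "example."
        highlights = [r"(?P<email>[\w.+-]+@[\w-]+\.[\w.-]+)"]
    email_console = Console(
        theme=Theme({"example.email": "bold magenta"}),
        highlighter=EmailHighlighter(),
    )
    email_console.print("Send funds to jane@example.com")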
|
castiel248/Convert
|
Lib/site-packages/pip/_vendor/rich/highlighter.py
|
Python
|
mit
| 9,585 |