lib/python3.11/site-packages/pip/_vendor/pygments/__init__.py (new file, 82 lines)
| @@ -0,0 +1,82 @@ | ||||
| """ | ||||
|     Pygments | ||||
|     ~~~~~~~~ | ||||
|  | ||||
|     Pygments is a syntax highlighting package written in Python. | ||||
|  | ||||
|     It is a generic syntax highlighter for general use in all kinds of software | ||||
|     such as forum systems, wikis or other applications that need to prettify | ||||
|     source code. Highlights are: | ||||
|  | ||||
|     * a wide range of common languages and markup formats is supported | ||||
|     * special attention is paid to details, increasing quality by a fair amount | ||||
|     * support for new languages and formats is added easily | ||||
|     * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image | ||||
|       formats that PIL supports, and ANSI sequences | ||||
|     * it is usable as a command-line tool and as a library | ||||
|     * ... and it highlights even Brainfuck! | ||||
|  | ||||
|     The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``. | ||||
|  | ||||
|     .. _Pygments master branch: | ||||
|        https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
| from io import StringIO, BytesIO | ||||
|  | ||||
| __version__ = '2.13.0' | ||||
| __docformat__ = 'restructuredtext' | ||||
|  | ||||
| __all__ = ['lex', 'format', 'highlight'] | ||||
|  | ||||
|  | ||||
| def lex(code, lexer): | ||||
|     """ | ||||
|     Lex ``code`` with ``lexer`` and return an iterable of tokens. | ||||
|     """ | ||||
|     try: | ||||
|         return lexer.get_tokens(code) | ||||
|     except TypeError: | ||||
|         # Heuristic to catch a common mistake. | ||||
|         from pip._vendor.pygments.lexer import RegexLexer | ||||
|         if isinstance(lexer, type) and issubclass(lexer, RegexLexer): | ||||
|             raise TypeError('lex() argument must be a lexer instance, ' | ||||
|                             'not a class') | ||||
|         raise | ||||
|  | ||||
|  | ||||
| def format(tokens, formatter, outfile=None):  # pylint: disable=redefined-builtin | ||||
|     """ | ||||
|     Format a tokenlist ``tokens`` with the formatter ``formatter``. | ||||
|  | ||||
|     If ``outfile`` is given and a valid file object (an object | ||||
|     with a ``write`` method), the result will be written to it, otherwise | ||||
|     it is returned as a string. | ||||
|     """ | ||||
|     try: | ||||
|         if not outfile: | ||||
|             realoutfile = BytesIO() if getattr(formatter, 'encoding', None) else StringIO() | ||||
|             formatter.format(tokens, realoutfile) | ||||
|             return realoutfile.getvalue() | ||||
|         else: | ||||
|             formatter.format(tokens, outfile) | ||||
|     except TypeError: | ||||
|         # Heuristic to catch a common mistake. | ||||
|         from pip._vendor.pygments.formatter import Formatter | ||||
|         if isinstance(formatter, type) and issubclass(formatter, Formatter): | ||||
|             raise TypeError('format() argument must be a formatter instance, ' | ||||
|                             'not a class') | ||||
|         raise | ||||
|  | ||||
|  | ||||
| def highlight(code, lexer, formatter, outfile=None): | ||||
|     """ | ||||
|     Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``. | ||||
|  | ||||
|     If ``outfile`` is given and a valid file object (an object | ||||
|     with a ``write`` method), the result will be written to it, otherwise | ||||
|     it is returned as a string. | ||||
|     """ | ||||
|     return format(lex(code, lexer), formatter, outfile) | ||||
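A quick aside on the public API above: ``highlight`` is simply ``format(lex(code, lexer), formatter, outfile)``. A minimal usage sketch (using pip's vendored import path; with a standalone Pygments install the imports come from ``pygments`` directly)::

    from pip._vendor.pygments import highlight
    from pip._vendor.pygments.lexers import PythonLexer
    from pip._vendor.pygments.formatters import TerminalFormatter

    # No outfile is given, so highlight() returns the result as a string.
    print(highlight('print("hello")', PythonLexer(), TerminalFormatter()))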
lib/python3.11/site-packages/pip/_vendor/pygments/__main__.py (new file, 17 lines)
| @@ -0,0 +1,17 @@ | ||||
| """ | ||||
|     pygments.__main__ | ||||
|     ~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Main entry point for ``python -m pygments``. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| import sys | ||||
| from pip._vendor.pygments.cmdline import main | ||||
|  | ||||
| try: | ||||
|     sys.exit(main(sys.argv)) | ||||
| except KeyboardInterrupt: | ||||
|     sys.exit(1) | ||||
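The same entry point can also be driven programmatically; a small sketch (``example.py`` is a hypothetical input file, and ``argv[0]`` is ignored by the argument parser)::

    from pip._vendor.pygments.cmdline import main

    # Equivalent to the shell command: pygmentize -l python -f terminal example.py
    rc = main(['pygmentize', '-l', 'python', '-f', 'terminal', 'example.py'])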
							
								
								
									
lib/python3.11/site-packages/pip/_vendor/pygments/cmdline.py (new file, 668 lines)
| @@ -0,0 +1,668 @@ | ||||
| """ | ||||
|     pygments.cmdline | ||||
|     ~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Command line interface. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| import os | ||||
| import sys | ||||
| import shutil | ||||
| import argparse | ||||
| from textwrap import dedent | ||||
|  | ||||
| from pip._vendor.pygments import __version__, highlight | ||||
| from pip._vendor.pygments.util import ClassNotFound, OptionError, docstring_headline, \ | ||||
|     guess_decode, guess_decode_from_terminal, terminal_encoding, \ | ||||
|     UnclosingTextIOWrapper | ||||
| from pip._vendor.pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \ | ||||
|     load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename | ||||
| from pip._vendor.pygments.lexers.special import TextLexer | ||||
| from pip._vendor.pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter | ||||
| from pip._vendor.pygments.formatters import get_all_formatters, get_formatter_by_name, \ | ||||
|     load_formatter_from_file, get_formatter_for_filename, find_formatter_class | ||||
| from pip._vendor.pygments.formatters.terminal import TerminalFormatter | ||||
| from pip._vendor.pygments.formatters.terminal256 import Terminal256Formatter, TerminalTrueColorFormatter | ||||
| from pip._vendor.pygments.filters import get_all_filters, find_filter_class | ||||
| from pip._vendor.pygments.styles import get_all_styles, get_style_by_name | ||||
|  | ||||
|  | ||||
| def _parse_options(o_strs): | ||||
|     opts = {} | ||||
|     if not o_strs: | ||||
|         return opts | ||||
|     for o_str in o_strs: | ||||
|         if not o_str.strip(): | ||||
|             continue | ||||
|         o_args = o_str.split(',') | ||||
|         for o_arg in o_args: | ||||
|             o_arg = o_arg.strip() | ||||
|             try: | ||||
|                 o_key, o_val = o_arg.split('=', 1) | ||||
|                 o_key = o_key.strip() | ||||
|                 o_val = o_val.strip() | ||||
|             except ValueError: | ||||
|                 opts[o_arg] = True | ||||
|             else: | ||||
|                 opts[o_key] = o_val | ||||
|     return opts | ||||
|  | ||||
|  | ||||
| def _parse_filters(f_strs): | ||||
|     filters = [] | ||||
|     if not f_strs: | ||||
|         return filters | ||||
|     for f_str in f_strs: | ||||
|         if ':' in f_str: | ||||
|             fname, fopts = f_str.split(':', 1) | ||||
|             filters.append((fname, _parse_options([fopts]))) | ||||
|         else: | ||||
|             filters.append((f_str, {})) | ||||
|     return filters | ||||
|  | ||||
|  | ||||
| def _print_help(what, name): | ||||
|     try: | ||||
|         if what == 'lexer': | ||||
|             cls = get_lexer_by_name(name) | ||||
|             print("Help on the %s lexer:" % cls.name) | ||||
|             print(dedent(cls.__doc__)) | ||||
|         elif what == 'formatter': | ||||
|             cls = find_formatter_class(name) | ||||
|             print("Help on the %s formatter:" % cls.name) | ||||
|             print(dedent(cls.__doc__)) | ||||
|         elif what == 'filter': | ||||
|             cls = find_filter_class(name) | ||||
|             print("Help on the %s filter:" % name) | ||||
|             print(dedent(cls.__doc__)) | ||||
|         return 0 | ||||
|     except (AttributeError, ValueError): | ||||
|         print("%s not found!" % what, file=sys.stderr) | ||||
|         return 1 | ||||
|  | ||||
|  | ||||
| def _print_list(what): | ||||
|     if what == 'lexer': | ||||
|         print() | ||||
|         print("Lexers:") | ||||
|         print("~~~~~~~") | ||||
|  | ||||
|         info = [] | ||||
|         for fullname, names, exts, _ in get_all_lexers(): | ||||
|             tup = (', '.join(names)+':', fullname, | ||||
|                    exts and '(filenames ' + ', '.join(exts) + ')' or '') | ||||
|             info.append(tup) | ||||
|         info.sort() | ||||
|         for i in info: | ||||
|             print(('* %s\n    %s %s') % i) | ||||
|  | ||||
|     elif what == 'formatter': | ||||
|         print() | ||||
|         print("Formatters:") | ||||
|         print("~~~~~~~~~~~") | ||||
|  | ||||
|         info = [] | ||||
|         for cls in get_all_formatters(): | ||||
|             doc = docstring_headline(cls) | ||||
|             tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and | ||||
|                    '(filenames ' + ', '.join(cls.filenames) + ')' or '') | ||||
|             info.append(tup) | ||||
|         info.sort() | ||||
|         for i in info: | ||||
|             print(('* %s\n    %s %s') % i) | ||||
|  | ||||
|     elif what == 'filter': | ||||
|         print() | ||||
|         print("Filters:") | ||||
|         print("~~~~~~~~") | ||||
|  | ||||
|         for name in get_all_filters(): | ||||
|             cls = find_filter_class(name) | ||||
|             print("* " + name + ':') | ||||
|             print("    %s" % docstring_headline(cls)) | ||||
|  | ||||
|     elif what == 'style': | ||||
|         print() | ||||
|         print("Styles:") | ||||
|         print("~~~~~~~") | ||||
|  | ||||
|         for name in get_all_styles(): | ||||
|             cls = get_style_by_name(name) | ||||
|             print("* " + name + ':') | ||||
|             print("    %s" % docstring_headline(cls)) | ||||
|  | ||||
|  | ||||
| def _print_list_as_json(requested_items): | ||||
|     import json | ||||
|     result = {} | ||||
|     if 'lexer' in requested_items: | ||||
|         info = {} | ||||
|         for fullname, names, filenames, mimetypes in get_all_lexers(): | ||||
|             info[fullname] = { | ||||
|                 'aliases': names, | ||||
|                 'filenames': filenames, | ||||
|                 'mimetypes': mimetypes | ||||
|             } | ||||
|         result['lexers'] = info | ||||
|  | ||||
|     if 'formatter' in requested_items: | ||||
|         info = {} | ||||
|         for cls in get_all_formatters(): | ||||
|             doc = docstring_headline(cls) | ||||
|             info[cls.name] = { | ||||
|                 'aliases': cls.aliases, | ||||
|                 'filenames': cls.filenames, | ||||
|                 'doc': doc | ||||
|             } | ||||
|         result['formatters'] = info | ||||
|  | ||||
|     if 'filter' in requested_items: | ||||
|         info = {} | ||||
|         for name in get_all_filters(): | ||||
|             cls = find_filter_class(name) | ||||
|             info[name] = { | ||||
|                 'doc': docstring_headline(cls) | ||||
|             } | ||||
|         result['filters'] = info | ||||
|  | ||||
|     if 'style' in requested_items: | ||||
|         info = {} | ||||
|         for name in get_all_styles(): | ||||
|             cls = get_style_by_name(name) | ||||
|             info[name] = { | ||||
|                 'doc': docstring_headline(cls) | ||||
|             } | ||||
|         result['styles'] = info | ||||
|  | ||||
|     json.dump(result, sys.stdout) | ||||
|  | ||||
| def main_inner(parser, argns): | ||||
|     if argns.help: | ||||
|         parser.print_help() | ||||
|         return 0 | ||||
|  | ||||
|     if argns.V: | ||||
|         print('Pygments version %s, (c) 2006-2022 by Georg Brandl, Matthäus ' | ||||
|               'Chajdas and contributors.' % __version__) | ||||
|         return 0 | ||||
|  | ||||
|     def is_only_option(opt): | ||||
|         return not any(v for (k, v) in vars(argns).items() if k != opt) | ||||
|  | ||||
|     # handle ``pygmentize -L`` | ||||
|     if argns.L is not None: | ||||
|         arg_set = set() | ||||
|         for k, v in vars(argns).items(): | ||||
|             if v: | ||||
|                 arg_set.add(k) | ||||
|  | ||||
|         arg_set.discard('L') | ||||
|         arg_set.discard('json') | ||||
|  | ||||
|         if arg_set: | ||||
|             parser.print_help(sys.stderr) | ||||
|             return 2 | ||||
|  | ||||
|         # print version | ||||
|         if not argns.json: | ||||
|             main(['', '-V']) | ||||
|         allowed_types = {'lexer', 'formatter', 'filter', 'style'} | ||||
|         largs = [arg.rstrip('s') for arg in argns.L] | ||||
|         if any(arg not in allowed_types for arg in largs): | ||||
|             parser.print_help(sys.stderr) | ||||
|             return 0 | ||||
|         if not largs: | ||||
|             largs = allowed_types | ||||
|         if not argns.json: | ||||
|             for arg in largs: | ||||
|                 _print_list(arg) | ||||
|         else: | ||||
|             _print_list_as_json(largs) | ||||
|         return 0 | ||||
|  | ||||
|     # handle ``pygmentize -H`` | ||||
|     if argns.H: | ||||
|         if not is_only_option('H'): | ||||
|             parser.print_help(sys.stderr) | ||||
|             return 2 | ||||
|         what, name = argns.H | ||||
|         if what not in ('lexer', 'formatter', 'filter'): | ||||
|             parser.print_help(sys.stderr) | ||||
|             return 2 | ||||
|         return _print_help(what, name) | ||||
|  | ||||
|     # parse -O options | ||||
|     parsed_opts = _parse_options(argns.O or []) | ||||
|  | ||||
|     # parse -P options | ||||
|     for p_opt in argns.P or []: | ||||
|         try: | ||||
|             name, value = p_opt.split('=', 1) | ||||
|         except ValueError: | ||||
|             parsed_opts[p_opt] = True | ||||
|         else: | ||||
|             parsed_opts[name] = value | ||||
|  | ||||
|     # encodings | ||||
|     inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding')) | ||||
|     outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding')) | ||||
|  | ||||
|     # handle ``pygmentize -N`` | ||||
|     if argns.N: | ||||
|         lexer = find_lexer_class_for_filename(argns.N) | ||||
|         if lexer is None: | ||||
|             lexer = TextLexer | ||||
|  | ||||
|         print(lexer.aliases[0]) | ||||
|         return 0 | ||||
|  | ||||
|     # handle ``pygmentize -C`` | ||||
|     if argns.C: | ||||
|         inp = sys.stdin.buffer.read() | ||||
|         try: | ||||
|             lexer = guess_lexer(inp, inencoding=inencoding) | ||||
|         except ClassNotFound: | ||||
|             lexer = TextLexer | ||||
|  | ||||
|         print(lexer.aliases[0]) | ||||
|         return 0 | ||||
|  | ||||
|     # handle ``pygmentize -S`` | ||||
|     S_opt = argns.S | ||||
|     a_opt = argns.a | ||||
|     if S_opt is not None: | ||||
|         f_opt = argns.f | ||||
|         if not f_opt: | ||||
|             parser.print_help(sys.stderr) | ||||
|             return 2 | ||||
|         if argns.l or argns.INPUTFILE: | ||||
|             parser.print_help(sys.stderr) | ||||
|             return 2 | ||||
|  | ||||
|         try: | ||||
|             parsed_opts['style'] = S_opt | ||||
|             fmter = get_formatter_by_name(f_opt, **parsed_opts) | ||||
|         except ClassNotFound as err: | ||||
|             print(err, file=sys.stderr) | ||||
|             return 1 | ||||
|  | ||||
|         print(fmter.get_style_defs(a_opt or '')) | ||||
|         return 0 | ||||
|  | ||||
|     # if no -S is given, -a is not allowed | ||||
|     if argns.a is not None: | ||||
|         parser.print_help(sys.stderr) | ||||
|         return 2 | ||||
|  | ||||
|     # parse -F options | ||||
|     F_opts = _parse_filters(argns.F or []) | ||||
|  | ||||
|     # -x: allow custom (eXternal) lexers and formatters | ||||
|     allow_custom_lexer_formatter = bool(argns.x) | ||||
|  | ||||
|     # select lexer | ||||
|     lexer = None | ||||
|  | ||||
|     # given by name? | ||||
|     lexername = argns.l | ||||
|     if lexername: | ||||
|         # custom lexer, located relative to user's cwd | ||||
|         if allow_custom_lexer_formatter and '.py' in lexername: | ||||
|             try: | ||||
|                 filename = None | ||||
|                 name = None | ||||
|                 if ':' in lexername: | ||||
|                     filename, name = lexername.rsplit(':', 1) | ||||
|  | ||||
|                     if '.py' in name: | ||||
|                         # This can happen on Windows: If the lexername is | ||||
|                         # C:\lexer.py -- return to normal load path in that case | ||||
|                         name = None | ||||
|  | ||||
|                 if filename and name: | ||||
|                     lexer = load_lexer_from_file(filename, name, | ||||
|                                                  **parsed_opts) | ||||
|                 else: | ||||
|                     lexer = load_lexer_from_file(lexername, **parsed_opts) | ||||
|             except ClassNotFound as err: | ||||
|                 print('Error:', err, file=sys.stderr) | ||||
|                 return 1 | ||||
|         else: | ||||
|             try: | ||||
|                 lexer = get_lexer_by_name(lexername, **parsed_opts) | ||||
|             except (OptionError, ClassNotFound) as err: | ||||
|                 print('Error:', err, file=sys.stderr) | ||||
|                 return 1 | ||||
|  | ||||
|     # read input code | ||||
|     code = None | ||||
|  | ||||
|     if argns.INPUTFILE: | ||||
|         if argns.s: | ||||
|             print('Error: -s option not usable when input file specified', | ||||
|                   file=sys.stderr) | ||||
|             return 2 | ||||
|  | ||||
|         infn = argns.INPUTFILE | ||||
|         try: | ||||
|             with open(infn, 'rb') as infp: | ||||
|                 code = infp.read() | ||||
|         except Exception as err: | ||||
|             print('Error: cannot read infile:', err, file=sys.stderr) | ||||
|             return 1 | ||||
|         if not inencoding: | ||||
|             code, inencoding = guess_decode(code) | ||||
|  | ||||
|         # do we have to guess the lexer? | ||||
|         if not lexer: | ||||
|             try: | ||||
|                 lexer = get_lexer_for_filename(infn, code, **parsed_opts) | ||||
|             except ClassNotFound as err: | ||||
|                 if argns.g: | ||||
|                     try: | ||||
|                         lexer = guess_lexer(code, **parsed_opts) | ||||
|                     except ClassNotFound: | ||||
|                         lexer = TextLexer(**parsed_opts) | ||||
|                 else: | ||||
|                     print('Error:', err, file=sys.stderr) | ||||
|                     return 1 | ||||
|             except OptionError as err: | ||||
|                 print('Error:', err, file=sys.stderr) | ||||
|                 return 1 | ||||
|  | ||||
|     elif not argns.s:  # treat stdin as full file (-s support is later) | ||||
|         # read code from terminal, always in binary mode since we want to | ||||
|         # decode ourselves and be tolerant with it | ||||
|         code = sys.stdin.buffer.read()  # use .buffer to get a binary stream | ||||
|         if not inencoding: | ||||
|             code, inencoding = guess_decode_from_terminal(code, sys.stdin) | ||||
|             # else the lexer will do the decoding | ||||
|         if not lexer: | ||||
|             try: | ||||
|                 lexer = guess_lexer(code, **parsed_opts) | ||||
|             except ClassNotFound: | ||||
|                 lexer = TextLexer(**parsed_opts) | ||||
|  | ||||
|     else:  # -s option needs a lexer with -l | ||||
|         if not lexer: | ||||
|             print('Error: when using -s a lexer has to be selected with -l', | ||||
|                   file=sys.stderr) | ||||
|             return 2 | ||||
|  | ||||
|     # process filters | ||||
|     for fname, fopts in F_opts: | ||||
|         try: | ||||
|             lexer.add_filter(fname, **fopts) | ||||
|         except ClassNotFound as err: | ||||
|             print('Error:', err, file=sys.stderr) | ||||
|             return 1 | ||||
|  | ||||
|     # select formatter | ||||
|     outfn = argns.o | ||||
|     fmter = argns.f | ||||
|     if fmter: | ||||
|         # custom formatter, located relative to user's cwd | ||||
|         if allow_custom_lexer_formatter and '.py' in fmter: | ||||
|             try: | ||||
|                 filename = None | ||||
|                 name = None | ||||
|                 if ':' in fmter: | ||||
|                     # Same logic as above for custom lexer | ||||
|                     filename, name = fmter.rsplit(':', 1) | ||||
|  | ||||
|                     if '.py' in name: | ||||
|                         name = None | ||||
|  | ||||
|                 if filename and name: | ||||
|                     fmter = load_formatter_from_file(filename, name, | ||||
|                                                      **parsed_opts) | ||||
|                 else: | ||||
|                     fmter = load_formatter_from_file(fmter, **parsed_opts) | ||||
|             except ClassNotFound as err: | ||||
|                 print('Error:', err, file=sys.stderr) | ||||
|                 return 1 | ||||
|         else: | ||||
|             try: | ||||
|                 fmter = get_formatter_by_name(fmter, **parsed_opts) | ||||
|             except (OptionError, ClassNotFound) as err: | ||||
|                 print('Error:', err, file=sys.stderr) | ||||
|                 return 1 | ||||
|  | ||||
|     if outfn: | ||||
|         if not fmter: | ||||
|             try: | ||||
|                 fmter = get_formatter_for_filename(outfn, **parsed_opts) | ||||
|             except (OptionError, ClassNotFound) as err: | ||||
|                 print('Error:', err, file=sys.stderr) | ||||
|                 return 1 | ||||
|         try: | ||||
|             outfile = open(outfn, 'wb') | ||||
|         except Exception as err: | ||||
|             print('Error: cannot open outfile:', err, file=sys.stderr) | ||||
|             return 1 | ||||
|     else: | ||||
|         if not fmter: | ||||
|             if os.environ.get('COLORTERM','') in ('truecolor', '24bit'): | ||||
|                 fmter = TerminalTrueColorFormatter(**parsed_opts) | ||||
|             elif '256' in os.environ.get('TERM', ''): | ||||
|                 fmter = Terminal256Formatter(**parsed_opts) | ||||
|             else: | ||||
|                 fmter = TerminalFormatter(**parsed_opts) | ||||
|         outfile = sys.stdout.buffer | ||||
|  | ||||
|     # determine output encoding if not explicitly selected | ||||
|     if not outencoding: | ||||
|         if outfn: | ||||
|             # output file? use lexer encoding for now (can still be None) | ||||
|             fmter.encoding = inencoding | ||||
|         else: | ||||
|             # else use terminal encoding | ||||
|             fmter.encoding = terminal_encoding(sys.stdout) | ||||
|  | ||||
|     # provide coloring under Windows, if possible | ||||
|     if not outfn and sys.platform in ('win32', 'cygwin') and \ | ||||
|        fmter.name in ('Terminal', 'Terminal256'):  # pragma: no cover | ||||
|         # unfortunately colorama doesn't support binary streams on Py3 | ||||
|         outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding) | ||||
|         fmter.encoding = None | ||||
|         try: | ||||
|             import pip._vendor.colorama.initialise as colorama_initialise | ||||
|         except ImportError: | ||||
|             pass | ||||
|         else: | ||||
|             outfile = colorama_initialise.wrap_stream( | ||||
|                 outfile, convert=None, strip=None, autoreset=False, wrap=True) | ||||
|  | ||||
|     # When using the LaTeX formatter and the option `escapeinside` is | ||||
|     # specified, we need a special lexer which collects escaped text | ||||
|     # before running the chosen language lexer. | ||||
|     escapeinside = parsed_opts.get('escapeinside', '') | ||||
|     if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter): | ||||
|         left = escapeinside[0] | ||||
|         right = escapeinside[1] | ||||
|         lexer = LatexEmbeddedLexer(left, right, lexer) | ||||
|  | ||||
|     # ... and do it! | ||||
|     if not argns.s: | ||||
|         # process whole input as per normal... | ||||
|         try: | ||||
|             highlight(code, lexer, fmter, outfile) | ||||
|         finally: | ||||
|             if outfn: | ||||
|                 outfile.close() | ||||
|         return 0 | ||||
|     else: | ||||
|         # line by line processing of stdin (eg: for 'tail -f')... | ||||
|         try: | ||||
|             while 1: | ||||
|                 line = sys.stdin.buffer.readline() | ||||
|                 if not line: | ||||
|                     break | ||||
|                 if not inencoding: | ||||
|                     line = guess_decode_from_terminal(line, sys.stdin)[0] | ||||
|                 highlight(line, lexer, fmter, outfile) | ||||
|                 if hasattr(outfile, 'flush'): | ||||
|                     outfile.flush() | ||||
|             return 0 | ||||
|         except KeyboardInterrupt:  # pragma: no cover | ||||
|             return 0 | ||||
|         finally: | ||||
|             if outfn: | ||||
|                 outfile.close() | ||||
|  | ||||
|  | ||||
| class HelpFormatter(argparse.HelpFormatter): | ||||
|     def __init__(self, prog, indent_increment=2, max_help_position=16, width=None): | ||||
|         if width is None: | ||||
|             try: | ||||
|                 width = shutil.get_terminal_size().columns - 2 | ||||
|             except Exception: | ||||
|                 pass | ||||
|         argparse.HelpFormatter.__init__(self, prog, indent_increment, | ||||
|                                         max_help_position, width) | ||||
|  | ||||
|  | ||||
| def main(args=sys.argv): | ||||
|     """ | ||||
|     Main command line entry point. | ||||
|     """ | ||||
|     desc = "Highlight an input file and write the result to an output file." | ||||
|     parser = argparse.ArgumentParser(description=desc, add_help=False, | ||||
|                                      formatter_class=HelpFormatter) | ||||
|  | ||||
|     operation = parser.add_argument_group('Main operation') | ||||
|     lexersel = operation.add_mutually_exclusive_group() | ||||
|     lexersel.add_argument( | ||||
|         '-l', metavar='LEXER', | ||||
|         help='Specify the lexer to use.  (Query names with -L.)  If not ' | ||||
|         'given and -g is not present, the lexer is guessed from the filename.') | ||||
|     lexersel.add_argument( | ||||
|         '-g', action='store_true', | ||||
|         help='Guess the lexer from the file contents, or pass through ' | ||||
|         'as plain text if nothing can be guessed.') | ||||
|     operation.add_argument( | ||||
|         '-F', metavar='FILTER[:options]', action='append', | ||||
|         help='Add a filter to the token stream.  (Query names with -L.) ' | ||||
|         'Filter options are given after a colon if necessary.') | ||||
|     operation.add_argument( | ||||
|         '-f', metavar='FORMATTER', | ||||
|         help='Specify the formatter to use.  (Query names with -L.) ' | ||||
|         'If not given, the formatter is guessed from the output filename, ' | ||||
|         'and defaults to the terminal formatter if the output is to the ' | ||||
|         'terminal or an unknown file extension.') | ||||
|     operation.add_argument( | ||||
|         '-O', metavar='OPTION=value[,OPTION=value,...]', action='append', | ||||
|         help='Give options to the lexer and formatter as a comma-separated ' | ||||
|         'list of key-value pairs. ' | ||||
|         'Example: `-O bg=light,python=cool`.') | ||||
|     operation.add_argument( | ||||
|         '-P', metavar='OPTION=value', action='append', | ||||
|         help='Give a single option to the lexer and formatter - with this ' | ||||
|         'you can pass options whose value contains commas and equal signs. ' | ||||
|         'Example: `-P "heading=Pygments, the Python highlighter"`.') | ||||
|     operation.add_argument( | ||||
|         '-o', metavar='OUTPUTFILE', | ||||
|         help='Where to write the output.  Defaults to standard output.') | ||||
|  | ||||
|     operation.add_argument( | ||||
|         'INPUTFILE', nargs='?', | ||||
|         help='Where to read the input.  Defaults to standard input.') | ||||
|  | ||||
|     flags = parser.add_argument_group('Operation flags') | ||||
|     flags.add_argument( | ||||
|         '-v', action='store_true', | ||||
|         help='Print a detailed traceback on unhandled exceptions, which ' | ||||
|         'is useful for debugging and bug reports.') | ||||
|     flags.add_argument( | ||||
|         '-s', action='store_true', | ||||
|         help='Process lines one at a time until EOF, rather than waiting to ' | ||||
|         'process the entire file.  This only works for stdin, only for lexers ' | ||||
|         'with no line-spanning constructs, and is intended for streaming ' | ||||
|         'input such as you get from `tail -f`. ' | ||||
|         'Example usage: `tail -f sql.log | pygmentize -s -l sql`.') | ||||
|     flags.add_argument( | ||||
|         '-x', action='store_true', | ||||
|         help='Allow custom lexers and formatters to be loaded from a .py file ' | ||||
|         'relative to the current working directory. For example, ' | ||||
|         '`-l ./customlexer.py -x`. By default, this option expects a file ' | ||||
|         'with a class named CustomLexer or CustomFormatter; you can also ' | ||||
|         'specify your own class name with a colon (`-l ./lexer.py:MyLexer`). ' | ||||
|         'Users should be very careful not to use this option with untrusted ' | ||||
|         'files, because it will import and run them.') | ||||
|     flags.add_argument('--json', help='Output as JSON. This can ' | ||||
|         'only be used in conjunction with -L.', | ||||
|         default=False, | ||||
|         action='store_true') | ||||
|  | ||||
|     special_modes_group = parser.add_argument_group( | ||||
|         'Special modes - do not do any highlighting') | ||||
|     special_modes = special_modes_group.add_mutually_exclusive_group() | ||||
|     special_modes.add_argument( | ||||
|         '-S', metavar='STYLE -f formatter', | ||||
|         help='Print style definitions for STYLE for a formatter ' | ||||
|         'given with -f. The argument given by -a is formatter ' | ||||
|         'dependent.') | ||||
|     special_modes.add_argument( | ||||
|         '-L', nargs='*', metavar='WHAT', | ||||
|         help='List lexers, formatters, styles or filters -- ' | ||||
|         'give additional arguments for the thing(s) you want to list ' | ||||
|         '(e.g. "styles"), or omit them to list everything.') | ||||
|     special_modes.add_argument( | ||||
|         '-N', metavar='FILENAME', | ||||
|         help='Guess and print out a lexer name based solely on the given ' | ||||
|         'filename. Does not take input or highlight anything. If no specific ' | ||||
|         'lexer can be determined, "text" is printed.') | ||||
|     special_modes.add_argument( | ||||
|         '-C', action='store_true', | ||||
|         help='Like -N, but print out a lexer name based solely on ' | ||||
|         'a given content from standard input.') | ||||
|     special_modes.add_argument( | ||||
|         '-H', action='store', nargs=2, metavar=('TYPE', 'NAME'), | ||||
|         help='Print detailed help for the object <name> of type <type>, ' | ||||
|         'where <type> is one of "lexer", "formatter" or "filter".') | ||||
|     special_modes.add_argument( | ||||
|         '-V', action='store_true', | ||||
|         help='Print the package version.') | ||||
|     special_modes.add_argument( | ||||
|         '-h', '--help', action='store_true', | ||||
|         help='Print this help.') | ||||
|     special_modes_group.add_argument( | ||||
|         '-a', metavar='ARG', | ||||
|         help='Formatter-specific additional argument for the -S (print ' | ||||
|         'style sheet) mode.') | ||||
|  | ||||
|     argns = parser.parse_args(args[1:]) | ||||
|  | ||||
|     try: | ||||
|         return main_inner(parser, argns) | ||||
|     except BrokenPipeError: | ||||
|         # someone closed our stdout, e.g. by quitting a pager. | ||||
|         return 0 | ||||
|     except Exception: | ||||
|         if argns.v: | ||||
|             print(file=sys.stderr) | ||||
|             print('*' * 65, file=sys.stderr) | ||||
|             print('An unhandled exception occurred while highlighting.', | ||||
|                   file=sys.stderr) | ||||
|             print('Please report the whole traceback to the issue tracker at', | ||||
|                   file=sys.stderr) | ||||
|             print('<https://github.com/pygments/pygments/issues>.', | ||||
|                   file=sys.stderr) | ||||
|             print('*' * 65, file=sys.stderr) | ||||
|             print(file=sys.stderr) | ||||
|             raise | ||||
|         import traceback | ||||
|         info = traceback.format_exception(*sys.exc_info()) | ||||
|         msg = info[-1].strip() | ||||
|         if len(info) >= 3: | ||||
|             # extract relevant file and position info | ||||
|             msg += '\n   (f%s)' % info[-2].split('\n')[0].strip()[1:] | ||||
|         print(file=sys.stderr) | ||||
|         print('*** Error while highlighting:', file=sys.stderr) | ||||
|         print(msg, file=sys.stderr) | ||||
|         print('*** If this is a bug you want to report, please rerun with -v.', | ||||
|               file=sys.stderr) | ||||
|         return 1 | ||||
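For reference, the two private helpers at the top of this file parse the comma-separated ``key=value`` pairs given with ``-O`` and the ``name:options`` filter specs given with ``-F``; an illustrative sketch of what they return (internal helpers, shown here only to document the accepted syntax)::

    from pip._vendor.pygments.cmdline import _parse_options, _parse_filters

    assert _parse_options(['style=monokai,linenos']) == \
        {'style': 'monokai', 'linenos': True}
    assert _parse_filters(['keywordcase:case=upper']) == \
        [('keywordcase', {'case': 'upper'})]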
							
								
								
									
lib/python3.11/site-packages/pip/_vendor/pygments/console.py (new file, 70 lines)
| @@ -0,0 +1,70 @@ | ||||
| """ | ||||
|     pygments.console | ||||
|     ~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Format colored console output. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| esc = "\x1b[" | ||||
|  | ||||
| codes = {} | ||||
| codes[""] = "" | ||||
| codes["reset"] = esc + "39;49;00m" | ||||
|  | ||||
| codes["bold"] = esc + "01m" | ||||
| codes["faint"] = esc + "02m" | ||||
| codes["standout"] = esc + "03m" | ||||
| codes["underline"] = esc + "04m" | ||||
| codes["blink"] = esc + "05m" | ||||
| codes["overline"] = esc + "06m" | ||||
|  | ||||
| dark_colors = ["black", "red", "green", "yellow", "blue", | ||||
|                "magenta", "cyan", "gray"] | ||||
| light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue", | ||||
|                 "brightmagenta", "brightcyan", "white"] | ||||
|  | ||||
| x = 30 | ||||
| for d, l in zip(dark_colors, light_colors): | ||||
|     codes[d] = esc + "%im" % x | ||||
|     codes[l] = esc + "%im" % (60 + x) | ||||
|     x += 1 | ||||
|  | ||||
| del d, l, x | ||||
|  | ||||
| codes["white"] = codes["bold"] | ||||
|  | ||||
|  | ||||
| def reset_color(): | ||||
|     return codes["reset"] | ||||
|  | ||||
|  | ||||
| def colorize(color_key, text): | ||||
|     return codes[color_key] + text + codes["reset"] | ||||
|  | ||||
|  | ||||
| def ansiformat(attr, text): | ||||
|     """ | ||||
|     Format ``text`` with a color and/or some attributes:: | ||||
|  | ||||
|         color       normal color | ||||
|         *color*     bold color | ||||
|         _color_     underlined color | ||||
|         +color+     blinking color | ||||
|     """ | ||||
|     result = [] | ||||
|     if attr[:1] == attr[-1:] == '+': | ||||
|         result.append(codes['blink']) | ||||
|         attr = attr[1:-1] | ||||
|     if attr[:1] == attr[-1:] == '*': | ||||
|         result.append(codes['bold']) | ||||
|         attr = attr[1:-1] | ||||
|     if attr[:1] == attr[-1:] == '_': | ||||
|         result.append(codes['underline']) | ||||
|         attr = attr[1:-1] | ||||
|     result.append(codes[attr]) | ||||
|     result.append(text) | ||||
|     result.append(codes['reset']) | ||||
|     return ''.join(result) | ||||
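``ansiformat`` wraps text in the escape codes defined above using the small markup language from its docstring; for example::

    from pip._vendor.pygments.console import ansiformat, colorize

    print(ansiformat('*red*', 'bold red text'))      # bold + red, then reset
    print(ansiformat('_blue_', 'underlined blue'))
    print(colorize('green', 'plain green'))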
							
								
								
									
lib/python3.11/site-packages/pip/_vendor/pygments/filter.py (new file, 71 lines)
| @@ -0,0 +1,71 @@ | ||||
| """ | ||||
|     pygments.filter | ||||
|     ~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Module that implements the default filter. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
|  | ||||
| def apply_filters(stream, filters, lexer=None): | ||||
|     """ | ||||
|     Use this method to apply an iterable of filters to | ||||
|     a stream. If lexer is given it's forwarded to the | ||||
|     filter, otherwise the filter receives `None`. | ||||
|     """ | ||||
|     def _apply(filter_, stream): | ||||
|         yield from filter_.filter(lexer, stream) | ||||
|     for filter_ in filters: | ||||
|         stream = _apply(filter_, stream) | ||||
|     return stream | ||||
|  | ||||
|  | ||||
| def simplefilter(f): | ||||
|     """ | ||||
|     Decorator that converts a function into a filter:: | ||||
|  | ||||
|         @simplefilter | ||||
|         def lowercase(self, lexer, stream, options): | ||||
|             for ttype, value in stream: | ||||
|                 yield ttype, value.lower() | ||||
|     """ | ||||
|     return type(f.__name__, (FunctionFilter,), { | ||||
|         '__module__': getattr(f, '__module__'), | ||||
|         '__doc__': f.__doc__, | ||||
|         'function': f, | ||||
|     }) | ||||
|  | ||||
|  | ||||
| class Filter: | ||||
|     """ | ||||
|     Default filter. Subclass this class or use the `simplefilter` | ||||
|     decorator to create own filters. | ||||
|     """ | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         self.options = options | ||||
|  | ||||
|     def filter(self, lexer, stream): | ||||
|         raise NotImplementedError() | ||||
|  | ||||
|  | ||||
| class FunctionFilter(Filter): | ||||
|     """ | ||||
|     Abstract class used by `simplefilter` to create simple | ||||
|     function filters on the fly. The `simplefilter` decorator | ||||
|     automatically creates subclasses of this class for | ||||
|     functions passed to it. | ||||
|     """ | ||||
|     function = None | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         if not hasattr(self, 'function'): | ||||
|             raise TypeError('%r used without bound function' % | ||||
|                             self.__class__.__name__) | ||||
|         Filter.__init__(self, **options) | ||||
|  | ||||
|     def filter(self, lexer, stream): | ||||
|         # pylint: disable=not-callable | ||||
|         yield from self.function(lexer, stream, self.options) | ||||
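Putting the pieces together: ``simplefilter`` turns a generator function into a ``FunctionFilter`` subclass, and instances of it can be applied with ``apply_filters``. A short sketch::

    from pip._vendor.pygments.filter import simplefilter, apply_filters
    from pip._vendor.pygments.lexers import PythonLexer

    @simplefilter
    def uppercase(self, lexer, stream, options):
        for ttype, value in stream:
            yield ttype, value.upper()

    lexer = PythonLexer()
    tokens = apply_filters(lexer.get_tokens('print("hi")'), [uppercase()], lexer)
    print(''.join(value for _, value in tokens))  # PRINT("HI") plus a newline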
lib/python3.11/site-packages/pip/_vendor/pygments/filters/__init__.py (new file, 940 lines)
| @@ -0,0 +1,940 @@ | ||||
| """ | ||||
|     pygments.filters | ||||
|     ~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Module containing filter lookup functions and default | ||||
|     filters. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| import re | ||||
|  | ||||
| from pip._vendor.pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \ | ||||
|     string_to_tokentype | ||||
| from pip._vendor.pygments.filter import Filter | ||||
| from pip._vendor.pygments.util import get_list_opt, get_int_opt, get_bool_opt, \ | ||||
|     get_choice_opt, ClassNotFound, OptionError | ||||
| from pip._vendor.pygments.plugin import find_plugin_filters | ||||
|  | ||||
|  | ||||
| def find_filter_class(filtername): | ||||
|     """Lookup a filter by name. Return None if not found.""" | ||||
|     if filtername in FILTERS: | ||||
|         return FILTERS[filtername] | ||||
|     for name, cls in find_plugin_filters(): | ||||
|         if name == filtername: | ||||
|             return cls | ||||
|     return None | ||||
|  | ||||
|  | ||||
| def get_filter_by_name(filtername, **options): | ||||
|     """Return an instantiated filter. | ||||
|  | ||||
|     Options are passed to the filter initializer if wanted. | ||||
|     Raise a ClassNotFound if not found. | ||||
|     """ | ||||
|     cls = find_filter_class(filtername) | ||||
|     if cls: | ||||
|         return cls(**options) | ||||
|     else: | ||||
|         raise ClassNotFound('filter %r not found' % filtername) | ||||
|  | ||||
|  | ||||
| def get_all_filters(): | ||||
|     """Return a generator of all filter names.""" | ||||
|     yield from FILTERS | ||||
|     for name, _ in find_plugin_filters(): | ||||
|         yield name | ||||
|  | ||||
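A filter can be looked up and instantiated by its registered name via the functions above; for instance, ``'codetagify'`` is the registered name of the ``CodeTagFilter`` defined below::

    from pip._vendor.pygments.filters import get_all_filters, get_filter_by_name

    print(sorted(get_all_filters()))   # includes 'codetagify', 'keywordcase', ...
    f = get_filter_by_name('codetagify', codetags=['TODO', 'HACK'])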
|  | ||||
| def _replace_special(ttype, value, regex, specialttype, | ||||
|                      replacefunc=lambda x: x): | ||||
|     last = 0 | ||||
|     for match in regex.finditer(value): | ||||
|         start, end = match.start(), match.end() | ||||
|         if start != last: | ||||
|             yield ttype, value[last:start] | ||||
|         yield specialttype, replacefunc(value[start:end]) | ||||
|         last = end | ||||
|     if last != len(value): | ||||
|         yield ttype, value[last:] | ||||
|  | ||||
|  | ||||
| class CodeTagFilter(Filter): | ||||
|     """Highlight special code tags in comments and docstrings. | ||||
|  | ||||
|     Options accepted: | ||||
|  | ||||
|     `codetags` : list of strings | ||||
|        A list of strings that are flagged as code tags.  The default is to | ||||
|        highlight ``XXX``, ``TODO``, ``FIXME``, ``BUG`` and ``NOTE``. | ||||
|  | ||||
|     .. versionchanged:: 2.13 | ||||
|        Now recognizes ``FIXME`` by default. | ||||
|     """ | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Filter.__init__(self, **options) | ||||
|         tags = get_list_opt(options, 'codetags', | ||||
|                             ['XXX', 'TODO', 'FIXME', 'BUG', 'NOTE']) | ||||
|         self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([ | ||||
|             re.escape(tag) for tag in tags if tag | ||||
|         ])) | ||||
|  | ||||
|     def filter(self, lexer, stream): | ||||
|         regex = self.tag_re | ||||
|         for ttype, value in stream: | ||||
|             if ttype in String.Doc or \ | ||||
|                ttype in Comment and \ | ||||
|                ttype not in Comment.Preproc: | ||||
|                 yield from _replace_special(ttype, value, regex, Comment.Special) | ||||
|             else: | ||||
|                 yield ttype, value | ||||
|  | ||||
|  | ||||
| class SymbolFilter(Filter): | ||||
|     """Convert mathematical symbols such as \\<longrightarrow> in Isabelle | ||||
|     or \\longrightarrow in LaTeX into Unicode characters. | ||||
|  | ||||
|     This is mostly useful for HTML or console output when you want to | ||||
|     approximate the source rendering you'd see in an IDE. | ||||
|  | ||||
|     Options accepted: | ||||
|  | ||||
|     `lang` : string | ||||
|        The symbol language. Must be one of ``'isabelle'`` or | ||||
|        ``'latex'``.  The default is ``'isabelle'``. | ||||
|     """ | ||||
|  | ||||
|     latex_symbols = { | ||||
|         '\\alpha'                : '\U000003b1', | ||||
|         '\\beta'                 : '\U000003b2', | ||||
|         '\\gamma'                : '\U000003b3', | ||||
|         '\\delta'                : '\U000003b4', | ||||
|         '\\varepsilon'           : '\U000003b5', | ||||
|         '\\zeta'                 : '\U000003b6', | ||||
|         '\\eta'                  : '\U000003b7', | ||||
|         '\\vartheta'             : '\U000003b8', | ||||
|         '\\iota'                 : '\U000003b9', | ||||
|         '\\kappa'                : '\U000003ba', | ||||
|         '\\lambda'               : '\U000003bb', | ||||
|         '\\mu'                   : '\U000003bc', | ||||
|         '\\nu'                   : '\U000003bd', | ||||
|         '\\xi'                   : '\U000003be', | ||||
|         '\\pi'                   : '\U000003c0', | ||||
|         '\\varrho'               : '\U000003c1', | ||||
|         '\\sigma'                : '\U000003c3', | ||||
|         '\\tau'                  : '\U000003c4', | ||||
|         '\\upsilon'              : '\U000003c5', | ||||
|         '\\varphi'               : '\U000003c6', | ||||
|         '\\chi'                  : '\U000003c7', | ||||
|         '\\psi'                  : '\U000003c8', | ||||
|         '\\omega'                : '\U000003c9', | ||||
|         '\\Gamma'                : '\U00000393', | ||||
|         '\\Delta'                : '\U00000394', | ||||
|         '\\Theta'                : '\U00000398', | ||||
|         '\\Lambda'               : '\U0000039b', | ||||
|         '\\Xi'                   : '\U0000039e', | ||||
|         '\\Pi'                   : '\U000003a0', | ||||
|         '\\Sigma'                : '\U000003a3', | ||||
|         '\\Upsilon'              : '\U000003a5', | ||||
|         '\\Phi'                  : '\U000003a6', | ||||
|         '\\Psi'                  : '\U000003a8', | ||||
|         '\\Omega'                : '\U000003a9', | ||||
|         '\\leftarrow'            : '\U00002190', | ||||
|         '\\longleftarrow'        : '\U000027f5', | ||||
|         '\\rightarrow'           : '\U00002192', | ||||
|         '\\longrightarrow'       : '\U000027f6', | ||||
|         '\\Leftarrow'            : '\U000021d0', | ||||
|         '\\Longleftarrow'        : '\U000027f8', | ||||
|         '\\Rightarrow'           : '\U000021d2', | ||||
|         '\\Longrightarrow'       : '\U000027f9', | ||||
|         '\\leftrightarrow'       : '\U00002194', | ||||
|         '\\longleftrightarrow'   : '\U000027f7', | ||||
|         '\\Leftrightarrow'       : '\U000021d4', | ||||
|         '\\Longleftrightarrow'   : '\U000027fa', | ||||
|         '\\mapsto'               : '\U000021a6', | ||||
|         '\\longmapsto'           : '\U000027fc', | ||||
|         '\\relbar'               : '\U00002500', | ||||
|         '\\Relbar'               : '\U00002550', | ||||
|         '\\hookleftarrow'        : '\U000021a9', | ||||
|         '\\hookrightarrow'       : '\U000021aa', | ||||
|         '\\leftharpoondown'      : '\U000021bd', | ||||
|         '\\rightharpoondown'     : '\U000021c1', | ||||
|         '\\leftharpoonup'        : '\U000021bc', | ||||
|         '\\rightharpoonup'       : '\U000021c0', | ||||
|         '\\rightleftharpoons'    : '\U000021cc', | ||||
|         '\\leadsto'              : '\U0000219d', | ||||
|         '\\downharpoonleft'      : '\U000021c3', | ||||
|         '\\downharpoonright'     : '\U000021c2', | ||||
|         '\\upharpoonleft'        : '\U000021bf', | ||||
|         '\\upharpoonright'       : '\U000021be', | ||||
|         '\\restriction'          : '\U000021be', | ||||
|         '\\uparrow'              : '\U00002191', | ||||
|         '\\Uparrow'              : '\U000021d1', | ||||
|         '\\downarrow'            : '\U00002193', | ||||
|         '\\Downarrow'            : '\U000021d3', | ||||
|         '\\updownarrow'          : '\U00002195', | ||||
|         '\\Updownarrow'          : '\U000021d5', | ||||
|         '\\langle'               : '\U000027e8', | ||||
|         '\\rangle'               : '\U000027e9', | ||||
|         '\\lceil'                : '\U00002308', | ||||
|         '\\rceil'                : '\U00002309', | ||||
|         '\\lfloor'               : '\U0000230a', | ||||
|         '\\rfloor'               : '\U0000230b', | ||||
|         '\\flqq'                 : '\U000000ab', | ||||
|         '\\frqq'                 : '\U000000bb', | ||||
|         '\\bot'                  : '\U000022a5', | ||||
|         '\\top'                  : '\U000022a4', | ||||
|         '\\wedge'                : '\U00002227', | ||||
|         '\\bigwedge'             : '\U000022c0', | ||||
|         '\\vee'                  : '\U00002228', | ||||
|         '\\bigvee'               : '\U000022c1', | ||||
|         '\\forall'               : '\U00002200', | ||||
|         '\\exists'               : '\U00002203', | ||||
|         '\\nexists'              : '\U00002204', | ||||
|         '\\neg'                  : '\U000000ac', | ||||
|         '\\Box'                  : '\U000025a1', | ||||
|         '\\Diamond'              : '\U000025c7', | ||||
|         '\\vdash'                : '\U000022a2', | ||||
|         '\\models'               : '\U000022a8', | ||||
|         '\\dashv'                : '\U000022a3', | ||||
|         '\\surd'                 : '\U0000221a', | ||||
|         '\\le'                   : '\U00002264', | ||||
|         '\\ge'                   : '\U00002265', | ||||
|         '\\ll'                   : '\U0000226a', | ||||
|         '\\gg'                   : '\U0000226b', | ||||
|         '\\lesssim'              : '\U00002272', | ||||
|         '\\gtrsim'               : '\U00002273', | ||||
|         '\\lessapprox'           : '\U00002a85', | ||||
|         '\\gtrapprox'            : '\U00002a86', | ||||
|         '\\in'                   : '\U00002208', | ||||
|         '\\notin'                : '\U00002209', | ||||
|         '\\subset'               : '\U00002282', | ||||
|         '\\supset'               : '\U00002283', | ||||
|         '\\subseteq'             : '\U00002286', | ||||
|         '\\supseteq'             : '\U00002287', | ||||
|         '\\sqsubset'             : '\U0000228f', | ||||
|         '\\sqsupset'             : '\U00002290', | ||||
|         '\\sqsubseteq'           : '\U00002291', | ||||
|         '\\sqsupseteq'           : '\U00002292', | ||||
|         '\\cap'                  : '\U00002229', | ||||
|         '\\bigcap'               : '\U000022c2', | ||||
|         '\\cup'                  : '\U0000222a', | ||||
|         '\\bigcup'               : '\U000022c3', | ||||
|         '\\sqcup'                : '\U00002294', | ||||
|         '\\bigsqcup'             : '\U00002a06', | ||||
|         '\\sqcap'                : '\U00002293', | ||||
|         '\\Bigsqcap'             : '\U00002a05', | ||||
|         '\\setminus'             : '\U00002216', | ||||
|         '\\propto'               : '\U0000221d', | ||||
|         '\\uplus'                : '\U0000228e', | ||||
|         '\\bigplus'              : '\U00002a04', | ||||
|         '\\sim'                  : '\U0000223c', | ||||
|         '\\doteq'                : '\U00002250', | ||||
|         '\\simeq'                : '\U00002243', | ||||
|         '\\approx'               : '\U00002248', | ||||
|         '\\asymp'                : '\U0000224d', | ||||
|         '\\cong'                 : '\U00002245', | ||||
|         '\\equiv'                : '\U00002261', | ||||
|         '\\Join'                 : '\U000022c8', | ||||
|         '\\bowtie'               : '\U00002a1d', | ||||
|         '\\prec'                 : '\U0000227a', | ||||
|         '\\succ'                 : '\U0000227b', | ||||
|         '\\preceq'               : '\U0000227c', | ||||
|         '\\succeq'               : '\U0000227d', | ||||
|         '\\parallel'             : '\U00002225', | ||||
|         '\\mid'                  : '\U000000a6', | ||||
|         '\\pm'                   : '\U000000b1', | ||||
|         '\\mp'                   : '\U00002213', | ||||
|         '\\times'                : '\U000000d7', | ||||
|         '\\div'                  : '\U000000f7', | ||||
|         '\\cdot'                 : '\U000022c5', | ||||
|         '\\star'                 : '\U000022c6', | ||||
|         '\\circ'                 : '\U00002218', | ||||
|         '\\dagger'               : '\U00002020', | ||||
|         '\\ddagger'              : '\U00002021', | ||||
|         '\\lhd'                  : '\U000022b2', | ||||
|         '\\rhd'                  : '\U000022b3', | ||||
|         '\\unlhd'                : '\U000022b4', | ||||
|         '\\unrhd'                : '\U000022b5', | ||||
|         '\\triangleleft'         : '\U000025c3', | ||||
|         '\\triangleright'        : '\U000025b9', | ||||
|         '\\triangle'             : '\U000025b3', | ||||
|         '\\triangleq'            : '\U0000225c', | ||||
|         '\\oplus'                : '\U00002295', | ||||
|         '\\bigoplus'             : '\U00002a01', | ||||
|         '\\otimes'               : '\U00002297', | ||||
|         '\\bigotimes'            : '\U00002a02', | ||||
|         '\\odot'                 : '\U00002299', | ||||
|         '\\bigodot'              : '\U00002a00', | ||||
|         '\\ominus'               : '\U00002296', | ||||
|         '\\oslash'               : '\U00002298', | ||||
|         '\\dots'                 : '\U00002026', | ||||
|         '\\cdots'                : '\U000022ef', | ||||
|         '\\sum'                  : '\U00002211', | ||||
|         '\\prod'                 : '\U0000220f', | ||||
|         '\\coprod'               : '\U00002210', | ||||
|         '\\infty'                : '\U0000221e', | ||||
|         '\\int'                  : '\U0000222b', | ||||
|         '\\oint'                 : '\U0000222e', | ||||
|         '\\clubsuit'             : '\U00002663', | ||||
|         '\\diamondsuit'          : '\U00002662', | ||||
|         '\\heartsuit'            : '\U00002661', | ||||
|         '\\spadesuit'            : '\U00002660', | ||||
|         '\\aleph'                : '\U00002135', | ||||
|         '\\emptyset'             : '\U00002205', | ||||
|         '\\nabla'                : '\U00002207', | ||||
|         '\\partial'              : '\U00002202', | ||||
|         '\\flat'                 : '\U0000266d', | ||||
|         '\\natural'              : '\U0000266e', | ||||
|         '\\sharp'                : '\U0000266f', | ||||
|         '\\angle'                : '\U00002220', | ||||
|         '\\copyright'            : '\U000000a9', | ||||
|         '\\textregistered'       : '\U000000ae', | ||||
|         '\\textonequarter'       : '\U000000bc', | ||||
|         '\\textonehalf'          : '\U000000bd', | ||||
|         '\\textthreequarters'    : '\U000000be', | ||||
|         '\\textordfeminine'      : '\U000000aa', | ||||
|         '\\textordmasculine'     : '\U000000ba', | ||||
|         '\\euro'                 : '\U000020ac', | ||||
|         '\\pounds'               : '\U000000a3', | ||||
|         '\\yen'                  : '\U000000a5', | ||||
|         '\\textcent'             : '\U000000a2', | ||||
|         '\\textcurrency'         : '\U000000a4', | ||||
|         '\\textdegree'           : '\U000000b0', | ||||
|     } | ||||
|  | ||||
|     isabelle_symbols = { | ||||
|         '\\<zero>'                 : '\U0001d7ec', | ||||
|         '\\<one>'                  : '\U0001d7ed', | ||||
|         '\\<two>'                  : '\U0001d7ee', | ||||
|         '\\<three>'                : '\U0001d7ef', | ||||
|         '\\<four>'                 : '\U0001d7f0', | ||||
|         '\\<five>'                 : '\U0001d7f1', | ||||
|         '\\<six>'                  : '\U0001d7f2', | ||||
|         '\\<seven>'                : '\U0001d7f3', | ||||
|         '\\<eight>'                : '\U0001d7f4', | ||||
|         '\\<nine>'                 : '\U0001d7f5', | ||||
|         '\\<A>'                    : '\U0001d49c', | ||||
|         '\\<B>'                    : '\U0000212c', | ||||
|         '\\<C>'                    : '\U0001d49e', | ||||
|         '\\<D>'                    : '\U0001d49f', | ||||
|         '\\<E>'                    : '\U00002130', | ||||
|         '\\<F>'                    : '\U00002131', | ||||
|         '\\<G>'                    : '\U0001d4a2', | ||||
|         '\\<H>'                    : '\U0000210b', | ||||
|         '\\<I>'                    : '\U00002110', | ||||
|         '\\<J>'                    : '\U0001d4a5', | ||||
|         '\\<K>'                    : '\U0001d4a6', | ||||
|         '\\<L>'                    : '\U00002112', | ||||
|         '\\<M>'                    : '\U00002133', | ||||
|         '\\<N>'                    : '\U0001d4a9', | ||||
|         '\\<O>'                    : '\U0001d4aa', | ||||
|         '\\<P>'                    : '\U0001d4ab', | ||||
|         '\\<Q>'                    : '\U0001d4ac', | ||||
|         '\\<R>'                    : '\U0000211b', | ||||
|         '\\<S>'                    : '\U0001d4ae', | ||||
|         '\\<T>'                    : '\U0001d4af', | ||||
|         '\\<U>'                    : '\U0001d4b0', | ||||
|         '\\<V>'                    : '\U0001d4b1', | ||||
|         '\\<W>'                    : '\U0001d4b2', | ||||
|         '\\<X>'                    : '\U0001d4b3', | ||||
|         '\\<Y>'                    : '\U0001d4b4', | ||||
|         '\\<Z>'                    : '\U0001d4b5', | ||||
|         '\\<a>'                    : '\U0001d5ba', | ||||
|         '\\<b>'                    : '\U0001d5bb', | ||||
|         '\\<c>'                    : '\U0001d5bc', | ||||
|         '\\<d>'                    : '\U0001d5bd', | ||||
|         '\\<e>'                    : '\U0001d5be', | ||||
|         '\\<f>'                    : '\U0001d5bf', | ||||
|         '\\<g>'                    : '\U0001d5c0', | ||||
|         '\\<h>'                    : '\U0001d5c1', | ||||
|         '\\<i>'                    : '\U0001d5c2', | ||||
|         '\\<j>'                    : '\U0001d5c3', | ||||
|         '\\<k>'                    : '\U0001d5c4', | ||||
|         '\\<l>'                    : '\U0001d5c5', | ||||
|         '\\<m>'                    : '\U0001d5c6', | ||||
|         '\\<n>'                    : '\U0001d5c7', | ||||
|         '\\<o>'                    : '\U0001d5c8', | ||||
|         '\\<p>'                    : '\U0001d5c9', | ||||
|         '\\<q>'                    : '\U0001d5ca', | ||||
|         '\\<r>'                    : '\U0001d5cb', | ||||
|         '\\<s>'                    : '\U0001d5cc', | ||||
|         '\\<t>'                    : '\U0001d5cd', | ||||
|         '\\<u>'                    : '\U0001d5ce', | ||||
|         '\\<v>'                    : '\U0001d5cf', | ||||
|         '\\<w>'                    : '\U0001d5d0', | ||||
|         '\\<x>'                    : '\U0001d5d1', | ||||
|         '\\<y>'                    : '\U0001d5d2', | ||||
|         '\\<z>'                    : '\U0001d5d3', | ||||
|         '\\<AA>'                   : '\U0001d504', | ||||
|         '\\<BB>'                   : '\U0001d505', | ||||
|         '\\<CC>'                   : '\U0000212d', | ||||
|         '\\<DD>'                   : '\U0001d507', | ||||
|         '\\<EE>'                   : '\U0001d508', | ||||
|         '\\<FF>'                   : '\U0001d509', | ||||
|         '\\<GG>'                   : '\U0001d50a', | ||||
|         '\\<HH>'                   : '\U0000210c', | ||||
|         '\\<II>'                   : '\U00002111', | ||||
|         '\\<JJ>'                   : '\U0001d50d', | ||||
|         '\\<KK>'                   : '\U0001d50e', | ||||
|         '\\<LL>'                   : '\U0001d50f', | ||||
|         '\\<MM>'                   : '\U0001d510', | ||||
|         '\\<NN>'                   : '\U0001d511', | ||||
|         '\\<OO>'                   : '\U0001d512', | ||||
|         '\\<PP>'                   : '\U0001d513', | ||||
|         '\\<QQ>'                   : '\U0001d514', | ||||
|         '\\<RR>'                   : '\U0000211c', | ||||
|         '\\<SS>'                   : '\U0001d516', | ||||
|         '\\<TT>'                   : '\U0001d517', | ||||
|         '\\<UU>'                   : '\U0001d518', | ||||
|         '\\<VV>'                   : '\U0001d519', | ||||
|         '\\<WW>'                   : '\U0001d51a', | ||||
|         '\\<XX>'                   : '\U0001d51b', | ||||
|         '\\<YY>'                   : '\U0001d51c', | ||||
|         '\\<ZZ>'                   : '\U00002128', | ||||
|         '\\<aa>'                   : '\U0001d51e', | ||||
|         '\\<bb>'                   : '\U0001d51f', | ||||
|         '\\<cc>'                   : '\U0001d520', | ||||
|         '\\<dd>'                   : '\U0001d521', | ||||
|         '\\<ee>'                   : '\U0001d522', | ||||
|         '\\<ff>'                   : '\U0001d523', | ||||
|         '\\<gg>'                   : '\U0001d524', | ||||
|         '\\<hh>'                   : '\U0001d525', | ||||
|         '\\<ii>'                   : '\U0001d526', | ||||
|         '\\<jj>'                   : '\U0001d527', | ||||
|         '\\<kk>'                   : '\U0001d528', | ||||
|         '\\<ll>'                   : '\U0001d529', | ||||
|         '\\<mm>'                   : '\U0001d52a', | ||||
|         '\\<nn>'                   : '\U0001d52b', | ||||
|         '\\<oo>'                   : '\U0001d52c', | ||||
|         '\\<pp>'                   : '\U0001d52d', | ||||
|         '\\<qq>'                   : '\U0001d52e', | ||||
|         '\\<rr>'                   : '\U0001d52f', | ||||
|         '\\<ss>'                   : '\U0001d530', | ||||
|         '\\<tt>'                   : '\U0001d531', | ||||
|         '\\<uu>'                   : '\U0001d532', | ||||
|         '\\<vv>'                   : '\U0001d533', | ||||
|         '\\<ww>'                   : '\U0001d534', | ||||
|         '\\<xx>'                   : '\U0001d535', | ||||
|         '\\<yy>'                   : '\U0001d536', | ||||
|         '\\<zz>'                   : '\U0001d537', | ||||
|         '\\<alpha>'                : '\U000003b1', | ||||
|         '\\<beta>'                 : '\U000003b2', | ||||
|         '\\<gamma>'                : '\U000003b3', | ||||
|         '\\<delta>'                : '\U000003b4', | ||||
|         '\\<epsilon>'              : '\U000003b5', | ||||
|         '\\<zeta>'                 : '\U000003b6', | ||||
|         '\\<eta>'                  : '\U000003b7', | ||||
|         '\\<theta>'                : '\U000003b8', | ||||
|         '\\<iota>'                 : '\U000003b9', | ||||
|         '\\<kappa>'                : '\U000003ba', | ||||
|         '\\<lambda>'               : '\U000003bb', | ||||
|         '\\<mu>'                   : '\U000003bc', | ||||
|         '\\<nu>'                   : '\U000003bd', | ||||
|         '\\<xi>'                   : '\U000003be', | ||||
|         '\\<pi>'                   : '\U000003c0', | ||||
|         '\\<rho>'                  : '\U000003c1', | ||||
|         '\\<sigma>'                : '\U000003c3', | ||||
|         '\\<tau>'                  : '\U000003c4', | ||||
|         '\\<upsilon>'              : '\U000003c5', | ||||
|         '\\<phi>'                  : '\U000003c6', | ||||
|         '\\<chi>'                  : '\U000003c7', | ||||
|         '\\<psi>'                  : '\U000003c8', | ||||
|         '\\<omega>'                : '\U000003c9', | ||||
|         '\\<Gamma>'                : '\U00000393', | ||||
|         '\\<Delta>'                : '\U00000394', | ||||
|         '\\<Theta>'                : '\U00000398', | ||||
|         '\\<Lambda>'               : '\U0000039b', | ||||
|         '\\<Xi>'                   : '\U0000039e', | ||||
|         '\\<Pi>'                   : '\U000003a0', | ||||
|         '\\<Sigma>'                : '\U000003a3', | ||||
|         '\\<Upsilon>'              : '\U000003a5', | ||||
|         '\\<Phi>'                  : '\U000003a6', | ||||
|         '\\<Psi>'                  : '\U000003a8', | ||||
|         '\\<Omega>'                : '\U000003a9', | ||||
|         '\\<bool>'                 : '\U0001d539', | ||||
|         '\\<complex>'              : '\U00002102', | ||||
|         '\\<nat>'                  : '\U00002115', | ||||
|         '\\<rat>'                  : '\U0000211a', | ||||
|         '\\<real>'                 : '\U0000211d', | ||||
|         '\\<int>'                  : '\U00002124', | ||||
|         '\\<leftarrow>'            : '\U00002190', | ||||
|         '\\<longleftarrow>'        : '\U000027f5', | ||||
|         '\\<rightarrow>'           : '\U00002192', | ||||
|         '\\<longrightarrow>'       : '\U000027f6', | ||||
|         '\\<Leftarrow>'            : '\U000021d0', | ||||
|         '\\<Longleftarrow>'        : '\U000027f8', | ||||
|         '\\<Rightarrow>'           : '\U000021d2', | ||||
|         '\\<Longrightarrow>'       : '\U000027f9', | ||||
|         '\\<leftrightarrow>'       : '\U00002194', | ||||
|         '\\<longleftrightarrow>'   : '\U000027f7', | ||||
|         '\\<Leftrightarrow>'       : '\U000021d4', | ||||
|         '\\<Longleftrightarrow>'   : '\U000027fa', | ||||
|         '\\<mapsto>'               : '\U000021a6', | ||||
|         '\\<longmapsto>'           : '\U000027fc', | ||||
|         '\\<midarrow>'             : '\U00002500', | ||||
|         '\\<Midarrow>'             : '\U00002550', | ||||
|         '\\<hookleftarrow>'        : '\U000021a9', | ||||
|         '\\<hookrightarrow>'       : '\U000021aa', | ||||
|         '\\<leftharpoondown>'      : '\U000021bd', | ||||
|         '\\<rightharpoondown>'     : '\U000021c1', | ||||
|         '\\<leftharpoonup>'        : '\U000021bc', | ||||
|         '\\<rightharpoonup>'       : '\U000021c0', | ||||
|         '\\<rightleftharpoons>'    : '\U000021cc', | ||||
|         '\\<leadsto>'              : '\U0000219d', | ||||
|         '\\<downharpoonleft>'      : '\U000021c3', | ||||
|         '\\<downharpoonright>'     : '\U000021c2', | ||||
|         '\\<upharpoonleft>'        : '\U000021bf', | ||||
|         '\\<upharpoonright>'       : '\U000021be', | ||||
|         '\\<restriction>'          : '\U000021be', | ||||
|         '\\<Colon>'                : '\U00002237', | ||||
|         '\\<up>'                   : '\U00002191', | ||||
|         '\\<Up>'                   : '\U000021d1', | ||||
|         '\\<down>'                 : '\U00002193', | ||||
|         '\\<Down>'                 : '\U000021d3', | ||||
|         '\\<updown>'               : '\U00002195', | ||||
|         '\\<Updown>'               : '\U000021d5', | ||||
|         '\\<langle>'               : '\U000027e8', | ||||
|         '\\<rangle>'               : '\U000027e9', | ||||
|         '\\<lceil>'                : '\U00002308', | ||||
|         '\\<rceil>'                : '\U00002309', | ||||
|         '\\<lfloor>'               : '\U0000230a', | ||||
|         '\\<rfloor>'               : '\U0000230b', | ||||
|         '\\<lparr>'                : '\U00002987', | ||||
|         '\\<rparr>'                : '\U00002988', | ||||
|         '\\<lbrakk>'               : '\U000027e6', | ||||
|         '\\<rbrakk>'               : '\U000027e7', | ||||
|         '\\<lbrace>'               : '\U00002983', | ||||
|         '\\<rbrace>'               : '\U00002984', | ||||
|         '\\<guillemotleft>'        : '\U000000ab', | ||||
|         '\\<guillemotright>'       : '\U000000bb', | ||||
|         '\\<bottom>'               : '\U000022a5', | ||||
|         '\\<top>'                  : '\U000022a4', | ||||
|         '\\<and>'                  : '\U00002227', | ||||
|         '\\<And>'                  : '\U000022c0', | ||||
|         '\\<or>'                   : '\U00002228', | ||||
|         '\\<Or>'                   : '\U000022c1', | ||||
|         '\\<forall>'               : '\U00002200', | ||||
|         '\\<exists>'               : '\U00002203', | ||||
|         '\\<nexists>'              : '\U00002204', | ||||
|         '\\<not>'                  : '\U000000ac', | ||||
|         '\\<box>'                  : '\U000025a1', | ||||
|         '\\<diamond>'              : '\U000025c7', | ||||
|         '\\<turnstile>'            : '\U000022a2', | ||||
|         '\\<Turnstile>'            : '\U000022a8', | ||||
|         '\\<tturnstile>'           : '\U000022a9', | ||||
|         '\\<TTurnstile>'           : '\U000022ab', | ||||
|         '\\<stileturn>'            : '\U000022a3', | ||||
|         '\\<surd>'                 : '\U0000221a', | ||||
|         '\\<le>'                   : '\U00002264', | ||||
|         '\\<ge>'                   : '\U00002265', | ||||
|         '\\<lless>'                : '\U0000226a', | ||||
|         '\\<ggreater>'             : '\U0000226b', | ||||
|         '\\<lesssim>'              : '\U00002272', | ||||
|         '\\<greatersim>'           : '\U00002273', | ||||
|         '\\<lessapprox>'           : '\U00002a85', | ||||
|         '\\<greaterapprox>'        : '\U00002a86', | ||||
|         '\\<in>'                   : '\U00002208', | ||||
|         '\\<notin>'                : '\U00002209', | ||||
|         '\\<subset>'               : '\U00002282', | ||||
|         '\\<supset>'               : '\U00002283', | ||||
|         '\\<subseteq>'             : '\U00002286', | ||||
|         '\\<supseteq>'             : '\U00002287', | ||||
|         '\\<sqsubset>'             : '\U0000228f', | ||||
|         '\\<sqsupset>'             : '\U00002290', | ||||
|         '\\<sqsubseteq>'           : '\U00002291', | ||||
|         '\\<sqsupseteq>'           : '\U00002292', | ||||
|         '\\<inter>'                : '\U00002229', | ||||
|         '\\<Inter>'                : '\U000022c2', | ||||
|         '\\<union>'                : '\U0000222a', | ||||
|         '\\<Union>'                : '\U000022c3', | ||||
|         '\\<squnion>'              : '\U00002294', | ||||
|         '\\<Squnion>'              : '\U00002a06', | ||||
|         '\\<sqinter>'              : '\U00002293', | ||||
|         '\\<Sqinter>'              : '\U00002a05', | ||||
|         '\\<setminus>'             : '\U00002216', | ||||
|         '\\<propto>'               : '\U0000221d', | ||||
|         '\\<uplus>'                : '\U0000228e', | ||||
|         '\\<Uplus>'                : '\U00002a04', | ||||
|         '\\<noteq>'                : '\U00002260', | ||||
|         '\\<sim>'                  : '\U0000223c', | ||||
|         '\\<doteq>'                : '\U00002250', | ||||
|         '\\<simeq>'                : '\U00002243', | ||||
|         '\\<approx>'               : '\U00002248', | ||||
|         '\\<asymp>'                : '\U0000224d', | ||||
|         '\\<cong>'                 : '\U00002245', | ||||
|         '\\<smile>'                : '\U00002323', | ||||
|         '\\<equiv>'                : '\U00002261', | ||||
|         '\\<frown>'                : '\U00002322', | ||||
|         '\\<Join>'                 : '\U000022c8', | ||||
|         '\\<bowtie>'               : '\U00002a1d', | ||||
|         '\\<prec>'                 : '\U0000227a', | ||||
|         '\\<succ>'                 : '\U0000227b', | ||||
|         '\\<preceq>'               : '\U0000227c', | ||||
|         '\\<succeq>'               : '\U0000227d', | ||||
|         '\\<parallel>'             : '\U00002225', | ||||
|         '\\<bar>'                  : '\U000000a6', | ||||
|         '\\<plusminus>'            : '\U000000b1', | ||||
|         '\\<minusplus>'            : '\U00002213', | ||||
|         '\\<times>'                : '\U000000d7', | ||||
|         '\\<div>'                  : '\U000000f7', | ||||
|         '\\<cdot>'                 : '\U000022c5', | ||||
|         '\\<star>'                 : '\U000022c6', | ||||
|         '\\<bullet>'               : '\U00002219', | ||||
|         '\\<circ>'                 : '\U00002218', | ||||
|         '\\<dagger>'               : '\U00002020', | ||||
|         '\\<ddagger>'              : '\U00002021', | ||||
|         '\\<lhd>'                  : '\U000022b2', | ||||
|         '\\<rhd>'                  : '\U000022b3', | ||||
|         '\\<unlhd>'                : '\U000022b4', | ||||
|         '\\<unrhd>'                : '\U000022b5', | ||||
|         '\\<triangleleft>'         : '\U000025c3', | ||||
|         '\\<triangleright>'        : '\U000025b9', | ||||
|         '\\<triangle>'             : '\U000025b3', | ||||
|         '\\<triangleq>'            : '\U0000225c', | ||||
|         '\\<oplus>'                : '\U00002295', | ||||
|         '\\<Oplus>'                : '\U00002a01', | ||||
|         '\\<otimes>'               : '\U00002297', | ||||
|         '\\<Otimes>'               : '\U00002a02', | ||||
|         '\\<odot>'                 : '\U00002299', | ||||
|         '\\<Odot>'                 : '\U00002a00', | ||||
|         '\\<ominus>'               : '\U00002296', | ||||
|         '\\<oslash>'               : '\U00002298', | ||||
|         '\\<dots>'                 : '\U00002026', | ||||
|         '\\<cdots>'                : '\U000022ef', | ||||
|         '\\<Sum>'                  : '\U00002211', | ||||
|         '\\<Prod>'                 : '\U0000220f', | ||||
|         '\\<Coprod>'               : '\U00002210', | ||||
|         '\\<infinity>'             : '\U0000221e', | ||||
|         '\\<integral>'             : '\U0000222b', | ||||
|         '\\<ointegral>'            : '\U0000222e', | ||||
|         '\\<clubsuit>'             : '\U00002663', | ||||
|         '\\<diamondsuit>'          : '\U00002662', | ||||
|         '\\<heartsuit>'            : '\U00002661', | ||||
|         '\\<spadesuit>'            : '\U00002660', | ||||
|         '\\<aleph>'                : '\U00002135', | ||||
|         '\\<emptyset>'             : '\U00002205', | ||||
|         '\\<nabla>'                : '\U00002207', | ||||
|         '\\<partial>'              : '\U00002202', | ||||
|         '\\<flat>'                 : '\U0000266d', | ||||
|         '\\<natural>'              : '\U0000266e', | ||||
|         '\\<sharp>'                : '\U0000266f', | ||||
|         '\\<angle>'                : '\U00002220', | ||||
|         '\\<copyright>'            : '\U000000a9', | ||||
|         '\\<registered>'           : '\U000000ae', | ||||
|         '\\<hyphen>'               : '\U000000ad', | ||||
|         '\\<inverse>'              : '\U000000af', | ||||
|         '\\<onequarter>'           : '\U000000bc', | ||||
|         '\\<onehalf>'              : '\U000000bd', | ||||
|         '\\<threequarters>'        : '\U000000be', | ||||
|         '\\<ordfeminine>'          : '\U000000aa', | ||||
|         '\\<ordmasculine>'         : '\U000000ba', | ||||
|         '\\<section>'              : '\U000000a7', | ||||
|         '\\<paragraph>'            : '\U000000b6', | ||||
|         '\\<exclamdown>'           : '\U000000a1', | ||||
|         '\\<questiondown>'         : '\U000000bf', | ||||
|         '\\<euro>'                 : '\U000020ac', | ||||
|         '\\<pounds>'               : '\U000000a3', | ||||
|         '\\<yen>'                  : '\U000000a5', | ||||
|         '\\<cent>'                 : '\U000000a2', | ||||
|         '\\<currency>'             : '\U000000a4', | ||||
|         '\\<degree>'               : '\U000000b0', | ||||
|         '\\<amalg>'                : '\U00002a3f', | ||||
|         '\\<mho>'                  : '\U00002127', | ||||
|         '\\<lozenge>'              : '\U000025ca', | ||||
|         '\\<wp>'                   : '\U00002118', | ||||
|         '\\<wrong>'                : '\U00002240', | ||||
|         '\\<struct>'               : '\U000022c4', | ||||
|         '\\<acute>'                : '\U000000b4', | ||||
|         '\\<index>'                : '\U00000131', | ||||
|         '\\<dieresis>'             : '\U000000a8', | ||||
|         '\\<cedilla>'              : '\U000000b8', | ||||
|         '\\<hungarumlaut>'         : '\U000002dd', | ||||
|         '\\<some>'                 : '\U000003f5', | ||||
|         '\\<newline>'              : '\U000023ce', | ||||
|         '\\<open>'                 : '\U00002039', | ||||
|         '\\<close>'                : '\U0000203a', | ||||
|         '\\<here>'                 : '\U00002302', | ||||
|         '\\<^sub>'                 : '\U000021e9', | ||||
|         '\\<^sup>'                 : '\U000021e7', | ||||
|         '\\<^bold>'                : '\U00002759', | ||||
|         '\\<^bsub>'                : '\U000021d8', | ||||
|         '\\<^esub>'                : '\U000021d9', | ||||
|         '\\<^bsup>'                : '\U000021d7', | ||||
|         '\\<^esup>'                : '\U000021d6', | ||||
|     } | ||||
|  | ||||
|     lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols} | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Filter.__init__(self, **options) | ||||
|         lang = get_choice_opt(options, 'lang', | ||||
|                               ['isabelle', 'latex'], 'isabelle') | ||||
|         self.symbols = self.lang_map[lang] | ||||
|  | ||||
|     def filter(self, lexer, stream): | ||||
|         for ttype, value in stream: | ||||
|             if value in self.symbols: | ||||
|                 yield ttype, self.symbols[value] | ||||
|             else: | ||||
|                 yield ttype, value | ||||
|  | ||||
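|  | ||||
| # Illustrative sketch, not part of the vendored module: run a hand-built | ||||
| # token stream through SymbolFilter.  The '\infty' entry comes from the | ||||
| # latex_symbols table above; the demo function name is made up. | ||||
| def _demo_symbol_filter(): | ||||
|     from pip._vendor.pygments.token import Name | ||||
|     filt = SymbolFilter(lang='latex') | ||||
|     stream = [(Name.Builtin, '\\infty'), (Name, 'x')] | ||||
|     # the first value is replaced by '\u221e', the second passes through | ||||
|     return list(filt.filter(None, iter(stream))) | ||||
|  | ||||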
|  | ||||
| class KeywordCaseFilter(Filter): | ||||
|     """Convert keywords to lowercase or uppercase or capitalize them, which | ||||
|     means first letter uppercase, rest lowercase. | ||||
|  | ||||
|     This can be useful e.g. if you highlight Pascal code and want to adapt the | ||||
|     code to your styleguide. | ||||
|  | ||||
|     Options accepted: | ||||
|  | ||||
|     `case` : string | ||||
|        The casing to convert keywords to. Must be one of ``'lower'``, | ||||
|        ``'upper'`` or ``'capitalize'``.  The default is ``'lower'``. | ||||
|     """ | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Filter.__init__(self, **options) | ||||
|         case = get_choice_opt(options, 'case', | ||||
|                               ['lower', 'upper', 'capitalize'], 'lower') | ||||
|         self.convert = getattr(str, case) | ||||
|  | ||||
|     def filter(self, lexer, stream): | ||||
|         for ttype, value in stream: | ||||
|             if ttype in Keyword: | ||||
|                 yield ttype, self.convert(value) | ||||
|             else: | ||||
|                 yield ttype, value | ||||
|  | ||||
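|  | ||||
| # Illustrative sketch, not part of the vendored module: uppercase the | ||||
| # keywords in a hand-built token stream. | ||||
| def _demo_keyword_case(): | ||||
|     from pip._vendor.pygments.token import Keyword, Name | ||||
|     filt = KeywordCaseFilter(case='upper') | ||||
|     stream = [(Keyword, 'begin'), (Name, 'x'), (Keyword, 'end')] | ||||
|     # yields (Keyword, 'BEGIN'), (Name, 'x'), (Keyword, 'END') | ||||
|     return list(filt.filter(None, iter(stream))) | ||||
|  | ||||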
|  | ||||
| class NameHighlightFilter(Filter): | ||||
|     """Highlight a normal Name (and Name.*) token with a different token type. | ||||
|  | ||||
|     Example:: | ||||
|  | ||||
|         filter = NameHighlightFilter( | ||||
|             names=['foo', 'bar', 'baz'], | ||||
|             tokentype=Name.Function, | ||||
|         ) | ||||
|  | ||||
|     This would highlight the names "foo", "bar" and "baz" | ||||
|     as functions. `Name.Function` is the default token type. | ||||
|  | ||||
|     Options accepted: | ||||
|  | ||||
|     `names` : list of strings | ||||
|       A list of names that should be given the different token type. | ||||
|       There is no default. | ||||
|     `tokentype` : TokenType or string | ||||
|       A token type or a string containing a token type name that is | ||||
|       used for highlighting the strings in `names`.  The default is | ||||
|       `Name.Function`. | ||||
|     """ | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Filter.__init__(self, **options) | ||||
|         self.names = set(get_list_opt(options, 'names', [])) | ||||
|         tokentype = options.get('tokentype') | ||||
|         if tokentype: | ||||
|             self.tokentype = string_to_tokentype(tokentype) | ||||
|         else: | ||||
|             self.tokentype = Name.Function | ||||
|  | ||||
|     def filter(self, lexer, stream): | ||||
|         for ttype, value in stream: | ||||
|             if ttype in Name and value in self.names: | ||||
|                 yield self.tokentype, value | ||||
|             else: | ||||
|                 yield ttype, value | ||||
|  | ||||
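|  | ||||
| # Illustrative sketch, not part of the vendored module: retype the made-up | ||||
| # name 'frobnicate' as a function in a hand-built stream. | ||||
| def _demo_name_highlight(): | ||||
|     from pip._vendor.pygments.token import Name | ||||
|     filt = NameHighlightFilter(names=['frobnicate']) | ||||
|     stream = [(Name, 'frobnicate'), (Name, 'other')] | ||||
|     # the first token comes back as (Name.Function, 'frobnicate') | ||||
|     return list(filt.filter(None, iter(stream))) | ||||
|  | ||||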
|  | ||||
| class ErrorToken(Exception): | ||||
|     pass | ||||
|  | ||||
|  | ||||
| class RaiseOnErrorTokenFilter(Filter): | ||||
|     """Raise an exception when the lexer generates an error token. | ||||
|  | ||||
|     Options accepted: | ||||
|  | ||||
|     `excclass` : Exception class | ||||
|       The exception class to raise. | ||||
|       The default is `pygments.filters.ErrorToken`. | ||||
|  | ||||
|     .. versionadded:: 0.8 | ||||
|     """ | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Filter.__init__(self, **options) | ||||
|         self.exception = options.get('excclass', ErrorToken) | ||||
|         try: | ||||
|             # issubclass() will raise TypeError if first argument is not a class | ||||
|             if not issubclass(self.exception, Exception): | ||||
|                 raise TypeError | ||||
|         except TypeError: | ||||
|             raise OptionError('excclass option is not an exception class') | ||||
|  | ||||
|     def filter(self, lexer, stream): | ||||
|         for ttype, value in stream: | ||||
|             if ttype is Error: | ||||
|                 raise self.exception(value) | ||||
|             yield ttype, value | ||||
|  | ||||
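|  | ||||
| # Illustrative sketch, not part of the vendored module: the filter raises as | ||||
| # soon as the stream yields an Error token. | ||||
| def _demo_raise_on_error(): | ||||
|     from pip._vendor.pygments.token import Error, Text | ||||
|     filt = RaiseOnErrorTokenFilter() | ||||
|     try: | ||||
|         list(filt.filter(None, iter([(Text, 'ok'), (Error, '!?')]))) | ||||
|     except ErrorToken as err: | ||||
|         return err  # carries the offending token value '!?' | ||||
|  | ||||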
|  | ||||
| class VisibleWhitespaceFilter(Filter): | ||||
|     """Convert tabs, newlines and/or spaces to visible characters. | ||||
|  | ||||
|     Options accepted: | ||||
|  | ||||
|     `spaces` : string or bool | ||||
|       If this is a one-character string, spaces will be replaced by this string. | ||||
|       If it is another true value, spaces will be replaced by ``·`` (unicode | ||||
|       MIDDLE DOT).  If it is a false value, spaces will not be replaced.  The | ||||
|       default is ``False``. | ||||
|     `tabs` : string or bool | ||||
|       The same as for `spaces`, but the default replacement character is ``»`` | ||||
|       (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK).  The default value | ||||
|       is ``False``.  Note: this will not work if the `tabsize` option for the | ||||
|       lexer is nonzero, as tabs will already have been expanded then. | ||||
|     `tabsize` : int | ||||
|       If tabs are to be replaced by this filter (see the `tabs` option), this | ||||
|       is the total number of characters that a tab should be expanded to. | ||||
|       The default is ``8``. | ||||
|     `newlines` : string or bool | ||||
|       The same as for `spaces`, but the default replacement character is ``¶`` | ||||
|       (unicode PILCROW SIGN).  The default value is ``False``. | ||||
|     `wstokentype` : bool | ||||
|       If true, give whitespace the special `Whitespace` token type.  This allows | ||||
|       styling the visible whitespace differently (e.g. greyed out), but it can | ||||
|       disrupt background colors.  The default is ``True``. | ||||
|  | ||||
|     .. versionadded:: 0.8 | ||||
|     """ | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Filter.__init__(self, **options) | ||||
|         for name, default in [('spaces',   '·'), | ||||
|                               ('tabs',     '»'), | ||||
|                               ('newlines', '¶')]: | ||||
|             opt = options.get(name, False) | ||||
|             if isinstance(opt, str) and len(opt) == 1: | ||||
|                 setattr(self, name, opt) | ||||
|             else: | ||||
|                 setattr(self, name, (opt and default or '')) | ||||
|         tabsize = get_int_opt(options, 'tabsize', 8) | ||||
|         if self.tabs: | ||||
|             self.tabs += ' ' * (tabsize - 1) | ||||
|         if self.newlines: | ||||
|             self.newlines += '\n' | ||||
|         self.wstt = get_bool_opt(options, 'wstokentype', True) | ||||
|  | ||||
|     def filter(self, lexer, stream): | ||||
|         if self.wstt: | ||||
|             spaces = self.spaces or ' ' | ||||
|             tabs = self.tabs or '\t' | ||||
|             newlines = self.newlines or '\n' | ||||
|             regex = re.compile(r'\s') | ||||
|  | ||||
|             def replacefunc(wschar): | ||||
|                 if wschar == ' ': | ||||
|                     return spaces | ||||
|                 elif wschar == '\t': | ||||
|                     return tabs | ||||
|                 elif wschar == '\n': | ||||
|                     return newlines | ||||
|                 return wschar | ||||
|  | ||||
|             for ttype, value in stream: | ||||
|                 yield from _replace_special(ttype, value, regex, Whitespace, | ||||
|                                             replacefunc) | ||||
|         else: | ||||
|             spaces, tabs, newlines = self.spaces, self.tabs, self.newlines | ||||
|             # simpler processing | ||||
|             for ttype, value in stream: | ||||
|                 if spaces: | ||||
|                     value = value.replace(' ', spaces) | ||||
|                 if tabs: | ||||
|                     value = value.replace('\t', tabs) | ||||
|                 if newlines: | ||||
|                     value = value.replace('\n', newlines) | ||||
|                 yield ttype, value | ||||
|  | ||||
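|  | ||||
| # Illustrative sketch, not part of the vendored module: make spaces and | ||||
| # newlines visible in a hand-built stream (wstokentype left at its default, | ||||
| # so the replacements come back as Whitespace tokens). | ||||
| def _demo_visible_whitespace(): | ||||
|     from pip._vendor.pygments.token import Text | ||||
|     filt = VisibleWhitespaceFilter(spaces=True, newlines=True) | ||||
|     # ' ' becomes '\u00b7', '\n' becomes '\u00b6' followed by '\n' | ||||
|     return list(filt.filter(None, iter([(Text, 'a b\n')]))) | ||||
|  | ||||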
|  | ||||
| class GobbleFilter(Filter): | ||||
|     """Gobbles source code lines (eats initial characters). | ||||
|  | ||||
|     This filter drops the first ``n`` characters off every line of code.  This | ||||
|     may be useful when the source code fed to the lexer is indented by a fixed | ||||
|     amount of space that isn't desired in the output. | ||||
|  | ||||
|     Options accepted: | ||||
|  | ||||
|     `n` : int | ||||
|        The number of characters to gobble. | ||||
|  | ||||
|     .. versionadded:: 1.2 | ||||
|     """ | ||||
|     def __init__(self, **options): | ||||
|         Filter.__init__(self, **options) | ||||
|         self.n = get_int_opt(options, 'n', 0) | ||||
|  | ||||
|     def gobble(self, value, left): | ||||
|         if left < len(value): | ||||
|             return value[left:], 0 | ||||
|         else: | ||||
|             return '', left - len(value) | ||||
|  | ||||
|     def filter(self, lexer, stream): | ||||
|         n = self.n | ||||
|         left = n  # How many characters left to gobble. | ||||
|         for ttype, value in stream: | ||||
|             # Remove ``left`` characters from the first line, ``n`` from all others. | ||||
|             parts = value.split('\n') | ||||
|             (parts[0], left) = self.gobble(parts[0], left) | ||||
|             for i in range(1, len(parts)): | ||||
|                 (parts[i], left) = self.gobble(parts[i], n) | ||||
|             value = '\n'.join(parts) | ||||
|  | ||||
|             if value != '': | ||||
|                 yield ttype, value | ||||
|  | ||||
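|  | ||||
| # Illustrative sketch, not part of the vendored module: strip four leading | ||||
| # characters from every line of a hand-built stream. | ||||
| def _demo_gobble(): | ||||
|     from pip._vendor.pygments.token import Text | ||||
|     filt = GobbleFilter(n=4) | ||||
|     # yields (Text, 'first\nsecond\n') | ||||
|     return list(filt.filter(None, iter([(Text, '    first\n    second\n')]))) | ||||
|  | ||||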
|  | ||||
| class TokenMergeFilter(Filter): | ||||
|     """Merges consecutive tokens with the same token type in the output | ||||
|     stream of a lexer. | ||||
|  | ||||
|     .. versionadded:: 1.2 | ||||
|     """ | ||||
|     def __init__(self, **options): | ||||
|         Filter.__init__(self, **options) | ||||
|  | ||||
|     def filter(self, lexer, stream): | ||||
|         current_type = None | ||||
|         current_value = None | ||||
|         for ttype, value in stream: | ||||
|             if ttype is current_type: | ||||
|                 current_value += value | ||||
|             else: | ||||
|                 if current_type is not None: | ||||
|                     yield current_type, current_value | ||||
|                 current_type = ttype | ||||
|                 current_value = value | ||||
|         if current_type is not None: | ||||
|             yield current_type, current_value | ||||
|  | ||||
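|  | ||||
| # Illustrative sketch, not part of the vendored module: three adjacent Text | ||||
| # tokens collapse into one. | ||||
| def _demo_token_merge(): | ||||
|     from pip._vendor.pygments.token import Text | ||||
|     filt = TokenMergeFilter() | ||||
|     stream = [(Text, 'foo'), (Text, ' '), (Text, 'bar')] | ||||
|     # yields a single (Text, 'foo bar') | ||||
|     return list(filt.filter(None, iter(stream))) | ||||
|  | ||||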
|  | ||||
| FILTERS = { | ||||
|     'codetagify':     CodeTagFilter, | ||||
|     'keywordcase':    KeywordCaseFilter, | ||||
|     'highlight':      NameHighlightFilter, | ||||
|     'raiseonerror':   RaiseOnErrorTokenFilter, | ||||
|     'whitespace':     VisibleWhitespaceFilter, | ||||
|     'gobble':         GobbleFilter, | ||||
|     'tokenmerge':     TokenMergeFilter, | ||||
|     'symbols':        SymbolFilter, | ||||
| } | ||||
| @@ -0,0 +1,94 @@ | ||||
| """ | ||||
|     pygments.formatter | ||||
|     ~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Base formatter class. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| import codecs | ||||
|  | ||||
| from pip._vendor.pygments.util import get_bool_opt | ||||
| from pip._vendor.pygments.styles import get_style_by_name | ||||
|  | ||||
| __all__ = ['Formatter'] | ||||
|  | ||||
|  | ||||
| def _lookup_style(style): | ||||
|     if isinstance(style, str): | ||||
|         return get_style_by_name(style) | ||||
|     return style | ||||
|  | ||||
|  | ||||
| class Formatter: | ||||
|     """ | ||||
|     Converts a token stream to text. | ||||
|  | ||||
|     Options accepted: | ||||
|  | ||||
|     ``style`` | ||||
|         The style to use, can be a string or a Style subclass | ||||
|         (default: "default"). Not used by e.g. the | ||||
|         TerminalFormatter. | ||||
|     ``full`` | ||||
|         Tells the formatter to output a "full" document, i.e. | ||||
|         a complete self-contained document. This doesn't have | ||||
|         any effect for some formatters (default: false). | ||||
|     ``title`` | ||||
|         If ``full`` is true, the title that should be used to | ||||
|         caption the document (default: ''). | ||||
|     ``encoding`` | ||||
|         If given, must be an encoding name. This will be used to | ||||
|         convert the Unicode token strings to byte strings in the | ||||
|         output. If it is "" or None, Unicode strings will be written | ||||
|         to the output file, which most file-like objects do not | ||||
|         support (default: None). | ||||
|     ``outencoding`` | ||||
|         Overrides ``encoding`` if given. | ||||
|     """ | ||||
|  | ||||
|     #: Name of the formatter | ||||
|     name = None | ||||
|  | ||||
|     #: Shortcuts for the formatter | ||||
|     aliases = [] | ||||
|  | ||||
|     #: Filename match rules (fnmatch patterns) | ||||
|     filenames = [] | ||||
|  | ||||
|     #: If True, this formatter outputs Unicode strings when no encoding | ||||
|     #: option is given. | ||||
|     unicodeoutput = True | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         self.style = _lookup_style(options.get('style', 'default')) | ||||
|         self.full = get_bool_opt(options, 'full', False) | ||||
|         self.title = options.get('title', '') | ||||
|         self.encoding = options.get('encoding', None) or None | ||||
|         if self.encoding in ('guess', 'chardet'): | ||||
|             # can happen for e.g. pygmentize -O encoding=guess | ||||
|             self.encoding = 'utf-8' | ||||
|         self.encoding = options.get('outencoding') or self.encoding | ||||
|         self.options = options | ||||
|  | ||||
|     def get_style_defs(self, arg=''): | ||||
|         """ | ||||
|         Return the style definitions for the current style as a string. | ||||
|  | ||||
|         ``arg`` is an additional argument whose meaning depends on the | ||||
|         formatter used. Note that ``arg`` can also be a list or tuple | ||||
|         for some formatters like the html formatter. | ||||
|         """ | ||||
|         return '' | ||||
|  | ||||
|     def format(self, tokensource, outfile): | ||||
|         """ | ||||
|         Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` | ||||
|         tuples and write it into ``outfile``. | ||||
|         """ | ||||
|         if self.encoding: | ||||
|             # wrap the outfile in a StreamWriter | ||||
|             outfile = codecs.lookup(self.encoding)[3](outfile) | ||||
|         return self.format_unencoded(tokensource, outfile) | ||||
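|  | ||||
|  | ||||
| # A minimal subclassing sketch, not part of the vendored module: concrete | ||||
| # formatters implement ``format_unencoded``; the base ``format`` above takes | ||||
| # care of the ``encoding``/``outencoding`` stream wrapping. | ||||
| class _PlainDemoFormatter(Formatter): | ||||
|     name = 'Plain demo' | ||||
|  | ||||
|     def format_unencoded(self, tokensource, outfile): | ||||
|         # write the bare token text, discarding all token types | ||||
|         for _ttype, value in tokensource: | ||||
|             outfile.write(value) | ||||
|  | ||||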
| @@ -0,0 +1,143 @@ | ||||
| """ | ||||
|     pygments.formatters | ||||
|     ~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Pygments formatters. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| import re | ||||
| import sys | ||||
| import types | ||||
| from fnmatch import fnmatch | ||||
| from os.path import basename | ||||
|  | ||||
| from pip._vendor.pygments.formatters._mapping import FORMATTERS | ||||
| from pip._vendor.pygments.plugin import find_plugin_formatters | ||||
| from pip._vendor.pygments.util import ClassNotFound | ||||
|  | ||||
| __all__ = ['get_formatter_by_name', 'get_formatter_for_filename', | ||||
|            'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS) | ||||
|  | ||||
| _formatter_cache = {}  # classes by name | ||||
|  | ||||
| def _load_formatters(module_name): | ||||
|     """Load a formatter (and all others in the module too).""" | ||||
|     mod = __import__(module_name, None, None, ['__all__']) | ||||
|     for formatter_name in mod.__all__: | ||||
|         cls = getattr(mod, formatter_name) | ||||
|         _formatter_cache[cls.name] = cls | ||||
|  | ||||
|  | ||||
| def get_all_formatters(): | ||||
|     """Return a generator for all formatter classes.""" | ||||
|     # NB: this returns formatter classes, not info like get_all_lexers(). | ||||
|     for info in FORMATTERS.values(): | ||||
|         if info[1] not in _formatter_cache: | ||||
|             _load_formatters(info[0]) | ||||
|         yield _formatter_cache[info[1]] | ||||
|     for _, formatter in find_plugin_formatters(): | ||||
|         yield formatter | ||||
|  | ||||
|  | ||||
| def find_formatter_class(alias): | ||||
|     """Lookup a formatter by alias. | ||||
|  | ||||
|     Returns None if not found. | ||||
|     """ | ||||
|     for module_name, name, aliases, _, _ in FORMATTERS.values(): | ||||
|         if alias in aliases: | ||||
|             if name not in _formatter_cache: | ||||
|                 _load_formatters(module_name) | ||||
|             return _formatter_cache[name] | ||||
|     for _, cls in find_plugin_formatters(): | ||||
|         if alias in cls.aliases: | ||||
|             return cls | ||||
|  | ||||
|  | ||||
| def get_formatter_by_name(_alias, **options): | ||||
|     """Lookup and instantiate a formatter by alias. | ||||
|  | ||||
|     Raises ClassNotFound if not found. | ||||
|     """ | ||||
|     cls = find_formatter_class(_alias) | ||||
|     if cls is None: | ||||
|         raise ClassNotFound("no formatter found for name %r" % _alias) | ||||
|     return cls(**options) | ||||
|  | ||||
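|  | ||||
| # Illustrative sketch, not part of the vendored module: 'html' is a | ||||
| # registered alias in FORMATTERS and 'linenos' an HtmlFormatter option; | ||||
| # this assumes the 'pygments' alias that pip installs in sys.modules. | ||||
| def _demo_get_by_name(): | ||||
|     fmt = get_formatter_by_name('html', linenos=True) | ||||
|     return type(fmt).__name__  # 'HtmlFormatter' | ||||
|  | ||||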
|  | ||||
| def load_formatter_from_file(filename, formattername="CustomFormatter", | ||||
|                              **options): | ||||
|     """Load a formatter from a file. | ||||
|  | ||||
|     This function expects a file, located relative to the current working | ||||
|     directory, that contains a Formatter class. By default the class is | ||||
|     expected to be named CustomFormatter; a different class name can be | ||||
|     passed as the second argument. | ||||
|  | ||||
|     Users should be very careful with the input, because this function | ||||
|     is equivalent to running exec on the input file. | ||||
|  | ||||
|     Raises ClassNotFound if there are any problems importing the Formatter. | ||||
|  | ||||
|     .. versionadded:: 2.2 | ||||
|     """ | ||||
|     try: | ||||
|         # This empty dict will contain the namespace for the exec'd file | ||||
|         custom_namespace = {} | ||||
|         with open(filename, 'rb') as f: | ||||
|             exec(f.read(), custom_namespace) | ||||
|         # Retrieve the class `formattername` from that namespace | ||||
|         if formattername not in custom_namespace: | ||||
|             raise ClassNotFound('no valid %s class found in %s' % | ||||
|                                 (formattername, filename)) | ||||
|         formatter_class = custom_namespace[formattername] | ||||
|         # And finally instantiate it with the options | ||||
|         return formatter_class(**options) | ||||
|     except OSError as err: | ||||
|         raise ClassNotFound('cannot read %s: %s' % (filename, err)) | ||||
|     except ClassNotFound: | ||||
|         raise | ||||
|     except Exception as err: | ||||
|         raise ClassNotFound('error when loading custom formatter: %s' % err) | ||||
|  | ||||
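|  | ||||
| # Illustrative sketch, not part of the vendored module: 'my_formatter.py' is | ||||
| # a hypothetical file in the working directory that defines a Formatter | ||||
| # subclass named CustomFormatter. | ||||
| def _demo_load_from_file(): | ||||
|     return load_formatter_from_file('my_formatter.py', style='colorful') | ||||
|  | ||||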
|  | ||||
| def get_formatter_for_filename(fn, **options): | ||||
|     """Lookup and instantiate a formatter by filename pattern. | ||||
|  | ||||
|     Raises ClassNotFound if not found. | ||||
|     """ | ||||
|     fn = basename(fn) | ||||
|     for modname, name, _, filenames, _ in FORMATTERS.values(): | ||||
|         for filename in filenames: | ||||
|             if fnmatch(fn, filename): | ||||
|                 if name not in _formatter_cache: | ||||
|                     _load_formatters(modname) | ||||
|                 return _formatter_cache[name](**options) | ||||
|     for cls in find_plugin_formatters(): | ||||
|         for filename in cls.filenames: | ||||
|             if fnmatch(fn, filename): | ||||
|                 return cls(**options) | ||||
|     raise ClassNotFound("no formatter found for file name %r" % fn) | ||||
|  | ||||
|  | ||||
| class _automodule(types.ModuleType): | ||||
|     """Automatically import formatters.""" | ||||
|  | ||||
|     def __getattr__(self, name): | ||||
|         info = FORMATTERS.get(name) | ||||
|         if info: | ||||
|             _load_formatters(info[0]) | ||||
|             cls = _formatter_cache[info[1]] | ||||
|             setattr(self, name, cls) | ||||
|             return cls | ||||
|         raise AttributeError(name) | ||||
|  | ||||
|  | ||||
| oldmod = sys.modules[__name__] | ||||
| newmod = _automodule(__name__) | ||||
| newmod.__dict__.update(oldmod.__dict__) | ||||
| sys.modules[__name__] = newmod | ||||
| del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types | ||||
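|  | ||||
|  | ||||
| # Illustrative sketch, not part of the vendored module: with the module | ||||
| # object swapped for an _automodule instance, the first attribute access | ||||
| # imports the formatter class on demand. | ||||
| def _demo_lazy_import(): | ||||
|     from pip._vendor.pygments.formatters import HtmlFormatter | ||||
|     return HtmlFormatter  # resolved via _automodule.__getattr__ | ||||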
| @@ -0,0 +1,23 @@ | ||||
| # Automatically generated by scripts/gen_mapfiles.py. | ||||
| # DO NOT EDIT BY HAND; run `make mapfiles` instead. | ||||
|  | ||||
| FORMATTERS = { | ||||
|     'BBCodeFormatter': ('pygments.formatters.bbcode', 'BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'), | ||||
|     'BmpImageFormatter': ('pygments.formatters.img', 'img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'), | ||||
|     'GifImageFormatter': ('pygments.formatters.img', 'img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'), | ||||
|     'GroffFormatter': ('pygments.formatters.groff', 'groff', ('groff', 'troff', 'roff'), (), 'Format tokens with groff escapes to change their color and font style.'), | ||||
|     'HtmlFormatter': ('pygments.formatters.html', 'HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass` option."), | ||||
|     'IRCFormatter': ('pygments.formatters.irc', 'IRC', ('irc', 'IRC'), (), 'Format tokens with IRC color sequences'), | ||||
|     'ImageFormatter': ('pygments.formatters.img', 'img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'), | ||||
|     'JpgImageFormatter': ('pygments.formatters.img', 'img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'), | ||||
|     'LatexFormatter': ('pygments.formatters.latex', 'LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'), | ||||
|     'NullFormatter': ('pygments.formatters.other', 'Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'), | ||||
|     'PangoMarkupFormatter': ('pygments.formatters.pangomarkup', 'Pango Markup', ('pango', 'pangomarkup'), (), 'Format tokens as Pango Markup code. It can then be rendered to an SVG.'), | ||||
|     'RawTokenFormatter': ('pygments.formatters.other', 'Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'), | ||||
|     'RtfFormatter': ('pygments.formatters.rtf', 'RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.'), | ||||
|     'SvgFormatter': ('pygments.formatters.svg', 'SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file.  This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'), | ||||
|     'Terminal256Formatter': ('pygments.formatters.terminal256', 'Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console.  Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'), | ||||
|     'TerminalFormatter': ('pygments.formatters.terminal', 'Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.'), | ||||
|     'TerminalTrueColorFormatter': ('pygments.formatters.terminal256', 'TerminalTrueColor', ('terminal16m', 'console16m', '16m'), (), 'Format tokens with ANSI color sequences, for output in a true-color terminal or console.  Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'), | ||||
|     'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.'), | ||||
| } | ||||
| @@ -0,0 +1,108 @@ | ||||
| """ | ||||
|     pygments.formatters.bbcode | ||||
|     ~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     BBcode formatter. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
|  | ||||
| from pip._vendor.pygments.formatter import Formatter | ||||
| from pip._vendor.pygments.util import get_bool_opt | ||||
|  | ||||
| __all__ = ['BBCodeFormatter'] | ||||
|  | ||||
|  | ||||
| class BBCodeFormatter(Formatter): | ||||
|     """ | ||||
|     Format tokens with BBcodes. These formatting codes are used by many | ||||
|     bulletin boards, so you can highlight your sourcecode with pygments before | ||||
|     posting it there. | ||||
|  | ||||
|     This formatter has no support for background colors and borders, as there | ||||
|     are no common BBcode tags for that. | ||||
|  | ||||
|     Some board systems (e.g. phpBB) don't support colors in their [code] tag, | ||||
|     so you can't combine the highlighting with that tag. | ||||
|     Text in a [code] tag is usually shown in a monospace font (which this | ||||
|     formatter can emulate via the ``monofont`` option), and spaces (which you | ||||
|     need for indentation) are not removed. | ||||
|  | ||||
|     Additional options accepted: | ||||
|  | ||||
|     `style` | ||||
|         The style to use, can be a string or a Style subclass (default: | ||||
|         ``'default'``). | ||||
|  | ||||
|     `codetag` | ||||
|         If set to true, put the output into ``[code]`` tags (default: | ||||
|         ``false``) | ||||
|  | ||||
|     `monofont` | ||||
|         If set to true, add a tag to show the code with a monospace font | ||||
|         (default: ``false``). | ||||
|     """ | ||||
|     name = 'BBCode' | ||||
|     aliases = ['bbcode', 'bb'] | ||||
|     filenames = [] | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Formatter.__init__(self, **options) | ||||
|         self._code = get_bool_opt(options, 'codetag', False) | ||||
|         self._mono = get_bool_opt(options, 'monofont', False) | ||||
|  | ||||
|         self.styles = {} | ||||
|         self._make_styles() | ||||
|  | ||||
|     def _make_styles(self): | ||||
|         for ttype, ndef in self.style: | ||||
|             start = end = '' | ||||
|             if ndef['color']: | ||||
|                 start += '[color=#%s]' % ndef['color'] | ||||
|                 end = '[/color]' + end | ||||
|             if ndef['bold']: | ||||
|                 start += '[b]' | ||||
|                 end = '[/b]' + end | ||||
|             if ndef['italic']: | ||||
|                 start += '[i]' | ||||
|                 end = '[/i]' + end | ||||
|             if ndef['underline']: | ||||
|                 start += '[u]' | ||||
|                 end = '[/u]' + end | ||||
|             # there are no common BBcodes for background-color and border | ||||
|  | ||||
|             self.styles[ttype] = start, end | ||||
|  | ||||
|     def format_unencoded(self, tokensource, outfile): | ||||
|         if self._code: | ||||
|             outfile.write('[code]') | ||||
|         if self._mono: | ||||
|             outfile.write('[font=monospace]') | ||||
|  | ||||
|         lastval = '' | ||||
|         lasttype = None | ||||
|  | ||||
|         for ttype, value in tokensource: | ||||
|             while ttype not in self.styles: | ||||
|                 ttype = ttype.parent | ||||
|             if ttype == lasttype: | ||||
|                 lastval += value | ||||
|             else: | ||||
|                 if lastval: | ||||
|                     start, end = self.styles[lasttype] | ||||
|                     outfile.write(''.join((start, lastval, end))) | ||||
|                 lastval = value | ||||
|                 lasttype = ttype | ||||
|  | ||||
|         if lastval: | ||||
|             start, end = self.styles[lasttype] | ||||
|             outfile.write(''.join((start, lastval, end))) | ||||
|  | ||||
|         if self._mono: | ||||
|             outfile.write('[/font]') | ||||
|         if self._code: | ||||
|             outfile.write('[/code]') | ||||
|         if self._code or self._mono: | ||||
|             outfile.write('\n') | ||||
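|  | ||||
|  | ||||
| # Illustrative usage sketch, not part of the vendored module: render a tiny | ||||
| # snippet to BBCode markup with both wrapper tags enabled.  The result has | ||||
| # the shape '[code][font=monospace]...[/font][/code]\n'. | ||||
| def _demo_bbcode(): | ||||
|     from pip._vendor.pygments import highlight | ||||
|     from pip._vendor.pygments.lexers import PythonLexer | ||||
|     fmt = BBCodeFormatter(codetag=True, monofont=True) | ||||
|     return highlight('x = 1\n', PythonLexer(), fmt) | ||||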
| @@ -0,0 +1,170 @@ | ||||
| """ | ||||
|     pygments.formatters.groff | ||||
|     ~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Formatter for groff output. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| import math | ||||
| from pip._vendor.pygments.formatter import Formatter | ||||
| from pip._vendor.pygments.util import get_bool_opt, get_int_opt | ||||
|  | ||||
| __all__ = ['GroffFormatter'] | ||||
|  | ||||
|  | ||||
| class GroffFormatter(Formatter): | ||||
|     """ | ||||
|     Format tokens with groff escapes to change their color and font style. | ||||
|  | ||||
|     .. versionadded:: 2.11 | ||||
|  | ||||
|     Additional options accepted: | ||||
|  | ||||
|     `style` | ||||
|         The style to use, can be a string or a Style subclass (default: | ||||
|         ``'default'``). | ||||
|  | ||||
|     `monospaced` | ||||
|         If set to true, monospace font will be used (default: ``true``). | ||||
|  | ||||
|     `linenos` | ||||
|         If set to true, print the line numbers (default: ``false``). | ||||
|  | ||||
|     `wrap` | ||||
|         Wrap lines to the specified number of characters. Disabled if set to 0 | ||||
|         (default: ``0``). | ||||
|     """ | ||||
|  | ||||
|     name = 'groff' | ||||
|     aliases = ['groff','troff','roff'] | ||||
|     filenames = [] | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Formatter.__init__(self, **options) | ||||
|  | ||||
|         self.monospaced = get_bool_opt(options, 'monospaced', True) | ||||
|         self.linenos = get_bool_opt(options, 'linenos', False) | ||||
|         self._lineno = 0 | ||||
|         self.wrap = get_int_opt(options, 'wrap', 0) | ||||
|         self._linelen = 0 | ||||
|  | ||||
|         self.styles = {} | ||||
|         self._make_styles() | ||||
|  | ||||
|  | ||||
|     def _make_styles(self): | ||||
|         regular = '\\f[CR]' if self.monospaced else '\\f[R]' | ||||
|         bold = '\\f[CB]' if self.monospaced else '\\f[B]' | ||||
|         italic = '\\f[CI]' if self.monospaced else '\\f[I]' | ||||
|  | ||||
|         for ttype, ndef in self.style: | ||||
|             start = end = '' | ||||
|             if ndef['color']: | ||||
|                 start += '\\m[%s]' % ndef['color'] | ||||
|                 end = '\\m[]' + end | ||||
|             if ndef['bold']: | ||||
|                 start += bold | ||||
|                 end = regular + end | ||||
|             if ndef['italic']: | ||||
|                 start += italic | ||||
|                 end = regular + end | ||||
|             if ndef['bgcolor']: | ||||
|                 start += '\\M[%s]' % ndef['bgcolor'] | ||||
|                 end = '\\M[]' + end | ||||
|  | ||||
|             self.styles[ttype] = start, end | ||||
|  | ||||
|  | ||||
|     def _define_colors(self, outfile): | ||||
|         colors = set() | ||||
|         for _, ndef in self.style: | ||||
|             if ndef['color'] is not None: | ||||
|                 colors.add(ndef['color']) | ||||
|  | ||||
|         for color in colors: | ||||
|             outfile.write('.defcolor ' + color + ' rgb #' + color + '\n') | ||||
|  | ||||
|  | ||||
|     def _write_lineno(self, outfile): | ||||
|         self._lineno += 1 | ||||
|         outfile.write("%s% 4d " % (self._lineno != 1 and '\n' or '', self._lineno)) | ||||
|  | ||||
|  | ||||
|     def _wrap_line(self, line): | ||||
|         length = len(line.rstrip('\n')) | ||||
|         space = '     ' if self.linenos else '' | ||||
|         newline = '' | ||||
|  | ||||
|         if length > self.wrap: | ||||
|             for i in range(0, math.floor(length / self.wrap)): | ||||
|                 chunk = line[i*self.wrap:i*self.wrap+self.wrap] | ||||
|                 newline += (chunk + '\n' + space) | ||||
|             remainder = length % self.wrap | ||||
|             if remainder > 0: | ||||
|                 newline += line[-remainder-1:] | ||||
|                 self._linelen = remainder | ||||
|         elif self._linelen + length > self.wrap: | ||||
|             newline = ('\n' + space) + line | ||||
|             self._linelen = length | ||||
|         else: | ||||
|             newline = line | ||||
|             self._linelen += length | ||||
|  | ||||
|         return newline | ||||
|  | ||||
|  | ||||
|     def _escape_chars(self, text): | ||||
|         text = text.replace('\\', '\\[u005C]'). \ | ||||
|                     replace('.', '\\[char46]'). \ | ||||
|                     replace('\'', '\\[u0027]'). \ | ||||
|                     replace('`', '\\[u0060]'). \ | ||||
|                     replace('~', '\\[u007E]') | ||||
|         copy = text | ||||
|  | ||||
|         for char in copy: | ||||
|             if len(char) != len(char.encode()): | ||||
|                 uni = char.encode('unicode_escape') \ | ||||
|                     .decode()[1:] \ | ||||
|                     .replace('x', 'u00') \ | ||||
|                     .upper() | ||||
|                 text = text.replace(char, '\\[u' + uni[1:] + ']') | ||||
|  | ||||
|         return text | ||||
|  | ||||
|  | ||||
|     def format_unencoded(self, tokensource, outfile): | ||||
|         self._define_colors(outfile) | ||||
|  | ||||
|         outfile.write('.nf\n\\f[CR]\n') | ||||
|  | ||||
|         if self.linenos: | ||||
|             self._write_lineno(outfile) | ||||
|  | ||||
|         for ttype, value in tokensource: | ||||
|             while ttype not in self.styles: | ||||
|                 ttype = ttype.parent | ||||
|             start, end = self.styles[ttype] | ||||
|  | ||||
|             for line in value.splitlines(True): | ||||
|                 if self.wrap > 0: | ||||
|                     line = self._wrap_line(line) | ||||
|  | ||||
|                 if start and end: | ||||
|                     text = self._escape_chars(line.rstrip('\n')) | ||||
|                     if text != '': | ||||
|                         outfile.write(''.join((start, text, end))) | ||||
|                 else: | ||||
|                     outfile.write(self._escape_chars(line.rstrip('\n'))) | ||||
|  | ||||
|                 if line.endswith('\n'): | ||||
|                     if self.linenos: | ||||
|                         self._write_lineno(outfile) | ||||
|                         self._linelen = 0 | ||||
|                     else: | ||||
|                         outfile.write('\n') | ||||
|                         self._linelen = 0 | ||||
|  | ||||
|         outfile.write('\n.fi') | ||||
| @ -0,0 +1,989 @@ | ||||
| """ | ||||
|     pygments.formatters.html | ||||
|     ~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Formatter for HTML output. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| import functools | ||||
| import os | ||||
| import sys | ||||
| import os.path | ||||
| from io import StringIO | ||||
|  | ||||
| from pip._vendor.pygments.formatter import Formatter | ||||
| from pip._vendor.pygments.token import Token, Text, STANDARD_TYPES | ||||
| from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt | ||||
|  | ||||
| try: | ||||
|     import ctags | ||||
| except ImportError: | ||||
|     ctags = None | ||||
|  | ||||
| __all__ = ['HtmlFormatter'] | ||||
|  | ||||
|  | ||||
| _escape_html_table = { | ||||
|     ord('&'): '&amp;', | ||||
|     ord('<'): '&lt;', | ||||
|     ord('>'): '&gt;', | ||||
|     ord('"'): '&quot;', | ||||
|     ord("'"): '&#39;', | ||||
| } | ||||
|  | ||||
|  | ||||
| def escape_html(text, table=_escape_html_table): | ||||
|     """Escape &, <, > as well as single and double quotes for HTML.""" | ||||
|     return text.translate(table) | ||||
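|  | ||||
| # A quick illustration of the translation table above (hedged; the input | ||||
| # string is made up for this example): | ||||
| #   escape_html('<a href="x">&</a>') | ||||
| #   -> '&lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;' | ||||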
|  | ||||
|  | ||||
| def webify(color): | ||||
|     if color.startswith('calc') or color.startswith('var'): | ||||
|         return color | ||||
|     else: | ||||
|         return '#' + color | ||||
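|  | ||||
| # Illustration (hedged): bare hex colors gain a leading '#', while CSS | ||||
| # calc()/var() expressions pass through unchanged: | ||||
| #   webify('f00')           -> '#f00' | ||||
| #   webify('var(--accent)') -> 'var(--accent)' | ||||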
|  | ||||
|  | ||||
| def _get_ttype_class(ttype): | ||||
|     fname = STANDARD_TYPES.get(ttype) | ||||
|     if fname: | ||||
|         return fname | ||||
|     aname = '' | ||||
|     while fname is None: | ||||
|         aname = '-' + ttype[-1] + aname | ||||
|         ttype = ttype.parent | ||||
|         fname = STANDARD_TYPES.get(ttype) | ||||
|     return fname + aname | ||||
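|  | ||||
| # Illustration (hedged; Token.Keyword.Custom is a hypothetical non-standard | ||||
| # subtype): | ||||
| #   _get_ttype_class(Token.Keyword)        -> 'k' | ||||
| #   _get_ttype_class(Token.Keyword.Custom) -> 'k-Custom' | ||||
| # Unknown subtypes fall back to the nearest standard ancestor's short name | ||||
| # and append their remaining path as '-<part>' suffixes. | ||||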
|  | ||||
|  | ||||
| CSSFILE_TEMPLATE = '''\ | ||||
| /* | ||||
| generated by Pygments <https://pygments.org/> | ||||
| Copyright 2006-2022 by the Pygments team. | ||||
| Licensed under the BSD license, see LICENSE for details. | ||||
| */ | ||||
| %(styledefs)s | ||||
| ''' | ||||
|  | ||||
| DOC_HEADER = '''\ | ||||
| <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" | ||||
|    "http://www.w3.org/TR/html4/strict.dtd"> | ||||
| <!-- | ||||
| generated by Pygments <https://pygments.org/> | ||||
| Copyright 2006-2022 by the Pygments team. | ||||
| Licensed under the BSD license, see LICENSE for details. | ||||
| --> | ||||
| <html> | ||||
| <head> | ||||
|   <title>%(title)s</title> | ||||
|   <meta http-equiv="content-type" content="text/html; charset=%(encoding)s"> | ||||
|   <style type="text/css"> | ||||
| ''' + CSSFILE_TEMPLATE + ''' | ||||
|   </style> | ||||
| </head> | ||||
| <body> | ||||
| <h2>%(title)s</h2> | ||||
|  | ||||
| ''' | ||||
|  | ||||
| DOC_HEADER_EXTERNALCSS = '''\ | ||||
| <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" | ||||
|    "http://www.w3.org/TR/html4/strict.dtd"> | ||||
|  | ||||
| <html> | ||||
| <head> | ||||
|   <title>%(title)s</title> | ||||
|   <meta http-equiv="content-type" content="text/html; charset=%(encoding)s"> | ||||
|   <link rel="stylesheet" href="%(cssfile)s" type="text/css"> | ||||
| </head> | ||||
| <body> | ||||
| <h2>%(title)s</h2> | ||||
|  | ||||
| ''' | ||||
|  | ||||
| DOC_FOOTER = '''\ | ||||
| </body> | ||||
| </html> | ||||
| ''' | ||||
|  | ||||
|  | ||||
| class HtmlFormatter(Formatter): | ||||
|     r""" | ||||
|     Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped | ||||
|     in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass` | ||||
|     option. | ||||
|  | ||||
|     If the `linenos` option is set to ``"table"``, the ``<pre>`` is | ||||
|     additionally wrapped inside a ``<table>`` which has one row and two | ||||
|     cells: one containing the line numbers and one containing the code. | ||||
|     Example: | ||||
|  | ||||
|     .. sourcecode:: html | ||||
|  | ||||
|         <div class="highlight" > | ||||
|         <table><tr> | ||||
|           <td class="linenos" title="click to toggle" | ||||
|             onclick="with (this.firstChild.style) | ||||
|                      { display = (display == '') ? 'none' : '' }"> | ||||
|             <pre>1 | ||||
|             2</pre> | ||||
|           </td> | ||||
|           <td class="code"> | ||||
|             <pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar): | ||||
|               <span class="Ke">pass</span> | ||||
|             </pre> | ||||
|           </td> | ||||
|         </tr></table></div> | ||||
|  | ||||
|     (whitespace added to improve clarity). | ||||
|  | ||||
|     Wrapping can be disabled using the `nowrap` option. | ||||
|  | ||||
|     A list of lines can be specified using the `hl_lines` option to make these | ||||
|     lines highlighted (as of Pygments 0.11). | ||||
|  | ||||
|     With the `full` option, a complete HTML 4 document is output, including | ||||
|     the style definitions inside a ``<style>`` tag, or in a separate file if | ||||
|     the `cssfile` option is given. | ||||
|  | ||||
|     When `tagsfile` is set to the path of a ctags index file, it is used to | ||||
|     generate hyperlinks from names to their definition.  You must enable | ||||
|     `lineanchors` and run ctags with the `-n` option for this to work.  The | ||||
|     `python-ctags` module from PyPI must be installed to use this feature; | ||||
|     otherwise a `RuntimeError` will be raised. | ||||
|  | ||||
|     The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string | ||||
|     containing CSS rules for the CSS classes used by the formatter. The | ||||
|     argument `arg` can be used to specify additional CSS selectors that | ||||
|     are prepended to the classes. A call `fmter.get_style_defs('td .code')` | ||||
|     would result in the following CSS classes: | ||||
|  | ||||
|     .. sourcecode:: css | ||||
|  | ||||
|         td .code .kw { font-weight: bold; color: #00FF00 } | ||||
|         td .code .cm { color: #999999 } | ||||
|         ... | ||||
|  | ||||
|     If you have Pygments 0.6 or higher, you can also pass a list or tuple to the | ||||
|     `get_style_defs()` method to request multiple prefixes for the tokens: | ||||
|  | ||||
|     .. sourcecode:: python | ||||
|  | ||||
|         formatter.get_style_defs(['div.syntax pre', 'pre.syntax']) | ||||
|  | ||||
|     The output would then look like this: | ||||
|  | ||||
|     .. sourcecode:: css | ||||
|  | ||||
|         div.syntax pre .kw, | ||||
|         pre.syntax .kw { font-weight: bold; color: #00FF00 } | ||||
|         div.syntax pre .cm, | ||||
|         pre.syntax .cm { color: #999999 } | ||||
|         ... | ||||
|  | ||||
|     Additional options accepted: | ||||
|  | ||||
|     `nowrap` | ||||
|         If set to ``True``, don't wrap the tokens at all, not even inside a ``<pre>`` | ||||
|         tag. This disables most other options (default: ``False``). | ||||
|  | ||||
|     `full` | ||||
|         Tells the formatter to output a "full" document, i.e. a complete | ||||
|         self-contained document (default: ``False``). | ||||
|  | ||||
|     `title` | ||||
|         If `full` is true, the title that should be used to caption the | ||||
|         document (default: ``''``). | ||||
|  | ||||
|     `style` | ||||
|         The style to use, can be a string or a Style subclass (default: | ||||
|         ``'default'``). This option has no effect if the `cssfile` | ||||
|         and `noclobber_cssfile` options are given and the file specified in | ||||
|         `cssfile` exists. | ||||
|  | ||||
|     `noclasses` | ||||
|         If set to true, token ``<span>`` tags (as well as line number elements) | ||||
|         will not use CSS classes, but inline styles. This is not recommended | ||||
|         for larger pieces of code since it increases output size by quite a bit | ||||
|         (default: ``False``). | ||||
|  | ||||
|     `classprefix` | ||||
|         Since the token types use relatively short class names, they may clash | ||||
|         with some of your own class names. In this case you can use the | ||||
|         `classprefix` option to give a string to prepend to all Pygments-generated | ||||
|         CSS class names for token types. | ||||
|         Note that this option also affects the output of `get_style_defs()`. | ||||
|  | ||||
|     `cssclass` | ||||
|         CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``). | ||||
|         If you set this option, the default selector for `get_style_defs()` | ||||
|         will be this class. | ||||
|  | ||||
|         .. versionadded:: 0.9 | ||||
|            If you select the ``'table'`` line numbers, the wrapping table will | ||||
|            have a CSS class of this string plus ``'table'``, the default is | ||||
|            accordingly ``'highlighttable'``. | ||||
|  | ||||
|     `cssstyles` | ||||
|         Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``). | ||||
|  | ||||
|     `prestyles` | ||||
|         Inline CSS styles for the ``<pre>`` tag (default: ``''``). | ||||
|  | ||||
|         .. versionadded:: 0.11 | ||||
|  | ||||
|     `cssfile` | ||||
|         If the `full` option is true and this option is given, it must be the | ||||
|         name of an external file. If the filename does not include an absolute | ||||
|         path, the file's path will be assumed to be relative to the main output | ||||
|         file's path, if the latter can be found. The stylesheet is then written | ||||
|         to this file instead of the HTML file. | ||||
|  | ||||
|         .. versionadded:: 0.6 | ||||
|  | ||||
|     `noclobber_cssfile` | ||||
|         If `cssfile` is given and the specified file exists, the CSS file | ||||
|         will not be overwritten. This allows the use of the `full` option in | ||||
|         combination with a user-specified CSS file. Default is ``False``. | ||||
|  | ||||
|         .. versionadded:: 1.1 | ||||
|  | ||||
|     `linenos` | ||||
|         If set to ``'table'``, output line numbers as a table with two cells, | ||||
|         one containing the line numbers, the other the whole code.  This is | ||||
|         copy-and-paste-friendly, but may cause alignment problems with some | ||||
|         browsers or fonts.  If set to ``'inline'``, the line numbers will be | ||||
|         integrated in the ``<pre>`` tag that contains the code (that setting | ||||
|         is *new in Pygments 0.8*). | ||||
|  | ||||
|         For compatibility with Pygments 0.7 and earlier, every true value | ||||
|         except ``'inline'`` means the same as ``'table'`` (in particular, this | ||||
|         includes ``True``). | ||||
|  | ||||
|         The default value is ``False``, which means no line numbers at all. | ||||
|  | ||||
|         **Note:** with the default ("table") line number mechanism, the line | ||||
|         numbers and code can have different line heights in Internet Explorer | ||||
|         unless you give the enclosing ``<pre>`` tags an explicit ``line-height`` | ||||
|         CSS property (you get the default line spacing with ``line-height: | ||||
|         125%``). | ||||
|  | ||||
|     `hl_lines` | ||||
|         Specify a list of lines to be highlighted. The line numbers are always | ||||
|         relative to the input (i.e. the first line is line 1) and are | ||||
|         independent of `linenostart`. | ||||
|  | ||||
|         .. versionadded:: 0.11 | ||||
|  | ||||
|     `linenostart` | ||||
|         The line number for the first line (default: ``1``). | ||||
|  | ||||
|     `linenostep` | ||||
|         If set to a number n > 1, only every nth line number is printed. | ||||
|  | ||||
|     `linenospecial` | ||||
|         If set to a number n > 0, every nth line number is given the CSS | ||||
|         class ``"special"`` (default: ``0``). | ||||
|  | ||||
|     `nobackground` | ||||
|         If set to ``True``, the formatter won't output the background color | ||||
|         for the wrapping element (this automatically defaults to ``False`` | ||||
|         when there is no wrapping element [e.g. no argument given to the | ||||
|         `get_style_defs` method]) (default: ``False``). | ||||
|  | ||||
|         .. versionadded:: 0.6 | ||||
|  | ||||
|     `lineseparator` | ||||
|         This string is output between lines of code. It defaults to ``"\n"``, | ||||
|         which is enough to break a line inside ``<pre>`` tags, but you can | ||||
|         e.g. set it to ``"<br>"`` to get HTML line breaks. | ||||
|  | ||||
|         .. versionadded:: 0.7 | ||||
|  | ||||
|     `lineanchors` | ||||
|         If set to a nonempty string, e.g. ``foo``, the formatter will wrap each | ||||
|         output line in an anchor tag with an ``id`` (and `name`) of ``foo-linenumber``. | ||||
|         This allows easy linking to certain lines. | ||||
|  | ||||
|         .. versionadded:: 0.9 | ||||
|  | ||||
|     `linespans` | ||||
|         If set to a nonempty string, e.g. ``foo``, the formatter will wrap each | ||||
|         output line in a span tag with an ``id`` of ``foo-linenumber``. | ||||
|         This allows easy access to lines via javascript. | ||||
|  | ||||
|         .. versionadded:: 1.6 | ||||
|  | ||||
|     `anchorlinenos` | ||||
|         If set to ``True``, will wrap line numbers in ``<a>`` tags. Used in | ||||
|         combination with `linenos` and `lineanchors`. | ||||
|  | ||||
|     `tagsfile` | ||||
|         If set to the path of a ctags file, wrap names in anchor tags that | ||||
|         link to their definitions. `lineanchors` should be used, and the | ||||
|         tags file should specify line numbers (see the `-n` option to ctags). | ||||
|  | ||||
|         .. versionadded:: 1.6 | ||||
|  | ||||
|     `tagurlformat` | ||||
|         A string formatting pattern used to generate links to ctags definitions. | ||||
|         Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`. | ||||
|         Defaults to an empty string, resulting in just `#prefix-number` links. | ||||
|  | ||||
|         .. versionadded:: 1.6 | ||||
|  | ||||
|     `filename` | ||||
|         A string used to generate a filename when rendering ``<pre>`` blocks, | ||||
|         for example if displaying source code. If `linenos` is set to | ||||
|         ``'table'`` then the filename will be rendered in an initial row | ||||
|         containing a single `<th>` which spans both columns. | ||||
|  | ||||
|         .. versionadded:: 2.1 | ||||
|  | ||||
|     `wrapcode` | ||||
|         Wrap the code inside ``<pre>`` blocks using ``<code>``, as recommended | ||||
|         by the HTML5 specification. | ||||
|  | ||||
|         .. versionadded:: 2.4 | ||||
|  | ||||
|     `debug_token_types` | ||||
|         Add ``title`` attributes to all token ``<span>`` tags that show the | ||||
|         name of the token. | ||||
|  | ||||
|         .. versionadded:: 2.10 | ||||
|  | ||||
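|     As a quick usage sketch (the option values here are illustrative, not | ||||
|     defaults, and the imports use the public ``pygments`` package rather | ||||
|     than pip's vendored copy): | ||||
|  | ||||
|     .. sourcecode:: python | ||||
|  | ||||
|         from pygments import highlight | ||||
|         from pygments.lexers import PythonLexer | ||||
|         from pygments.formatters import HtmlFormatter | ||||
|  | ||||
|         formatter = HtmlFormatter(linenos='table', cssclass='src', | ||||
|                                   hl_lines=[2], wrapcode=True) | ||||
|         html = highlight('def foo(bar):\n    pass\n', PythonLexer(), formatter) | ||||
|         css = formatter.get_style_defs('.src')  # rules for the chosen style | ||||
|  | ||||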
|  | ||||
|     **Subclassing the HTML formatter** | ||||
|  | ||||
|     .. versionadded:: 0.7 | ||||
|  | ||||
|     The HTML formatter is now built in a way that allows easy subclassing, thus | ||||
|     customizing the output HTML code. The `format()` method calls | ||||
|     `self._format_lines()` which returns a generator that yields tuples of ``(1, | ||||
|     line)``, where the ``1`` indicates that the ``line`` is a line of the | ||||
|     formatted source code. | ||||
|  | ||||
|     If the `nowrap` option is set, the generator is simply iterated over and the | ||||
|     resulting HTML is output. | ||||
|  | ||||
|     Otherwise, `format()` calls `self.wrap()`, which wraps the generator with | ||||
|     other generators. These may add some HTML code to the one generated by | ||||
|     `_format_lines()`, either by modifying the lines generated by the latter, | ||||
|     then yielding them again with ``(1, line)``, and/or by yielding other HTML | ||||
|     code before or after the lines, with ``(0, html)``. The distinction between | ||||
|     source lines and other code makes it possible to wrap the generator multiple | ||||
|     times. | ||||
|  | ||||
|     The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag. | ||||
|  | ||||
|     A custom `HtmlFormatter` subclass could look like this: | ||||
|  | ||||
|     .. sourcecode:: python | ||||
|  | ||||
|         class CodeHtmlFormatter(HtmlFormatter): | ||||
|  | ||||
|             def wrap(self, source): | ||||
|                 return self._wrap_code(source) | ||||
|  | ||||
|             def _wrap_code(self, source): | ||||
|                 yield 0, '<code>' | ||||
|                 for i, t in source: | ||||
|                     if i == 1: | ||||
|                         # it's a line of formatted code | ||||
|                         t += '<br>' | ||||
|                     yield i, t | ||||
|                 yield 0, '</code>' | ||||
|  | ||||
|     This results in wrapping the formatted lines with a ``<code>`` tag, where the | ||||
|     source lines are broken using ``<br>`` tags. | ||||
|  | ||||
|     After calling `wrap()`, the `format()` method also adds the "line numbers" | ||||
|     and/or "full document" wrappers if the respective options are set. Then, all | ||||
|     HTML yielded by the wrapped generator is output. | ||||
|     """ | ||||
|  | ||||
|     name = 'HTML' | ||||
|     aliases = ['html'] | ||||
|     filenames = ['*.html', '*.htm'] | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Formatter.__init__(self, **options) | ||||
|         self.title = self._decodeifneeded(self.title) | ||||
|         self.nowrap = get_bool_opt(options, 'nowrap', False) | ||||
|         self.noclasses = get_bool_opt(options, 'noclasses', False) | ||||
|         self.classprefix = options.get('classprefix', '') | ||||
|         self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight')) | ||||
|         self.cssstyles = self._decodeifneeded(options.get('cssstyles', '')) | ||||
|         self.prestyles = self._decodeifneeded(options.get('prestyles', '')) | ||||
|         self.cssfile = self._decodeifneeded(options.get('cssfile', '')) | ||||
|         self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False) | ||||
|         self.tagsfile = self._decodeifneeded(options.get('tagsfile', '')) | ||||
|         self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', '')) | ||||
|         self.filename = self._decodeifneeded(options.get('filename', '')) | ||||
|         self.wrapcode = get_bool_opt(options, 'wrapcode', False) | ||||
|         self.span_element_openers = {} | ||||
|         self.debug_token_types = get_bool_opt(options, 'debug_token_types', False) | ||||
|  | ||||
|         if self.tagsfile: | ||||
|             if not ctags: | ||||
|                 raise RuntimeError('The "ctags" package must to be installed ' | ||||
|                                    'to be able to use the "tagsfile" feature.') | ||||
|             self._ctags = ctags.CTags(self.tagsfile) | ||||
|  | ||||
|         linenos = options.get('linenos', False) | ||||
|         if linenos == 'inline': | ||||
|             self.linenos = 2 | ||||
|         elif linenos: | ||||
|             # compatibility with <= 0.7 | ||||
|             self.linenos = 1 | ||||
|         else: | ||||
|             self.linenos = 0 | ||||
|         self.linenostart = abs(get_int_opt(options, 'linenostart', 1)) | ||||
|         self.linenostep = abs(get_int_opt(options, 'linenostep', 1)) | ||||
|         self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0)) | ||||
|         self.nobackground = get_bool_opt(options, 'nobackground', False) | ||||
|         self.lineseparator = options.get('lineseparator', '\n') | ||||
|         self.lineanchors = options.get('lineanchors', '') | ||||
|         self.linespans = options.get('linespans', '') | ||||
|         self.anchorlinenos = get_bool_opt(options, 'anchorlinenos', False) | ||||
|         self.hl_lines = set() | ||||
|         for lineno in get_list_opt(options, 'hl_lines', []): | ||||
|             try: | ||||
|                 self.hl_lines.add(int(lineno)) | ||||
|             except ValueError: | ||||
|                 pass | ||||
|  | ||||
|         self._create_stylesheet() | ||||
|  | ||||
|     def _get_css_class(self, ttype): | ||||
|         """Return the css class of this token type prefixed with | ||||
|         the classprefix option.""" | ||||
|         ttypeclass = _get_ttype_class(ttype) | ||||
|         if ttypeclass: | ||||
|             return self.classprefix + ttypeclass | ||||
|         return '' | ||||
|  | ||||
|     def _get_css_classes(self, ttype): | ||||
|         """Return the CSS classes of this token type prefixed with the classprefix option.""" | ||||
|         cls = self._get_css_class(ttype) | ||||
|         while ttype not in STANDARD_TYPES: | ||||
|             ttype = ttype.parent | ||||
|             cls = self._get_css_class(ttype) + ' ' + cls | ||||
|         return cls or '' | ||||
|  | ||||
|     def _get_css_inline_styles(self, ttype): | ||||
|         """Return the inline CSS styles for this token type.""" | ||||
|         cclass = self.ttype2class.get(ttype) | ||||
|         while cclass is None: | ||||
|             ttype = ttype.parent | ||||
|             cclass = self.ttype2class.get(ttype) | ||||
|         return cclass or '' | ||||
|  | ||||
|     def _create_stylesheet(self): | ||||
|         t2c = self.ttype2class = {Token: ''} | ||||
|         c2s = self.class2style = {} | ||||
|         for ttype, ndef in self.style: | ||||
|             name = self._get_css_class(ttype) | ||||
|             style = '' | ||||
|             if ndef['color']: | ||||
|                 style += 'color: %s; ' % webify(ndef['color']) | ||||
|             if ndef['bold']: | ||||
|                 style += 'font-weight: bold; ' | ||||
|             if ndef['italic']: | ||||
|                 style += 'font-style: italic; ' | ||||
|             if ndef['underline']: | ||||
|                 style += 'text-decoration: underline; ' | ||||
|             if ndef['bgcolor']: | ||||
|                 style += 'background-color: %s; ' % webify(ndef['bgcolor']) | ||||
|             if ndef['border']: | ||||
|                 style += 'border: 1px solid %s; ' % webify(ndef['border']) | ||||
|             if style: | ||||
|                 t2c[ttype] = name | ||||
|                 # save len(ttype) to enable ordering the styles by | ||||
|                 # hierarchy (necessary for CSS cascading rules!) | ||||
|                 c2s[name] = (style[:-2], ttype, len(ttype)) | ||||
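|  | ||||
|     # Sketch of the resulting maps (hedged; the color is illustrative) for a | ||||
|     # style where keywords are bold green: | ||||
|     #   self.ttype2class[Token.Keyword] == 'k' | ||||
|     #   self.class2style['k'] == ('color: #00FF00; font-weight: bold', | ||||
|     #                             Token.Keyword, len(Token.Keyword)) | ||||
|     # The stored len(ttype) lets get_token_style_defs() sort rules from | ||||
|     # general to specific, which the CSS cascade relies on. | ||||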
|  | ||||
|     def get_style_defs(self, arg=None): | ||||
|         """ | ||||
|         Return CSS style definitions for the classes produced by the current | ||||
|         highlighting style. ``arg`` can be a string or list of selectors to | ||||
|         insert before the token type classes. | ||||
|         """ | ||||
|         style_lines = [] | ||||
|  | ||||
|         style_lines.extend(self.get_linenos_style_defs()) | ||||
|         style_lines.extend(self.get_background_style_defs(arg)) | ||||
|         style_lines.extend(self.get_token_style_defs(arg)) | ||||
|  | ||||
|         return '\n'.join(style_lines) | ||||
|  | ||||
|     def get_token_style_defs(self, arg=None): | ||||
|         prefix = self.get_css_prefix(arg) | ||||
|  | ||||
|         styles = [ | ||||
|             (level, ttype, cls, style) | ||||
|             for cls, (style, ttype, level) in self.class2style.items() | ||||
|             if cls and style | ||||
|         ] | ||||
|         styles.sort() | ||||
|  | ||||
|         lines = [ | ||||
|             '%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:]) | ||||
|             for (level, ttype, cls, style) in styles | ||||
|         ] | ||||
|  | ||||
|         return lines | ||||
|  | ||||
|     def get_background_style_defs(self, arg=None): | ||||
|         prefix = self.get_css_prefix(arg) | ||||
|         bg_color = self.style.background_color | ||||
|         hl_color = self.style.highlight_color | ||||
|  | ||||
|         lines = [] | ||||
|  | ||||
|         if arg and not self.nobackground and bg_color is not None: | ||||
|             text_style = '' | ||||
|             if Text in self.ttype2class: | ||||
|                 text_style = ' ' + self.class2style[self.ttype2class[Text]][0] | ||||
|             lines.insert( | ||||
|                 0, '%s{ background: %s;%s }' % ( | ||||
|                     prefix(''), bg_color, text_style | ||||
|                 ) | ||||
|             ) | ||||
|         if hl_color is not None: | ||||
|             lines.insert( | ||||
|                 0, '%s { background-color: %s }' % (prefix('hll'), hl_color) | ||||
|             ) | ||||
|  | ||||
|         return lines | ||||
|  | ||||
|     def get_linenos_style_defs(self): | ||||
|         lines = [ | ||||
|             'pre { %s }' % self._pre_style, | ||||
|             'td.linenos .normal { %s }' % self._linenos_style, | ||||
|             'span.linenos { %s }' % self._linenos_style, | ||||
|             'td.linenos .special { %s }' % self._linenos_special_style, | ||||
|             'span.linenos.special { %s }' % self._linenos_special_style, | ||||
|         ] | ||||
|  | ||||
|         return lines | ||||
|  | ||||
|     def get_css_prefix(self, arg): | ||||
|         if arg is None: | ||||
|             arg = ('cssclass' in self.options and '.'+self.cssclass or '') | ||||
|         if isinstance(arg, str): | ||||
|             args = [arg] | ||||
|         else: | ||||
|             args = list(arg) | ||||
|  | ||||
|         def prefix(cls): | ||||
|             if cls: | ||||
|                 cls = '.' + cls | ||||
|             tmp = [] | ||||
|             for arg in args: | ||||
|                 tmp.append((arg and arg + ' ' or '') + cls) | ||||
|             return ', '.join(tmp) | ||||
|  | ||||
|         return prefix | ||||
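|  | ||||
|     # Illustration (hedged): the returned closure prepends each selector in | ||||
|     # ``arg`` to a class name, joining multiple selectors with commas: | ||||
|     #   prefix = self.get_css_prefix('div.highlight') | ||||
|     #   prefix('hll') -> 'div.highlight .hll' | ||||
|     #   prefix('kw')  -> 'div.highlight .kw' | ||||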
|  | ||||
|     @property | ||||
|     def _pre_style(self): | ||||
|         return 'line-height: 125%;' | ||||
|  | ||||
|     @property | ||||
|     def _linenos_style(self): | ||||
|         return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % ( | ||||
|             self.style.line_number_color, | ||||
|             self.style.line_number_background_color | ||||
|         ) | ||||
|  | ||||
|     @property | ||||
|     def _linenos_special_style(self): | ||||
|         return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % ( | ||||
|             self.style.line_number_special_color, | ||||
|             self.style.line_number_special_background_color | ||||
|         ) | ||||
|  | ||||
|     def _decodeifneeded(self, value): | ||||
|         if isinstance(value, bytes): | ||||
|             if self.encoding: | ||||
|                 return value.decode(self.encoding) | ||||
|             return value.decode() | ||||
|         return value | ||||
|  | ||||
|     def _wrap_full(self, inner, outfile): | ||||
|         if self.cssfile: | ||||
|             if os.path.isabs(self.cssfile): | ||||
|                 # it's an absolute filename | ||||
|                 cssfilename = self.cssfile | ||||
|             else: | ||||
|                 try: | ||||
|                     filename = outfile.name | ||||
|                     if not filename or filename[0] == '<': | ||||
|                         # pseudo files, e.g. name == '<fdopen>' | ||||
|                         raise AttributeError | ||||
|                     cssfilename = os.path.join(os.path.dirname(filename), | ||||
|                                                self.cssfile) | ||||
|                 except AttributeError: | ||||
|                     print('Note: Cannot determine output file name, ' | ||||
|                           'using current directory as base for the CSS file name', | ||||
|                           file=sys.stderr) | ||||
|                     cssfilename = self.cssfile | ||||
|             # write CSS file only if noclobber_cssfile isn't given as an option. | ||||
|             try: | ||||
|                 if not os.path.exists(cssfilename) or not self.noclobber_cssfile: | ||||
|                     with open(cssfilename, "w") as cf: | ||||
|                         cf.write(CSSFILE_TEMPLATE % | ||||
|                                  {'styledefs': self.get_style_defs('body')}) | ||||
|             except OSError as err: | ||||
|                 err.strerror = 'Error writing CSS file: ' + err.strerror | ||||
|                 raise | ||||
|  | ||||
|             yield 0, (DOC_HEADER_EXTERNALCSS % | ||||
|                       dict(title=self.title, | ||||
|                            cssfile=self.cssfile, | ||||
|                            encoding=self.encoding)) | ||||
|         else: | ||||
|             yield 0, (DOC_HEADER % | ||||
|                       dict(title=self.title, | ||||
|                            styledefs=self.get_style_defs('body'), | ||||
|                            encoding=self.encoding)) | ||||
|  | ||||
|         yield from inner | ||||
|         yield 0, DOC_FOOTER | ||||
|  | ||||
|     def _wrap_tablelinenos(self, inner): | ||||
|         dummyoutfile = StringIO() | ||||
|         lncount = 0 | ||||
|         for t, line in inner: | ||||
|             if t: | ||||
|                 lncount += 1 | ||||
|             dummyoutfile.write(line) | ||||
|  | ||||
|         fl = self.linenostart | ||||
|         mw = len(str(lncount + fl - 1)) | ||||
|         sp = self.linenospecial | ||||
|         st = self.linenostep | ||||
|         anchor_name = self.lineanchors or self.linespans | ||||
|         aln = self.anchorlinenos | ||||
|         nocls = self.noclasses | ||||
|  | ||||
|         lines = [] | ||||
|  | ||||
|         for i in range(fl, fl+lncount): | ||||
|             print_line = i % st == 0 | ||||
|             special_line = sp and i % sp == 0 | ||||
|  | ||||
|             if print_line: | ||||
|                 line = '%*d' % (mw, i) | ||||
|                 if aln: | ||||
|                     line = '<a href="#%s-%d">%s</a>' % (anchor_name, i, line) | ||||
|             else: | ||||
|                 line = ' ' * mw | ||||
|  | ||||
|             if nocls: | ||||
|                 if special_line: | ||||
|                     style = ' style="%s"' % self._linenos_special_style | ||||
|                 else: | ||||
|                     style = ' style="%s"' % self._linenos_style | ||||
|             else: | ||||
|                 if special_line: | ||||
|                     style = ' class="special"' | ||||
|                 else: | ||||
|                     style = ' class="normal"' | ||||
|  | ||||
|             if style: | ||||
|                 line = '<span%s>%s</span>' % (style, line) | ||||
|  | ||||
|             lines.append(line) | ||||
|  | ||||
|         ls = '\n'.join(lines) | ||||
|  | ||||
|         # If a filename was specified, we can't put it into the code table as it | ||||
|         # would misalign the line numbers. Hence we emit a separate row for it. | ||||
|         filename_tr = "" | ||||
|         if self.filename: | ||||
|             filename_tr = ( | ||||
|                 '<tr><th colspan="2" class="filename">' | ||||
|                 '<span class="filename">' + self.filename + '</span>' | ||||
|                 '</th></tr>') | ||||
|  | ||||
|         # in case you wonder about the seemingly redundant <div> here: since the | ||||
|         # content in the other cell also is wrapped in a div, some browsers in | ||||
|         # some configurations seem to mess up the formatting... | ||||
|         yield 0, (f'<table class="{self.cssclass}table">' + filename_tr + | ||||
|             '<tr><td class="linenos"><div class="linenodiv"><pre>' + | ||||
|             ls + '</pre></div></td><td class="code">') | ||||
|         yield 0, '<div>' | ||||
|         yield 0, dummyoutfile.getvalue() | ||||
|         yield 0, '</div>' | ||||
|         yield 0, '</td></tr></table>' | ||||
|          | ||||
|  | ||||
|     def _wrap_inlinelinenos(self, inner): | ||||
|         # need a list of lines since we need the width of a single number :( | ||||
|         inner_lines = list(inner) | ||||
|         sp = self.linenospecial | ||||
|         st = self.linenostep | ||||
|         num = self.linenostart | ||||
|         mw = len(str(len(inner_lines) + num - 1)) | ||||
|         anchor_name = self.lineanchors or self.linespans | ||||
|         aln = self.anchorlinenos | ||||
|         nocls = self.noclasses | ||||
|  | ||||
|         for _, inner_line in inner_lines: | ||||
|             print_line = num % st == 0 | ||||
|             special_line = sp and num % sp == 0 | ||||
|  | ||||
|             if print_line: | ||||
|                 line = '%*d' % (mw, num) | ||||
|             else: | ||||
|                 line = ' ' * mw | ||||
|  | ||||
|             if nocls: | ||||
|                 if special_line: | ||||
|                     style = ' style="%s"' % self._linenos_special_style | ||||
|                 else: | ||||
|                     style = ' style="%s"' % self._linenos_style | ||||
|             else: | ||||
|                 if special_line: | ||||
|                     style = ' class="linenos special"' | ||||
|                 else: | ||||
|                     style = ' class="linenos"' | ||||
|  | ||||
|             if style: | ||||
|                 linenos = '<span%s>%s</span>' % (style, line) | ||||
|             else: | ||||
|                 linenos = line | ||||
|  | ||||
|             if aln: | ||||
|                 yield 1, ('<a href="#%s-%d">%s</a>' % (anchor_name, num, linenos) + | ||||
|                           inner_line) | ||||
|             else: | ||||
|                 yield 1, linenos + inner_line | ||||
|             num += 1 | ||||
|  | ||||
|     def _wrap_lineanchors(self, inner): | ||||
|         s = self.lineanchors | ||||
|         # subtract 1 since we have to increment i *before* yielding | ||||
|         i = self.linenostart - 1 | ||||
|         for t, line in inner: | ||||
|             if t: | ||||
|                 i += 1 | ||||
|                 href = "" if self.linenos else ' href="#%s-%d"' % (s, i) | ||||
|                 yield 1, '<a id="%s-%d" name="%s-%d"%s></a>' % (s, i, s, i, href) + line | ||||
|             else: | ||||
|                 yield 0, line | ||||
|  | ||||
|     def _wrap_linespans(self, inner): | ||||
|         s = self.linespans | ||||
|         i = self.linenostart - 1 | ||||
|         for t, line in inner: | ||||
|             if t: | ||||
|                 i += 1 | ||||
|                 yield 1, '<span id="%s-%d">%s</span>' % (s, i, line) | ||||
|             else: | ||||
|                 yield 0, line | ||||
|  | ||||
|     def _wrap_div(self, inner): | ||||
|         style = [] | ||||
|         if (self.noclasses and not self.nobackground and | ||||
|                 self.style.background_color is not None): | ||||
|             style.append('background: %s' % (self.style.background_color,)) | ||||
|         if self.cssstyles: | ||||
|             style.append(self.cssstyles) | ||||
|         style = '; '.join(style) | ||||
|  | ||||
|         yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass) + | ||||
|                   (style and (' style="%s"' % style)) + '>') | ||||
|         yield from inner | ||||
|         yield 0, '</div>\n' | ||||
|  | ||||
|     def _wrap_pre(self, inner): | ||||
|         style = [] | ||||
|         if self.prestyles: | ||||
|             style.append(self.prestyles) | ||||
|         if self.noclasses: | ||||
|             style.append(self._pre_style) | ||||
|         style = '; '.join(style) | ||||
|  | ||||
|         if self.filename and self.linenos != 1: | ||||
|             yield 0, ('<span class="filename">' + self.filename + '</span>') | ||||
|  | ||||
|         # the empty span here is to keep leading empty lines from being | ||||
|         # ignored by HTML parsers | ||||
|         yield 0, ('<pre' + (style and ' style="%s"' % style) + '><span></span>') | ||||
|         yield from inner | ||||
|         yield 0, '</pre>' | ||||
|  | ||||
|     def _wrap_code(self, inner): | ||||
|         yield 0, '<code>' | ||||
|         yield from inner | ||||
|         yield 0, '</code>' | ||||
|  | ||||
|     @functools.lru_cache(maxsize=100) | ||||
|     def _translate_parts(self, value): | ||||
|         """HTML-escape a value and split it by newlines.""" | ||||
|         return value.translate(_escape_html_table).split('\n') | ||||
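|  | ||||
|     # Illustration (hedged): values are HTML-escaped, then split on newlines: | ||||
|     #   self._translate_parts('if x < 1:\n    pass') | ||||
|     #   -> ['if x &lt; 1:', '    pass'] | ||||
|     # The lru_cache keys on (self, value), so recurring token strings such | ||||
|     # as keywords and punctuation are escaped only once per formatter. | ||||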
|  | ||||
|     def _format_lines(self, tokensource): | ||||
|         """ | ||||
|         Just format the tokens, without any wrapping tags. | ||||
|         Yield individual lines. | ||||
|         """ | ||||
|         nocls = self.noclasses | ||||
|         lsep = self.lineseparator | ||||
|         tagsfile = self.tagsfile | ||||
|  | ||||
|         lspan = '' | ||||
|         line = [] | ||||
|         for ttype, value in tokensource: | ||||
|             try: | ||||
|                 cspan = self.span_element_openers[ttype] | ||||
|             except KeyError: | ||||
|                 title = ' title="%s"' % '.'.join(ttype) if self.debug_token_types else '' | ||||
|                 if nocls: | ||||
|                     css_style = self._get_css_inline_styles(ttype) | ||||
|                     if css_style: | ||||
|                         css_style = self.class2style[css_style][0] | ||||
|                         cspan = '<span style="%s"%s>' % (css_style, title) | ||||
|                     else: | ||||
|                         cspan = '' | ||||
|                 else: | ||||
|                     css_class = self._get_css_classes(ttype) | ||||
|                     if css_class: | ||||
|                         cspan = '<span class="%s"%s>' % (css_class, title) | ||||
|                     else: | ||||
|                         cspan = '' | ||||
|                 self.span_element_openers[ttype] = cspan | ||||
|  | ||||
|             parts = self._translate_parts(value) | ||||
|  | ||||
|             if tagsfile and ttype in Token.Name: | ||||
|                 filename, linenumber = self._lookup_ctag(value) | ||||
|                 if linenumber: | ||||
|                     base, filename = os.path.split(filename) | ||||
|                     if base: | ||||
|                         base += '/' | ||||
|                     filename, extension = os.path.splitext(filename) | ||||
|                     url = self.tagurlformat % {'path': base, 'fname': filename, | ||||
|                                                'fext': extension} | ||||
|                     parts[0] = "<a href=\"%s#%s-%d\">%s" % \ | ||||
|                         (url, self.lineanchors, linenumber, parts[0]) | ||||
|                     parts[-1] = parts[-1] + "</a>" | ||||
|  | ||||
|             # for all but the last line | ||||
|             for part in parts[:-1]: | ||||
|                 if line: | ||||
|                     if lspan != cspan: | ||||
|                         line.extend(((lspan and '</span>'), cspan, part, | ||||
|                                      (cspan and '</span>'), lsep)) | ||||
|                     else:  # both are the same | ||||
|                         line.extend((part, (lspan and '</span>'), lsep)) | ||||
|                     yield 1, ''.join(line) | ||||
|                     line = [] | ||||
|                 elif part: | ||||
|                     yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep)) | ||||
|                 else: | ||||
|                     yield 1, lsep | ||||
|             # for the last line | ||||
|             if line and parts[-1]: | ||||
|                 if lspan != cspan: | ||||
|                     line.extend(((lspan and '</span>'), cspan, parts[-1])) | ||||
|                     lspan = cspan | ||||
|                 else: | ||||
|                     line.append(parts[-1]) | ||||
|             elif parts[-1]: | ||||
|                 line = [cspan, parts[-1]] | ||||
|                 lspan = cspan | ||||
|             # else we neither have to open a new span nor set lspan | ||||
|  | ||||
|         if line: | ||||
|             line.extend(((lspan and '</span>'), lsep)) | ||||
|             yield 1, ''.join(line) | ||||
|  | ||||
|     def _lookup_ctag(self, token): | ||||
|         entry = ctags.TagEntry() | ||||
|         if self._ctags.find(entry, token.encode(), 0): | ||||
|             return entry['file'], entry['lineNumber'] | ||||
|         else: | ||||
|             return None, None | ||||
|  | ||||
|     def _highlight_lines(self, tokensource): | ||||
|         """ | ||||
|         Highlight the lines specified in the `hl_lines` option by | ||||
|         post-processing the token stream coming from `_format_lines`. | ||||
|         """ | ||||
|         hls = self.hl_lines | ||||
|  | ||||
|         for i, (t, value) in enumerate(tokensource): | ||||
|             if t != 1: | ||||
|                 yield t, value | ||||
|             if i + 1 in hls:  # i + 1 because Python indexes start at 0 | ||||
|                 if self.noclasses: | ||||
|                     style = '' | ||||
|                     if self.style.highlight_color is not None: | ||||
|                         style = (' style="background-color: %s"' % | ||||
|                                  (self.style.highlight_color,)) | ||||
|                     yield 1, '<span%s>%s</span>' % (style, value) | ||||
|                 else: | ||||
|                     yield 1, '<span class="hll">%s</span>' % value | ||||
|             else: | ||||
|                 yield 1, value | ||||
|  | ||||
|     def wrap(self, source): | ||||
|         """ | ||||
|         Wrap the ``source``, which is a generator yielding | ||||
|         individual lines, in custom generators. See docstring | ||||
|         for `format`. Can be overridden. | ||||
|         """ | ||||
|  | ||||
|         output = source | ||||
|         if self.wrapcode: | ||||
|             output = self._wrap_code(output) | ||||
|          | ||||
|         output = self._wrap_pre(output) | ||||
|      | ||||
|         return output | ||||
|  | ||||
|     def format_unencoded(self, tokensource, outfile): | ||||
|         """ | ||||
|         The formatting process uses several nested generators; which of | ||||
|         them are used is determined by the user's options. | ||||
|  | ||||
|         Each generator should take at least one argument, ``inner``, | ||||
|         and wrap the pieces of text generated by this. | ||||
|  | ||||
|         Always yield 2-tuples: (code, text). If "code" is 1, the text | ||||
|         is part of the original tokensource being highlighted; if it's | ||||
|         0, the text is some piece of wrapping. This makes it possible to | ||||
|         use several different wrappers that process the original source | ||||
|         linewise, e.g. line number generators. | ||||
|         """ | ||||
|         source = self._format_lines(tokensource) | ||||
|  | ||||
|         # As a special case, we wrap line numbers before line highlighting | ||||
|         # so the line numbers get wrapped in the highlighting tag. | ||||
|         if not self.nowrap and self.linenos == 2: | ||||
|             source = self._wrap_inlinelinenos(source) | ||||
|  | ||||
|         if self.hl_lines: | ||||
|             source = self._highlight_lines(source) | ||||
|  | ||||
|         if not self.nowrap: | ||||
|             if self.lineanchors: | ||||
|                 source = self._wrap_lineanchors(source) | ||||
|             if self.linespans: | ||||
|                 source = self._wrap_linespans(source) | ||||
|             source = self.wrap(source) | ||||
|             if self.linenos == 1: | ||||
|                 source = self._wrap_tablelinenos(source) | ||||
|             source = self._wrap_div(source) | ||||
|             if self.full: | ||||
|                 source = self._wrap_full(source, outfile) | ||||
|  | ||||
|         for t, piece in source: | ||||
|             outfile.write(piece) | ||||
| @ -0,0 +1,645 @@ | ||||
| """ | ||||
|     pygments.formatters.img | ||||
|     ~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Formatter for Pixmap output. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| import os | ||||
| import sys | ||||
|  | ||||
| from pip._vendor.pygments.formatter import Formatter | ||||
| from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \ | ||||
|     get_choice_opt | ||||
|  | ||||
| import subprocess | ||||
|  | ||||
| # Import this carefully | ||||
| try: | ||||
|     from PIL import Image, ImageDraw, ImageFont | ||||
|     pil_available = True | ||||
| except ImportError: | ||||
|     pil_available = False | ||||
|  | ||||
| try: | ||||
|     import _winreg | ||||
| except ImportError: | ||||
|     try: | ||||
|         import winreg as _winreg | ||||
|     except ImportError: | ||||
|         _winreg = None | ||||
|  | ||||
| __all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter', | ||||
|            'BmpImageFormatter'] | ||||
|  | ||||
|  | ||||
| # For some unknown reason every font calls it something different | ||||
| STYLES = { | ||||
|     'NORMAL':     ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'], | ||||
|     'ITALIC':     ['Oblique', 'Italic'], | ||||
|     'BOLD':       ['Bold'], | ||||
|     'BOLDITALIC': ['Bold Oblique', 'Bold Italic'], | ||||
| } | ||||
|  | ||||
| # A sane default for modern systems | ||||
| DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono' | ||||
| DEFAULT_FONT_NAME_WIN = 'Courier New' | ||||
| DEFAULT_FONT_NAME_MAC = 'Menlo' | ||||
|  | ||||
|  | ||||
| class PilNotAvailable(ImportError): | ||||
|     """When Python imaging library is not available""" | ||||
|  | ||||
|  | ||||
| class FontNotFound(Exception): | ||||
|     """When there are no usable fonts specified""" | ||||
|  | ||||
|  | ||||
| class FontManager: | ||||
|     """ | ||||
|     Manages a set of fonts: normal, italic, bold, etc. | ||||
|     """ | ||||
|  | ||||
|     def __init__(self, font_name, font_size=14): | ||||
|         self.font_name = font_name | ||||
|         self.font_size = font_size | ||||
|         self.fonts = {} | ||||
|         self.encoding = None | ||||
|         if sys.platform.startswith('win'): | ||||
|             if not font_name: | ||||
|                 self.font_name = DEFAULT_FONT_NAME_WIN | ||||
|             self._create_win() | ||||
|         elif sys.platform.startswith('darwin'): | ||||
|             if not font_name: | ||||
|                 self.font_name = DEFAULT_FONT_NAME_MAC | ||||
|             self._create_mac() | ||||
|         else: | ||||
|             if not font_name: | ||||
|                 self.font_name = DEFAULT_FONT_NAME_NIX | ||||
|             self._create_nix() | ||||
|  | ||||
|     def _get_nix_font_path(self, name, style): | ||||
|         proc = subprocess.Popen(['fc-list', "%s:style=%s" % (name, style), 'file'], | ||||
|                                 stdout=subprocess.PIPE, stderr=None) | ||||
|         stdout, _ = proc.communicate() | ||||
|         if proc.returncode == 0: | ||||
|             lines = stdout.splitlines() | ||||
|             for line in lines: | ||||
|                 if line.startswith(b'Fontconfig warning:'): | ||||
|                     continue | ||||
|                 path = line.decode().strip().strip(':') | ||||
|                 if path: | ||||
|                     return path | ||||
|             return None | ||||
|  | ||||
|     def _create_nix(self): | ||||
|         for name in STYLES['NORMAL']: | ||||
|             path = self._get_nix_font_path(self.font_name, name) | ||||
|             if path is not None: | ||||
|                 self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size) | ||||
|                 break | ||||
|         else: | ||||
|             raise FontNotFound('No usable fonts named: "%s"' % | ||||
|                                self.font_name) | ||||
|         for style in ('ITALIC', 'BOLD', 'BOLDITALIC'): | ||||
|             for stylename in STYLES[style]: | ||||
|                 path = self._get_nix_font_path(self.font_name, stylename) | ||||
|                 if path is not None: | ||||
|                     self.fonts[style] = ImageFont.truetype(path, self.font_size) | ||||
|                     break | ||||
|             else: | ||||
|                 if style == 'BOLDITALIC': | ||||
|                     self.fonts[style] = self.fonts['BOLD'] | ||||
|                 else: | ||||
|                     self.fonts[style] = self.fonts['NORMAL'] | ||||
|  | ||||
|     def _get_mac_font_path(self, font_map, name, style): | ||||
|         return font_map.get((name + ' ' + style).strip().lower()) | ||||
|  | ||||
|     def _create_mac(self): | ||||
|         font_map = {} | ||||
|         for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'), | ||||
|                          '/Library/Fonts/', '/System/Library/Fonts/'): | ||||
|             font_map.update( | ||||
|                 (os.path.splitext(f)[0].lower(), os.path.join(font_dir, f)) | ||||
|                 for f in os.listdir(font_dir) | ||||
|                 if f.lower().endswith(('ttf', 'ttc'))) | ||||
|  | ||||
|         for name in STYLES['NORMAL']: | ||||
|             path = self._get_mac_font_path(font_map, self.font_name, name) | ||||
|             if path is not None: | ||||
|                 self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size) | ||||
|                 break | ||||
|         else: | ||||
|             raise FontNotFound('No usable fonts named: "%s"' % | ||||
|                                self.font_name) | ||||
|         for style in ('ITALIC', 'BOLD', 'BOLDITALIC'): | ||||
|             for stylename in STYLES[style]: | ||||
|                 path = self._get_mac_font_path(font_map, self.font_name, stylename) | ||||
|                 if path is not None: | ||||
|                     self.fonts[style] = ImageFont.truetype(path, self.font_size) | ||||
|                     break | ||||
|             else: | ||||
|                 if style == 'BOLDITALIC': | ||||
|                     self.fonts[style] = self.fonts['BOLD'] | ||||
|                 else: | ||||
|                     self.fonts[style] = self.fonts['NORMAL'] | ||||
|  | ||||
|     def _lookup_win(self, key, basename, styles, fail=False): | ||||
|         for suffix in ('', ' (TrueType)'): | ||||
|             for style in styles: | ||||
|                 try: | ||||
|                     valname = '%s%s%s' % (basename, style and ' '+style, suffix) | ||||
|                     val, _ = _winreg.QueryValueEx(key, valname) | ||||
|                     return val | ||||
|                 except OSError: | ||||
|                     continue | ||||
|         else: | ||||
|             if fail: | ||||
|                 raise FontNotFound('Font %s (%s) not found in registry' % | ||||
|                                    (basename, styles[0])) | ||||
|             return None | ||||
|  | ||||
|     def _create_win(self): | ||||
|         lookuperror = None | ||||
|         keynames = [ (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'), | ||||
|                      (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Fonts'), | ||||
|                      (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'), | ||||
|                      (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows\CurrentVersion\Fonts') ] | ||||
|         for keyname in keynames: | ||||
|             try: | ||||
|                 key = _winreg.OpenKey(*keyname) | ||||
|                 try: | ||||
|                     path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True) | ||||
|                     self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size) | ||||
|                     for style in ('ITALIC', 'BOLD', 'BOLDITALIC'): | ||||
|                         path = self._lookup_win(key, self.font_name, STYLES[style]) | ||||
|                         if path: | ||||
|                             self.fonts[style] = ImageFont.truetype(path, self.font_size) | ||||
|                         else: | ||||
|                             if style == 'BOLDITALIC': | ||||
|                                 self.fonts[style] = self.fonts['BOLD'] | ||||
|                             else: | ||||
|                                 self.fonts[style] = self.fonts['NORMAL'] | ||||
|                     return | ||||
|                 except FontNotFound as err: | ||||
|                     lookuperror = err | ||||
|                 finally: | ||||
|                     _winreg.CloseKey(key) | ||||
|             except OSError: | ||||
|                 pass | ||||
|         else: | ||||
|             # If we get here, we checked all registry keys and had no luck | ||||
|             # We can be in one of two situations now: | ||||
|             # * All key lookups failed. In this case lookuperror is None and we | ||||
|             #   will raise a generic error | ||||
|             # * At least one lookup failed with a FontNotFound error. In this | ||||
|             #   case, we will raise that as a more specific error | ||||
|             if lookuperror: | ||||
|                 raise lookuperror | ||||
|             raise FontNotFound('Can\'t open Windows font registry key') | ||||
|  | ||||
|     def get_char_size(self): | ||||
|         """ | ||||
|         Get the character size. | ||||
|         """ | ||||
|         return self.get_text_size('M') | ||||
|  | ||||
|     def get_text_size(self, text): | ||||
|         """ | ||||
|         Get the text size (width, height). | ||||
|         """ | ||||
|         font = self.fonts['NORMAL'] | ||||
|         if hasattr(font, 'getbbox'):  # Pillow >= 9.2.0 | ||||
|             return font.getbbox(text)[2:4] | ||||
|         else: | ||||
|             return font.getsize(text) | ||||
|  | ||||
|     def get_font(self, bold, oblique): | ||||
|         """ | ||||
|         Get the font based on bold and italic flags. | ||||
|         """ | ||||
|         if bold and oblique: | ||||
|             return self.fonts['BOLDITALIC'] | ||||
|         elif bold: | ||||
|             return self.fonts['BOLD'] | ||||
|         elif oblique: | ||||
|             return self.fonts['ITALIC'] | ||||
|         else: | ||||
|             return self.fonts['NORMAL'] | ||||
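|  | ||||
|     # Usage sketch (hedged; assumes the named font is actually installed | ||||
|     # and discoverable on this platform): | ||||
|     #   fm = FontManager('DejaVu Sans Mono', font_size=14) | ||||
|     #   fm.get_char_size()                     # (width, height) of 'M' | ||||
|     #   fm.get_font(bold=True, oblique=False)  # -> self.fonts['BOLD'] | ||||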
|  | ||||
|  | ||||
| class ImageFormatter(Formatter): | ||||
|     """ | ||||
|     Create a PNG image from source code. This uses the Python Imaging Library to | ||||
|     generate a pixmap from the source code. | ||||
|  | ||||
|     .. versionadded:: 0.10 | ||||
|  | ||||
|     Additional options accepted: | ||||
|  | ||||
|     `image_format` | ||||
|         An image format to output that is recognised by PIL; these include: | ||||
|  | ||||
|         * "PNG" (default) | ||||
|         * "JPEG" | ||||
|         * "BMP" | ||||
|         * "GIF" | ||||
|  | ||||
|     `line_pad` | ||||
|         The extra spacing (in pixels) between each line of text. | ||||
|  | ||||
|         Default: 2 | ||||
|  | ||||
|     `font_name` | ||||
|         The font name to be used as the base font, from which others, such | ||||
|         as bold and italic fonts, will be generated.  This should be a | ||||
|         monospace font to look sane. | ||||
|  | ||||
|         Default: "Courier New" on Windows, "Menlo" on Mac OS, and | ||||
|                  "DejaVu Sans Mono" on \\*nix | ||||
|  | ||||
|     `font_size` | ||||
|         The font size in points to be used. | ||||
|  | ||||
|         Default: 14 | ||||
|  | ||||
|     `image_pad` | ||||
|         The padding, in pixels to be used at each edge of the resulting image. | ||||
|  | ||||
|         Default: 10 | ||||
|  | ||||
|     `line_numbers` | ||||
|         Whether line numbers should be shown: True/False | ||||
|  | ||||
|         Default: True | ||||
|  | ||||
|     `line_number_start` | ||||
|         The line number of the first line. | ||||
|  | ||||
|         Default: 1 | ||||
|  | ||||
|     `line_number_step` | ||||
|         The step used when printing line numbers. | ||||
|  | ||||
|         Default: 1 | ||||
|  | ||||
|     `line_number_bg` | ||||
|         The background colour (in "#123456" format) of the line number bar, or | ||||
|         None to use the style background color. | ||||
|  | ||||
|         Default: "#eed" | ||||
|  | ||||
|     `line_number_fg` | ||||
|         The text color of the line numbers (in "#123456"-like format). | ||||
|  | ||||
|         Default: "#886" | ||||
|  | ||||
|     `line_number_chars` | ||||
|         The number of columns of line numbers allowable in the line number | ||||
|         margin. | ||||
|  | ||||
|         Default: 2 | ||||
|  | ||||
|     `line_number_bold` | ||||
|         Whether line numbers will be bold: True/False | ||||
|  | ||||
|         Default: False | ||||
|  | ||||
|     `line_number_italic` | ||||
|         Whether line numbers will be italicized: True/False | ||||
|  | ||||
|         Default: False | ||||
|  | ||||
|     `line_number_separator` | ||||
|         Whether a line will be drawn between the line number area and the | ||||
|         source code area: True/False | ||||
|  | ||||
|         Default: True | ||||
|  | ||||
|     `line_number_pad` | ||||
|         The horizontal padding (in pixels) between the line number margin and | ||||
|         the source code area. | ||||
|  | ||||
|         Default: 6 | ||||
|  | ||||
|     `hl_lines` | ||||
|         Specify a list of lines to be highlighted. | ||||
|  | ||||
|         .. versionadded:: 1.2 | ||||
|  | ||||
|         Default: empty list | ||||
|  | ||||
|     `hl_color` | ||||
|         Specify the color for highlighting lines. | ||||
|  | ||||
|         .. versionadded:: 1.2 | ||||
|  | ||||
|         Default: highlight color of the selected style | ||||
|     """ | ||||
|  | ||||
|     # Required by the pygments mapper | ||||
|     name = 'img' | ||||
|     aliases = ['img', 'IMG', 'png'] | ||||
|     filenames = ['*.png'] | ||||
|  | ||||
|     unicodeoutput = False | ||||
|  | ||||
|     default_image_format = 'png' | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         """ | ||||
|         See the class docstring for explanation of options. | ||||
|         """ | ||||
|         if not pil_available: | ||||
|             raise PilNotAvailable( | ||||
|                 'Python Imaging Library is required for this formatter') | ||||
|         Formatter.__init__(self, **options) | ||||
|         self.encoding = 'latin1'  # let pygments.format() do the right thing | ||||
|         # Read the style | ||||
|         self.styles = dict(self.style) | ||||
|         if self.style.background_color is None: | ||||
|             self.background_color = '#fff' | ||||
|         else: | ||||
|             self.background_color = self.style.background_color | ||||
|         # Image options | ||||
|         self.image_format = get_choice_opt( | ||||
|             options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'], | ||||
|             self.default_image_format, normcase=True) | ||||
|         self.image_pad = get_int_opt(options, 'image_pad', 10) | ||||
|         self.line_pad = get_int_opt(options, 'line_pad', 2) | ||||
|         # The fonts | ||||
|         fontsize = get_int_opt(options, 'font_size', 14) | ||||
|         self.fonts = FontManager(options.get('font_name', ''), fontsize) | ||||
|         self.fontw, self.fonth = self.fonts.get_char_size() | ||||
|         # Line number options | ||||
|         self.line_number_fg = options.get('line_number_fg', '#886') | ||||
|         self.line_number_bg = options.get('line_number_bg', '#eed') | ||||
|         self.line_number_chars = get_int_opt(options, | ||||
|                                              'line_number_chars', 2) | ||||
|         self.line_number_bold = get_bool_opt(options, | ||||
|                                              'line_number_bold', False) | ||||
|         self.line_number_italic = get_bool_opt(options, | ||||
|                                                'line_number_italic', False) | ||||
|         self.line_number_pad = get_int_opt(options, 'line_number_pad', 6) | ||||
|         self.line_numbers = get_bool_opt(options, 'line_numbers', True) | ||||
|         self.line_number_separator = get_bool_opt(options, | ||||
|                                                   'line_number_separator', True) | ||||
|         self.line_number_step = get_int_opt(options, 'line_number_step', 1) | ||||
|         self.line_number_start = get_int_opt(options, 'line_number_start', 1) | ||||
|         if self.line_numbers: | ||||
|             self.line_number_width = (self.fontw * self.line_number_chars + | ||||
|                                       self.line_number_pad * 2) | ||||
|         else: | ||||
|             self.line_number_width = 0 | ||||
|         self.hl_lines = [] | ||||
|         hl_lines_str = get_list_opt(options, 'hl_lines', []) | ||||
|         for line in hl_lines_str: | ||||
|             try: | ||||
|                 self.hl_lines.append(int(line)) | ||||
|             except ValueError: | ||||
|                 pass | ||||
|         self.hl_color = options.get('hl_color', | ||||
|                                     self.style.highlight_color) or '#f90' | ||||
|         self.drawables = [] | ||||
|  | ||||
|     def get_style_defs(self, arg=''): | ||||
|         raise NotImplementedError('The -S option is meaningless for the image ' | ||||
|                                   'formatter. Use -O style=<stylename> instead.') | ||||
|  | ||||
|     def _get_line_height(self): | ||||
|         """ | ||||
|         Get the height of a line. | ||||
|         """ | ||||
|         return self.fonth + self.line_pad | ||||
|  | ||||
|     def _get_line_y(self, lineno): | ||||
|         """ | ||||
|         Get the Y coordinate of a line number. | ||||
|         """ | ||||
|         return lineno * self._get_line_height() + self.image_pad | ||||
|  | ||||
|     def _get_char_width(self): | ||||
|         """ | ||||
|         Get the width of a character. | ||||
|         """ | ||||
|         return self.fontw | ||||
|  | ||||
|     def _get_char_x(self, linelength): | ||||
|         """ | ||||
|         Get the X coordinate of a character position. | ||||
|         """ | ||||
|         return linelength + self.image_pad + self.line_number_width | ||||
|  | ||||
|     def _get_text_pos(self, linelength, lineno): | ||||
|         """ | ||||
|         Get the actual position for a character and line position. | ||||
|         """ | ||||
|         return self._get_char_x(linelength), self._get_line_y(lineno) | ||||
|  | ||||
|     def _get_linenumber_pos(self, lineno): | ||||
|         """ | ||||
|         Get the actual position for the start of a line number. | ||||
|         """ | ||||
|         return (self.image_pad, self._get_line_y(lineno)) | ||||
|  | ||||
|     def _get_text_color(self, style): | ||||
|         """ | ||||
|         Get the correct color for the token from the style. | ||||
|         """ | ||||
|         if style['color'] is not None: | ||||
|             fill = '#' + style['color'] | ||||
|         else: | ||||
|             fill = '#000' | ||||
|         return fill | ||||
|  | ||||
|     def _get_text_bg_color(self, style): | ||||
|         """ | ||||
|         Get the correct background color for the token from the style. | ||||
|         """ | ||||
|         if style['bgcolor'] is not None: | ||||
|             bg_color = '#' + style['bgcolor'] | ||||
|         else: | ||||
|             bg_color = None | ||||
|         return bg_color | ||||
|  | ||||
|     def _get_style_font(self, style): | ||||
|         """ | ||||
|         Get the correct font for the style. | ||||
|         """ | ||||
|         return self.fonts.get_font(style['bold'], style['italic']) | ||||
|  | ||||
|     def _get_image_size(self, maxlinelength, maxlineno): | ||||
|         """ | ||||
|         Get the required image size. | ||||
|         """ | ||||
|         return (self._get_char_x(maxlinelength) + self.image_pad, | ||||
|                 self._get_line_y(maxlineno) + self.image_pad) | ||||
|  | ||||
|     def _draw_linenumber(self, posno, lineno): | ||||
|         """ | ||||
|         Remember a line number drawable to paint later. | ||||
|         """ | ||||
|         self._draw_text( | ||||
|             self._get_linenumber_pos(posno), | ||||
|             str(lineno).rjust(self.line_number_chars), | ||||
|             font=self.fonts.get_font(self.line_number_bold, | ||||
|                                      self.line_number_italic), | ||||
|             text_fg=self.line_number_fg, | ||||
|             text_bg=None, | ||||
|         ) | ||||
|  | ||||
|     def _draw_text(self, pos, text, font, text_fg, text_bg): | ||||
|         """ | ||||
|         Remember a single drawable tuple to paint later. | ||||
|         """ | ||||
|         self.drawables.append((pos, text, font, text_fg, text_bg)) | ||||
|  | ||||
|     def _create_drawables(self, tokensource): | ||||
|         """ | ||||
|         Create drawables for the token content. | ||||
|         """ | ||||
|         lineno = charno = maxcharno = 0 | ||||
|         maxlinelength = linelength = 0 | ||||
|         for ttype, value in tokensource: | ||||
|             while ttype not in self.styles: | ||||
|                 ttype = ttype.parent | ||||
|             style = self.styles[ttype] | ||||
|             # TODO: make sure tab expansion happens earlier in the chain.  It | ||||
|             # really ought to be done on the input, as to do it right here is | ||||
|             # quite complex. | ||||
|             value = value.expandtabs(4) | ||||
|             lines = value.splitlines(True) | ||||
|             for i, line in enumerate(lines): | ||||
|                 temp = line.rstrip('\n') | ||||
|                 if temp: | ||||
|                     self._draw_text( | ||||
|                         self._get_text_pos(linelength, lineno), | ||||
|                         temp, | ||||
|                         font=self._get_style_font(style), | ||||
|                         text_fg=self._get_text_color(style), | ||||
|                         text_bg=self._get_text_bg_color(style), | ||||
|                     ) | ||||
|                     temp_width, _ = self.fonts.get_text_size(temp) | ||||
|                     linelength += temp_width | ||||
|                     maxlinelength = max(maxlinelength, linelength) | ||||
|                     charno += len(temp) | ||||
|                     maxcharno = max(maxcharno, charno) | ||||
|                 if line.endswith('\n'): | ||||
|                     # add a line for each extra line in the value | ||||
|                     linelength = 0 | ||||
|                     charno = 0 | ||||
|                     lineno += 1 | ||||
|         self.maxlinelength = maxlinelength | ||||
|         self.maxcharno = maxcharno | ||||
|         self.maxlineno = lineno | ||||
|  | ||||
|     def _draw_line_numbers(self): | ||||
|         """ | ||||
|         Create drawables for the line numbers. | ||||
|         """ | ||||
|         if not self.line_numbers: | ||||
|             return | ||||
|         for p in range(self.maxlineno): | ||||
|             n = p + self.line_number_start | ||||
|             if (n % self.line_number_step) == 0: | ||||
|                 self._draw_linenumber(p, n) | ||||
|  | ||||
|     def _paint_line_number_bg(self, im): | ||||
|         """ | ||||
|         Paint the line number background on the image. | ||||
|         """ | ||||
|         if not self.line_numbers: | ||||
|             return | ||||
|         if self.line_number_fg is None: | ||||
|             return | ||||
|         draw = ImageDraw.Draw(im) | ||||
|         recth = im.size[-1] | ||||
|         rectw = self.image_pad + self.line_number_width - self.line_number_pad | ||||
|         draw.rectangle([(0, 0), (rectw, recth)], | ||||
|                        fill=self.line_number_bg) | ||||
|         if self.line_number_separator: | ||||
|             draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg) | ||||
|         del draw | ||||
|  | ||||
|     def format(self, tokensource, outfile): | ||||
|         """ | ||||
|         Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` | ||||
|         tuples and write it into ``outfile``. | ||||
|  | ||||
|         This implementation calculates where it should draw each token on the | ||||
|         pixmap, then calculates the required pixmap size and draws the items. | ||||
|         """ | ||||
|         self._create_drawables(tokensource) | ||||
|         self._draw_line_numbers() | ||||
|         im = Image.new( | ||||
|             'RGB', | ||||
|             self._get_image_size(self.maxlinelength, self.maxlineno), | ||||
|             self.background_color | ||||
|         ) | ||||
|         self._paint_line_number_bg(im) | ||||
|         draw = ImageDraw.Draw(im) | ||||
|         # Highlight | ||||
|         if self.hl_lines: | ||||
|             x = self.image_pad + self.line_number_width - self.line_number_pad + 1 | ||||
|             recth = self._get_line_height() | ||||
|             rectw = im.size[0] - x | ||||
|             for linenumber in self.hl_lines: | ||||
|                 y = self._get_line_y(linenumber - 1) | ||||
|                 draw.rectangle([(x, y), (x + rectw, y + recth)], | ||||
|                                fill=self.hl_color) | ||||
|         for pos, value, font, text_fg, text_bg in self.drawables: | ||||
|             if text_bg: | ||||
|                 # ImageDraw.textsize was removed in Pillow 10; prefer textbbox | ||||
|                 # (available since Pillow 8.0) and fall back on older Pillow. | ||||
|                 if hasattr(draw, 'textbbox'): | ||||
|                     text_size = draw.textbbox((0, 0), value, font=font)[2:] | ||||
|                 else: | ||||
|                     text_size = draw.textsize(value, font=font) | ||||
|                 draw.rectangle([pos[0], pos[1], | ||||
|                                 pos[0] + text_size[0], pos[1] + text_size[1]], | ||||
|                                fill=text_bg) | ||||
|             draw.text(pos, value, font=font, fill=text_fg) | ||||
|         im.save(outfile, self.image_format.upper()) | ||||
|  | ||||
|  | ||||
| # Add one formatter per format, so that the "-f gif" option gives the correct result | ||||
| # when used in pygmentize. | ||||
|  | ||||
| class GifImageFormatter(ImageFormatter): | ||||
|     """ | ||||
|     Create a GIF image from source code. This uses the Python Imaging Library to | ||||
|     generate a pixmap from the source code. | ||||
|  | ||||
|     .. versionadded:: 1.0 | ||||
|     """ | ||||
|  | ||||
|     name = 'img_gif' | ||||
|     aliases = ['gif'] | ||||
|     filenames = ['*.gif'] | ||||
|     default_image_format = 'gif' | ||||
|  | ||||
|  | ||||
| class JpgImageFormatter(ImageFormatter): | ||||
|     """ | ||||
|     Create a JPEG image from source code. This uses the Python Imaging Library to | ||||
|     generate a pixmap from the source code. | ||||
|  | ||||
|     .. versionadded:: 1.0 | ||||
|     """ | ||||
|  | ||||
|     name = 'img_jpg' | ||||
|     aliases = ['jpg', 'jpeg'] | ||||
|     filenames = ['*.jpg'] | ||||
|     default_image_format = 'jpeg' | ||||
|  | ||||
|  | ||||
| class BmpImageFormatter(ImageFormatter): | ||||
|     """ | ||||
|     Create a bitmap image from source code. This uses the Python Imaging Library to | ||||
|     generate a pixmap from the source code. | ||||
|  | ||||
|     .. versionadded:: 1.0 | ||||
|     """ | ||||
|  | ||||
|     name = 'img_bmp' | ||||
|     aliases = ['bmp', 'bitmap'] | ||||
|     filenames = ['*.bmp'] | ||||
|     default_image_format = 'bmp' | ||||
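|  | ||||
|  | ||||
| # A minimal usage sketch, assuming Pillow is installed and that the vendored | ||||
| # PythonLexer is importable as shown: | ||||
| # | ||||
| #     from pip._vendor.pygments import highlight | ||||
| #     from pip._vendor.pygments.lexers import PythonLexer | ||||
| # | ||||
| #     with open('out.png', 'wb') as f: | ||||
| #         highlight('print("hi")', PythonLexer(), | ||||
| #                   ImageFormatter(line_numbers=True), f) | ||||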
| @ -0,0 +1,179 @@ | ||||
| """ | ||||
|     pygments.formatters.irc | ||||
|     ~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Formatter for IRC output. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| from pip._vendor.pygments.formatter import Formatter | ||||
| from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \ | ||||
|     Number, Operator, Generic, Token, Whitespace | ||||
| from pip._vendor.pygments.util import get_choice_opt | ||||
|  | ||||
|  | ||||
| __all__ = ['IRCFormatter'] | ||||
|  | ||||
|  | ||||
| #: Map token types to a tuple of color values for light and dark | ||||
| #: backgrounds. | ||||
| IRC_COLORS = { | ||||
|     Token:              ('',             ''), | ||||
|  | ||||
|     Whitespace:         ('gray',         'brightblack'), | ||||
|     Comment:            ('gray',         'brightblack'), | ||||
|     Comment.Preproc:    ('cyan',         'brightcyan'), | ||||
|     Keyword:            ('blue',         'brightblue'), | ||||
|     Keyword.Type:       ('cyan',         'brightcyan'), | ||||
|     Operator.Word:      ('magenta',      'brightcyan'), | ||||
|     Name.Builtin:       ('cyan',         'brightcyan'), | ||||
|     Name.Function:      ('green',        'brightgreen'), | ||||
|     Name.Namespace:     ('_cyan_',       '_brightcyan_'), | ||||
|     Name.Class:         ('_green_',      '_brightgreen_'), | ||||
|     Name.Exception:     ('cyan',         'brightcyan'), | ||||
|     Name.Decorator:     ('brightblack',  'gray'), | ||||
|     Name.Variable:      ('red',          'brightred'), | ||||
|     Name.Constant:      ('red',          'brightred'), | ||||
|     Name.Attribute:     ('cyan',         'brightcyan'), | ||||
|     Name.Tag:           ('brightblue',   'brightblue'), | ||||
|     String:             ('yellow',       'yellow'), | ||||
|     Number:             ('blue',         'brightblue'), | ||||
|  | ||||
|     Generic.Deleted:    ('brightred',    'brightred'), | ||||
|     Generic.Inserted:   ('green',        'brightgreen'), | ||||
|     Generic.Heading:    ('**',           '**'), | ||||
|     Generic.Subheading: ('*magenta*',    '*brightmagenta*'), | ||||
|     Generic.Error:      ('brightred',    'brightred'), | ||||
|  | ||||
|     Error:              ('_brightred_',  '_brightred_'), | ||||
| } | ||||
|  | ||||
|  | ||||
| IRC_COLOR_MAP = { | ||||
|     'white': 0, | ||||
|     'black': 1, | ||||
|     'blue': 2, | ||||
|     'brightgreen': 3, | ||||
|     'brightred': 4, | ||||
|     'yellow': 5, | ||||
|     'magenta': 6, | ||||
|     'orange': 7, | ||||
|     'green': 7,  # compat w/ ansi | ||||
|     'brightyellow': 8, | ||||
|     'lightgreen': 9, | ||||
|     'brightcyan': 9,  # compat w/ ansi | ||||
|     'cyan': 10, | ||||
|     'lightblue': 11, | ||||
|     'red': 11,  # compat w/ ansi | ||||
|     'brightblue': 12, | ||||
|     'brightmagenta': 13, | ||||
|     'brightblack': 14, | ||||
|     'gray': 15, | ||||
| } | ||||
|  | ||||
| def ircformat(color, text): | ||||
|     if len(color) < 1: | ||||
|         return text | ||||
|     add = sub = '' | ||||
|     if '_' in color: # italic | ||||
|         add += '\x1D' | ||||
|         sub = '\x1D' + sub | ||||
|         color = color.strip('_') | ||||
|     if '*' in color: # bold | ||||
|         add += '\x02' | ||||
|         sub = '\x02' + sub | ||||
|         color = color.strip('*') | ||||
|     # underline (\x1F) not supported | ||||
|     # backgrounds (\x03FF,BB) not supported | ||||
|     if len(color) > 0: # actual color - may have issues with ircformat("red", "blah")+"10" type stuff | ||||
|         add += '\x03' + str(IRC_COLOR_MAP[color]).zfill(2) | ||||
|         sub = '\x03' + sub | ||||
|     return add + text + sub | ||||
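|  | ||||
| # For illustration (derived from the mapping above), bold plus a color code: | ||||
| # | ||||
| #     >>> ircformat('*brightred*', 'error') | ||||
| #     '\x02\x0304error\x03\x02' | ||||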
|  | ||||
|  | ||||
| class IRCFormatter(Formatter): | ||||
|     r""" | ||||
|     Format tokens with IRC color sequences | ||||
|  | ||||
|     The `get_style_defs()` method doesn't do anything special since there is | ||||
|     no support for common styles. | ||||
|  | ||||
|     Options accepted: | ||||
|  | ||||
|     `bg` | ||||
|         Set to ``"light"`` or ``"dark"`` depending on the terminal's background | ||||
|         (default: ``"light"``). | ||||
|  | ||||
|     `colorscheme` | ||||
|         A dictionary mapping token types to (lightbg, darkbg) color names or | ||||
|         ``None`` (default: ``None`` = use builtin colorscheme). | ||||
|  | ||||
|     `linenos` | ||||
|         Set to ``True`` to have line numbers in the output as well | ||||
|         (default: ``False`` = no line numbers). | ||||
|     """ | ||||
|     name = 'IRC' | ||||
|     aliases = ['irc', 'IRC'] | ||||
|     filenames = [] | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Formatter.__init__(self, **options) | ||||
|         self.darkbg = get_choice_opt(options, 'bg', | ||||
|                                      ['light', 'dark'], 'light') == 'dark' | ||||
|         self.colorscheme = options.get('colorscheme', None) or IRC_COLORS | ||||
|         self.linenos = options.get('linenos', False) | ||||
|         self._lineno = 0 | ||||
|  | ||||
|     def _write_lineno(self, outfile): | ||||
|         self._lineno += 1 | ||||
|         outfile.write("\n%04d: " % self._lineno) | ||||
|  | ||||
|     def _format_unencoded_with_lineno(self, tokensource, outfile): | ||||
|         self._write_lineno(outfile) | ||||
|  | ||||
|         for ttype, value in tokensource: | ||||
|             if value.endswith("\n"): | ||||
|                 self._write_lineno(outfile) | ||||
|                 value = value[:-1] | ||||
|             color = self.colorscheme.get(ttype) | ||||
|             while color is None: | ||||
|                 ttype = ttype.parent | ||||
|                 color = self.colorscheme.get(ttype) | ||||
|             if color: | ||||
|                 color = color[self.darkbg] | ||||
|                 spl = value.split('\n') | ||||
|                 for line in spl[:-1]: | ||||
|                     self._write_lineno(outfile) | ||||
|                     if line: | ||||
|                         # the '\n' is already gone after split('\n'), so | ||||
|                         # write the whole piece rather than chopping a char | ||||
|                         outfile.write(ircformat(color, line)) | ||||
|                 if spl[-1]: | ||||
|                     outfile.write(ircformat(color, spl[-1])) | ||||
|             else: | ||||
|                 outfile.write(value) | ||||
|  | ||||
|         outfile.write("\n") | ||||
|  | ||||
|     def format_unencoded(self, tokensource, outfile): | ||||
|         if self.linenos: | ||||
|             self._format_unencoded_with_lineno(tokensource, outfile) | ||||
|             return | ||||
|  | ||||
|         for ttype, value in tokensource: | ||||
|             color = self.colorscheme.get(ttype) | ||||
|             while color is None: | ||||
|                 ttype = ttype[:-1] | ||||
|                 color = self.colorscheme.get(ttype) | ||||
|             if color: | ||||
|                 color = color[self.darkbg] | ||||
|                 spl = value.split('\n') | ||||
|                 for line in spl[:-1]: | ||||
|                     if line: | ||||
|                         outfile.write(ircformat(color, line)) | ||||
|                     outfile.write('\n') | ||||
|                 if spl[-1]: | ||||
|                     outfile.write(ircformat(color, spl[-1])) | ||||
|             else: | ||||
|                 outfile.write(value) | ||||
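|  | ||||
|  | ||||
| # A minimal usage sketch, assuming the vendored PythonLexer is importable: | ||||
| # | ||||
| #     from pip._vendor.pygments import highlight | ||||
| #     from pip._vendor.pygments.lexers import PythonLexer | ||||
| # | ||||
| #     msg = highlight('x = 1', PythonLexer(), IRCFormatter(bg='dark')) | ||||
| #     # msg now carries the \x02/\x03NN control codes IRC clients render | ||||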
| @ -0,0 +1,521 @@ | ||||
| """ | ||||
|     pygments.formatters.latex | ||||
|     ~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Formatter for LaTeX fancyvrb output. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| from io import StringIO | ||||
|  | ||||
| from pip._vendor.pygments.formatter import Formatter | ||||
| from pip._vendor.pygments.lexer import Lexer, do_insertions | ||||
| from pip._vendor.pygments.token import Token, STANDARD_TYPES | ||||
| from pip._vendor.pygments.util import get_bool_opt, get_int_opt | ||||
|  | ||||
|  | ||||
| __all__ = ['LatexFormatter'] | ||||
|  | ||||
|  | ||||
| def escape_tex(text, commandprefix): | ||||
|     return text.replace('\\', '\x00'). \ | ||||
|                 replace('{', '\x01'). \ | ||||
|                 replace('}', '\x02'). \ | ||||
|                 replace('\x00', r'\%sZbs{}' % commandprefix). \ | ||||
|                 replace('\x01', r'\%sZob{}' % commandprefix). \ | ||||
|                 replace('\x02', r'\%sZcb{}' % commandprefix). \ | ||||
|                 replace('^', r'\%sZca{}' % commandprefix). \ | ||||
|                 replace('_', r'\%sZus{}' % commandprefix). \ | ||||
|                 replace('&', r'\%sZam{}' % commandprefix). \ | ||||
|                 replace('<', r'\%sZlt{}' % commandprefix). \ | ||||
|                 replace('>', r'\%sZgt{}' % commandprefix). \ | ||||
|                 replace('#', r'\%sZsh{}' % commandprefix). \ | ||||
|                 replace('%', r'\%sZpc{}' % commandprefix). \ | ||||
|                 replace('$', r'\%sZdl{}' % commandprefix). \ | ||||
|                 replace('-', r'\%sZhy{}' % commandprefix). \ | ||||
|                 replace("'", r'\%sZsq{}' % commandprefix). \ | ||||
|                 replace('"', r'\%sZdq{}' % commandprefix). \ | ||||
|                 replace('~', r'\%sZti{}' % commandprefix) | ||||
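|  | ||||
| # For illustration, each special character becomes a \<prefix>Zxx{} macro: | ||||
| # | ||||
| #     >>> escape_tex('50% of _x_', 'PY') | ||||
| #     '50\\PYZpc{} of \\PYZus{}x\\PYZus{}' | ||||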
|  | ||||
|  | ||||
| DOC_TEMPLATE = r''' | ||||
| \documentclass{%(docclass)s} | ||||
| \usepackage{fancyvrb} | ||||
| \usepackage{color} | ||||
| \usepackage[%(encoding)s]{inputenc} | ||||
| %(preamble)s | ||||
|  | ||||
| %(styledefs)s | ||||
|  | ||||
| \begin{document} | ||||
|  | ||||
| \section*{%(title)s} | ||||
|  | ||||
| %(code)s | ||||
| \end{document} | ||||
| ''' | ||||
|  | ||||
| ## Small explanation of the mess below :) | ||||
| # | ||||
| # The previous version of the LaTeX formatter just assigned a command to | ||||
| # each token type defined in the current style.  That obviously is | ||||
| # problematic if the highlighted code is produced for a different style | ||||
| # than the style commands themselves. | ||||
| # | ||||
| # This version works much like the HTML formatter which assigns multiple | ||||
| # CSS classes to each <span> tag, from the most specific to the least | ||||
| # specific token type, thus falling back to the parent token type if one | ||||
| # is not defined.  Here, the classes are there too and use the same short | ||||
| # forms given in token.STANDARD_TYPES. | ||||
| # | ||||
| # Highlighted code now only uses one custom command, which by default is | ||||
| # \PY and selectable by the commandprefix option (and in addition the | ||||
| # escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for | ||||
| # backwards compatibility purposes). | ||||
| # | ||||
| # \PY has two arguments: the classes, separated by +, and the text to | ||||
| # render in that style.  The classes are resolved into the respective | ||||
| # style commands by magic, which serves to ignore unknown classes. | ||||
| # | ||||
| # The magic macros are: | ||||
| # * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text | ||||
| #   to render in \PY@do.  Their definition determines the style. | ||||
| # * \PY@reset resets \PY@it etc. to do nothing. | ||||
| # * \PY@toks parses the list of classes, using magic inspired by the | ||||
| #   keyval package (but modified to use plusses instead of commas | ||||
| #   because fancyvrb redefines commas inside its environments). | ||||
| # * \PY@tok processes one class, calling the \PY@tok@classname command | ||||
| #   if it exists. | ||||
| # * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style | ||||
| #   for its class. | ||||
| # * \PY resets the style, parses the classnames and then calls \PY@do. | ||||
| # | ||||
| # Tip: to read this code, print it out in substituted form using e.g. | ||||
| # >>> print(STYLE_TEMPLATE % {'cp': 'PY'}) | ||||
|  | ||||
| STYLE_TEMPLATE = r''' | ||||
| \makeatletter | ||||
| \def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%% | ||||
|     \let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%% | ||||
|     \let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax} | ||||
| \def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname} | ||||
| \def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%% | ||||
|     \%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi} | ||||
| \def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%% | ||||
|     \%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}} | ||||
| \def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}} | ||||
|  | ||||
| %(styles)s | ||||
|  | ||||
| \def\%(cp)sZbs{\char`\\} | ||||
| \def\%(cp)sZus{\char`\_} | ||||
| \def\%(cp)sZob{\char`\{} | ||||
| \def\%(cp)sZcb{\char`\}} | ||||
| \def\%(cp)sZca{\char`\^} | ||||
| \def\%(cp)sZam{\char`\&} | ||||
| \def\%(cp)sZlt{\char`\<} | ||||
| \def\%(cp)sZgt{\char`\>} | ||||
| \def\%(cp)sZsh{\char`\#} | ||||
| \def\%(cp)sZpc{\char`\%%} | ||||
| \def\%(cp)sZdl{\char`\$} | ||||
| \def\%(cp)sZhy{\char`\-} | ||||
| \def\%(cp)sZsq{\char`\'} | ||||
| \def\%(cp)sZdq{\char`\"} | ||||
| \def\%(cp)sZti{\char`\~} | ||||
| %% for compatibility with earlier versions | ||||
| \def\%(cp)sZat{@} | ||||
| \def\%(cp)sZlb{[} | ||||
| \def\%(cp)sZrb{]} | ||||
| \makeatother | ||||
| ''' | ||||
|  | ||||
|  | ||||
| def _get_ttype_name(ttype): | ||||
|     fname = STANDARD_TYPES.get(ttype) | ||||
|     if fname: | ||||
|         return fname | ||||
|     aname = '' | ||||
|     while fname is None: | ||||
|         aname = ttype[-1] + aname | ||||
|         ttype = ttype.parent | ||||
|         fname = STANDARD_TYPES.get(ttype) | ||||
|     return fname + aname | ||||
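|  | ||||
| # For illustration: standard token types resolve to their short form, while | ||||
| # unknown subtypes (Custom below is hypothetical) fall back to the nearest | ||||
| # standard parent plus the subtype name: | ||||
| # | ||||
| #     >>> _get_ttype_name(Token.Name.Function) | ||||
| #     'nf' | ||||
| #     >>> _get_ttype_name(Token.Name.Function.Custom) | ||||
| #     'nfCustom' | ||||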
|  | ||||
|  | ||||
| class LatexFormatter(Formatter): | ||||
|     r""" | ||||
|     Format tokens as LaTeX code. This needs the `fancyvrb` and `color` | ||||
|     standard packages. | ||||
|  | ||||
|     Without the `full` option, code is formatted as one ``Verbatim`` | ||||
|     environment, like this: | ||||
|  | ||||
|     .. sourcecode:: latex | ||||
|  | ||||
|         \begin{Verbatim}[commandchars=\\\{\}] | ||||
|         \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}): | ||||
|             \PY{k}{pass} | ||||
|         \end{Verbatim} | ||||
|  | ||||
|     Wrapping can be disabled using the `nowrap` option. | ||||
|  | ||||
|     The special command used here (``\PY``) and all the other macros it needs | ||||
|     are output by the `get_style_defs` method. | ||||
|  | ||||
|     With the `full` option, a complete LaTeX document is output, including | ||||
|     the command definitions in the preamble. | ||||
|  | ||||
|     The `get_style_defs()` method of a `LatexFormatter` returns a string | ||||
|     containing ``\def`` commands defining the macros needed inside the | ||||
|     ``Verbatim`` environments. | ||||
|  | ||||
|     Additional options accepted: | ||||
|  | ||||
|     `nowrap` | ||||
|         If set to ``True``, don't wrap the tokens at all, not even inside a | ||||
|         ``\begin{Verbatim}`` environment. This disables most other options | ||||
|         (default: ``False``). | ||||
|  | ||||
|     `style` | ||||
|         The style to use, can be a string or a Style subclass (default: | ||||
|         ``'default'``). | ||||
|  | ||||
|     `full` | ||||
|         Tells the formatter to output a "full" document, i.e. a complete | ||||
|         self-contained document (default: ``False``). | ||||
|  | ||||
|     `title` | ||||
|         If `full` is true, the title that should be used to caption the | ||||
|         document (default: ``''``). | ||||
|  | ||||
|     `docclass` | ||||
|         If the `full` option is enabled, this is the document class to use | ||||
|         (default: ``'article'``). | ||||
|  | ||||
|     `preamble` | ||||
|         If the `full` option is enabled, this can be further preamble commands, | ||||
|         e.g. ``\usepackage`` (default: ``''``). | ||||
|  | ||||
|     `linenos` | ||||
|         If set to ``True``, output line numbers (default: ``False``). | ||||
|  | ||||
|     `linenostart` | ||||
|         The line number for the first line (default: ``1``). | ||||
|  | ||||
|     `linenostep` | ||||
|         If set to a number n > 1, only every nth line number is printed. | ||||
|  | ||||
|     `verboptions` | ||||
|         Additional options given to the Verbatim environment (see the *fancyvrb* | ||||
|         docs for possible values) (default: ``''``). | ||||
|  | ||||
|     `commandprefix` | ||||
|         The LaTeX commands used to produce colored output are constructed | ||||
|         using this prefix and some letters (default: ``'PY'``). | ||||
|  | ||||
|         .. versionadded:: 0.7 | ||||
|         .. versionchanged:: 0.10 | ||||
|            The default is now ``'PY'`` instead of ``'C'``. | ||||
|  | ||||
|     `texcomments` | ||||
|         If set to ``True``, enables LaTeX comment lines.  That is, LaTeX markup | ||||
|         in comment tokens is not escaped so that LaTeX can render it (default: | ||||
|         ``False``). | ||||
|  | ||||
|         .. versionadded:: 1.2 | ||||
|  | ||||
|     `mathescape` | ||||
|         If set to ``True``, enables LaTeX math mode escape in comments. That | ||||
|         is, ``'$...$'`` inside a comment will trigger math mode (default: | ||||
|         ``False``). | ||||
|  | ||||
|         .. versionadded:: 1.2 | ||||
|  | ||||
|     `escapeinside` | ||||
|         If set to a string of length 2, enables escaping to LaTeX. Text | ||||
|         delimited by these 2 characters is read as LaTeX code and | ||||
|         typeset accordingly. It has no effect in string literals. It has | ||||
|         no effect in comments if `texcomments` or `mathescape` is | ||||
|         set. (default: ``''``). | ||||
|  | ||||
|         .. versionadded:: 2.0 | ||||
|  | ||||
|     `envname` | ||||
|         Allows you to pick an alternative environment name replacing Verbatim. | ||||
|         The alternate environment still has to support Verbatim's option syntax. | ||||
|         (default: ``'Verbatim'``). | ||||
|  | ||||
|         .. versionadded:: 2.0 | ||||
|     """ | ||||
|     name = 'LaTeX' | ||||
|     aliases = ['latex', 'tex'] | ||||
|     filenames = ['*.tex'] | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Formatter.__init__(self, **options) | ||||
|         self.nowrap = get_bool_opt(options, 'nowrap', False) | ||||
|         self.docclass = options.get('docclass', 'article') | ||||
|         self.preamble = options.get('preamble', '') | ||||
|         self.linenos = get_bool_opt(options, 'linenos', False) | ||||
|         self.linenostart = abs(get_int_opt(options, 'linenostart', 1)) | ||||
|         self.linenostep = abs(get_int_opt(options, 'linenostep', 1)) | ||||
|         self.verboptions = options.get('verboptions', '') | ||||
|         self.nobackground = get_bool_opt(options, 'nobackground', False) | ||||
|         self.commandprefix = options.get('commandprefix', 'PY') | ||||
|         self.texcomments = get_bool_opt(options, 'texcomments', False) | ||||
|         self.mathescape = get_bool_opt(options, 'mathescape', False) | ||||
|         self.escapeinside = options.get('escapeinside', '') | ||||
|         if len(self.escapeinside) == 2: | ||||
|             self.left = self.escapeinside[0] | ||||
|             self.right = self.escapeinside[1] | ||||
|         else: | ||||
|             self.escapeinside = '' | ||||
|         self.envname = options.get('envname', 'Verbatim') | ||||
|  | ||||
|         self._create_stylesheet() | ||||
|  | ||||
|     def _create_stylesheet(self): | ||||
|         t2n = self.ttype2name = {Token: ''} | ||||
|         c2d = self.cmd2def = {} | ||||
|         cp = self.commandprefix | ||||
|  | ||||
|         def rgbcolor(col): | ||||
|             if col: | ||||
|                 return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0) | ||||
|                                  for i in (0, 2, 4)]) | ||||
|             else: | ||||
|                 return '1,1,1' | ||||
|  | ||||
|         for ttype, ndef in self.style: | ||||
|             name = _get_ttype_name(ttype) | ||||
|             cmndef = '' | ||||
|             if ndef['bold']: | ||||
|                 cmndef += r'\let\$$@bf=\textbf' | ||||
|             if ndef['italic']: | ||||
|                 cmndef += r'\let\$$@it=\textit' | ||||
|             if ndef['underline']: | ||||
|                 cmndef += r'\let\$$@ul=\underline' | ||||
|             if ndef['roman']: | ||||
|                 cmndef += r'\let\$$@ff=\textrm' | ||||
|             if ndef['sans']: | ||||
|                 cmndef += r'\let\$$@ff=\textsf' | ||||
|             if ndef['mono']: | ||||
|                 # typewriter face for monospace styles | ||||
|                 cmndef += r'\let\$$@ff=\texttt' | ||||
|             if ndef['color']: | ||||
|                 cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' % | ||||
|                            rgbcolor(ndef['color'])) | ||||
|             if ndef['border']: | ||||
|                 cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{\string -\fboxrule}' | ||||
|                            r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}}' % | ||||
|                            (rgbcolor(ndef['border']), | ||||
|                             rgbcolor(ndef['bgcolor']))) | ||||
|             elif ndef['bgcolor']: | ||||
|                 cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{0pt}' | ||||
|                            r'\colorbox[rgb]{%s}{\strut ##1}}}' % | ||||
|                            rgbcolor(ndef['bgcolor'])) | ||||
|             if cmndef == '': | ||||
|                 continue | ||||
|             cmndef = cmndef.replace('$$', cp) | ||||
|             t2n[ttype] = name | ||||
|             c2d[name] = cmndef | ||||
|  | ||||
|     def get_style_defs(self, arg=''): | ||||
|         """ | ||||
|         Return the command sequences needed to define the commands | ||||
|         used to format text in the verbatim environment. ``arg`` is ignored. | ||||
|         """ | ||||
|         cp = self.commandprefix | ||||
|         styles = [] | ||||
|         for name, definition in self.cmd2def.items(): | ||||
|             styles.append(r'\@namedef{%s@tok@%s}{%s}' % (cp, name, definition)) | ||||
|         return STYLE_TEMPLATE % {'cp': self.commandprefix, | ||||
|                                  'styles': '\n'.join(styles)} | ||||
|  | ||||
|     def format_unencoded(self, tokensource, outfile): | ||||
|         # TODO: add support for background colors | ||||
|         t2n = self.ttype2name | ||||
|         cp = self.commandprefix | ||||
|  | ||||
|         if self.full: | ||||
|             realoutfile = outfile | ||||
|             outfile = StringIO() | ||||
|  | ||||
|         if not self.nowrap: | ||||
|             outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}') | ||||
|             if self.linenos: | ||||
|                 start, step = self.linenostart, self.linenostep | ||||
|                 outfile.write(',numbers=left' + | ||||
|                               (start and ',firstnumber=%d' % start or '') + | ||||
|                               (step and ',stepnumber=%d' % step or '')) | ||||
|             if self.mathescape or self.texcomments or self.escapeinside: | ||||
|                 outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7' | ||||
|                               '\\catcode`\\_=8\\relax}') | ||||
|             if self.verboptions: | ||||
|                 outfile.write(',' + self.verboptions) | ||||
|             outfile.write(']\n') | ||||
|  | ||||
|         for ttype, value in tokensource: | ||||
|             if ttype in Token.Comment: | ||||
|                 if self.texcomments: | ||||
|                     # Try to guess comment starting lexeme and escape it ... | ||||
|                     start = value[0:1] | ||||
|                     for i in range(1, len(value)): | ||||
|                         if start[0] != value[i]: | ||||
|                             break | ||||
|                         start += value[i] | ||||
|  | ||||
|                     value = value[len(start):] | ||||
|                     start = escape_tex(start, cp) | ||||
|  | ||||
|                     # ... but do not escape inside comment. | ||||
|                     value = start + value | ||||
|                 elif self.mathescape: | ||||
|                     # Only escape parts not inside a math environment. | ||||
|                     parts = value.split('$') | ||||
|                     in_math = False | ||||
|                     for i, part in enumerate(parts): | ||||
|                         if not in_math: | ||||
|                             parts[i] = escape_tex(part, cp) | ||||
|                         in_math = not in_math | ||||
|                     value = '$'.join(parts) | ||||
|                 elif self.escapeinside: | ||||
|                     text = value | ||||
|                     value = '' | ||||
|                     while text: | ||||
|                         a, sep1, text = text.partition(self.left) | ||||
|                         if sep1: | ||||
|                             b, sep2, text = text.partition(self.right) | ||||
|                             if sep2: | ||||
|                                 value += escape_tex(a, cp) + b | ||||
|                             else: | ||||
|                                 value += escape_tex(a + sep1 + b, cp) | ||||
|                         else: | ||||
|                             value += escape_tex(a, cp) | ||||
|                 else: | ||||
|                     value = escape_tex(value, cp) | ||||
|             elif ttype not in Token.Escape: | ||||
|                 value = escape_tex(value, cp) | ||||
|             styles = [] | ||||
|             while ttype is not Token: | ||||
|                 try: | ||||
|                     styles.append(t2n[ttype]) | ||||
|                 except KeyError: | ||||
|                     # not in current style | ||||
|                     styles.append(_get_ttype_name(ttype)) | ||||
|                 ttype = ttype.parent | ||||
|             styleval = '+'.join(reversed(styles)) | ||||
|             if styleval: | ||||
|                 spl = value.split('\n') | ||||
|                 for line in spl[:-1]: | ||||
|                     if line: | ||||
|                         outfile.write("\\%s{%s}{%s}" % (cp, styleval, line)) | ||||
|                     outfile.write('\n') | ||||
|                 if spl[-1]: | ||||
|                     outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1])) | ||||
|             else: | ||||
|                 outfile.write(value) | ||||
|  | ||||
|         if not self.nowrap: | ||||
|             outfile.write('\\end{' + self.envname + '}\n') | ||||
|  | ||||
|         if self.full: | ||||
|             encoding = self.encoding or 'utf8' | ||||
|             # map known existing encodings from the LaTeX distribution | ||||
|             encoding = { | ||||
|                 'utf_8': 'utf8', | ||||
|                 'latin_1': 'latin1', | ||||
|                 'iso_8859_1': 'latin1', | ||||
|             }.get(encoding.replace('-', '_'), encoding) | ||||
|             realoutfile.write(DOC_TEMPLATE % | ||||
|                 dict(docclass  = self.docclass, | ||||
|                      preamble  = self.preamble, | ||||
|                      title     = self.title, | ||||
|                      encoding  = encoding, | ||||
|                      styledefs = self.get_style_defs(), | ||||
|                      code      = outfile.getvalue())) | ||||
|  | ||||
|  | ||||
| class LatexEmbeddedLexer(Lexer): | ||||
|     """ | ||||
|     This lexer takes one lexer as argument, the lexer for the language | ||||
|     being formatted, and the left and right delimiters for escaped text. | ||||
|  | ||||
|     First everything is scanned using the language lexer to obtain | ||||
|     strings and comments. All other consecutive tokens are merged and | ||||
|     the resulting text is scanned for escaped segments, which are given | ||||
|     the Token.Escape type. Finally text that is not escaped is scanned | ||||
|     again with the language lexer. | ||||
|     """ | ||||
|     def __init__(self, left, right, lang, **options): | ||||
|         self.left = left | ||||
|         self.right = right | ||||
|         self.lang = lang | ||||
|         Lexer.__init__(self, **options) | ||||
|  | ||||
|     def get_tokens_unprocessed(self, text): | ||||
|         # find and remove all the escape tokens (replace with an empty string) | ||||
|         # this is very similar to DelegatingLexer.get_tokens_unprocessed. | ||||
|         buffered = '' | ||||
|         insertions = [] | ||||
|         insertion_buf = [] | ||||
|         for i, t, v in self._find_safe_escape_tokens(text): | ||||
|             if t is None: | ||||
|                 if insertion_buf: | ||||
|                     insertions.append((len(buffered), insertion_buf)) | ||||
|                     insertion_buf = [] | ||||
|                 buffered += v | ||||
|             else: | ||||
|                 insertion_buf.append((i, t, v)) | ||||
|         if insertion_buf: | ||||
|             insertions.append((len(buffered), insertion_buf)) | ||||
|         return do_insertions(insertions, | ||||
|                              self.lang.get_tokens_unprocessed(buffered)) | ||||
|  | ||||
|     def _find_safe_escape_tokens(self, text): | ||||
|         """ find escape tokens that are not in strings or comments """ | ||||
|         for i, t, v in self._filter_to( | ||||
|             self.lang.get_tokens_unprocessed(text), | ||||
|             lambda t: t in Token.Comment or t in Token.String | ||||
|         ): | ||||
|             if t is None: | ||||
|                 for i2, t2, v2 in self._find_escape_tokens(v): | ||||
|                     yield i + i2, t2, v2 | ||||
|             else: | ||||
|                 yield i, None, v | ||||
|  | ||||
|     def _filter_to(self, it, pred): | ||||
|         """ Keep only the tokens that match `pred`, merge the others together """ | ||||
|         buf = '' | ||||
|         idx = 0 | ||||
|         for i, t, v in it: | ||||
|             if pred(t): | ||||
|                 if buf: | ||||
|                     yield idx, None, buf | ||||
|                     buf = '' | ||||
|                 yield i, t, v | ||||
|             else: | ||||
|                 if not buf: | ||||
|                     idx = i | ||||
|                 buf += v | ||||
|         if buf: | ||||
|             yield idx, None, buf | ||||
|  | ||||
|     def _find_escape_tokens(self, text): | ||||
|         """ Find escape tokens within text, give token=None otherwise """ | ||||
|         index = 0 | ||||
|         while text: | ||||
|             a, sep1, text = text.partition(self.left) | ||||
|             if a: | ||||
|                 yield index, None, a | ||||
|                 index += len(a) | ||||
|             if sep1: | ||||
|                 b, sep2, text = text.partition(self.right) | ||||
|                 if sep2: | ||||
|                     yield index + len(sep1), Token.Escape, b | ||||
|                     index += len(sep1) + len(b) + len(sep2) | ||||
|                 else: | ||||
|                     yield index, Token.Error, sep1 | ||||
|                     index += len(sep1) | ||||
|                     text = b | ||||
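|  | ||||
|  | ||||
| # A minimal usage sketch, assuming the vendored PythonLexer is importable: | ||||
| # | ||||
| #     from pip._vendor.pygments import highlight | ||||
| #     from pip._vendor.pygments.lexers import PythonLexer | ||||
| # | ||||
| #     doc = highlight('def foo():\n    pass\n', PythonLexer(), | ||||
| #                     LatexFormatter(full=True, linenos=True)) | ||||
| #     # doc is a complete LaTeX document; compile it with e.g. pdflatex | ||||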
| @ -0,0 +1,161 @@ | ||||
| """ | ||||
|     pygments.formatters.other | ||||
|     ~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Other formatters: NullFormatter, RawTokenFormatter. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| from pip._vendor.pygments.formatter import Formatter | ||||
| from pip._vendor.pygments.util import get_choice_opt | ||||
| from pip._vendor.pygments.token import Token | ||||
| from pip._vendor.pygments.console import colorize | ||||
|  | ||||
| __all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter'] | ||||
|  | ||||
|  | ||||
| class NullFormatter(Formatter): | ||||
|     """ | ||||
|     Output the text unchanged without any formatting. | ||||
|     """ | ||||
|     name = 'Text only' | ||||
|     aliases = ['text', 'null'] | ||||
|     filenames = ['*.txt'] | ||||
|  | ||||
|     def format(self, tokensource, outfile): | ||||
|         enc = self.encoding | ||||
|         for ttype, value in tokensource: | ||||
|             if enc: | ||||
|                 outfile.write(value.encode(enc)) | ||||
|             else: | ||||
|                 outfile.write(value) | ||||
|  | ||||
|  | ||||
| class RawTokenFormatter(Formatter): | ||||
|     r""" | ||||
|     Format tokens as a raw representation for storing token streams. | ||||
|  | ||||
|     The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later | ||||
|     be converted to a token stream with the `RawTokenLexer`, described in the | ||||
|     :doc:`lexer list <lexers>`. | ||||
|  | ||||
|     Only two options are accepted: | ||||
|  | ||||
|     `compress` | ||||
|         If set to ``'gz'`` or ``'bz2'``, compress the output with the given | ||||
|         compression algorithm after encoding (default: ``''``). | ||||
|     `error_color` | ||||
|         If set to a color name, highlight error tokens using that color.  If | ||||
|         set but with no value, defaults to ``'red'``. | ||||
|  | ||||
|         .. versionadded:: 0.11 | ||||
|  | ||||
|     """ | ||||
|     name = 'Raw tokens' | ||||
|     aliases = ['raw', 'tokens'] | ||||
|     filenames = ['*.raw'] | ||||
|  | ||||
|     unicodeoutput = False | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Formatter.__init__(self, **options) | ||||
|         # We ignore self.encoding if it is set, since it gets set for lexer | ||||
|         # and formatter if given with -Oencoding on the command line. | ||||
|         # The RawTokenFormatter outputs only ASCII. Override here. | ||||
|         self.encoding = 'ascii'  # let pygments.format() do the right thing | ||||
|         self.compress = get_choice_opt(options, 'compress', | ||||
|                                        ['', 'none', 'gz', 'bz2'], '') | ||||
|         self.error_color = options.get('error_color', None) | ||||
|         if self.error_color is True: | ||||
|             self.error_color = 'red' | ||||
|         if self.error_color is not None: | ||||
|             try: | ||||
|                 colorize(self.error_color, '') | ||||
|             except KeyError: | ||||
|                 raise ValueError("Invalid color %r specified" % | ||||
|                                  self.error_color) | ||||
|  | ||||
|     def format(self, tokensource, outfile): | ||||
|         try: | ||||
|             outfile.write(b'') | ||||
|         except TypeError: | ||||
|             raise TypeError('The raw tokens formatter needs a binary ' | ||||
|                             'output file') | ||||
|         if self.compress == 'gz': | ||||
|             import gzip | ||||
|             outfile = gzip.GzipFile('', 'wb', 9, outfile) | ||||
|  | ||||
|             write = outfile.write | ||||
|             flush = outfile.close | ||||
|         elif self.compress == 'bz2': | ||||
|             import bz2 | ||||
|             compressor = bz2.BZ2Compressor(9) | ||||
|  | ||||
|             def write(text): | ||||
|                 outfile.write(compressor.compress(text)) | ||||
|  | ||||
|             def flush(): | ||||
|                 outfile.write(compressor.flush()) | ||||
|                 outfile.flush() | ||||
|         else: | ||||
|             write = outfile.write | ||||
|             flush = outfile.flush | ||||
|  | ||||
|         if self.error_color: | ||||
|             for ttype, value in tokensource: | ||||
|                 line = b"%r\t%r\n" % (ttype, value) | ||||
|                 if ttype is Token.Error: | ||||
|                     write(colorize(self.error_color, line)) | ||||
|                 else: | ||||
|                     write(line) | ||||
|         else: | ||||
|             for ttype, value in tokensource: | ||||
|                 write(b"%r\t%r\n" % (ttype, value)) | ||||
|         flush() | ||||
|  | ||||
|  | ||||
| TESTCASE_BEFORE = '''\ | ||||
|     def testNeedsName(lexer): | ||||
|         fragment = %r | ||||
|         tokens = [ | ||||
| ''' | ||||
| TESTCASE_AFTER = '''\ | ||||
|         ] | ||||
|         assert list(lexer.get_tokens(fragment)) == tokens | ||||
| ''' | ||||
|  | ||||
|  | ||||
| class TestcaseFormatter(Formatter): | ||||
|     """ | ||||
|     Format tokens as appropriate for a new testcase. | ||||
|  | ||||
|     .. versionadded:: 2.0 | ||||
|     """ | ||||
|     name = 'Testcase' | ||||
|     aliases = ['testcase'] | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Formatter.__init__(self, **options) | ||||
|         if self.encoding is not None and self.encoding != 'utf-8': | ||||
|             raise ValueError("Only None and utf-8 are allowed encodings.") | ||||
|  | ||||
|     def format(self, tokensource, outfile): | ||||
|         indentation = ' ' * 12 | ||||
|         rawbuf = [] | ||||
|         outbuf = [] | ||||
|         for ttype, value in tokensource: | ||||
|             rawbuf.append(value) | ||||
|             outbuf.append('%s(%s, %r),\n' % (indentation, ttype, value)) | ||||
|  | ||||
|         before = TESTCASE_BEFORE % (''.join(rawbuf),) | ||||
|         during = ''.join(outbuf) | ||||
|         after = TESTCASE_AFTER | ||||
|         if self.encoding is None: | ||||
|             outfile.write(before + during + after) | ||||
|         else: | ||||
|             outfile.write(before.encode('utf-8')) | ||||
|             outfile.write(during.encode('utf-8')) | ||||
|             outfile.write(after.encode('utf-8')) | ||||
|         outfile.flush() | ||||
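|  | ||||
|  | ||||
| # A minimal usage sketch: RawTokenFormatter emits bytes, so it needs a binary | ||||
| # output file (see the outfile.write(b'') probe in format() above): | ||||
| # | ||||
| #     from pip._vendor.pygments import lex | ||||
| #     from pip._vendor.pygments.lexers import PythonLexer | ||||
| # | ||||
| #     with open('tokens.raw', 'wb') as f: | ||||
| #         RawTokenFormatter().format(lex('x = 1\n', PythonLexer()), f) | ||||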
| @ -0,0 +1,83 @@ | ||||
| """ | ||||
|     pygments.formatters.pangomarkup | ||||
|     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Formatter for Pango markup output. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| from pip._vendor.pygments.formatter import Formatter | ||||
|  | ||||
|  | ||||
| __all__ = ['PangoMarkupFormatter'] | ||||
|  | ||||
|  | ||||
| _escape_table = { | ||||
|     ord('&'): '&amp;', | ||||
|     ord('<'): '&lt;', | ||||
| } | ||||
|  | ||||
|  | ||||
| def escape_special_chars(text, table=_escape_table): | ||||
|     """Escape & and < for Pango Markup.""" | ||||
|     return text.translate(table) | ||||
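|  | ||||
| # For illustration, with the entity table above: | ||||
| # | ||||
| #     >>> escape_special_chars('a < b && c') | ||||
| #     'a &lt; b &amp;&amp; c' | ||||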
|  | ||||
|  | ||||
| class PangoMarkupFormatter(Formatter): | ||||
|     """ | ||||
|     Format tokens as Pango Markup code. It can then be rendered to an SVG. | ||||
|  | ||||
|     .. versionadded:: 2.9 | ||||
|     """ | ||||
|  | ||||
|     name = 'Pango Markup' | ||||
|     aliases = ['pango', 'pangomarkup'] | ||||
|     filenames = [] | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Formatter.__init__(self, **options) | ||||
|  | ||||
|         self.styles = {} | ||||
|  | ||||
|         for token, style in self.style: | ||||
|             start = '' | ||||
|             end = '' | ||||
|             if style['color']: | ||||
|                 start += '<span fgcolor="#%s">' % style['color'] | ||||
|                 end = '</span>' + end | ||||
|             if style['bold']: | ||||
|                 start += '<b>' | ||||
|                 end = '</b>' + end | ||||
|             if style['italic']: | ||||
|                 start += '<i>' | ||||
|                 end = '</i>' + end | ||||
|             if style['underline']: | ||||
|                 start += '<u>' | ||||
|                 end = '</u>' + end | ||||
|             self.styles[token] = (start, end) | ||||
|  | ||||
|     def format_unencoded(self, tokensource, outfile): | ||||
|         lastval = '' | ||||
|         lasttype = None | ||||
|  | ||||
|         outfile.write('<tt>') | ||||
|  | ||||
|         for ttype, value in tokensource: | ||||
|             while ttype not in self.styles: | ||||
|                 ttype = ttype.parent | ||||
|             if ttype == lasttype: | ||||
|                 lastval += escape_special_chars(value) | ||||
|             else: | ||||
|                 if lastval: | ||||
|                     stylebegin, styleend = self.styles[lasttype] | ||||
|                     outfile.write(stylebegin + lastval + styleend) | ||||
|                 lastval = escape_special_chars(value) | ||||
|                 lasttype = ttype | ||||
|  | ||||
|         if lastval: | ||||
|             stylebegin, styleend = self.styles[lasttype] | ||||
|             outfile.write(stylebegin + lastval + styleend) | ||||
|  | ||||
|         outfile.write('</tt>') | ||||
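|  | ||||
| # Usage sketch (editor's note, not part of the vendored module): | ||||
| # | ||||
| #     from pip._vendor.pygments import highlight | ||||
| #     from pip._vendor.pygments.lexers import PythonLexer | ||||
| #     markup = highlight('x = 1\n', PythonLexer(), PangoMarkupFormatter()) | ||||
| # | ||||
| # `markup` is a '<tt>...</tt>' string with nested <span>/<b>/<i>/<u> tags, | ||||
| # suitable for any Pango-aware renderer (e.g. a Gtk label). | ||||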
| @ -0,0 +1,146 @@ | ||||
| """ | ||||
|     pygments.formatters.rtf | ||||
|     ~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     A formatter that generates RTF files. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| from pip._vendor.pygments.formatter import Formatter | ||||
| from pip._vendor.pygments.util import get_int_opt, surrogatepair | ||||
|  | ||||
|  | ||||
| __all__ = ['RtfFormatter'] | ||||
|  | ||||
|  | ||||
| class RtfFormatter(Formatter): | ||||
|     """ | ||||
|     Format tokens as RTF markup. This formatter automatically outputs full RTF | ||||
|     documents with color information and other useful stuff. Perfect for Copy and | ||||
|     Paste into Microsoft(R) Word(R) documents. | ||||
|  | ||||
|     Please note that ``encoding`` and ``outencoding`` options are ignored. | ||||
|     The RTF format is ASCII natively, but handles unicode characters correctly | ||||
|     thanks to escape sequences. | ||||
|  | ||||
|     .. versionadded:: 0.6 | ||||
|  | ||||
|     Additional options accepted: | ||||
|  | ||||
|     `style` | ||||
|         The style to use; can be a string or a Style subclass (default: | ||||
|         ``'default'``). | ||||
|  | ||||
|     `fontface` | ||||
|         The font family to use, for example ``Bitstream Vera Sans``. Defaults | ||||
|         to some generic font which is supposed to have fixed width. | ||||
|  | ||||
|     `fontsize` | ||||
|         Size of the font used. Size is specified in half points. The | ||||
|         default is 24 half-points, giving a size 12 font. | ||||
|  | ||||
|         .. versionadded:: 2.0 | ||||
|     """ | ||||
|     name = 'RTF' | ||||
|     aliases = ['rtf'] | ||||
|     filenames = ['*.rtf'] | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         r""" | ||||
|         Additional options accepted: | ||||
|  | ||||
|         ``fontface`` | ||||
|             Name of the font used. Could for example be ``'Courier New'`` | ||||
|             to further specify the default which is ``'\fmodern'``. The RTF | ||||
|             specification claims that ``\fmodern`` are "Fixed-pitch serif | ||||
|             and sans serif fonts". Hope every RTF implementation thinks | ||||
|             the same about modern... | ||||
|  | ||||
|         """ | ||||
|         Formatter.__init__(self, **options) | ||||
|         self.fontface = options.get('fontface') or '' | ||||
|         self.fontsize = get_int_opt(options, 'fontsize', 0) | ||||
|  | ||||
|     def _escape(self, text): | ||||
|         return text.replace('\\', '\\\\') \ | ||||
|                    .replace('{', '\\{') \ | ||||
|                    .replace('}', '\\}') | ||||
|  | ||||
|     def _escape_text(self, text): | ||||
|         # empty string: return early (a small performance improvement) | ||||
|         if not text: | ||||
|             return '' | ||||
|  | ||||
|         # escape text | ||||
|         text = self._escape(text) | ||||
|  | ||||
|         buf = [] | ||||
|         for c in text: | ||||
|             cn = ord(c) | ||||
|             if cn < (2**7): | ||||
|                 # ASCII character | ||||
|                 buf.append(str(c)) | ||||
|             elif (2**7) <= cn < (2**16): | ||||
|                 # single unicode escape sequence | ||||
|                 buf.append('{\\u%d}' % cn) | ||||
|             elif (2**16) <= cn: | ||||
|                 # RTF limits unicode to 16 bits. | ||||
|                 # Force surrogate pairs | ||||
|                 buf.append('{\\u%d}{\\u%d}' % surrogatepair(cn)) | ||||
|  | ||||
|         return ''.join(buf).replace('\n', '\\par\n') | ||||
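|  | ||||
|     # A few concrete results of the escaping above (editor's note): | ||||
|     # | ||||
|     #     self._escape_text('{x}')        -> '\\{x\\}' | ||||
|     #     self._escape_text('\u20ac')     -> '{\\u8364}'             (16-bit escape) | ||||
|     #     self._escape_text('\U0001d11e') -> '{\\u55348}{\\u56606}'  (surrogate pair) | ||||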
|  | ||||
|     def format_unencoded(self, tokensource, outfile): | ||||
|         # rtf 1.8 header | ||||
|         outfile.write('{\\rtf1\\ansi\\uc0\\deff0' | ||||
|                       '{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}' | ||||
|                       '{\\colortbl;' % (self.fontface and | ||||
|                                         ' ' + self._escape(self.fontface) or | ||||
|                                         '')) | ||||
|  | ||||
|         # convert colors and save them in a mapping to access them later. | ||||
|         color_mapping = {} | ||||
|         offset = 1 | ||||
|         for _, style in self.style: | ||||
|             for color in style['color'], style['bgcolor'], style['border']: | ||||
|                 if color and color not in color_mapping: | ||||
|                     color_mapping[color] = offset | ||||
|                     outfile.write('\\red%d\\green%d\\blue%d;' % ( | ||||
|                         int(color[0:2], 16), | ||||
|                         int(color[2:4], 16), | ||||
|                         int(color[4:6], 16) | ||||
|                     )) | ||||
|                     offset += 1 | ||||
|         outfile.write('}\\f0 ') | ||||
|         if self.fontsize: | ||||
|             outfile.write('\\fs%d' % self.fontsize) | ||||
|  | ||||
|         # highlight stream | ||||
|         for ttype, value in tokensource: | ||||
|             while not self.style.styles_token(ttype) and ttype.parent: | ||||
|                 ttype = ttype.parent | ||||
|             style = self.style.style_for_token(ttype) | ||||
|             buf = [] | ||||
|             if style['bgcolor']: | ||||
|                 buf.append('\\cb%d' % color_mapping[style['bgcolor']]) | ||||
|             if style['color']: | ||||
|                 buf.append('\\cf%d' % color_mapping[style['color']]) | ||||
|             if style['bold']: | ||||
|                 buf.append('\\b') | ||||
|             if style['italic']: | ||||
|                 buf.append('\\i') | ||||
|             if style['underline']: | ||||
|                 buf.append('\\ul') | ||||
|             if style['border']: | ||||
|                 buf.append('\\chbrdr\\chcfpat%d' % | ||||
|                            color_mapping[style['border']]) | ||||
|             start = ''.join(buf) | ||||
|             if start: | ||||
|                 outfile.write('{%s ' % start) | ||||
|             outfile.write(self._escape_text(value)) | ||||
|             if start: | ||||
|                 outfile.write('}') | ||||
|  | ||||
|         outfile.write('}') | ||||
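|  | ||||
| # Usage sketch (editor's note, not part of the vendored module): | ||||
| # | ||||
| #     from pip._vendor.pygments import highlight | ||||
| #     from pip._vendor.pygments.lexers import PythonLexer | ||||
| #     rtf = highlight('x = 1\n', PythonLexer(), RtfFormatter(fontsize=24)) | ||||
| #     with open('out.rtf', 'w') as f: | ||||
| #         f.write(rtf)    # paste-ready for RTF-capable word processors | ||||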
| @ -0,0 +1,188 @@ | ||||
| """ | ||||
|     pygments.formatters.svg | ||||
|     ~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Formatter for SVG output. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| from pip._vendor.pygments.formatter import Formatter | ||||
| from pip._vendor.pygments.token import Comment | ||||
| from pip._vendor.pygments.util import get_bool_opt, get_int_opt | ||||
|  | ||||
| __all__ = ['SvgFormatter'] | ||||
|  | ||||
|  | ||||
| def escape_html(text): | ||||
|     """Escape &, <, > as well as single and double quotes for HTML.""" | ||||
|     return text.replace('&', '&amp;'). \ | ||||
|                 replace('<', '&lt;'). \ | ||||
|                 replace('>', '&gt;'). \ | ||||
|                 replace('"', '&quot;'). \ | ||||
|                 replace("'", '&#39;') | ||||
|  | ||||
|  | ||||
| class2style = {} | ||||
|  | ||||
| class SvgFormatter(Formatter): | ||||
|     """ | ||||
|     Format tokens as an SVG graphics file.  This formatter is still experimental. | ||||
|     Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` | ||||
|     coordinates containing ``<tspan>`` elements with the individual token styles. | ||||
|  | ||||
|     By default, this formatter outputs a full SVG document including doctype | ||||
|     declaration and the ``<svg>`` root element. | ||||
|  | ||||
|     .. versionadded:: 0.9 | ||||
|  | ||||
|     Additional options accepted: | ||||
|  | ||||
|     `nowrap` | ||||
|         Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and | ||||
|         don't add an XML declaration and a doctype.  If true, the `fontfamily` | ||||
|         and `fontsize` options are ignored.  Defaults to ``False``. | ||||
|  | ||||
|     `fontfamily` | ||||
|         The value to give the wrapping ``<g>`` element's ``font-family`` | ||||
|         attribute, defaults to ``"monospace"``. | ||||
|  | ||||
|     `fontsize` | ||||
|         The value to give the wrapping ``<g>`` element's ``font-size`` | ||||
|         attribute, defaults to ``"14px"``. | ||||
|  | ||||
|     `linenos` | ||||
|         If ``True``, add line numbers (default: ``False``). | ||||
|  | ||||
|     `linenostart` | ||||
|         The line number for the first line (default: ``1``). | ||||
|  | ||||
|     `linenostep` | ||||
|         If set to a number n > 1, only every nth line number is printed. | ||||
|  | ||||
|     `linenowidth` | ||||
|         Maximum width devoted to line numbers (default: ``3*ystep``, sufficient | ||||
|         for up to 4-digit line numbers; increase the width for longer code | ||||
|         blocks). | ||||
|  | ||||
|     `xoffset` | ||||
|         Starting offset in X direction, defaults to ``0``. | ||||
|  | ||||
|     `yoffset` | ||||
|         Starting offset in Y direction, defaults to the font size if it is given | ||||
|         in pixels, or ``20`` otherwise.  (This is necessary since text coordinates | ||||
|         refer to the text baseline, not the top edge.) | ||||
|  | ||||
|     `ystep` | ||||
|         Offset to add to the Y coordinate for each subsequent line.  This should | ||||
|         roughly be the text size plus 5.  It defaults to that value if the text | ||||
|         size is given in pixels, or ``25`` otherwise. | ||||
|  | ||||
|     `spacehack` | ||||
|         Convert spaces in the source to ``&#160;``, which are non-breaking | ||||
|         spaces.  SVG provides the ``xml:space`` attribute to control how | ||||
|         whitespace inside tags is handled; in theory, the ``preserve`` value | ||||
|         could be used to keep all whitespace as-is.  However, many current SVG | ||||
|         viewers don't obey that rule, so this option is provided as a workaround | ||||
|         and defaults to ``True``. | ||||
|     """ | ||||
|     name = 'SVG' | ||||
|     aliases = ['svg'] | ||||
|     filenames = ['*.svg'] | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Formatter.__init__(self, **options) | ||||
|         self.nowrap = get_bool_opt(options, 'nowrap', False) | ||||
|         self.fontfamily = options.get('fontfamily', 'monospace') | ||||
|         self.fontsize = options.get('fontsize', '14px') | ||||
|         self.xoffset = get_int_opt(options, 'xoffset', 0) | ||||
|         fs = self.fontsize.strip() | ||||
|         if fs.endswith('px'): | ||||
|             fs = fs[:-2].strip() | ||||
|         try: | ||||
|             int_fs = int(fs) | ||||
|         except ValueError: | ||||
|             int_fs = 20 | ||||
|         self.yoffset = get_int_opt(options, 'yoffset', int_fs) | ||||
|         self.ystep = get_int_opt(options, 'ystep', int_fs + 5) | ||||
|         self.spacehack = get_bool_opt(options, 'spacehack', True) | ||||
|         self.linenos = get_bool_opt(options, 'linenos', False) | ||||
|         self.linenostart = get_int_opt(options, 'linenostart', 1) | ||||
|         self.linenostep = get_int_opt(options, 'linenostep', 1) | ||||
|         self.linenowidth = get_int_opt(options, 'linenowidth', 3 * self.ystep) | ||||
|         self._stylecache = {} | ||||
|  | ||||
|     def format_unencoded(self, tokensource, outfile): | ||||
|         """ | ||||
|         Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` | ||||
|         tuples, and write it into ``outfile``. | ||||
|  | ||||
|         For our implementation we put all lines in their own 'line group'. | ||||
|         """ | ||||
|         x = self.xoffset | ||||
|         y = self.yoffset | ||||
|         if not self.nowrap: | ||||
|             if self.encoding: | ||||
|                 outfile.write('<?xml version="1.0" encoding="%s"?>\n' % | ||||
|                               self.encoding) | ||||
|             else: | ||||
|                 outfile.write('<?xml version="1.0"?>\n') | ||||
|             outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" ' | ||||
|                           '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/' | ||||
|                           'svg10.dtd">\n') | ||||
|             outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n') | ||||
|             outfile.write('<g font-family="%s" font-size="%s">\n' % | ||||
|                           (self.fontfamily, self.fontsize)) | ||||
|  | ||||
|         counter = self.linenostart | ||||
|         counter_step = self.linenostep | ||||
|         counter_style = self._get_style(Comment) | ||||
|         line_x = x | ||||
|  | ||||
|         if self.linenos: | ||||
|             if counter % counter_step == 0: | ||||
|                 outfile.write('<text x="%s" y="%s" %s text-anchor="end">%s</text>' % | ||||
|                               (x + self.linenowidth, y, counter_style, counter)) | ||||
|             line_x += self.linenowidth + self.ystep | ||||
|             counter += 1 | ||||
|  | ||||
|         outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (line_x, y)) | ||||
|         for ttype, value in tokensource: | ||||
|             style = self._get_style(ttype) | ||||
|             tspan = style and '<tspan' + style + '>' or '' | ||||
|             tspanend = tspan and '</tspan>' or '' | ||||
|             value = escape_html(value) | ||||
|             if self.spacehack: | ||||
|                 value = value.expandtabs().replace(' ', '&#160;') | ||||
|             parts = value.split('\n') | ||||
|             for part in parts[:-1]: | ||||
|                 outfile.write(tspan + part + tspanend) | ||||
|                 y += self.ystep | ||||
|                 outfile.write('</text>\n') | ||||
|                 if self.linenos and counter % counter_step == 0: | ||||
|                     outfile.write('<text x="%s" y="%s" text-anchor="end" %s>%s</text>' % | ||||
|                                   (x + self.linenowidth, y, counter_style, counter)) | ||||
|  | ||||
|                 counter += 1 | ||||
|                 outfile.write('<text x="%s" y="%s" xml:space="preserve">' % | ||||
|                               (line_x, y)) | ||||
|             outfile.write(tspan + parts[-1] + tspanend) | ||||
|         outfile.write('</text>') | ||||
|  | ||||
|         if not self.nowrap: | ||||
|             outfile.write('</g></svg>\n') | ||||
|  | ||||
|     def _get_style(self, tokentype): | ||||
|         if tokentype in self._stylecache: | ||||
|             return self._stylecache[tokentype] | ||||
|         otokentype = tokentype | ||||
|         while not self.style.styles_token(tokentype): | ||||
|             tokentype = tokentype.parent | ||||
|         value = self.style.style_for_token(tokentype) | ||||
|         result = '' | ||||
|         if value['color']: | ||||
|             result = ' fill="#' + value['color'] + '"' | ||||
|         if value['bold']: | ||||
|             result += ' font-weight="bold"' | ||||
|         if value['italic']: | ||||
|             result += ' font-style="italic"' | ||||
|         self._stylecache[otokentype] = result | ||||
|         return result | ||||
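|  | ||||
| # Usage sketch (editor's note, not part of the vendored module): | ||||
| # | ||||
| #     from pip._vendor.pygments import highlight | ||||
| #     from pip._vendor.pygments.lexers import PythonLexer | ||||
| #     svg = highlight('x = 1\n', PythonLexer(), | ||||
| #                     SvgFormatter(linenos=True, fontsize='16px')) | ||||
| # | ||||
| # `svg` starts with an XML declaration and contains one <text> element per | ||||
| # source line. | ||||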
| @ -0,0 +1,127 @@ | ||||
| """ | ||||
|     pygments.formatters.terminal | ||||
|     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Formatter for terminal output with ANSI sequences. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| from pip._vendor.pygments.formatter import Formatter | ||||
| from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \ | ||||
|     Number, Operator, Generic, Token, Whitespace | ||||
| from pip._vendor.pygments.console import ansiformat | ||||
| from pip._vendor.pygments.util import get_choice_opt | ||||
|  | ||||
|  | ||||
| __all__ = ['TerminalFormatter'] | ||||
|  | ||||
|  | ||||
| #: Map token types to a tuple of color values for light and dark | ||||
| #: backgrounds. | ||||
| TERMINAL_COLORS = { | ||||
|     Token:              ('',             ''), | ||||
|  | ||||
|     Whitespace:         ('gray',         'brightblack'), | ||||
|     Comment:            ('gray',         'brightblack'), | ||||
|     Comment.Preproc:    ('cyan',         'brightcyan'), | ||||
|     Keyword:            ('blue',         'brightblue'), | ||||
|     Keyword.Type:       ('cyan',         'brightcyan'), | ||||
|     Operator.Word:      ('magenta',      'brightmagenta'), | ||||
|     Name.Builtin:       ('cyan',         'brightcyan'), | ||||
|     Name.Function:      ('green',        'brightgreen'), | ||||
|     Name.Namespace:     ('_cyan_',       '_brightcyan_'), | ||||
|     Name.Class:         ('_green_',      '_brightgreen_'), | ||||
|     Name.Exception:     ('cyan',         'brightcyan'), | ||||
|     Name.Decorator:     ('brightblack',  'gray'), | ||||
|     Name.Variable:      ('red',          'brightred'), | ||||
|     Name.Constant:      ('red',          'brightred'), | ||||
|     Name.Attribute:     ('cyan',         'brightcyan'), | ||||
|     Name.Tag:           ('brightblue',   'brightblue'), | ||||
|     String:             ('yellow',       'yellow'), | ||||
|     Number:             ('blue',         'brightblue'), | ||||
|  | ||||
|     Generic.Deleted:    ('brightred',    'brightred'), | ||||
|     Generic.Inserted:   ('green',        'brightgreen'), | ||||
|     Generic.Heading:    ('**',           '**'), | ||||
|     Generic.Subheading: ('*magenta*',    '*brightmagenta*'), | ||||
|     Generic.Prompt:     ('**',           '**'), | ||||
|     Generic.Error:      ('brightred',    'brightred'), | ||||
|  | ||||
|     Error:              ('_brightred_',  '_brightred_'), | ||||
| } | ||||
|  | ||||
|  | ||||
| class TerminalFormatter(Formatter): | ||||
|     r""" | ||||
|     Format tokens with ANSI color sequences, for output in a text console. | ||||
|     Color sequences are terminated at newlines, so that paging the output | ||||
|     works correctly. | ||||
|  | ||||
|     The `get_style_defs()` method doesn't do anything special since there is | ||||
|     no support for common styles. | ||||
|  | ||||
|     Options accepted: | ||||
|  | ||||
|     `bg` | ||||
|         Set to ``"light"`` or ``"dark"`` depending on the terminal's background | ||||
|         (default: ``"light"``). | ||||
|  | ||||
|     `colorscheme` | ||||
|         A dictionary mapping token types to (lightbg, darkbg) color names or | ||||
|         ``None`` (default: ``None`` = use builtin colorscheme). | ||||
|  | ||||
|     `linenos` | ||||
|         Set to ``True`` to have line numbers on the terminal output as well | ||||
|         (default: ``False`` = no line numbers). | ||||
|     """ | ||||
|     name = 'Terminal' | ||||
|     aliases = ['terminal', 'console'] | ||||
|     filenames = [] | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Formatter.__init__(self, **options) | ||||
|         self.darkbg = get_choice_opt(options, 'bg', | ||||
|                                      ['light', 'dark'], 'light') == 'dark' | ||||
|         self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS | ||||
|         self.linenos = options.get('linenos', False) | ||||
|         self._lineno = 0 | ||||
|  | ||||
|     def format(self, tokensource, outfile): | ||||
|         return Formatter.format(self, tokensource, outfile) | ||||
|  | ||||
|     def _write_lineno(self, outfile): | ||||
|         self._lineno += 1 | ||||
|         outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno)) | ||||
|  | ||||
|     def _get_color(self, ttype): | ||||
|         # self.colorscheme is a dict usually containing generic token types, | ||||
|         # so we have to walk the tree of dots.  The base Token type must be a | ||||
|         # key, even if its value is just an empty string, as in the default above. | ||||
|         colors = self.colorscheme.get(ttype) | ||||
|         while colors is None: | ||||
|             ttype = ttype.parent | ||||
|             colors = self.colorscheme.get(ttype) | ||||
|         return colors[self.darkbg] | ||||
|  | ||||
|     def format_unencoded(self, tokensource, outfile): | ||||
|         if self.linenos: | ||||
|             self._write_lineno(outfile) | ||||
|  | ||||
|         for ttype, value in tokensource: | ||||
|             color = self._get_color(ttype) | ||||
|  | ||||
|             for line in value.splitlines(True): | ||||
|                 if color: | ||||
|                     outfile.write(ansiformat(color, line.rstrip('\n'))) | ||||
|                 else: | ||||
|                     outfile.write(line.rstrip('\n')) | ||||
|                 if line.endswith('\n'): | ||||
|                     if self.linenos: | ||||
|                         self._write_lineno(outfile) | ||||
|                     else: | ||||
|                         outfile.write('\n') | ||||
|  | ||||
|         if self.linenos: | ||||
|             outfile.write("\n") | ||||
| @ -0,0 +1,338 @@ | ||||
| """ | ||||
|     pygments.formatters.terminal256 | ||||
|     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Formatter for 256-color terminal output with ANSI sequences. | ||||
|  | ||||
|     RGB-to-XTERM color conversion routines adapted from xterm256-conv | ||||
|     tool (http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2) | ||||
|     by Wolfgang Frisch. | ||||
|  | ||||
|     Formatter version 1. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| # TODO: | ||||
| #  - Options to map style's bold/underline/italic/border attributes | ||||
| #    to some ANSI attributes (something like 'italic=underline') | ||||
| #  - An option to output "style RGB to xterm RGB/index" conversion table | ||||
| #  - An option to indicate that we are running in "reverse background" | ||||
| #    xterm. This means that default colors are white-on-black, not | ||||
| #    black-on-white, so colors like "white background" need to be converted | ||||
| #    to "white background, black foreground", etc... | ||||
|  | ||||
| from pip._vendor.pygments.formatter import Formatter | ||||
| from pip._vendor.pygments.console import codes | ||||
| from pip._vendor.pygments.style import ansicolors | ||||
|  | ||||
|  | ||||
| __all__ = ['Terminal256Formatter', 'TerminalTrueColorFormatter'] | ||||
|  | ||||
|  | ||||
| class EscapeSequence: | ||||
|     def __init__(self, fg=None, bg=None, bold=False, underline=False, italic=False): | ||||
|         self.fg = fg | ||||
|         self.bg = bg | ||||
|         self.bold = bold | ||||
|         self.underline = underline | ||||
|         self.italic = italic | ||||
|  | ||||
|     def escape(self, attrs): | ||||
|         if attrs: | ||||
|             return "\x1b[" + ";".join(attrs) + "m" | ||||
|         return "" | ||||
|  | ||||
|     def color_string(self): | ||||
|         attrs = [] | ||||
|         if self.fg is not None: | ||||
|             if self.fg in ansicolors: | ||||
|                 esc = codes[self.fg.replace('ansi', '')] | ||||
|                 if ';01m' in esc: | ||||
|                     self.bold = True | ||||
|                 # extract fg color code. | ||||
|                 attrs.append(esc[2:4]) | ||||
|             else: | ||||
|                 attrs.extend(("38", "5", "%i" % self.fg)) | ||||
|         if self.bg is not None: | ||||
|             if self.bg in ansicolors: | ||||
|                 esc = codes[self.bg.replace('ansi', '')] | ||||
|                 # extract fg color code, add 10 for bg. | ||||
|                 attrs.append(str(int(esc[2:4])+10)) | ||||
|             else: | ||||
|                 attrs.extend(("48", "5", "%i" % self.bg)) | ||||
|         if self.bold: | ||||
|             attrs.append("01") | ||||
|         if self.underline: | ||||
|             attrs.append("04") | ||||
|         if self.italic: | ||||
|             attrs.append("03") | ||||
|         return self.escape(attrs) | ||||
|  | ||||
|     def true_color_string(self): | ||||
|         attrs = [] | ||||
|         if self.fg: | ||||
|             attrs.extend(("38", "2", str(self.fg[0]), str(self.fg[1]), str(self.fg[2]))) | ||||
|         if self.bg: | ||||
|             attrs.extend(("48", "2", str(self.bg[0]), str(self.bg[1]), str(self.bg[2]))) | ||||
|         if self.bold: | ||||
|             attrs.append("01") | ||||
|         if self.underline: | ||||
|             attrs.append("04") | ||||
|         if self.italic: | ||||
|             attrs.append("03") | ||||
|         return self.escape(attrs) | ||||
|  | ||||
|     def reset_string(self): | ||||
|         attrs = [] | ||||
|         if self.fg is not None: | ||||
|             attrs.append("39") | ||||
|         if self.bg is not None: | ||||
|             attrs.append("49") | ||||
|         if self.bold or self.underline or self.italic: | ||||
|             attrs.append("00") | ||||
|         return self.escape(attrs) | ||||
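|  | ||||
| # For instance (editor's note): EscapeSequence(fg=196, bold=True) yields | ||||
| # color_string() == '\x1b[38;5;196;01m' and reset_string() == '\x1b[39;00m'. | ||||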
|  | ||||
|  | ||||
| class Terminal256Formatter(Formatter): | ||||
|     """ | ||||
|     Format tokens with ANSI color sequences, for output in a 256-color | ||||
|     terminal or console.  Like in `TerminalFormatter`, color sequences | ||||
|     are terminated at newlines, so that paging the output works correctly. | ||||
|  | ||||
|     The formatter takes colors from a style defined by the `style` option | ||||
|     and converts them to the nearest ANSI 256-color escape sequences. Bold | ||||
|     and underline attributes from the style are preserved (and displayed). | ||||
|  | ||||
|     .. versionadded:: 0.9 | ||||
|  | ||||
|     .. versionchanged:: 2.2 | ||||
|        If the used style defines foreground colors in the form ``#ansi*``, then | ||||
|        `Terminal256Formatter` will map these to non-extended foreground colors. | ||||
|        See :ref:`AnsiTerminalStyle` for more information. | ||||
|  | ||||
|     .. versionchanged:: 2.4 | ||||
|        The ANSI color names have been updated with names that are easier to | ||||
|        understand and align with the color names of other projects and | ||||
|        terminals. See :ref:`this table <new-ansi-color-names>` for more | ||||
|        information. | ||||
|  | ||||
|  | ||||
|     Options accepted: | ||||
|  | ||||
|     `style` | ||||
|         The style to use; can be a string or a Style subclass (default: | ||||
|         ``'default'``). | ||||
|  | ||||
|     `linenos` | ||||
|         Set to ``True`` to have line numbers on the terminal output as well | ||||
|         (default: ``False`` = no line numbers). | ||||
|     """ | ||||
|     name = 'Terminal256' | ||||
|     aliases = ['terminal256', 'console256', '256'] | ||||
|     filenames = [] | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         Formatter.__init__(self, **options) | ||||
|  | ||||
|         self.xterm_colors = [] | ||||
|         self.best_match = {} | ||||
|         self.style_string = {} | ||||
|  | ||||
|         self.usebold = 'nobold' not in options | ||||
|         self.useunderline = 'nounderline' not in options | ||||
|         self.useitalic = 'noitalic' not in options | ||||
|  | ||||
|         self._build_color_table()  # build an RGB-to-256 color conversion table | ||||
|         self._setup_styles()  # convert selected style's colors to term. colors | ||||
|  | ||||
|         self.linenos = options.get('linenos', False) | ||||
|         self._lineno = 0 | ||||
|  | ||||
|     def _build_color_table(self): | ||||
|         # colors 0..15: 16 basic colors | ||||
|  | ||||
|         self.xterm_colors.append((0x00, 0x00, 0x00))  # 0 | ||||
|         self.xterm_colors.append((0xcd, 0x00, 0x00))  # 1 | ||||
|         self.xterm_colors.append((0x00, 0xcd, 0x00))  # 2 | ||||
|         self.xterm_colors.append((0xcd, 0xcd, 0x00))  # 3 | ||||
|         self.xterm_colors.append((0x00, 0x00, 0xee))  # 4 | ||||
|         self.xterm_colors.append((0xcd, 0x00, 0xcd))  # 5 | ||||
|         self.xterm_colors.append((0x00, 0xcd, 0xcd))  # 6 | ||||
|         self.xterm_colors.append((0xe5, 0xe5, 0xe5))  # 7 | ||||
|         self.xterm_colors.append((0x7f, 0x7f, 0x7f))  # 8 | ||||
|         self.xterm_colors.append((0xff, 0x00, 0x00))  # 9 | ||||
|         self.xterm_colors.append((0x00, 0xff, 0x00))  # 10 | ||||
|         self.xterm_colors.append((0xff, 0xff, 0x00))  # 11 | ||||
|         self.xterm_colors.append((0x5c, 0x5c, 0xff))  # 12 | ||||
|         self.xterm_colors.append((0xff, 0x00, 0xff))  # 13 | ||||
|         self.xterm_colors.append((0x00, 0xff, 0xff))  # 14 | ||||
|         self.xterm_colors.append((0xff, 0xff, 0xff))  # 15 | ||||
|  | ||||
|         # colors 16..232: the 6x6x6 color cube | ||||
|  | ||||
|         valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff) | ||||
|  | ||||
|         for i in range(217): | ||||
|             r = valuerange[(i // 36) % 6] | ||||
|             g = valuerange[(i // 6) % 6] | ||||
|             b = valuerange[i % 6] | ||||
|             self.xterm_colors.append((r, g, b)) | ||||
|  | ||||
|         # colors 233..253: grayscale | ||||
|  | ||||
|         for i in range(1, 22): | ||||
|             v = 8 + i * 10 | ||||
|             self.xterm_colors.append((v, v, v)) | ||||
|  | ||||
|     def _closest_color(self, r, g, b): | ||||
|         distance = 257*257*3  # "infinity" (>distance from #000000 to #ffffff) | ||||
|         match = 0 | ||||
|  | ||||
|         for i in range(0, 254): | ||||
|             values = self.xterm_colors[i] | ||||
|  | ||||
|             rd = r - values[0] | ||||
|             gd = g - values[1] | ||||
|             bd = b - values[2] | ||||
|             d = rd*rd + gd*gd + bd*bd | ||||
|  | ||||
|             if d < distance: | ||||
|                 match = i | ||||
|                 distance = d | ||||
|         return match | ||||
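|  | ||||
|     # For instance (editor's note): a mid gray hits the grayscale ramp | ||||
|     # exactly (8 + 12*10 == 0x80, appended at index 233 + 11 == 244), so | ||||
|     # self._closest_color(0x80, 0x80, 0x80) returns 244. | ||||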
|  | ||||
|     def _color_index(self, color): | ||||
|         index = self.best_match.get(color, None) | ||||
|         if color in ansicolors: | ||||
|             # strip the `ansi/#ansi` part and look up code | ||||
|             index = color | ||||
|             self.best_match[color] = index | ||||
|         if index is None: | ||||
|             try: | ||||
|                 rgb = int(str(color), 16) | ||||
|             except ValueError: | ||||
|                 rgb = 0 | ||||
|  | ||||
|             r = (rgb >> 16) & 0xff | ||||
|             g = (rgb >> 8) & 0xff | ||||
|             b = rgb & 0xff | ||||
|             index = self._closest_color(r, g, b) | ||||
|             self.best_match[color] = index | ||||
|         return index | ||||
|  | ||||
|     def _setup_styles(self): | ||||
|         for ttype, ndef in self.style: | ||||
|             escape = EscapeSequence() | ||||
|             # get foreground from ansicolor if set | ||||
|             if ndef['ansicolor']: | ||||
|                 escape.fg = self._color_index(ndef['ansicolor']) | ||||
|             elif ndef['color']: | ||||
|                 escape.fg = self._color_index(ndef['color']) | ||||
|             if ndef['bgansicolor']: | ||||
|                 escape.bg = self._color_index(ndef['bgansicolor']) | ||||
|             elif ndef['bgcolor']: | ||||
|                 escape.bg = self._color_index(ndef['bgcolor']) | ||||
|             if self.usebold and ndef['bold']: | ||||
|                 escape.bold = True | ||||
|             if self.useunderline and ndef['underline']: | ||||
|                 escape.underline = True | ||||
|             if self.useitalic and ndef['italic']: | ||||
|                 escape.italic = True | ||||
|             self.style_string[str(ttype)] = (escape.color_string(), | ||||
|                                              escape.reset_string()) | ||||
|  | ||||
|     def _write_lineno(self, outfile): | ||||
|         self._lineno += 1 | ||||
|         outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno)) | ||||
|  | ||||
|     def format(self, tokensource, outfile): | ||||
|         return Formatter.format(self, tokensource, outfile) | ||||
|  | ||||
|     def format_unencoded(self, tokensource, outfile): | ||||
|         if self.linenos: | ||||
|             self._write_lineno(outfile) | ||||
|  | ||||
|         for ttype, value in tokensource: | ||||
|             not_found = True | ||||
|             while ttype and not_found: | ||||
|                 try: | ||||
|                     # outfile.write( "<" + str(ttype) + ">" ) | ||||
|                     on, off = self.style_string[str(ttype)] | ||||
|  | ||||
|                     # Like TerminalFormatter, add "reset colors" escape sequence | ||||
|                     # on newline. | ||||
|                     spl = value.split('\n') | ||||
|                     for line in spl[:-1]: | ||||
|                         if line: | ||||
|                             outfile.write(on + line + off) | ||||
|                         if self.linenos: | ||||
|                             self._write_lineno(outfile) | ||||
|                         else: | ||||
|                             outfile.write('\n') | ||||
|  | ||||
|                     if spl[-1]: | ||||
|                         outfile.write(on + spl[-1] + off) | ||||
|  | ||||
|                     not_found = False | ||||
|                     # outfile.write( '#' + str(ttype) + '#' ) | ||||
|  | ||||
|                 except KeyError: | ||||
|                     # ottype = ttype | ||||
|                     ttype = ttype.parent | ||||
|                     # outfile.write( '!' + str(ottype) + '->' + str(ttype) + '!' ) | ||||
|  | ||||
|             if not_found: | ||||
|                 outfile.write(value) | ||||
|  | ||||
|         if self.linenos: | ||||
|             outfile.write("\n") | ||||
|  | ||||
|  | ||||
|  | ||||
| class TerminalTrueColorFormatter(Terminal256Formatter): | ||||
|     r""" | ||||
|     Format tokens with ANSI color sequences, for output in a true-color | ||||
|     terminal or console.  Like in `TerminalFormatter`, color sequences | ||||
|     are terminated at newlines, so that paging the output works correctly. | ||||
|  | ||||
|     .. versionadded:: 2.1 | ||||
|  | ||||
|     Options accepted: | ||||
|  | ||||
|     `style` | ||||
|         The style to use; can be a string or a Style subclass (default: | ||||
|         ``'default'``). | ||||
|     """ | ||||
|     name = 'TerminalTrueColor' | ||||
|     aliases = ['terminal16m', 'console16m', '16m'] | ||||
|     filenames = [] | ||||
|  | ||||
|     def _build_color_table(self): | ||||
|         pass | ||||
|  | ||||
|     def _color_tuple(self, color): | ||||
|         try: | ||||
|             rgb = int(str(color), 16) | ||||
|         except ValueError: | ||||
|             return None | ||||
|         r = (rgb >> 16) & 0xff | ||||
|         g = (rgb >> 8) & 0xff | ||||
|         b = rgb & 0xff | ||||
|         return (r, g, b) | ||||
|  | ||||
|     def _setup_styles(self): | ||||
|         for ttype, ndef in self.style: | ||||
|             escape = EscapeSequence() | ||||
|             if ndef['color']: | ||||
|                 escape.fg = self._color_tuple(ndef['color']) | ||||
|             if ndef['bgcolor']: | ||||
|                 escape.bg = self._color_tuple(ndef['bgcolor']) | ||||
|             if self.usebold and ndef['bold']: | ||||
|                 escape.bold = True | ||||
|             if self.useunderline and ndef['underline']: | ||||
|                 escape.underline = True | ||||
|             if self.useitalic and ndef['italic']: | ||||
|                 escape.italic = True | ||||
|             self.style_string[str(ttype)] = (escape.true_color_string(), | ||||
|                                              escape.reset_string()) | ||||
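|  | ||||
| # Usage sketch (editor's note, not part of the vendored module); the two | ||||
| # classes differ only in how colors are emitted (256-color palette vs. | ||||
| # 24-bit truecolor): | ||||
| # | ||||
| #     from pip._vendor.pygments import highlight | ||||
| #     from pip._vendor.pygments.lexers import PythonLexer | ||||
| #     print(highlight('x = 1\n', PythonLexer(), Terminal256Formatter())) | ||||
| #     print(highlight('x = 1\n', PythonLexer(), TerminalTrueColorFormatter())) | ||||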
lib/python3.11/site-packages/pip/_vendor/pygments/lexer.py (new file, 882 lines)
							| @ -0,0 +1,882 @@ | ||||
| """ | ||||
|     pygments.lexer | ||||
|     ~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Base lexer classes. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| import re | ||||
| import sys | ||||
| import time | ||||
|  | ||||
| from pip._vendor.pygments.filter import apply_filters, Filter | ||||
| from pip._vendor.pygments.filters import get_filter_by_name | ||||
| from pip._vendor.pygments.token import Error, Text, Other, _TokenType | ||||
| from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \ | ||||
|     make_analysator, Future, guess_decode | ||||
| from pip._vendor.pygments.regexopt import regex_opt | ||||
|  | ||||
| __all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer', | ||||
|            'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this', | ||||
|            'default', 'words'] | ||||
|  | ||||
|  | ||||
| _encoding_map = [(b'\xef\xbb\xbf', 'utf-8'), | ||||
|                  (b'\xff\xfe\0\0', 'utf-32'), | ||||
|                  (b'\0\0\xfe\xff', 'utf-32be'), | ||||
|                  (b'\xff\xfe', 'utf-16'), | ||||
|                  (b'\xfe\xff', 'utf-16be')] | ||||
|  | ||||
| _default_analyse = staticmethod(lambda x: 0.0) | ||||
|  | ||||
|  | ||||
| class LexerMeta(type): | ||||
|     """ | ||||
|     This metaclass automagically converts ``analyse_text`` methods into | ||||
|     static methods which always return float values. | ||||
|     """ | ||||
|  | ||||
|     def __new__(mcs, name, bases, d): | ||||
|         if 'analyse_text' in d: | ||||
|             d['analyse_text'] = make_analysator(d['analyse_text']) | ||||
|         return type.__new__(mcs, name, bases, d) | ||||
|  | ||||
|  | ||||
| class Lexer(metaclass=LexerMeta): | ||||
|     """ | ||||
|     Lexer for a specific language. | ||||
|  | ||||
|     Basic options recognized: | ||||
|     ``stripnl`` | ||||
|         Strip leading and trailing newlines from the input (default: True). | ||||
|     ``stripall`` | ||||
|         Strip all leading and trailing whitespace from the input | ||||
|         (default: False). | ||||
|     ``ensurenl`` | ||||
|         Make sure that the input ends with a newline (default: True).  This | ||||
|         is required for some lexers that consume input linewise. | ||||
|  | ||||
|         .. versionadded:: 1.3 | ||||
|  | ||||
|     ``tabsize`` | ||||
|         If given and greater than 0, expand tabs in the input (default: 0). | ||||
|     ``encoding`` | ||||
|         If given, must be an encoding name. This encoding will be used to | ||||
|         convert the input string to Unicode, if it is not already a Unicode | ||||
|         string (default: ``'guess'``, which uses a simple UTF-8 / Locale / | ||||
|         Latin1 detection).  Can also be ``'chardet'`` to use the chardet | ||||
|         library, if it is installed. | ||||
|     ``inencoding`` | ||||
|         Overrides the ``encoding`` if given. | ||||
|     """ | ||||
|  | ||||
|     #: Name of the lexer | ||||
|     name = None | ||||
|  | ||||
|     #: URL of the language specification/definition | ||||
|     url = None | ||||
|  | ||||
|     #: Shortcuts for the lexer | ||||
|     aliases = [] | ||||
|  | ||||
|     #: File name globs | ||||
|     filenames = [] | ||||
|  | ||||
|     #: Secondary file name globs | ||||
|     alias_filenames = [] | ||||
|  | ||||
|     #: MIME types | ||||
|     mimetypes = [] | ||||
|  | ||||
|     #: Priority, should multiple lexers match and no content is provided | ||||
|     priority = 0 | ||||
|  | ||||
|     def __init__(self, **options): | ||||
|         self.options = options | ||||
|         self.stripnl = get_bool_opt(options, 'stripnl', True) | ||||
|         self.stripall = get_bool_opt(options, 'stripall', False) | ||||
|         self.ensurenl = get_bool_opt(options, 'ensurenl', True) | ||||
|         self.tabsize = get_int_opt(options, 'tabsize', 0) | ||||
|         self.encoding = options.get('encoding', 'guess') | ||||
|         self.encoding = options.get('inencoding') or self.encoding | ||||
|         self.filters = [] | ||||
|         for filter_ in get_list_opt(options, 'filters', ()): | ||||
|             self.add_filter(filter_) | ||||
|  | ||||
|     def __repr__(self): | ||||
|         if self.options: | ||||
|             return '<pygments.lexers.%s with %r>' % (self.__class__.__name__, | ||||
|                                                      self.options) | ||||
|         else: | ||||
|             return '<pygments.lexers.%s>' % self.__class__.__name__ | ||||
|  | ||||
|     def add_filter(self, filter_, **options): | ||||
|         """ | ||||
|         Add a new stream filter to this lexer. | ||||
|         """ | ||||
|         if not isinstance(filter_, Filter): | ||||
|             filter_ = get_filter_by_name(filter_, **options) | ||||
|         self.filters.append(filter_) | ||||
|  | ||||
|     def analyse_text(text): | ||||
|         """ | ||||
|         Has to return a float between ``0`` and ``1`` that indicates | ||||
|         if a lexer wants to highlight this text. Used by ``guess_lexer``. | ||||
|         If this method returns ``0`` it won't highlight it in any case; if | ||||
|         it returns ``1``, highlighting with this lexer is guaranteed. | ||||
|  | ||||
|         The `LexerMeta` metaclass automatically wraps this function so | ||||
|         that it works like a static method (no ``self`` or ``cls`` | ||||
|         parameter) and the return value is automatically converted to | ||||
|         `float`. If the return value is an object that is boolean `False` | ||||
|         it's the same as if the return value was ``0.0``. | ||||
|         """ | ||||
|  | ||||
|     def get_tokens(self, text, unfiltered=False): | ||||
|         """ | ||||
|         Return an iterable of (tokentype, value) pairs generated from | ||||
|         `text`. If `unfiltered` is set to `True`, the filtering mechanism | ||||
|         is bypassed even if filters are defined. | ||||
|  | ||||
|         Also preprocess the text, i.e. expand tabs and strip it if | ||||
|         wanted, and apply registered filters. | ||||
|         """ | ||||
|         if not isinstance(text, str): | ||||
|             if self.encoding == 'guess': | ||||
|                 text, _ = guess_decode(text) | ||||
|             elif self.encoding == 'chardet': | ||||
|                 try: | ||||
|                     from pip._vendor import chardet | ||||
|                 except ImportError as e: | ||||
|                     raise ImportError('To enable chardet encoding guessing, ' | ||||
|                                       'please install the chardet library ' | ||||
|                                       'from http://chardet.feedparser.org/') from e | ||||
|                 # check for BOM first | ||||
|                 decoded = None | ||||
|                 for bom, encoding in _encoding_map: | ||||
|                     if text.startswith(bom): | ||||
|                         decoded = text[len(bom):].decode(encoding, 'replace') | ||||
|                         break | ||||
|                 # no BOM found, so use chardet | ||||
|                 if decoded is None: | ||||
|                     enc = chardet.detect(text[:1024])  # Guess using first 1KB | ||||
|                     decoded = text.decode(enc.get('encoding') or 'utf-8', | ||||
|                                           'replace') | ||||
|                 text = decoded | ||||
|             else: | ||||
|                 text = text.decode(self.encoding) | ||||
|                 if text.startswith('\ufeff'): | ||||
|                     text = text[len('\ufeff'):] | ||||
|         else: | ||||
|             if text.startswith('\ufeff'): | ||||
|                 text = text[len('\ufeff'):] | ||||
|  | ||||
|         # text now *is* a unicode string | ||||
|         text = text.replace('\r\n', '\n') | ||||
|         text = text.replace('\r', '\n') | ||||
|         if self.stripall: | ||||
|             text = text.strip() | ||||
|         elif self.stripnl: | ||||
|             text = text.strip('\n') | ||||
|         if self.tabsize > 0: | ||||
|             text = text.expandtabs(self.tabsize) | ||||
|         if self.ensurenl and not text.endswith('\n'): | ||||
|             text += '\n' | ||||
|  | ||||
|         def streamer(): | ||||
|             for _, t, v in self.get_tokens_unprocessed(text): | ||||
|                 yield t, v | ||||
|         stream = streamer() | ||||
|         if not unfiltered: | ||||
|             stream = apply_filters(stream, self.filters, self) | ||||
|         return stream | ||||
|  | ||||
|     def get_tokens_unprocessed(self, text): | ||||
|         """ | ||||
|         Return an iterable of (index, tokentype, value) pairs where "index" | ||||
|         is the starting position of the token within the input text. | ||||
|  | ||||
|         In subclasses, implement this method as a generator to | ||||
|         maximize effectiveness. | ||||
|         """ | ||||
|         raise NotImplementedError | ||||
|  | ||||
|  | ||||
| class DelegatingLexer(Lexer): | ||||
|     """ | ||||
|     This lexer takes two lexers as arguments: a root lexer and | ||||
|     a language lexer. First everything is scanned using the language | ||||
|     lexer, afterwards all ``Other`` tokens are lexed using the root | ||||
|     lexer. | ||||
|  | ||||
|     The lexers from the ``template`` lexer package use this base lexer. | ||||
|     """ | ||||
|  | ||||
|     def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options): | ||||
|         self.root_lexer = _root_lexer(**options) | ||||
|         self.language_lexer = _language_lexer(**options) | ||||
|         self.needle = _needle | ||||
|         Lexer.__init__(self, **options) | ||||
|  | ||||
|     def get_tokens_unprocessed(self, text): | ||||
|         buffered = '' | ||||
|         insertions = [] | ||||
|         lng_buffer = [] | ||||
|         for i, t, v in self.language_lexer.get_tokens_unprocessed(text): | ||||
|             if t is self.needle: | ||||
|                 if lng_buffer: | ||||
|                     insertions.append((len(buffered), lng_buffer)) | ||||
|                     lng_buffer = [] | ||||
|                 buffered += v | ||||
|             else: | ||||
|                 lng_buffer.append((i, t, v)) | ||||
|         if lng_buffer: | ||||
|             insertions.append((len(buffered), lng_buffer)) | ||||
|         return do_insertions(insertions, | ||||
|                              self.root_lexer.get_tokens_unprocessed(buffered)) | ||||
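|  | ||||
| # Editor's sketch of how template lexers compose this class (HtmlLexer and | ||||
| # PhpLexer are assumed imports from the vendored lexer packages): | ||||
| # | ||||
| #     class HtmlPhpLexer(DelegatingLexer): | ||||
| #         def __init__(self, **options): | ||||
| #             super().__init__(HtmlLexer, PhpLexer, **options) | ||||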
|  | ||||
|  | ||||
| # ------------------------------------------------------------------------------ | ||||
| # RegexLexer and ExtendedRegexLexer | ||||
| # | ||||
|  | ||||
|  | ||||
| class include(str):  # pylint: disable=invalid-name | ||||
|     """ | ||||
|     Indicates that a state should include rules from another state. | ||||
|     """ | ||||
|     pass | ||||
|  | ||||
|  | ||||
| class _inherit: | ||||
|     """ | ||||
|     Indicates that a state should inherit from its superclass. | ||||
|     """ | ||||
|     def __repr__(self): | ||||
|         return 'inherit' | ||||
|  | ||||
| inherit = _inherit()  # pylint: disable=invalid-name | ||||
|  | ||||
|  | ||||
| class combined(tuple):  # pylint: disable=invalid-name | ||||
|     """ | ||||
|     Indicates a state combined from multiple states. | ||||
|     """ | ||||
|  | ||||
|     def __new__(cls, *args): | ||||
|         return tuple.__new__(cls, args) | ||||
|  | ||||
|     def __init__(self, *args): | ||||
|         # tuple.__init__ doesn't do anything | ||||
|         pass | ||||
|  | ||||
|  | ||||
| class _PseudoMatch: | ||||
|     """ | ||||
|     A pseudo match object constructed from a string. | ||||
|     """ | ||||
|  | ||||
|     def __init__(self, start, text): | ||||
|         self._text = text | ||||
|         self._start = start | ||||
|  | ||||
|     def start(self, arg=None): | ||||
|         return self._start | ||||
|  | ||||
|     def end(self, arg=None): | ||||
|         return self._start + len(self._text) | ||||
|  | ||||
|     def group(self, arg=None): | ||||
|         if arg: | ||||
|             raise IndexError('No such group') | ||||
|         return self._text | ||||
|  | ||||
|     def groups(self): | ||||
|         return (self._text,) | ||||
|  | ||||
|     def groupdict(self): | ||||
|         return {} | ||||
|  | ||||
|  | ||||
| def bygroups(*args): | ||||
|     """ | ||||
|     Callback that yields multiple actions for each group in the match. | ||||
|     """ | ||||
|     def callback(lexer, match, ctx=None): | ||||
|         for i, action in enumerate(args): | ||||
|             if action is None: | ||||
|                 continue | ||||
|             elif type(action) is _TokenType: | ||||
|                 data = match.group(i + 1) | ||||
|                 if data: | ||||
|                     yield match.start(i + 1), action, data | ||||
|             else: | ||||
|                 data = match.group(i + 1) | ||||
|                 if data is not None: | ||||
|                     if ctx: | ||||
|                         ctx.pos = match.start(i + 1) | ||||
|                     for item in action(lexer, | ||||
|                                        _PseudoMatch(match.start(i + 1), data), ctx): | ||||
|                         if item: | ||||
|                             yield item | ||||
|         if ctx: | ||||
|             ctx.pos = match.end() | ||||
|     return callback | ||||
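|  | ||||
| # Editor's sketch of a typical rule in a RegexLexer ``tokens`` table that | ||||
| # splits one match into three differently-typed tokens (Keyword and Name | ||||
| # come from pygments.token): | ||||
| # | ||||
| #     (r'(def)(\s+)([a-zA-Z_]\w*)', | ||||
| #      bygroups(Keyword, Text, Name.Function)), | ||||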
|  | ||||
|  | ||||
| class _This: | ||||
|     """ | ||||
|     Special singleton used for indicating the caller class. | ||||
|     Used by ``using``. | ||||
|     """ | ||||
|  | ||||
| this = _This() | ||||
|  | ||||
|  | ||||
| def using(_other, **kwargs): | ||||
|     """ | ||||
|     Callback that processes the match with a different lexer. | ||||
|  | ||||
|     The keyword arguments are forwarded to the lexer, except `state` which | ||||
|     is handled separately. | ||||
|  | ||||
|     `state` specifies the state that the new lexer will start in, and can | ||||
|     be an enumerable such as ('root', 'inline', 'string') or a simple | ||||
|     string which is assumed to be on top of the root state. | ||||
|  | ||||
|     Note: For that to work, `_other` must not be an `ExtendedRegexLexer`. | ||||
|     """ | ||||
|     gt_kwargs = {} | ||||
|     if 'state' in kwargs: | ||||
|         s = kwargs.pop('state') | ||||
|         if isinstance(s, (list, tuple)): | ||||
|             gt_kwargs['stack'] = s | ||||
|         else: | ||||
|             gt_kwargs['stack'] = ('root', s) | ||||
|  | ||||
|     if _other is this: | ||||
|         def callback(lexer, match, ctx=None): | ||||
|             # if keyword arguments are given the callback | ||||
|             # function has to create a new lexer instance | ||||
|             if kwargs: | ||||
|                 # XXX: cache that somehow | ||||
|                 kwargs.update(lexer.options) | ||||
|                 lx = lexer.__class__(**kwargs) | ||||
|             else: | ||||
|                 lx = lexer | ||||
|             s = match.start() | ||||
|             for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): | ||||
|                 yield i + s, t, v | ||||
|             if ctx: | ||||
|                 ctx.pos = match.end() | ||||
|     else: | ||||
|         def callback(lexer, match, ctx=None): | ||||
|             # XXX: cache that somehow | ||||
|             kwargs.update(lexer.options) | ||||
|             lx = _other(**kwargs) | ||||
|  | ||||
|             s = match.start() | ||||
|             for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): | ||||
|                 yield i + s, t, v | ||||
|             if ctx: | ||||
|                 ctx.pos = match.end() | ||||
|     return callback | ||||
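|  | ||||
| # Editor's sketch: hand the matched text to another lexer, e.g. inline | ||||
| # JavaScript inside an HTML-like lexer (JsLexer is an assumed import): | ||||
| # | ||||
| #     (r'(<script>)(.*?)(</script>)', | ||||
| #      bygroups(Name.Tag, using(JsLexer), Name.Tag)), | ||||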
|  | ||||
|  | ||||
| class default: | ||||
|     """ | ||||
|     Indicates a state or state action (e.g. #pop) to apply. | ||||
|     For example, ``default('#pop')`` is equivalent to ``('', Token, '#pop')``. | ||||
|     Note that state tuples may be used as well. | ||||
|  | ||||
|     .. versionadded:: 2.0 | ||||
|     """ | ||||
|     def __init__(self, state): | ||||
|         self.state = state | ||||
|  | ||||
|  | ||||
| class words(Future): | ||||
|     """ | ||||
|     Indicates a list of literal words that is transformed into an optimized | ||||
|     regex that matches any of the words. | ||||
|  | ||||
|     .. versionadded:: 2.0 | ||||
|     """ | ||||
|     def __init__(self, words, prefix='', suffix=''): | ||||
|         self.words = words | ||||
|         self.prefix = prefix | ||||
|         self.suffix = suffix | ||||
|  | ||||
|     def get(self): | ||||
|         return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix) | ||||
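|  | ||||
| # Editor's sketch of a typical rule using ``words`` (Keyword comes from | ||||
| # pygments.token): | ||||
| # | ||||
| #     (words(('true', 'false', 'null'), suffix=r'\b'), Keyword.Constant), | ||||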
|  | ||||
|  | ||||
| class RegexLexerMeta(LexerMeta): | ||||
|     """ | ||||
|     Metaclass for RegexLexer, creates the self._tokens attribute from | ||||
|     self.tokens on the first instantiation. | ||||
|     """ | ||||
|  | ||||
|     def _process_regex(cls, regex, rflags, state): | ||||
|         """Preprocess the regular expression component of a token definition.""" | ||||
|         if isinstance(regex, Future): | ||||
|             regex = regex.get() | ||||
|         return re.compile(regex, rflags).match | ||||
|  | ||||
|     def _process_token(cls, token): | ||||
|         """Preprocess the token component of a token definition.""" | ||||
|         assert type(token) is _TokenType or callable(token), \ | ||||
|             'token type must be simple type or callable, not %r' % (token,) | ||||
|         return token | ||||
|  | ||||
|     def _process_new_state(cls, new_state, unprocessed, processed): | ||||
|         """Preprocess the state transition action of a token definition.""" | ||||
|         if isinstance(new_state, str): | ||||
|             # an existing state | ||||
|             if new_state == '#pop': | ||||
|                 return -1 | ||||
|             elif new_state in unprocessed: | ||||
|                 return (new_state,) | ||||
|             elif new_state == '#push': | ||||
|                 return new_state | ||||
|             elif new_state[:5] == '#pop:': | ||||
|                 return -int(new_state[5:]) | ||||
|             else: | ||||
|                 assert False, 'unknown new state %r' % new_state | ||||
|         elif isinstance(new_state, combined): | ||||
|             # combine a new state from existing ones | ||||
|             tmp_state = '_tmp_%d' % cls._tmpname | ||||
|             cls._tmpname += 1 | ||||
|             itokens = [] | ||||
|             for istate in new_state: | ||||
|                 assert istate != new_state, 'circular state ref %r' % istate | ||||
|                 itokens.extend(cls._process_state(unprocessed, | ||||
|                                                   processed, istate)) | ||||
|             processed[tmp_state] = itokens | ||||
|             return (tmp_state,) | ||||
|         elif isinstance(new_state, tuple): | ||||
|             # push more than one state | ||||
|             for istate in new_state: | ||||
|                 assert (istate in unprocessed or | ||||
|                         istate in ('#pop', '#push')), \ | ||||
|                     'unknown new state ' + istate | ||||
|             return new_state | ||||
|         else: | ||||
|             assert False, 'unknown new state def %r' % new_state | ||||
|  | ||||
|     def _process_state(cls, unprocessed, processed, state): | ||||
|         """Preprocess a single state definition.""" | ||||
|         assert type(state) is str, "wrong state name %r" % state | ||||
|         assert state[0] != '#', "invalid state name %r" % state | ||||
|         if state in processed: | ||||
|             return processed[state] | ||||
|         tokens = processed[state] = [] | ||||
|         rflags = cls.flags | ||||
|         for tdef in unprocessed[state]: | ||||
|             if isinstance(tdef, include): | ||||
|                 # it's a state reference | ||||
|                 assert tdef != state, "circular state reference %r" % state | ||||
|                 tokens.extend(cls._process_state(unprocessed, processed, | ||||
|                                                  str(tdef))) | ||||
|                 continue | ||||
|             if isinstance(tdef, _inherit): | ||||
|             # should be processed already, but may not be in the case of: | ||||
|                 # 1. the state has no counterpart in any parent | ||||
|                 # 2. the state includes more than one 'inherit' | ||||
|                 continue | ||||
|             if isinstance(tdef, default): | ||||
|                 new_state = cls._process_new_state(tdef.state, unprocessed, processed) | ||||
|                 tokens.append((re.compile('').match, None, new_state)) | ||||
|                 continue | ||||
|  | ||||
|             assert type(tdef) is tuple, "wrong rule def %r" % tdef | ||||
|  | ||||
|             try: | ||||
|                 rex = cls._process_regex(tdef[0], rflags, state) | ||||
|             except Exception as err: | ||||
|                 raise ValueError("uncompilable regex %r in state %r of %r: %s" % | ||||
|                                  (tdef[0], state, cls, err)) from err | ||||
|  | ||||
|             token = cls._process_token(tdef[1]) | ||||
|  | ||||
|             if len(tdef) == 2: | ||||
|                 new_state = None | ||||
|             else: | ||||
|                 new_state = cls._process_new_state(tdef[2], | ||||
|                                                    unprocessed, processed) | ||||
|  | ||||
|             tokens.append((rex, token, new_state)) | ||||
|         return tokens | ||||
|  | ||||
|     def process_tokendef(cls, name, tokendefs=None): | ||||
|         """Preprocess a dictionary of token definitions.""" | ||||
|         processed = cls._all_tokens[name] = {} | ||||
|         tokendefs = tokendefs or cls.tokens[name] | ||||
|         for state in list(tokendefs): | ||||
|             cls._process_state(tokendefs, processed, state) | ||||
|         return processed | ||||
|  | ||||
|     def get_tokendefs(cls): | ||||
|         """ | ||||
|         Merge tokens from superclasses in MRO order, returning a single tokendef | ||||
|         dictionary. | ||||
|  | ||||
|         Any state that is not defined by a subclass will be inherited | ||||
|         automatically.  States that *are* defined by subclasses will, by | ||||
|         default, override that state in the superclass.  If a subclass wishes to | ||||
|         inherit definitions from a superclass, it can use the special value | ||||
|         "inherit", which will cause the superclass' state definition to be | ||||
|         included at that point in the state. | ||||
|         """ | ||||
|         tokens = {} | ||||
|         inheritable = {} | ||||
|         for c in cls.__mro__: | ||||
|             toks = c.__dict__.get('tokens', {}) | ||||
|  | ||||
|             for state, items in toks.items(): | ||||
|                 curitems = tokens.get(state) | ||||
|                 if curitems is None: | ||||
|                     # N.b. because this is assigned by reference, sufficiently | ||||
|                     # deep hierarchies are processed incrementally (e.g. for | ||||
|                     # A(B), B(C), C(RegexLexer), B will be premodified so X(B) | ||||
|                     # will not see any inherits in B). | ||||
|                     tokens[state] = items | ||||
|                     try: | ||||
|                         inherit_ndx = items.index(inherit) | ||||
|                     except ValueError: | ||||
|                         continue | ||||
|                     inheritable[state] = inherit_ndx | ||||
|                     continue | ||||
|  | ||||
|                 inherit_ndx = inheritable.pop(state, None) | ||||
|                 if inherit_ndx is None: | ||||
|                     continue | ||||
|  | ||||
|                 # Replace the "inherit" value with the items | ||||
|                 curitems[inherit_ndx:inherit_ndx+1] = items | ||||
|                 try: | ||||
|                     # N.b. this is the index in items (that is, the superclass | ||||
|                     # copy), so offset required when storing below. | ||||
|                     new_inh_ndx = items.index(inherit) | ||||
|                 except ValueError: | ||||
|                     pass | ||||
|                 else: | ||||
|                     inheritable[state] = inherit_ndx + new_inh_ndx | ||||
|  | ||||
|         return tokens | ||||
|  | ||||
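|     # Merging sketch (illustrative class and rule names): given | ||||
|     #     class Base(RegexLexer): | ||||
|     #         tokens = {'root': [a, b]} | ||||
|     #     class Sub(Base): | ||||
|     #         tokens = {'root': [c, inherit, d]} | ||||
|     # Sub's merged 'root' state becomes [c, a, b, d]. | ||||
|  | ||||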
|     def __call__(cls, *args, **kwds): | ||||
|         """Instantiate cls after preprocessing its token definitions.""" | ||||
|         if '_tokens' not in cls.__dict__: | ||||
|             cls._all_tokens = {} | ||||
|             cls._tmpname = 0 | ||||
|             if hasattr(cls, 'token_variants') and cls.token_variants: | ||||
|                 # don't process yet | ||||
|                 pass | ||||
|             else: | ||||
|                 cls._tokens = cls.process_tokendef('', cls.get_tokendefs()) | ||||
|  | ||||
|         return type.__call__(cls, *args, **kwds) | ||||
|  | ||||
|  | ||||
| class RegexLexer(Lexer, metaclass=RegexLexerMeta): | ||||
|     """ | ||||
|     Base for simple stateful regular expression-based lexers. | ||||
|     Simplifies the lexing process so that you need only | ||||
|     provide a list of states and regular expressions. | ||||
|     """ | ||||
|  | ||||
|     #: Flags for compiling the regular expressions. | ||||
|     #: Defaults to MULTILINE. | ||||
|     flags = re.MULTILINE | ||||
|  | ||||
|     #: At all times there is a stack of states. Initially, the stack contains | ||||
|     #: a single state 'root'. The top of the stack is called "the current state". | ||||
|     #: | ||||
|     #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}`` | ||||
|     #: | ||||
|     #: ``new_state`` can be omitted to signify no state transition. | ||||
|     #: If ``new_state`` is a string, it is pushed on the stack. This ensures | ||||
|     #: that the new current state is ``new_state``. | ||||
|     #: If ``new_state`` is a tuple of strings, all of those strings are pushed | ||||
|     #: on the stack and the current state will be the last element of the tuple. | ||||
|     #: ``new_state`` can also be ``combined('state1', 'state2', ...)`` | ||||
|     #: to signify a new, anonymous state combined from the rules of two | ||||
|     #: or more existing ones. | ||||
|     #: Furthermore, it can be '#pop' to signify going back one step in | ||||
|     #: the state stack, or '#push' to push the current state on the stack | ||||
|     #: again. Note that if you push while in a combined state, the combined | ||||
|     #: state itself is pushed, and not only the state in which the rule is | ||||
|     #: defined. | ||||
|     #: | ||||
|     #: The tuple can also be replaced with ``include('state')``, in which | ||||
|     #: case the rules from the state named by the string are included in the | ||||
|     #: current one. | ||||
|     tokens = {} | ||||
|  | ||||
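|     # A minimal sketch of a ``tokens`` definition (illustrative, not part of | ||||
|     # this module; ``String``, ``Text`` and ``Whitespace`` are token types | ||||
|     # from pygments.token): | ||||
|     # | ||||
|     #     tokens = { | ||||
|     #         'root': [ | ||||
|     #             (r'"', String, 'string'),   # push the 'string' state | ||||
|     #             (r'\s+', Whitespace), | ||||
|     #             (r'.', Text), | ||||
|     #         ], | ||||
|     #         'string': [ | ||||
|     #             (r'"', String, '#pop'),     # pop back to the previous state | ||||
|     #             (r'[^"]+', String), | ||||
|     #         ], | ||||
|     #     } | ||||
|  | ||||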
|     def get_tokens_unprocessed(self, text, stack=('root',)): | ||||
|         """ | ||||
|         Split ``text`` into (tokentype, text) pairs. | ||||
|  | ||||
|         ``stack`` is the initial stack (default: ``('root',)``) | ||||
|         """ | ||||
|         pos = 0 | ||||
|         tokendefs = self._tokens | ||||
|         statestack = list(stack) | ||||
|         statetokens = tokendefs[statestack[-1]] | ||||
|         while 1: | ||||
|             for rexmatch, action, new_state in statetokens: | ||||
|                 m = rexmatch(text, pos) | ||||
|                 if m: | ||||
|                     if action is not None: | ||||
|                         if type(action) is _TokenType: | ||||
|                             yield pos, action, m.group() | ||||
|                         else: | ||||
|                             yield from action(self, m) | ||||
|                     pos = m.end() | ||||
|                     if new_state is not None: | ||||
|                         # state transition | ||||
|                         if isinstance(new_state, tuple): | ||||
|                             for state in new_state: | ||||
|                                 if state == '#pop': | ||||
|                                     if len(statestack) > 1: | ||||
|                                         statestack.pop() | ||||
|                                 elif state == '#push': | ||||
|                                     statestack.append(statestack[-1]) | ||||
|                                 else: | ||||
|                                     statestack.append(state) | ||||
|                         elif isinstance(new_state, int): | ||||
|                             # pop, but keep at least one state on the stack | ||||
|                             # (random code leading to unexpected pops should | ||||
|                             # not allow exceptions) | ||||
|                             if abs(new_state) >= len(statestack): | ||||
|                                 del statestack[1:] | ||||
|                             else: | ||||
|                                 del statestack[new_state:] | ||||
|                         elif new_state == '#push': | ||||
|                             statestack.append(statestack[-1]) | ||||
|                         else: | ||||
|                             assert False, "wrong state def: %r" % new_state | ||||
|                         statetokens = tokendefs[statestack[-1]] | ||||
|                     break | ||||
|             else: | ||||
|                 # We are here only if all state tokens have been considered | ||||
|                 # and there was not a match on any of them. | ||||
|                 try: | ||||
|                     if text[pos] == '\n': | ||||
|                         # at EOL, reset state to "root" | ||||
|                         statestack = ['root'] | ||||
|                         statetokens = tokendefs['root'] | ||||
|                         yield pos, Text, '\n' | ||||
|                         pos += 1 | ||||
|                         continue | ||||
|                     yield pos, Error, text[pos] | ||||
|                     pos += 1 | ||||
|                 except IndexError: | ||||
|                     break | ||||
|  | ||||
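| # Usage sketch (illustrative): for ``MyLexer``, an assumed concrete subclass | ||||
| # of RegexLexer, | ||||
| #     for index, tokentype, value in MyLexer().get_tokens_unprocessed(code): | ||||
| #         print(index, tokentype, value) | ||||
| # yields one (position, token type, matched text) triple per rule match. | ||||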
|  | ||||
| class LexerContext: | ||||
|     """ | ||||
|     A helper object that holds lexer position data. | ||||
|     """ | ||||
|  | ||||
|     def __init__(self, text, pos, stack=None, end=None): | ||||
|         self.text = text | ||||
|         self.pos = pos | ||||
|         self.end = end or len(text)  # end=0 not supported ;-) | ||||
|         self.stack = stack or ['root'] | ||||
|  | ||||
|     def __repr__(self): | ||||
|         return 'LexerContext(%r, %r, %r)' % ( | ||||
|             self.text, self.pos, self.stack) | ||||
|  | ||||
|  | ||||
| class ExtendedRegexLexer(RegexLexer): | ||||
|     """ | ||||
|     A RegexLexer that uses a context object to store its state. | ||||
|     """ | ||||
|  | ||||
|     def get_tokens_unprocessed(self, text=None, context=None): | ||||
|         """ | ||||
|         Split ``text`` into (tokentype, text) pairs. | ||||
|         If ``context`` is given, use this lexer context instead. | ||||
|         """ | ||||
|         tokendefs = self._tokens | ||||
|         if not context: | ||||
|             ctx = LexerContext(text, 0) | ||||
|             statetokens = tokendefs['root'] | ||||
|         else: | ||||
|             ctx = context | ||||
|             statetokens = tokendefs[ctx.stack[-1]] | ||||
|             text = ctx.text | ||||
|         while 1: | ||||
|             for rexmatch, action, new_state in statetokens: | ||||
|                 m = rexmatch(text, ctx.pos, ctx.end) | ||||
|                 if m: | ||||
|                     if action is not None: | ||||
|                         if type(action) is _TokenType: | ||||
|                             yield ctx.pos, action, m.group() | ||||
|                             ctx.pos = m.end() | ||||
|                         else: | ||||
|                             yield from action(self, m, ctx) | ||||
|                             if not new_state: | ||||
|                                 # altered the state stack? | ||||
|                                 statetokens = tokendefs[ctx.stack[-1]] | ||||
|                     # CAUTION: callback must set ctx.pos! | ||||
|                     if new_state is not None: | ||||
|                         # state transition | ||||
|                         if isinstance(new_state, tuple): | ||||
|                             for state in new_state: | ||||
|                                 if state == '#pop': | ||||
|                                     if len(ctx.stack) > 1: | ||||
|                                         ctx.stack.pop() | ||||
|                                 elif state == '#push': | ||||
|                                     ctx.stack.append(ctx.stack[-1]) | ||||
|                                 else: | ||||
|                                     ctx.stack.append(state) | ||||
|                         elif isinstance(new_state, int): | ||||
|                             # see RegexLexer for why this check is made | ||||
|                             if abs(new_state) >= len(ctx.stack): | ||||
|                                 del ctx.stack[1:] | ||||
|                             else: | ||||
|                                 del ctx.stack[new_state:] | ||||
|                         elif new_state == '#push': | ||||
|                             ctx.stack.append(ctx.stack[-1]) | ||||
|                         else: | ||||
|                             assert False, "wrong state def: %r" % new_state | ||||
|                         statetokens = tokendefs[ctx.stack[-1]] | ||||
|                     break | ||||
|             else: | ||||
|                 try: | ||||
|                     if ctx.pos >= ctx.end: | ||||
|                         break | ||||
|                     if text[ctx.pos] == '\n': | ||||
|                         # at EOL, reset state to "root" | ||||
|                         ctx.stack = ['root'] | ||||
|                         statetokens = tokendefs['root'] | ||||
|                         yield ctx.pos, Text, '\n' | ||||
|                         ctx.pos += 1 | ||||
|                         continue | ||||
|                     yield ctx.pos, Error, text[ctx.pos] | ||||
|                     ctx.pos += 1 | ||||
|                 except IndexError: | ||||
|                     break | ||||
|  | ||||
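| # Callback sketch for ExtendedRegexLexer rules (illustrative): instead of a | ||||
| # plain token type, a rule may name a callback, which receives the context | ||||
| # and must advance ctx.pos itself: | ||||
| #     def my_callback(lexer, match, ctx): | ||||
| #         yield match.start(), Text, match.group() | ||||
| #         ctx.pos = match.end() | ||||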
|  | ||||
| def do_insertions(insertions, tokens): | ||||
|     """ | ||||
|     Helper for lexers which must combine the results of several | ||||
|     sublexers. | ||||
|  | ||||
|     ``insertions`` is a list of ``(index, itokens)`` pairs. | ||||
|     Each ``itokens`` iterable should be inserted at position | ||||
|     ``index`` into the token stream given by the ``tokens`` | ||||
|     argument. | ||||
|  | ||||
|     The result is a combined token stream. | ||||
|  | ||||
|     TODO: clean up the code here. | ||||
|     """ | ||||
|     insertions = iter(insertions) | ||||
|     try: | ||||
|         index, itokens = next(insertions) | ||||
|     except StopIteration: | ||||
|         # no insertions | ||||
|         yield from tokens | ||||
|         return | ||||
|  | ||||
|     realpos = None | ||||
|     insleft = True | ||||
|  | ||||
|     # iterate over the token stream where we want to insert | ||||
|     # the tokens from the insertion list. | ||||
|     for i, t, v in tokens: | ||||
|         # first iteration. store the position of first item | ||||
|         if realpos is None: | ||||
|             realpos = i | ||||
|         oldi = 0 | ||||
|         while insleft and i + len(v) >= index: | ||||
|             tmpval = v[oldi:index - i] | ||||
|             if tmpval: | ||||
|                 yield realpos, t, tmpval | ||||
|                 realpos += len(tmpval) | ||||
|             for it_index, it_token, it_value in itokens: | ||||
|                 yield realpos, it_token, it_value | ||||
|                 realpos += len(it_value) | ||||
|             oldi = index - i | ||||
|             try: | ||||
|                 index, itokens = next(insertions) | ||||
|             except StopIteration: | ||||
|                 insleft = False | ||||
|                 break  # not strictly necessary | ||||
|         if oldi < len(v): | ||||
|             yield realpos, t, v[oldi:] | ||||
|             realpos += len(v) - oldi | ||||
|  | ||||
|     # leftover tokens | ||||
|     while insleft: | ||||
|         # no normal tokens, set realpos to zero | ||||
|         realpos = realpos or 0 | ||||
|         for p, t, v in itokens: | ||||
|             yield realpos, t, v | ||||
|             realpos += len(v) | ||||
|         try: | ||||
|             index, itokens = next(insertions) | ||||
|         except StopIteration: | ||||
|             insleft = False | ||||
|             break  # not strictly necessary | ||||
|  | ||||
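| # Usage sketch (illustrative): splice tokens for an embedded payload into a | ||||
| # host token stream at character offset 10; ``host_lexer`` and | ||||
| # ``embedded_lexer`` are assumed lexer instances: | ||||
| #     insertions = [(10, list(embedded_lexer.get_tokens_unprocessed(payload)))] | ||||
| #     merged = do_insertions(insertions, host_lexer.get_tokens_unprocessed(code)) | ||||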
|  | ||||
| class ProfilingRegexLexerMeta(RegexLexerMeta): | ||||
|     """Metaclass for ProfilingRegexLexer, collects regex timing info.""" | ||||
|  | ||||
|     def _process_regex(cls, regex, rflags, state): | ||||
|         if isinstance(regex, words): | ||||
|             rex = regex_opt(regex.words, prefix=regex.prefix, | ||||
|                             suffix=regex.suffix) | ||||
|         else: | ||||
|             rex = regex | ||||
|         compiled = re.compile(rex, rflags) | ||||
|  | ||||
|         def match_func(text, pos, endpos=sys.maxsize): | ||||
|             info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0]) | ||||
|             t0 = time.time() | ||||
|             res = compiled.match(text, pos, endpos) | ||||
|             t1 = time.time() | ||||
|             info[0] += 1 | ||||
|             info[1] += t1 - t0 | ||||
|             return res | ||||
|         return match_func | ||||
|  | ||||
|  | ||||
| class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta): | ||||
|     """Drop-in replacement for RegexLexer that does profiling of its regexes.""" | ||||
|  | ||||
|     _prof_data = [] | ||||
|     _prof_sort_index = 4  # defaults to time per call | ||||
|  | ||||
|     def get_tokens_unprocessed(self, text, stack=('root',)): | ||||
|         # this needs to be a stack, since using(this) will produce nested calls | ||||
|         self.__class__._prof_data.append({}) | ||||
|         yield from RegexLexer.get_tokens_unprocessed(self, text, stack) | ||||
|         rawdata = self.__class__._prof_data.pop() | ||||
|         data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65], | ||||
|                         n, 1000 * t, 1000 * t / n) | ||||
|                        for ((s, r), (n, t)) in rawdata.items()), | ||||
|                       key=lambda x: x[self._prof_sort_index], | ||||
|                       reverse=True) | ||||
|         sum_total = sum(x[3] for x in data) | ||||
|  | ||||
|         print() | ||||
|         print('Profiling result for %s lexing %d chars in %.3f ms' % | ||||
|               (self.__class__.__name__, len(text), sum_total)) | ||||
|         print('=' * 110) | ||||
|         print('%-20s %-64s ncalls  tottime  percall' % ('state', 'regex')) | ||||
|         print('-' * 110) | ||||
|         for d in data: | ||||
|             print('%-20s %-65s %5d %8.4f %8.4f' % d) | ||||
|         print('=' * 110) | ||||
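|  | ||||
|  | ||||
| # Usage sketch (illustrative): mix ProfilingRegexLexer into an existing | ||||
| # RegexLexer subclass (PythonLexer here is an assumed import) so its regexes | ||||
| # are timed; exhausting the generator prints the table above: | ||||
| #     class ProfilingPythonLexer(ProfilingRegexLexer, PythonLexer): | ||||
| #         pass | ||||
| #     list(ProfilingPythonLexer().get_tokens('import os\n')) | ||||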
| @ -0,0 +1,335 @@ | ||||
| """ | ||||
|     pygments.lexers | ||||
|     ~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Pygments lexers. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| import re | ||||
| import sys | ||||
| import types | ||||
| from fnmatch import fnmatch | ||||
| from os.path import basename | ||||
|  | ||||
| from pip._vendor.pygments.lexers._mapping import LEXERS | ||||
| from pip._vendor.pygments.modeline import get_filetype_from_buffer | ||||
| from pip._vendor.pygments.plugin import find_plugin_lexers | ||||
| from pip._vendor.pygments.util import ClassNotFound, guess_decode | ||||
|  | ||||
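| # Old lexer names kept importable for backwards compatibility; resolved | ||||
| # lazily in _automodule.__getattr__ below. | ||||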
| COMPAT = { | ||||
|     'Python3Lexer': 'PythonLexer', | ||||
|     'Python3TracebackLexer': 'PythonTracebackLexer', | ||||
| } | ||||
|  | ||||
| __all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class', | ||||
|            'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT) | ||||
|  | ||||
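| # Maps a lexer's display name to its class, populated lazily by | ||||
| # _load_lexers(). | ||||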
| _lexer_cache = {} | ||||
|  | ||||
| def _load_lexers(module_name): | ||||
|     """Load a lexer (and all others in the module too).""" | ||||
|     mod = __import__(module_name, None, None, ['__all__']) | ||||
|     for lexer_name in mod.__all__: | ||||
|         cls = getattr(mod, lexer_name) | ||||
|         _lexer_cache[cls.name] = cls | ||||
|  | ||||
|  | ||||
| def get_all_lexers(plugins=True): | ||||
|     """Return a generator of tuples in the form ``(name, aliases, | ||||
|     filenames, mimetypes)`` of all known lexers. | ||||
|  | ||||
|     If *plugins* is true (the default), plugin lexers supplied by entrypoints | ||||
|     are also returned.  Otherwise, only builtin ones are considered. | ||||
|     """ | ||||
|     for item in LEXERS.values(): | ||||
|         yield item[1:] | ||||
|     if plugins: | ||||
|         for lexer in find_plugin_lexers(): | ||||
|             yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes | ||||
|  | ||||
|  | ||||
| def find_lexer_class(name): | ||||
|     """Lookup a lexer class by name. | ||||
|  | ||||
|     Return None if not found. | ||||
|     """ | ||||
|     if name in _lexer_cache: | ||||
|         return _lexer_cache[name] | ||||
|     # lookup builtin lexers | ||||
|     for module_name, lname, aliases, _, _ in LEXERS.values(): | ||||
|         if name == lname: | ||||
|             _load_lexers(module_name) | ||||
|             return _lexer_cache[name] | ||||
|     # continue with lexers from setuptools entrypoints | ||||
|     for cls in find_plugin_lexers(): | ||||
|         if cls.name == name: | ||||
|             return cls | ||||
|  | ||||
|  | ||||
| def find_lexer_class_by_name(_alias): | ||||
|     """Lookup a lexer class by alias. | ||||
|  | ||||
|     Like `get_lexer_by_name`, but does not instantiate the class. | ||||
|  | ||||
|     .. versionadded:: 2.2 | ||||
|     """ | ||||
|     if not _alias: | ||||
|         raise ClassNotFound('no lexer for alias %r found' % _alias) | ||||
|     # lookup builtin lexers | ||||
|     for module_name, name, aliases, _, _ in LEXERS.values(): | ||||
|         if _alias.lower() in aliases: | ||||
|             if name not in _lexer_cache: | ||||
|                 _load_lexers(module_name) | ||||
|             return _lexer_cache[name] | ||||
|     # continue with lexers from setuptools entrypoints | ||||
|     for cls in find_plugin_lexers(): | ||||
|         if _alias.lower() in cls.aliases: | ||||
|             return cls | ||||
|     raise ClassNotFound('no lexer for alias %r found' % _alias) | ||||
|  | ||||
|  | ||||
| def get_lexer_by_name(_alias, **options): | ||||
|     """Get a lexer by an alias. | ||||
|  | ||||
|     Raises ClassNotFound if not found. | ||||
|     """ | ||||
|     if not _alias: | ||||
|         raise ClassNotFound('no lexer for alias %r found' % _alias) | ||||
|  | ||||
|     # lookup builtin lexers | ||||
|     for module_name, name, aliases, _, _ in LEXERS.values(): | ||||
|         if _alias.lower() in aliases: | ||||
|             if name not in _lexer_cache: | ||||
|                 _load_lexers(module_name) | ||||
|             return _lexer_cache[name](**options) | ||||
|     # continue with lexers from setuptools entrypoints | ||||
|     for cls in find_plugin_lexers(): | ||||
|         if _alias.lower() in cls.aliases: | ||||
|             return cls(**options) | ||||
|     raise ClassNotFound('no lexer for alias %r found' % _alias) | ||||
|  | ||||
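| # Usage sketch (illustrative): | ||||
| #     lexer = get_lexer_by_name('python', stripall=True) | ||||
| # Keyword options such as ``stripall`` are passed to the lexer constructor. | ||||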
|  | ||||
| def load_lexer_from_file(filename, lexername="CustomLexer", **options): | ||||
|     """Load a lexer from a file. | ||||
|  | ||||
|     This function expects a file located relative to the current working | ||||
|     directory, which contains a Lexer class. By default, it expects the | ||||
|     Lexer to be named CustomLexer; you can specify your own class name | ||||
|     as the second argument to this function. | ||||
|  | ||||
|     Users should be very careful with the input, because this function | ||||
|     is equivalent to executing the input file's code. | ||||
|  | ||||
|     Raises ClassNotFound if there are any problems importing the Lexer. | ||||
|  | ||||
|     .. versionadded:: 2.2 | ||||
|     """ | ||||
|     try: | ||||
|         # This empty dict will contain the namespace for the exec'd file | ||||
|         custom_namespace = {} | ||||
|         with open(filename, 'rb') as f: | ||||
|             exec(f.read(), custom_namespace) | ||||
|         # Retrieve the class `lexername` from that namespace | ||||
|         if lexername not in custom_namespace: | ||||
|             raise ClassNotFound('no valid %s class found in %s' % | ||||
|                                 (lexername, filename)) | ||||
|         lexer_class = custom_namespace[lexername] | ||||
|         # And finally instantiate it with the options | ||||
|         return lexer_class(**options) | ||||
|     except OSError as err: | ||||
|         raise ClassNotFound('cannot read %s: %s' % (filename, err)) | ||||
|     except ClassNotFound: | ||||
|         raise | ||||
|     except Exception as err: | ||||
|         raise ClassNotFound('error when loading custom lexer: %s' % err) | ||||
|  | ||||
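| # Usage sketch (filename and class name illustrative): | ||||
| #     lexer = load_lexer_from_file('my_lexer.py', lexername='MyLexer') | ||||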
|  | ||||
| def find_lexer_class_for_filename(_fn, code=None): | ||||
|     """Get a lexer for a filename. | ||||
|  | ||||
|     If multiple lexers match the filename pattern, use ``analyse_text()`` to | ||||
|     figure out which one is more appropriate. | ||||
|  | ||||
|     Returns None if not found. | ||||
|     """ | ||||
|     matches = [] | ||||
|     fn = basename(_fn) | ||||
|     for modname, name, _, filenames, _ in LEXERS.values(): | ||||
|         for filename in filenames: | ||||
|             if fnmatch(fn, filename): | ||||
|                 if name not in _lexer_cache: | ||||
|                     _load_lexers(modname) | ||||
|                 matches.append((_lexer_cache[name], filename)) | ||||
|     for cls in find_plugin_lexers(): | ||||
|         for filename in cls.filenames: | ||||
|             if fnmatch(fn, filename): | ||||
|                 matches.append((cls, filename)) | ||||
|  | ||||
|     if isinstance(code, bytes): | ||||
|         # decode it, since all analyse_text functions expect unicode | ||||
|         code = guess_decode(code) | ||||
|  | ||||
|     def get_rating(info): | ||||
|         cls, filename = info | ||||
|         # explicit patterns get a bonus | ||||
|         bonus = 0.5 if '*' not in filename else 0 | ||||
|         # The class _always_ defines analyse_text because it's included in | ||||
|         # the Lexer class.  The default implementation returns None which | ||||
|         # gets turned into 0.0.  Run scripts/detect_missing_analyse_text.py | ||||
|         # to find lexers which need it overridden. | ||||
|         if code: | ||||
|             return cls.analyse_text(code) + bonus, cls.__name__ | ||||
|         return cls.priority + bonus, cls.__name__ | ||||
|  | ||||
|     if matches: | ||||
|         matches.sort(key=get_rating) | ||||
|         # print "Possible lexers, after sort:", matches | ||||
|         return matches[-1][0] | ||||
|  | ||||
|  | ||||
| def get_lexer_for_filename(_fn, code=None, **options): | ||||
|     """Get a lexer for a filename. | ||||
|  | ||||
|     If multiple lexers match the filename pattern, use ``analyse_text()`` to | ||||
|     figure out which one is more appropriate. | ||||
|  | ||||
|     Raises ClassNotFound if not found. | ||||
|     """ | ||||
|     res = find_lexer_class_for_filename(_fn, code) | ||||
|     if not res: | ||||
|         raise ClassNotFound('no lexer for filename %r found' % _fn) | ||||
|     return res(**options) | ||||
|  | ||||
|  | ||||
| def get_lexer_for_mimetype(_mime, **options): | ||||
|     """Get a lexer for a mimetype. | ||||
|  | ||||
|     Raises ClassNotFound if not found. | ||||
|     """ | ||||
|     for modname, name, _, _, mimetypes in LEXERS.values(): | ||||
|         if _mime in mimetypes: | ||||
|             if name not in _lexer_cache: | ||||
|                 _load_lexers(modname) | ||||
|             return _lexer_cache[name](**options) | ||||
|     for cls in find_plugin_lexers(): | ||||
|         if _mime in cls.mimetypes: | ||||
|             return cls(**options) | ||||
|     raise ClassNotFound('no lexer for mimetype %r found' % _mime) | ||||
|  | ||||
|  | ||||
| def _iter_lexerclasses(plugins=True): | ||||
|     """Return an iterator over all lexer classes.""" | ||||
|     for key in sorted(LEXERS): | ||||
|         module_name, name = LEXERS[key][:2] | ||||
|         if name not in _lexer_cache: | ||||
|             _load_lexers(module_name) | ||||
|         yield _lexer_cache[name] | ||||
|     if plugins: | ||||
|         yield from find_plugin_lexers() | ||||
|  | ||||
|  | ||||
| def guess_lexer_for_filename(_fn, _text, **options): | ||||
|     """ | ||||
|     Look up all lexers that handle the given filename as a primary | ||||
|     (``filenames``) or secondary (``alias_filenames``) pattern. Then run a | ||||
|     text analysis on those lexers and choose the best result. | ||||
|  | ||||
|     usage:: | ||||
|  | ||||
|         >>> from pygments.lexers import guess_lexer_for_filename | ||||
|         >>> guess_lexer_for_filename('hello.html', '<%= @foo %>') | ||||
|         <pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c> | ||||
|         >>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>') | ||||
|         <pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac> | ||||
|         >>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }') | ||||
|         <pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c> | ||||
|     """ | ||||
|     fn = basename(_fn) | ||||
|     primary = {} | ||||
|     matching_lexers = set() | ||||
|     for lexer in _iter_lexerclasses(): | ||||
|         for filename in lexer.filenames: | ||||
|             if fnmatch(fn, filename): | ||||
|                 matching_lexers.add(lexer) | ||||
|                 primary[lexer] = True | ||||
|         for filename in lexer.alias_filenames: | ||||
|             if fnmatch(fn, filename): | ||||
|                 matching_lexers.add(lexer) | ||||
|                 primary[lexer] = False | ||||
|     if not matching_lexers: | ||||
|         raise ClassNotFound('no lexer for filename %r found' % fn) | ||||
|     if len(matching_lexers) == 1: | ||||
|         return matching_lexers.pop()(**options) | ||||
|     result = [] | ||||
|     for lexer in matching_lexers: | ||||
|         rv = lexer.analyse_text(_text) | ||||
|         if rv == 1.0: | ||||
|             return lexer(**options) | ||||
|         result.append((rv, lexer)) | ||||
|  | ||||
|     def type_sort(t): | ||||
|         # sort by: | ||||
|         # - analyse score | ||||
|         # - is primary filename pattern? | ||||
|         # - priority | ||||
|         # - last resort: class name | ||||
|         return (t[0], primary[t[1]], t[1].priority, t[1].__name__) | ||||
|     result.sort(key=type_sort) | ||||
|  | ||||
|     return result[-1][1](**options) | ||||
|  | ||||
|  | ||||
| def guess_lexer(_text, **options): | ||||
|     """Guess a lexer by strong distinctions in the text (eg, shebang).""" | ||||
|  | ||||
|     if not isinstance(_text, str): | ||||
|         inencoding = options.get('inencoding', options.get('encoding')) | ||||
|         if inencoding: | ||||
|             _text = _text.decode(inencoding or 'utf8') | ||||
|         else: | ||||
|             _text, _ = guess_decode(_text) | ||||
|  | ||||
|     # try to get a vim modeline first | ||||
|     ft = get_filetype_from_buffer(_text) | ||||
|  | ||||
|     if ft is not None: | ||||
|         try: | ||||
|             return get_lexer_by_name(ft, **options) | ||||
|         except ClassNotFound: | ||||
|             pass | ||||
|  | ||||
|     best_lexer = [0.0, None] | ||||
|     for lexer in _iter_lexerclasses(): | ||||
|         rv = lexer.analyse_text(_text) | ||||
|         if rv == 1.0: | ||||
|             return lexer(**options) | ||||
|         if rv > best_lexer[0]: | ||||
|             best_lexer[:] = (rv, lexer) | ||||
|     if not best_lexer[0] or best_lexer[1] is None: | ||||
|         raise ClassNotFound('no lexer matching the text found') | ||||
|     return best_lexer[1](**options) | ||||
|  | ||||
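| # Usage sketch (illustrative): a shebang is a strong signal, so | ||||
| #     guess_lexer('#!/usr/bin/env python\nprint("hi")\n') | ||||
| # should return a PythonLexer instance. | ||||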
|  | ||||
| class _automodule(types.ModuleType): | ||||
|     """Automatically import lexers.""" | ||||
|  | ||||
|     def __getattr__(self, name): | ||||
|         info = LEXERS.get(name) | ||||
|         if info: | ||||
|             _load_lexers(info[0]) | ||||
|             cls = _lexer_cache[info[1]] | ||||
|             setattr(self, name, cls) | ||||
|             return cls | ||||
|         if name in COMPAT: | ||||
|             return getattr(self, COMPAT[name]) | ||||
|         raise AttributeError(name) | ||||
|  | ||||
|  | ||||
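| # Replace this module object in sys.modules with an _automodule instance so | ||||
| # that lexer classes are imported lazily on first attribute access (e.g. | ||||
| # ``from pip._vendor.pygments.lexers import PythonLexer`` only imports | ||||
| # pygments.lexers.python at that point). | ||||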
| oldmod = sys.modules[__name__] | ||||
| newmod = _automodule(__name__) | ||||
| newmod.__dict__.update(oldmod.__dict__) | ||||
| sys.modules[__name__] = newmod | ||||
| del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types | ||||
| @ -0,0 +1,541 @@ | ||||
| # Automatically generated by scripts/gen_mapfiles.py. | ||||
| # DO NOT EDIT BY HAND; run `make mapfiles` instead. | ||||
|  | ||||
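| # Each entry maps a lexer class name to a tuple of | ||||
| # (module name, display name, aliases, filename patterns, mimetypes). | ||||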
| LEXERS = { | ||||
|     'ABAPLexer': ('pip._vendor.pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)), | ||||
|     'AMDGPULexer': ('pip._vendor.pygments.lexers.amdgpu', 'AMDGPU', ('amdgpu',), ('*.isa',), ()), | ||||
|     'APLLexer': ('pip._vendor.pygments.lexers.apl', 'APL', ('apl',), ('*.apl', '*.aplf', '*.aplo', '*.apln', '*.aplc', '*.apli', '*.dyalog'), ()), | ||||
|     'AbnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)), | ||||
|     'ActionScript3Lexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript 3', ('actionscript3', 'as3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')), | ||||
|     'ActionScriptLexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript', ('actionscript', 'as'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')), | ||||
|     'AdaLexer': ('pip._vendor.pygments.lexers.ada', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)), | ||||
|     'AdlLexer': ('pip._vendor.pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()), | ||||
|     'AgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)), | ||||
|     'AheuiLexer': ('pip._vendor.pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()), | ||||
|     'AlloyLexer': ('pip._vendor.pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)), | ||||
|     'AmbientTalkLexer': ('pip._vendor.pygments.lexers.ambient', 'AmbientTalk', ('ambienttalk', 'ambienttalk/2', 'at'), ('*.at',), ('text/x-ambienttalk',)), | ||||
|     'AmplLexer': ('pip._vendor.pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()), | ||||
|     'Angular2HtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()), | ||||
|     'Angular2Lexer': ('pip._vendor.pygments.lexers.templates', 'Angular2', ('ng2',), (), ()), | ||||
|     'AntlrActionScriptLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-actionscript', 'antlr-as'), ('*.G', '*.g'), ()), | ||||
|     'AntlrCSharpLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()), | ||||
|     'AntlrCppLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()), | ||||
|     'AntlrJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()), | ||||
|     'AntlrLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()), | ||||
|     'AntlrObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()), | ||||
|     'AntlrPerlLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()), | ||||
|     'AntlrPythonLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()), | ||||
|     'AntlrRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()), | ||||
|     'ApacheConfLexer': ('pip._vendor.pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)), | ||||
|     'AppleScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()), | ||||
|     'ArduinoLexer': ('pip._vendor.pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)), | ||||
|     'ArrowLexer': ('pip._vendor.pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), ()), | ||||
|     'AscLexer': ('pip._vendor.pygments.lexers.asc', 'ASCII armored', ('asc', 'pem'), ('*.asc', '*.pem', 'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk', 'id_rsa'), ('application/pgp-keys', 'application/pgp-encrypted', 'application/pgp-signature')), | ||||
|     'AspectJLexer': ('pip._vendor.pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)), | ||||
|     'AsymptoteLexer': ('pip._vendor.pygments.lexers.graphics', 'Asymptote', ('asymptote', 'asy'), ('*.asy',), ('text/x-asymptote',)), | ||||
|     'AugeasLexer': ('pip._vendor.pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()), | ||||
|     'AutoItLexer': ('pip._vendor.pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)), | ||||
|     'AutohotkeyLexer': ('pip._vendor.pygments.lexers.automation', 'autohotkey', ('autohotkey', 'ahk'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)), | ||||
|     'AwkLexer': ('pip._vendor.pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)), | ||||
|     'BBCBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()), | ||||
|     'BBCodeLexer': ('pip._vendor.pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)), | ||||
|     'BCLexer': ('pip._vendor.pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()), | ||||
|     'BSTLexer': ('pip._vendor.pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()), | ||||
|     'BareLexer': ('pip._vendor.pygments.lexers.bare', 'BARE', ('bare',), ('*.bare',), ()), | ||||
|     'BaseMakefileLexer': ('pip._vendor.pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()), | ||||
|     'BashLexer': ('pip._vendor.pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', '.kshrc', 'kshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')), | ||||
|     'BashSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')), | ||||
|     'BatchLexer': ('pip._vendor.pygments.lexers.shell', 'Batchfile', ('batch', 'bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)), | ||||
|     'BddLexer': ('pip._vendor.pygments.lexers.bdd', 'Bdd', ('bdd',), ('*.feature',), ('text/x-bdd',)), | ||||
|     'BefungeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)), | ||||
|     'BerryLexer': ('pip._vendor.pygments.lexers.berry', 'Berry', ('berry', 'be'), ('*.be',), ('text/x-berry', 'application/x-berry')), | ||||
|     'BibTeXLexer': ('pip._vendor.pygments.lexers.bibtex', 'BibTeX', ('bibtex', 'bib'), ('*.bib',), ('text/x-bibtex',)), | ||||
|     'BlitzBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)), | ||||
|     'BlitzMaxLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)), | ||||
|     'BnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)), | ||||
|     'BoaLexer': ('pip._vendor.pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()), | ||||
|     'BooLexer': ('pip._vendor.pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)), | ||||
|     'BoogieLexer': ('pip._vendor.pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()), | ||||
|     'BrainfuckLexer': ('pip._vendor.pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)), | ||||
|     'BugsLexer': ('pip._vendor.pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()), | ||||
|     'CAmkESLexer': ('pip._vendor.pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()), | ||||
|     'CLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc', '*.x[bp]m'), ('text/x-chdr', 'text/x-csrc', 'image/x-xbitmap', 'image/x-xpixmap')), | ||||
|     'CMakeLexer': ('pip._vendor.pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)), | ||||
|     'CObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)), | ||||
|     'CPSALexer': ('pip._vendor.pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()), | ||||
|     'CSSUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'CSS+UL4', ('css+ul4',), ('*.cssul4',), ()), | ||||
|     'CSharpAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()), | ||||
|     'CSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'C#', ('csharp', 'c#', 'cs'), ('*.cs',), ('text/x-csharp',)), | ||||
|     'Ca65Lexer': ('pip._vendor.pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()), | ||||
|     'CadlLexer': ('pip._vendor.pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()), | ||||
|     'CapDLLexer': ('pip._vendor.pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()), | ||||
|     'CapnProtoLexer': ('pip._vendor.pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()), | ||||
|     'CbmBasicV2Lexer': ('pip._vendor.pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()), | ||||
|     'CddlLexer': ('pip._vendor.pygments.lexers.cddl', 'CDDL', ('cddl',), ('*.cddl',), ('text/x-cddl',)), | ||||
|     'CeylonLexer': ('pip._vendor.pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)), | ||||
|     'Cfengine3Lexer': ('pip._vendor.pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()), | ||||
|     'ChaiscriptLexer': ('pip._vendor.pygments.lexers.scripting', 'ChaiScript', ('chaiscript', 'chai'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')), | ||||
|     'ChapelLexer': ('pip._vendor.pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()), | ||||
|     'CharmciLexer': ('pip._vendor.pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()), | ||||
|     'CheetahHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')), | ||||
|     'CheetahJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Cheetah', ('javascript+cheetah', 'js+cheetah', 'javascript+spitfire', 'js+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')), | ||||
|     'CheetahLexer': ('pip._vendor.pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')), | ||||
|     'CheetahXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')), | ||||
|     'CirruLexer': ('pip._vendor.pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)), | ||||
|     'ClayLexer': ('pip._vendor.pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)), | ||||
|     'CleanLexer': ('pip._vendor.pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()), | ||||
|     'ClojureLexer': ('pip._vendor.pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj', '*.cljc'), ('text/x-clojure', 'application/x-clojure')), | ||||
|     'ClojureScriptLexer': ('pip._vendor.pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')), | ||||
|     'CobolFreeformatLexer': ('pip._vendor.pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()), | ||||
|     'CobolLexer': ('pip._vendor.pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)), | ||||
|     'CoffeeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'CoffeeScript', ('coffeescript', 'coffee-script', 'coffee'), ('*.coffee',), ('text/coffeescript',)), | ||||
|     'ColdfusionCFCLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()), | ||||
|     'ColdfusionHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)), | ||||
|     'ColdfusionLexer': ('pip._vendor.pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()), | ||||
|     'Comal80Lexer': ('pip._vendor.pygments.lexers.comal', 'COMAL-80', ('comal', 'comal80'), ('*.cml', '*.comal'), ()), | ||||
|     'CommonLispLexer': ('pip._vendor.pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)), | ||||
|     'ComponentPascalLexer': ('pip._vendor.pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)), | ||||
|     'CoqLexer': ('pip._vendor.pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)), | ||||
|     'CplintLexer': ('pip._vendor.pygments.lexers.cplint', 'cplint', ('cplint',), ('*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl'), ('text/x-cplint',)), | ||||
|     'CppLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP', '*.tpp'), ('text/x-c++hdr', 'text/x-c++src')), | ||||
|     'CppObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)), | ||||
|     'CrmshLexer': ('pip._vendor.pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()), | ||||
|     'CrocLexer': ('pip._vendor.pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)), | ||||
|     'CryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)), | ||||
|     'CrystalLexer': ('pip._vendor.pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)), | ||||
|     'CsoundDocumentLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()), | ||||
|     'CsoundOrchestraLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()), | ||||
|     'CsoundScoreLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()), | ||||
|     'CssDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), ('*.css.j2', '*.css.jinja2'), ('text/css+django', 'text/css+jinja')), | ||||
|     'CssErbLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Ruby', ('css+ruby', 'css+erb'), (), ('text/css+ruby',)), | ||||
|     'CssGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)), | ||||
|     'CssLexer': ('pip._vendor.pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)), | ||||
|     'CssPhpLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)), | ||||
|     'CssSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)), | ||||
|     'CudaLexer': ('pip._vendor.pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)), | ||||
|     'CypherLexer': ('pip._vendor.pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()), | ||||
|     'CythonLexer': ('pip._vendor.pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')), | ||||
|     'DLexer': ('pip._vendor.pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)), | ||||
|     'DObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)), | ||||
|     'DarcsPatchLexer': ('pip._vendor.pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()), | ||||
|     'DartLexer': ('pip._vendor.pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)), | ||||
|     'Dasm16Lexer': ('pip._vendor.pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)), | ||||
|     'DebianControlLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Control file', ('debcontrol', 'control'), ('control',), ()), | ||||
|     'DelphiLexer': ('pip._vendor.pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)), | ||||
|     'DevicetreeLexer': ('pip._vendor.pygments.lexers.devicetree', 'Devicetree', ('devicetree', 'dts'), ('*.dts', '*.dtsi'), ('text/x-c',)), | ||||
|     'DgLexer': ('pip._vendor.pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)), | ||||
|     'DiffLexer': ('pip._vendor.pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')), | ||||
|     'DjangoLexer': ('pip._vendor.pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')), | ||||
|     'DockerLexer': ('pip._vendor.pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)), | ||||
|     'DtdLexer': ('pip._vendor.pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)), | ||||
|     'DuelLexer': ('pip._vendor.pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')), | ||||
|     'DylanConsoleLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)), | ||||
|     'DylanLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)), | ||||
|     'DylanLidLexer': ('pip._vendor.pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)), | ||||
|     'ECLLexer': ('pip._vendor.pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)), | ||||
|     'ECLexer': ('pip._vendor.pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')), | ||||
|     'EarlGreyLexer': ('pip._vendor.pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)), | ||||
|     'EasytrieveLexer': ('pip._vendor.pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)), | ||||
|     'EbnfLexer': ('pip._vendor.pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)), | ||||
|     'EiffelLexer': ('pip._vendor.pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)), | ||||
|     'ElixirConsoleLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)), | ||||
|     'ElixirLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.eex', '*.exs', '*.leex'), ('text/x-elixir',)), | ||||
|     'ElmLexer': ('pip._vendor.pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)), | ||||
|     'ElpiLexer': ('pip._vendor.pygments.lexers.elpi', 'Elpi', ('elpi',), ('*.elpi',), ('text/x-elpi',)), | ||||
|     'EmacsLispLexer': ('pip._vendor.pygments.lexers.lisp', 'EmacsLisp', ('emacs-lisp', 'elisp', 'emacs'), ('*.el',), ('text/x-elisp', 'application/x-elisp')), | ||||
|     'EmailLexer': ('pip._vendor.pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)), | ||||
|     'ErbLexer': ('pip._vendor.pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)), | ||||
|     'ErlangLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)), | ||||
|     'ErlangShellLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)), | ||||
|     'EvoqueHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)), | ||||
|     'EvoqueLexer': ('pip._vendor.pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)), | ||||
|     'EvoqueXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)), | ||||
|     'ExeclineLexer': ('pip._vendor.pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()), | ||||
|     'EzhilLexer': ('pip._vendor.pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)), | ||||
|     'FSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi'), ('text/x-fsharp',)), | ||||
|     'FStarLexer': ('pip._vendor.pygments.lexers.ml', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)), | ||||
|     'FactorLexer': ('pip._vendor.pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)), | ||||
|     'FancyLexer': ('pip._vendor.pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)), | ||||
|     'FantomLexer': ('pip._vendor.pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)), | ||||
|     'FelixLexer': ('pip._vendor.pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)), | ||||
|     'FennelLexer': ('pip._vendor.pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()), | ||||
|     'FishShellLexer': ('pip._vendor.pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)), | ||||
|     'FlatlineLexer': ('pip._vendor.pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)), | ||||
|     'FloScriptLexer': ('pip._vendor.pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()), | ||||
|     'ForthLexer': ('pip._vendor.pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)), | ||||
|     'FortranFixedLexer': ('pip._vendor.pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()), | ||||
|     'FortranLexer': ('pip._vendor.pygments.lexers.fortran', 'Fortran', ('fortran', 'f90'), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)), | ||||
|     'FoxProLexer': ('pip._vendor.pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()), | ||||
|     'FreeFemLexer': ('pip._vendor.pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)), | ||||
|     'FutharkLexer': ('pip._vendor.pygments.lexers.futhark', 'Futhark', ('futhark',), ('*.fut',), ('text/x-futhark',)), | ||||
|     'GAPLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()), | ||||
|     'GDScriptLexer': ('pip._vendor.pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')), | ||||
|     'GLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)), | ||||
|     'GSQLLexer': ('pip._vendor.pygments.lexers.gsql', 'GSQL', ('gsql',), ('*.gsql',), ()), | ||||
|     'GasLexer': ('pip._vendor.pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)), | ||||
|     'GcodeLexer': ('pip._vendor.pygments.lexers.gcodelexer', 'g-code', ('gcode',), ('*.gcode',), ()), | ||||
|     'GenshiLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')), | ||||
|     'GenshiTextLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')), | ||||
|     'GettextLexer': ('pip._vendor.pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')), | ||||
|     'GherkinLexer': ('pip._vendor.pygments.lexers.testing', 'Gherkin', ('gherkin', 'cucumber'), ('*.feature',), ('text/x-gherkin',)), | ||||
|     'GnuplotLexer': ('pip._vendor.pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)), | ||||
|     'GoLexer': ('pip._vendor.pygments.lexers.go', 'Go', ('go', 'golang'), ('*.go',), ('text/x-gosrc',)), | ||||
|     'GoloLexer': ('pip._vendor.pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()), | ||||
|     'GoodDataCLLexer': ('pip._vendor.pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)), | ||||
|     'GosuLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)), | ||||
|     'GosuTemplateLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)), | ||||
|     'GraphvizLexer': ('pip._vendor.pygments.lexers.graphviz', 'Graphviz', ('graphviz', 'dot'), ('*.gv', '*.dot'), ('text/x-graphviz', 'text/vnd.graphviz')), | ||||
|     'GroffLexer': ('pip._vendor.pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1-9]', '*.man', '*.1p', '*.3pm'), ('application/x-troff', 'text/troff')), | ||||
|     'GroovyLexer': ('pip._vendor.pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)), | ||||
|     'HLSLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)), | ||||
|     'HTMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'HTML+UL4', ('html+ul4',), ('*.htmlul4',), ()), | ||||
|     'HamlLexer': ('pip._vendor.pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)), | ||||
|     'HandlebarsHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')), | ||||
|     'HandlebarsLexer': ('pip._vendor.pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()), | ||||
|     'HaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)), | ||||
|     'HaxeLexer': ('pip._vendor.pygments.lexers.haxe', 'Haxe', ('haxe', 'hxsl', 'hx'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')), | ||||
|     'HexdumpLexer': ('pip._vendor.pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()), | ||||
|     'HsailLexer': ('pip._vendor.pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)), | ||||
|     'HspecLexer': ('pip._vendor.pygments.lexers.haskell', 'Hspec', ('hspec',), (), ()), | ||||
|     'HtmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), ('*.html.j2', '*.htm.j2', '*.xhtml.j2', '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2'), ('text/html+django', 'text/html+jinja')), | ||||
|     'HtmlGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)), | ||||
|     'HtmlLexer': ('pip._vendor.pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')), | ||||
|     'HtmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')), | ||||
|     'HtmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)), | ||||
|     'HttpLexer': ('pip._vendor.pygments.lexers.textfmts', 'HTTP', ('http',), (), ()), | ||||
|     'HxmlLexer': ('pip._vendor.pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()), | ||||
|     'HyLexer': ('pip._vendor.pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')), | ||||
|     'HybrisLexer': ('pip._vendor.pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')), | ||||
|     'IDLLexer': ('pip._vendor.pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)), | ||||
|     'IconLexer': ('pip._vendor.pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()), | ||||
|     'IdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)), | ||||
|     'IgorLexer': ('pip._vendor.pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)), | ||||
|     'Inform6Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()), | ||||
|     'Inform6TemplateLexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()), | ||||
|     'Inform7Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()), | ||||
|     'IniLexer': ('pip._vendor.pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf', '.editorconfig', '*.service', '*.socket', '*.device', '*.mount', '*.automount', '*.swap', '*.target', '*.path', '*.timer', '*.slice', '*.scope'), ('text/x-ini', 'text/inf')), | ||||
|     'IoLexer': ('pip._vendor.pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)), | ||||
|     'IokeLexer': ('pip._vendor.pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)), | ||||
|     'IrcLogsLexer': ('pip._vendor.pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)), | ||||
|     'IsabelleLexer': ('pip._vendor.pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)), | ||||
|     'JLexer': ('pip._vendor.pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)), | ||||
|     'JMESPathLexer': ('pip._vendor.pygments.lexers.jmespath', 'JMESPath', ('jmespath', 'jp'), ('*.jp',), ()), | ||||
|     'JSLTLexer': ('pip._vendor.pygments.lexers.jslt', 'JSLT', ('jslt',), ('*.jslt',), ('text/x-jslt',)), | ||||
|     'JagsLexer': ('pip._vendor.pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()), | ||||
|     'JasminLexer': ('pip._vendor.pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()), | ||||
|     'JavaLexer': ('pip._vendor.pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)), | ||||
|     'JavascriptDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), ('*.js.j2', '*.js.jinja2'), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')), | ||||
|     'JavascriptErbLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Ruby', ('javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')), | ||||
|     'JavascriptGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')), | ||||
|     'JavascriptLexer': ('pip._vendor.pygments.lexers.javascript', 'JavaScript', ('javascript', 'js'), ('*.js', '*.jsm', '*.mjs', '*.cjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')), | ||||
|     'JavascriptPhpLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+PHP', ('javascript+php', 'js+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')), | ||||
|     'JavascriptSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Smarty', ('javascript+smarty', 'js+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')), | ||||
|     'JavascriptUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Javascript+UL4', ('js+ul4',), ('*.jsul4',), ()), | ||||
|     'JclLexer': ('pip._vendor.pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)), | ||||
|     'JsgfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')), | ||||
|     'JsonBareObjectLexer': ('pip._vendor.pygments.lexers.data', 'JSONBareObject', (), (), ()), | ||||
|     'JsonLdLexer': ('pip._vendor.pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)), | ||||
|     'JsonLexer': ('pip._vendor.pygments.lexers.data', 'JSON', ('json', 'json-object'), ('*.json', 'Pipfile.lock'), ('application/json', 'application/json-object')), | ||||
|     'JspLexer': ('pip._vendor.pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)), | ||||
|     'JuliaConsoleLexer': ('pip._vendor.pygments.lexers.julia', 'Julia console', ('jlcon', 'julia-repl'), (), ()), | ||||
|     'JuliaLexer': ('pip._vendor.pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')), | ||||
|     'JuttleLexer': ('pip._vendor.pygments.lexers.javascript', 'Juttle', ('juttle',), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')), | ||||
|     'KLexer': ('pip._vendor.pygments.lexers.q', 'K', ('k',), ('*.k',), ()), | ||||
|     'KalLexer': ('pip._vendor.pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')), | ||||
|     'KconfigLexer': ('pip._vendor.pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)), | ||||
|     'KernelLogLexer': ('pip._vendor.pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()), | ||||
|     'KokaLexer': ('pip._vendor.pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)), | ||||
|     'KotlinLexer': ('pip._vendor.pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt', '*.kts'), ('text/x-kotlin',)), | ||||
|     'KuinLexer': ('pip._vendor.pygments.lexers.kuin', 'Kuin', ('kuin',), ('*.kn',), ()), | ||||
|     'LSLLexer': ('pip._vendor.pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)), | ||||
|     'LassoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)), | ||||
|     'LassoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')), | ||||
|     'LassoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Lasso', ('javascript+lasso', 'js+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')), | ||||
|     'LassoLexer': ('pip._vendor.pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)), | ||||
|     'LassoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)), | ||||
|     'LeanLexer': ('pip._vendor.pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)), | ||||
|     'LessCssLexer': ('pip._vendor.pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)), | ||||
|     'LighttpdConfLexer': ('pip._vendor.pygments.lexers.configs', 'Lighttpd configuration file', ('lighttpd', 'lighty'), ('lighttpd.conf',), ('text/x-lighttpd-conf',)), | ||||
|     'LilyPondLexer': ('pip._vendor.pygments.lexers.lilypond', 'LilyPond', ('lilypond',), ('*.ly',), ()), | ||||
|     'LimboLexer': ('pip._vendor.pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)), | ||||
|     'LiquidLexer': ('pip._vendor.pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()), | ||||
|     'LiterateAgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Agda', ('literate-agda', 'lagda'), ('*.lagda',), ('text/x-literate-agda',)), | ||||
|     'LiterateCryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Cryptol', ('literate-cryptol', 'lcryptol', 'lcry'), ('*.lcry',), ('text/x-literate-cryptol',)), | ||||
|     'LiterateHaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Haskell', ('literate-haskell', 'lhaskell', 'lhs'), ('*.lhs',), ('text/x-literate-haskell',)), | ||||
|     'LiterateIdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Idris', ('literate-idris', 'lidris', 'lidr'), ('*.lidr',), ('text/x-literate-idris',)), | ||||
|     'LiveScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'LiveScript', ('livescript', 'live-script'), ('*.ls',), ('text/livescript',)), | ||||
|     'LlvmLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)), | ||||
|     'LlvmMirBodyLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR Body', ('llvm-mir-body',), (), ()), | ||||
|     'LlvmMirLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR', ('llvm-mir',), ('*.mir',), ()), | ||||
|     'LogosLexer': ('pip._vendor.pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)), | ||||
|     'LogtalkLexer': ('pip._vendor.pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)), | ||||
|     'LuaLexer': ('pip._vendor.pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')), | ||||
|     'MCFunctionLexer': ('pip._vendor.pygments.lexers.mcfunction', 'MCFunction', ('mcfunction', 'mcf'), ('*.mcfunction',), ('text/mcfunction',)), | ||||
|     'MIMELexer': ('pip._vendor.pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')), | ||||
|     'MOOCodeLexer': ('pip._vendor.pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)), | ||||
|     'MSDOSSessionLexer': ('pip._vendor.pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()), | ||||
|     'Macaulay2Lexer': ('pip._vendor.pygments.lexers.macaulay2', 'Macaulay2', ('macaulay2',), ('*.m2',), ()), | ||||
|     'MakefileLexer': ('pip._vendor.pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)), | ||||
|     'MakoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)), | ||||
|     'MakoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)), | ||||
|     'MakoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Mako', ('javascript+mako', 'js+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')), | ||||
|     'MakoLexer': ('pip._vendor.pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)), | ||||
|     'MakoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)), | ||||
|     'MaqlLexer': ('pip._vendor.pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')), | ||||
|     'MarkdownLexer': ('pip._vendor.pygments.lexers.markup', 'Markdown', ('markdown', 'md'), ('*.md', '*.markdown'), ('text/x-markdown',)), | ||||
|     'MaskLexer': ('pip._vendor.pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)), | ||||
|     'MasonLexer': ('pip._vendor.pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)), | ||||
|     'MathematicaLexer': ('pip._vendor.pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')), | ||||
|     'MatlabLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)), | ||||
|     'MatlabSessionLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()), | ||||
|     'MaximaLexer': ('pip._vendor.pygments.lexers.maxima', 'Maxima', ('maxima', 'macsyma'), ('*.mac', '*.max'), ()), | ||||
|     'MesonLexer': ('pip._vendor.pygments.lexers.meson', 'Meson', ('meson', 'meson.build'), ('meson.build', 'meson_options.txt'), ('text/x-meson',)), | ||||
|     'MiniDLexer': ('pip._vendor.pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)), | ||||
|     'MiniScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MiniScript', ('miniscript', 'ms'), ('*.ms',), ('text/x-minicript', 'application/x-miniscript')), | ||||
|     'ModelicaLexer': ('pip._vendor.pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)), | ||||
|     'Modula2Lexer': ('pip._vendor.pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)), | ||||
|     'MoinWikiLexer': ('pip._vendor.pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)), | ||||
|     'MonkeyLexer': ('pip._vendor.pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)), | ||||
|     'MonteLexer': ('pip._vendor.pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()), | ||||
|     'MoonScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MoonScript', ('moonscript', 'moon'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')), | ||||
|     'MoselLexer': ('pip._vendor.pygments.lexers.mosel', 'Mosel', ('mosel',), ('*.mos',), ()), | ||||
|     'MozPreprocCssLexer': ('pip._vendor.pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()), | ||||
|     'MozPreprocHashLexer': ('pip._vendor.pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()), | ||||
|     'MozPreprocJavascriptLexer': ('pip._vendor.pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()), | ||||
|     'MozPreprocPercentLexer': ('pip._vendor.pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()), | ||||
|     'MozPreprocXulLexer': ('pip._vendor.pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()), | ||||
|     'MqlLexer': ('pip._vendor.pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)), | ||||
|     'MscgenLexer': ('pip._vendor.pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()), | ||||
|     'MuPADLexer': ('pip._vendor.pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()), | ||||
|     'MxmlLexer': ('pip._vendor.pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()), | ||||
|     'MySqlLexer': ('pip._vendor.pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)), | ||||
|     'MyghtyCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)), | ||||
|     'MyghtyHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)), | ||||
|     'MyghtyJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Myghty', ('javascript+myghty', 'js+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')), | ||||
|     'MyghtyLexer': ('pip._vendor.pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)), | ||||
|     'MyghtyXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)), | ||||
|     'NCLLexer': ('pip._vendor.pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)), | ||||
|     'NSISLexer': ('pip._vendor.pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)), | ||||
|     'NasmLexer': ('pip._vendor.pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)), | ||||
|     'NasmObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)), | ||||
|     'NemerleLexer': ('pip._vendor.pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)), | ||||
|     'NesCLexer': ('pip._vendor.pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)), | ||||
|     'NestedTextLexer': ('pip._vendor.pygments.lexers.configs', 'NestedText', ('nestedtext', 'nt'), ('*.nt',), ()), | ||||
|     'NewLispLexer': ('pip._vendor.pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')), | ||||
|     'NewspeakLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)), | ||||
|     'NginxConfLexer': ('pip._vendor.pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)), | ||||
|     'NimrodLexer': ('pip._vendor.pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nim',)), | ||||
|     'NitLexer': ('pip._vendor.pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()), | ||||
|     'NixLexer': ('pip._vendor.pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)), | ||||
|     'NodeConsoleLexer': ('pip._vendor.pygments.lexers.javascript', 'Node.js REPL console session', ('nodejsrepl',), (), ('text/x-nodejsrepl',)), | ||||
|     'NotmuchLexer': ('pip._vendor.pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()), | ||||
|     'NuSMVLexer': ('pip._vendor.pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()), | ||||
|     'NumPyLexer': ('pip._vendor.pygments.lexers.python', 'NumPy', ('numpy',), (), ()), | ||||
|     'ObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)), | ||||
|     'ObjectiveCLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)), | ||||
|     'ObjectiveCppLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)), | ||||
|     'ObjectiveJLexer': ('pip._vendor.pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)), | ||||
|     'OcamlLexer': ('pip._vendor.pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)), | ||||
|     'OctaveLexer': ('pip._vendor.pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)), | ||||
|     'OdinLexer': ('pip._vendor.pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)), | ||||
|     'OmgIdlLexer': ('pip._vendor.pygments.lexers.c_like', 'OMG Interface Definition Language', ('omg-idl',), ('*.idl', '*.pidl'), ()), | ||||
|     'OocLexer': ('pip._vendor.pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)), | ||||
|     'OpaLexer': ('pip._vendor.pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)), | ||||
|     'OpenEdgeLexer': ('pip._vendor.pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')), | ||||
|     'OutputLexer': ('pip._vendor.pygments.lexers.special', 'Text output', ('output',), (), ()), | ||||
|     'PacmanConfLexer': ('pip._vendor.pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()), | ||||
|     'PanLexer': ('pip._vendor.pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()), | ||||
|     'ParaSailLexer': ('pip._vendor.pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)), | ||||
|     'PawnLexer': ('pip._vendor.pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)), | ||||
|     'PegLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)), | ||||
|     'Perl6Lexer': ('pip._vendor.pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')), | ||||
|     'PerlLexer': ('pip._vendor.pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', '*.perl'), ('text/x-perl', 'application/x-perl')), | ||||
|     'PhpLexer': ('pip._vendor.pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)), | ||||
|     'PigLexer': ('pip._vendor.pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)), | ||||
|     'PikeLexer': ('pip._vendor.pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)), | ||||
|     'PkgConfigLexer': ('pip._vendor.pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()), | ||||
|     'PlPgsqlLexer': ('pip._vendor.pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)), | ||||
|     'PointlessLexer': ('pip._vendor.pygments.lexers.pointless', 'Pointless', ('pointless',), ('*.ptls',), ()), | ||||
|     'PonyLexer': ('pip._vendor.pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()), | ||||
|     'PostScriptLexer': ('pip._vendor.pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)), | ||||
|     'PostgresConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)), | ||||
|     'PostgresLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)), | ||||
|     'PovrayLexer': ('pip._vendor.pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)), | ||||
|     'PowerShellLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell', ('powershell', 'pwsh', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)), | ||||
|     'PowerShellSessionLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell Session', ('pwsh-session', 'ps1con'), (), ()), | ||||
|     'PraatLexer': ('pip._vendor.pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()), | ||||
|     'ProcfileLexer': ('pip._vendor.pygments.lexers.procfile', 'Procfile', ('procfile',), ('Procfile',), ()), | ||||
|     'PrologLexer': ('pip._vendor.pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)), | ||||
|     'PromQLLexer': ('pip._vendor.pygments.lexers.promql', 'PromQL', ('promql',), ('*.promql',), ()), | ||||
|     'PropertiesLexer': ('pip._vendor.pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)), | ||||
|     'ProtoBufLexer': ('pip._vendor.pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()), | ||||
|     'PsyshConsoleLexer': ('pip._vendor.pygments.lexers.php', 'PsySH console session for PHP', ('psysh',), (), ()), | ||||
|     'PugLexer': ('pip._vendor.pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')), | ||||
|     'PuppetLexer': ('pip._vendor.pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()), | ||||
|     'PyPyLogLexer': ('pip._vendor.pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)), | ||||
|     'Python2Lexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')), | ||||
|     'Python2TracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)), | ||||
|     'PythonConsoleLexer': ('pip._vendor.pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)), | ||||
|     'PythonLexer': ('pip._vendor.pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')), | ||||
|     'PythonTracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')), | ||||
|     'PythonUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Python+UL4', ('py+ul4',), ('*.pyul4',), ()), | ||||
|     'QBasicLexer': ('pip._vendor.pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)), | ||||
|     'QLexer': ('pip._vendor.pygments.lexers.q', 'Q', ('q',), ('*.q',), ()), | ||||
|     'QVToLexer': ('pip._vendor.pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()), | ||||
|     'QlikLexer': ('pip._vendor.pygments.lexers.qlik', 'Qlik', ('qlik', 'qlikview', 'qliksense', 'qlikscript'), ('*.qvs', '*.qvw'), ()), | ||||
|     'QmlLexer': ('pip._vendor.pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')), | ||||
|     'RConsoleLexer': ('pip._vendor.pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()), | ||||
|     'RNCCompactLexer': ('pip._vendor.pygments.lexers.rnc', 'Relax-NG Compact', ('rng-compact', 'rnc'), ('*.rnc',), ()), | ||||
|     'RPMSpecLexer': ('pip._vendor.pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)), | ||||
|     'RacketLexer': ('pip._vendor.pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')), | ||||
|     'RagelCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()), | ||||
|     'RagelCppLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()), | ||||
|     'RagelDLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()), | ||||
|     'RagelEmbeddedLexer': ('pip._vendor.pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()), | ||||
|     'RagelJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()), | ||||
|     'RagelLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()), | ||||
|     'RagelObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()), | ||||
|     'RagelRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()), | ||||
|     'RawTokenLexer': ('pip._vendor.pygments.lexers.special', 'Raw token data', (), (), ('application/x-pygments-tokens',)), | ||||
|     'RdLexer': ('pip._vendor.pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)), | ||||
|     'ReasonLexer': ('pip._vendor.pygments.lexers.ml', 'ReasonML', ('reasonml', 'reason'), ('*.re', '*.rei'), ('text/x-reasonml',)), | ||||
|     'RebolLexer': ('pip._vendor.pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)), | ||||
|     'RedLexer': ('pip._vendor.pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')), | ||||
|     'RedcodeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()), | ||||
|     'RegeditLexer': ('pip._vendor.pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)), | ||||
|     'ResourceLexer': ('pip._vendor.pygments.lexers.resource', 'ResourceBundle', ('resourcebundle', 'resource'), (), ()), | ||||
|     'RexxLexer': ('pip._vendor.pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)), | ||||
|     'RhtmlLexer': ('pip._vendor.pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)), | ||||
|     'RideLexer': ('pip._vendor.pygments.lexers.ride', 'Ride', ('ride',), ('*.ride',), ('text/x-ride',)), | ||||
|     'RitaLexer': ('pip._vendor.pygments.lexers.rita', 'Rita', ('rita',), ('*.rita',), ('text/rita',)), | ||||
|     'RoboconfGraphLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()), | ||||
|     'RoboconfInstancesLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()), | ||||
|     'RobotFrameworkLexer': ('pip._vendor.pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot', '*.resource'), ('text/x-robotframework',)), | ||||
|     'RqlLexer': ('pip._vendor.pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)), | ||||
|     'RslLexer': ('pip._vendor.pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)), | ||||
|     'RstLexer': ('pip._vendor.pygments.lexers.markup', 'reStructuredText', ('restructuredtext', 'rst', 'rest'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')), | ||||
|     'RtsLexer': ('pip._vendor.pygments.lexers.trafficscript', 'TrafficScript', ('trafficscript', 'rts'), ('*.rts',), ()), | ||||
|     'RubyConsoleLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)), | ||||
|     'RubyLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby', ('ruby', 'rb', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile', 'Vagrantfile'), ('text/x-ruby', 'application/x-ruby')), | ||||
|     'RustLexer': ('pip._vendor.pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust', 'text/x-rust')), | ||||
|     'SASLexer': ('pip._vendor.pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')), | ||||
|     'SLexer': ('pip._vendor.pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')), | ||||
|     'SMLLexer': ('pip._vendor.pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')), | ||||
|     'SNBTLexer': ('pip._vendor.pygments.lexers.mcfunction', 'SNBT', ('snbt',), ('*.snbt',), ('text/snbt',)), | ||||
|     'SarlLexer': ('pip._vendor.pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)), | ||||
|     'SassLexer': ('pip._vendor.pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)), | ||||
|     'SaviLexer': ('pip._vendor.pygments.lexers.savi', 'Savi', ('savi',), ('*.savi',), ()), | ||||
|     'ScalaLexer': ('pip._vendor.pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)), | ||||
|     'ScamlLexer': ('pip._vendor.pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)), | ||||
|     'ScdocLexer': ('pip._vendor.pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()), | ||||
|     'SchemeLexer': ('pip._vendor.pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')), | ||||
|     'ScilabLexer': ('pip._vendor.pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)), | ||||
|     'ScssLexer': ('pip._vendor.pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)), | ||||
|     'SedLexer': ('pip._vendor.pygments.lexers.textedit', 'Sed', ('sed', 'gsed', 'ssed'), ('*.sed', '*.[gs]sed'), ('text/x-sed',)), | ||||
|     'ShExCLexer': ('pip._vendor.pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)), | ||||
|     'ShenLexer': ('pip._vendor.pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')), | ||||
|     'SieveLexer': ('pip._vendor.pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()), | ||||
|     'SilverLexer': ('pip._vendor.pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()), | ||||
|     'SingularityLexer': ('pip._vendor.pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()), | ||||
|     'SlashLexer': ('pip._vendor.pygments.lexers.slash', 'Slash', ('slash',), ('*.sla',), ()), | ||||
|     'SlimLexer': ('pip._vendor.pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)), | ||||
|     'SlurmBashLexer': ('pip._vendor.pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()), | ||||
|     'SmaliLexer': ('pip._vendor.pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)), | ||||
|     'SmalltalkLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)), | ||||
|     'SmartGameFormatLexer': ('pip._vendor.pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()), | ||||
|     'SmartyLexer': ('pip._vendor.pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)), | ||||
|     'SmithyLexer': ('pip._vendor.pygments.lexers.smithy', 'Smithy', ('smithy',), ('*.smithy',), ()), | ||||
|     'SnobolLexer': ('pip._vendor.pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)), | ||||
|     'SnowballLexer': ('pip._vendor.pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()), | ||||
|     'SolidityLexer': ('pip._vendor.pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()), | ||||
|     'SophiaLexer': ('pip._vendor.pygments.lexers.sophia', 'Sophia', ('sophia',), ('*.aes',), ()), | ||||
|     'SourcePawnLexer': ('pip._vendor.pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)), | ||||
|     'SourcesListLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Sourcelist', ('debsources', 'sourceslist', 'sources.list'), ('sources.list',), ()), | ||||
|     'SparqlLexer': ('pip._vendor.pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)), | ||||
|     'SpiceLexer': ('pip._vendor.pygments.lexers.spice', 'Spice', ('spice', 'spicelang'), ('*.spice',), ('text/x-spice',)), | ||||
|     'SqlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'SQL+Jinja', ('sql+jinja',), ('*.sql', '*.sql.j2', '*.sql.jinja2'), ()), | ||||
|     'SqlLexer': ('pip._vendor.pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)), | ||||
|     'SqliteConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)), | ||||
|     'SquidConfLexer': ('pip._vendor.pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)), | ||||
|     'SrcinfoLexer': ('pip._vendor.pygments.lexers.srcinfo', 'Srcinfo', ('srcinfo',), ('.SRCINFO',), ()), | ||||
|     'SspLexer': ('pip._vendor.pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)), | ||||
|     'StanLexer': ('pip._vendor.pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()), | ||||
|     'StataLexer': ('pip._vendor.pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')), | ||||
|     'SuperColliderLexer': ('pip._vendor.pygments.lexers.supercollider', 'SuperCollider', ('supercollider', 'sc'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')), | ||||
|     'SwiftLexer': ('pip._vendor.pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)), | ||||
|     'SwigLexer': ('pip._vendor.pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)), | ||||
|     'SystemVerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)), | ||||
|     'TAPLexer': ('pip._vendor.pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()), | ||||
|     'TNTLexer': ('pip._vendor.pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()), | ||||
|     'TOMLLexer': ('pip._vendor.pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ()), | ||||
|     'Tads3Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()), | ||||
|     'TalLexer': ('pip._vendor.pygments.lexers.tal', 'Tal', ('tal', 'uxntal'), ('*.tal',), ('text/x-uxntal',)), | ||||
|     'TasmLexer': ('pip._vendor.pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)), | ||||
|     'TclLexer': ('pip._vendor.pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')), | ||||
|     'TcshLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)), | ||||
|     'TcshSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()), | ||||
|     'TeaTemplateLexer': ('pip._vendor.pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)), | ||||
|     'TealLexer': ('pip._vendor.pygments.lexers.teal', 'teal', ('teal',), ('*.teal',), ()), | ||||
|     'TeraTermLexer': ('pip._vendor.pygments.lexers.teraterm', 'Tera Term macro', ('teratermmacro', 'teraterm', 'ttl'), ('*.ttl',), ('text/x-teratermmacro',)), | ||||
|     'TermcapLexer': ('pip._vendor.pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()), | ||||
|     'TerminfoLexer': ('pip._vendor.pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()), | ||||
|     'TerraformLexer': ('pip._vendor.pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')), | ||||
|     'TexLexer': ('pip._vendor.pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')), | ||||
|     'TextLexer': ('pip._vendor.pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)), | ||||
|     'ThingsDBLexer': ('pip._vendor.pygments.lexers.thingsdb', 'ThingsDB', ('ti', 'thingsdb'), ('*.ti',), ()), | ||||
|     'ThriftLexer': ('pip._vendor.pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)), | ||||
|     'TiddlyWiki5Lexer': ('pip._vendor.pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), ('text/vnd.tiddlywiki',)), | ||||
|     'TodotxtLexer': ('pip._vendor.pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)), | ||||
|     'TransactSqlLexer': ('pip._vendor.pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)), | ||||
|     'TreetopLexer': ('pip._vendor.pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()), | ||||
|     'TurtleLexer': ('pip._vendor.pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')), | ||||
|     'TwigHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)), | ||||
|     'TwigLexer': ('pip._vendor.pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)), | ||||
|     'TypeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'TypeScript', ('typescript', 'ts'), ('*.ts',), ('application/x-typescript', 'text/x-typescript')), | ||||
|     'TypoScriptCssDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()), | ||||
|     'TypoScriptHtmlDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()), | ||||
|     'TypoScriptLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)), | ||||
|     'UL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'UL4', ('ul4',), ('*.ul4',), ()), | ||||
|     'UcodeLexer': ('pip._vendor.pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()), | ||||
|     'UniconLexer': ('pip._vendor.pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)), | ||||
|     'UnixConfigLexer': ('pip._vendor.pygments.lexers.configs', 'Unix/Linux config files', ('unixconfig', 'linuxconfig'), (), ()), | ||||
|     'UrbiscriptLexer': ('pip._vendor.pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)), | ||||
|     'UsdLexer': ('pip._vendor.pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()), | ||||
|     'VBScriptLexer': ('pip._vendor.pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()), | ||||
|     'VCLLexer': ('pip._vendor.pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)), | ||||
|     'VCLSnippetLexer': ('pip._vendor.pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)), | ||||
|     'VCTreeStatusLexer': ('pip._vendor.pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()), | ||||
|     'VGLLexer': ('pip._vendor.pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()), | ||||
|     'ValaLexer': ('pip._vendor.pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)), | ||||
|     'VbNetAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()), | ||||
|     'VbNetLexer': ('pip._vendor.pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet', 'lobas', 'oobas', 'sobas'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')), | ||||
|     'VelocityHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)), | ||||
|     'VelocityLexer': ('pip._vendor.pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()), | ||||
|     'VelocityXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)), | ||||
|     'VerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)), | ||||
|     'VhdlLexer': ('pip._vendor.pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)), | ||||
|     'VimLexer': ('pip._vendor.pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)), | ||||
|     'WDiffLexer': ('pip._vendor.pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()), | ||||
|     'WatLexer': ('pip._vendor.pygments.lexers.webassembly', 'WebAssembly', ('wast', 'wat'), ('*.wat', '*.wast'), ()), | ||||
|     'WebIDLLexer': ('pip._vendor.pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()), | ||||
|     'WhileyLexer': ('pip._vendor.pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)), | ||||
|     'X10Lexer': ('pip._vendor.pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)), | ||||
|     'XMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'XML+UL4', ('xml+ul4',), ('*.xmlul4',), ()), | ||||
|     'XQueryLexer': ('pip._vendor.pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')), | ||||
|     'XmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), ('*.xml.j2', '*.xml.jinja2'), ('application/xml+django', 'application/xml+jinja')), | ||||
|     'XmlErbLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Ruby', ('xml+ruby', 'xml+erb'), (), ('application/xml+ruby',)), | ||||
|     'XmlLexer': ('pip._vendor.pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')), | ||||
|     'XmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)), | ||||
|     'XmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)), | ||||
|     'XorgLexer': ('pip._vendor.pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()), | ||||
|     'XsltLexer': ('pip._vendor.pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')), | ||||
|     'XtendLexer': ('pip._vendor.pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)), | ||||
|     'XtlangLexer': ('pip._vendor.pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()), | ||||
|     'YamlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2'), ('text/x-yaml+jinja', 'text/x-sls')), | ||||
|     'YamlLexer': ('pip._vendor.pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)), | ||||
|     'YangLexer': ('pip._vendor.pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)), | ||||
|     'ZeekLexer': ('pip._vendor.pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()), | ||||
|     'ZephirLexer': ('pip._vendor.pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()), | ||||
|     'ZigLexer': ('pip._vendor.pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)), | ||||
|     'apdlexer': ('pip._vendor.pygments.lexers.apdlexer', 'ANSYS parametric design language', ('ansys', 'apdl'), ('*.ans',), ()), | ||||
| } | ||||
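Each value in the LEXERS mapping above is a 5-tuple: (module path, display name, aliases, filename globs, mimetypes). As a minimal sketch of how such a table can be searched, assuming the LEXERS dict above is in scope (find_lexer_entry is a hypothetical helper, not part of the vendored API; the real pygments.lexers machinery adds caching and plugin lookup on top of this):

    from fnmatch import fnmatch

    def find_lexer_entry(alias=None, filename=None):
        # Illustrative only: linear scan over the mapping; first match wins.
        for name, (module, display, aliases, globs, mimes) in LEXERS.items():
            if alias is not None and alias in aliases:
                return name, module
            if filename is not None and any(fnmatch(filename, g) for g in globs):
                return name, module
        return None

    find_lexer_entry(alias='rust')        # ('RustLexer', 'pip._vendor.pygments.lexers.rust')
    find_lexer_entry(filename='x.toml')   # ('TOMLLexer', 'pip._vendor.pygments.lexers.configs')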
1204  lib/python3.11/site-packages/pip/_vendor/pygments/lexers/python.py (new file; diff suppressed because it is too large)

lib/python3.11/site-packages/pip/_vendor/pygments/modeline.py (new file)
							| @ -0,0 +1,43 @@ | ||||
| """ | ||||
|     pygments.modeline | ||||
|     ~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     A simple modeline parser (based on pymodeline). | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| import re | ||||
|  | ||||
| __all__ = ['get_filetype_from_buffer'] | ||||
|  | ||||
|  | ||||
| modeline_re = re.compile(r''' | ||||
|     (?: vi | vim | ex ) (?: [<=>]? \d* )? : | ||||
|     .* (?: ft | filetype | syn | syntax ) = ( [^:\s]+ ) | ||||
| ''', re.VERBOSE) | ||||
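| # Matches Vim-style modelines such as "vim: set ft=python :" or | ||||
| # "ex: syntax=sh" and captures the filetype name after the "=". | ||||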
|  | ||||
|  | ||||
| def get_filetype_from_line(l): | ||||
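|     """Return the filetype named in a modeline found on ``l``, or None.""" | ||||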
|     m = modeline_re.search(l) | ||||
|     if m: | ||||
|         return m.group(1) | ||||
|  | ||||
|  | ||||
| def get_filetype_from_buffer(buf, max_lines=5): | ||||
|     """ | ||||
|     Scan the buffer for modelines and return filetype if one is found. | ||||
|     """ | ||||
|     lines = buf.splitlines() | ||||
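|     # Modelines usually sit near the end of a file, so scan the last | ||||
|     # ``max_lines`` lines first, in reverse order. | ||||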
|     for l in lines[-1:-max_lines-1:-1]: | ||||
|         ret = get_filetype_from_line(l) | ||||
|         if ret: | ||||
|             return ret | ||||
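|     # Fall back to the first ``max_lines + 1`` lines of the buffer. | ||||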
|     for i in range(max_lines, -1, -1): | ||||
|         if i < len(lines): | ||||
|             ret = get_filetype_from_line(lines[i]) | ||||
|             if ret: | ||||
|                 return ret | ||||
|  | ||||
|     return None | ||||
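A small usage sketch for the module above, assuming the vendored import path shown in this commit:

    from pip._vendor.pygments.modeline import get_filetype_from_buffer

    buf = "#!/bin/sh\necho hello\n# vim: set ft=sh ts=4 :\n"
    assert get_filetype_from_buffer(buf) == 'sh'  # modeline found on the last line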
88  lib/python3.11/site-packages/pip/_vendor/pygments/plugin.py (new file)
							| @ -0,0 +1,88 @@ | ||||
| """ | ||||
|     pygments.plugin | ||||
|     ~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Pygments plugin interface. By default, this tries to use | ||||
|     ``importlib.metadata``, which is in the Python standard | ||||
|     library since Python 3.8, or its ``importlib_metadata`` | ||||
|     backport for earlier versions of Python. It falls back on | ||||
|     ``pkg_resources`` if not found. Finally, if ``pkg_resources`` | ||||
|     is not found either, no plugins are loaded at all. | ||||
|  | ||||
|     lexer plugins:: | ||||
|  | ||||
|         [pygments.lexers] | ||||
|         yourlexer = yourmodule:YourLexer | ||||
|  | ||||
|     formatter plugins:: | ||||
|  | ||||
|         [pygments.formatters] | ||||
|         yourformatter = yourformatter:YourFormatter | ||||
|         /.ext = yourformatter:YourFormatter | ||||
|  | ||||
|     As you can see, you can define extensions for the formatter | ||||
|     with a leading slash. | ||||
|  | ||||
|     style plugins:: | ||||
|  | ||||
|         [pygments.styles] | ||||
|         yourstyle = yourstyle:YourStyle | ||||
|  | ||||
|     filter plugins:: | ||||
|  | ||||
|         [pygments.filters] | ||||
|         yourfilter = yourfilter:YourFilter | ||||
|  | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| LEXER_ENTRY_POINT = 'pygments.lexers' | ||||
| FORMATTER_ENTRY_POINT = 'pygments.formatters' | ||||
| STYLE_ENTRY_POINT = 'pygments.styles' | ||||
| FILTER_ENTRY_POINT = 'pygments.filters' | ||||
|  | ||||
|  | ||||
| def iter_entry_points(group_name): | ||||
|     try: | ||||
|         from importlib.metadata import entry_points | ||||
|     except ImportError: | ||||
|         try: | ||||
|             from importlib_metadata import entry_points | ||||
|         except ImportError: | ||||
|             try: | ||||
|                 from pip._vendor.pkg_resources import iter_entry_points | ||||
|             except (ImportError, OSError): | ||||
|                 return [] | ||||
|             else: | ||||
|                 return iter_entry_points(group_name) | ||||
|     groups = entry_points() | ||||
|     if hasattr(groups, 'select'): | ||||
|         # New interface in Python 3.10 and newer versions of the | ||||
|         # importlib_metadata backport. | ||||
|         return groups.select(group=group_name) | ||||
|     else: | ||||
|         # Older interface, deprecated in Python 3.10 and recent | ||||
|         # importlib_metadata, but we need it in Python 3.8 and 3.9. | ||||
|         return groups.get(group_name, []) | ||||
|  | ||||
|  | ||||
| def find_plugin_lexers(): | ||||
|     for entrypoint in iter_entry_points(LEXER_ENTRY_POINT): | ||||
|         yield entrypoint.load() | ||||
|  | ||||
|  | ||||
| def find_plugin_formatters(): | ||||
|     for entrypoint in iter_entry_points(FORMATTER_ENTRY_POINT): | ||||
|         yield entrypoint.name, entrypoint.load() | ||||
|  | ||||
|  | ||||
| def find_plugin_styles(): | ||||
|     for entrypoint in iter_entry_points(STYLE_ENTRY_POINT): | ||||
|         yield entrypoint.name, entrypoint.load() | ||||
|  | ||||
|  | ||||
| def find_plugin_filters(): | ||||
|     for entrypoint in iter_entry_points(FILTER_ENTRY_POINT): | ||||
|         yield entrypoint.name, entrypoint.load() | ||||
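As a usage sketch for the discovery helpers above (only the vendored import path is assumed), an application can enumerate installed lexer plugins like so:

    from pip._vendor.pygments.plugin import find_plugin_lexers

    # Each iteration loads one class registered under the
    # 'pygments.lexers' entry-point group.
    for lexer_cls in find_plugin_lexers():
        print(lexer_cls.__name__, getattr(lexer_cls, 'aliases', []))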
lib/python3.11/site-packages/pip/_vendor/pygments/regexopt.py (new file, 91 lines)
| @ -0,0 +1,91 @@ | ||||
| """ | ||||
|     pygments.regexopt | ||||
|     ~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     An algorithm that generates optimized regexes for matching long lists of | ||||
|     literal strings. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| import re | ||||
| from re import escape | ||||
| from os.path import commonprefix | ||||
| from itertools import groupby | ||||
| from operator import itemgetter | ||||
|  | ||||
| CS_ESCAPE = re.compile(r'[\[\^\\\-\]]') | ||||
| FIRST_ELEMENT = itemgetter(0) | ||||
|  | ||||
|  | ||||
| def make_charset(letters): | ||||
|     return '[' + CS_ESCAPE.sub(lambda m: '\\' + m.group(), ''.join(letters)) + ']' | ||||
|  | ||||
|  | ||||
| def regex_opt_inner(strings, open_paren): | ||||
|     """Return a regex that matches any string in the sorted list of strings.""" | ||||
|     close_paren = open_paren and ')' or '' | ||||
|     # print strings, repr(open_paren) | ||||
|     if not strings: | ||||
|         # print '-> nothing left' | ||||
|         return '' | ||||
|     first = strings[0] | ||||
|     if len(strings) == 1: | ||||
|         # print '-> only 1 string' | ||||
|         return open_paren + escape(first) + close_paren | ||||
|     if not first: | ||||
|         # print '-> first string empty' | ||||
|         return open_paren + regex_opt_inner(strings[1:], '(?:') \ | ||||
|             + '?' + close_paren | ||||
|     if len(first) == 1: | ||||
|         # multiple one-char strings? make a charset | ||||
|         oneletter = [] | ||||
|         rest = [] | ||||
|         for s in strings: | ||||
|             if len(s) == 1: | ||||
|                 oneletter.append(s) | ||||
|             else: | ||||
|                 rest.append(s) | ||||
|         if len(oneletter) > 1:  # do we have more than one oneletter string? | ||||
|             if rest: | ||||
|                 # print '-> 1-character + rest' | ||||
|                 return open_paren + regex_opt_inner(rest, '') + '|' \ | ||||
|                     + make_charset(oneletter) + close_paren | ||||
|             # print '-> only 1-character' | ||||
|             return open_paren + make_charset(oneletter) + close_paren | ||||
|     prefix = commonprefix(strings) | ||||
|     if prefix: | ||||
|         plen = len(prefix) | ||||
|         # we have a prefix for all strings | ||||
|         # print '-> prefix:', prefix | ||||
|         return open_paren + escape(prefix) \ | ||||
|             + regex_opt_inner([s[plen:] for s in strings], '(?:') \ | ||||
|             + close_paren | ||||
|     # is there a suffix? | ||||
|     strings_rev = [s[::-1] for s in strings] | ||||
|     suffix = commonprefix(strings_rev) | ||||
|     if suffix: | ||||
|         slen = len(suffix) | ||||
|         # print '-> suffix:', suffix[::-1] | ||||
|         return open_paren \ | ||||
|             + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \ | ||||
|             + escape(suffix[::-1]) + close_paren | ||||
|     # recurse on common 1-string prefixes | ||||
|     # print '-> last resort' | ||||
|     return open_paren + \ | ||||
|         '|'.join(regex_opt_inner(list(group[1]), '') | ||||
|                  for group in groupby(strings, lambda s: s[0] == first[0])) \ | ||||
|         + close_paren | ||||
|  | ||||
|  | ||||
| def regex_opt(strings, prefix='', suffix=''): | ||||
|     """Return a compiled regex that matches any string in the given list. | ||||
|  | ||||
|     The strings to match must be literal strings, not regexes.  They will be | ||||
|     regex-escaped. | ||||
|  | ||||
|     *prefix* and *suffix* are pre- and appended to the final regex. | ||||
|     """ | ||||
|     strings = sorted(strings) | ||||
|     return prefix + regex_opt_inner(strings, '(') + suffix | ||||
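A small illustration of the optimizer; the expected pattern in the comment was traced by hand through the algorithm above, so treat it as a sketch rather than a guaranteed output format:

    import re
    from pip._vendor.pygments.regexopt import regex_opt

    pattern = regex_opt(['if', 'elif', 'else'], prefix=r'\b', suffix=r'\b')
    print(pattern)                      # \b(el(?:if|se)|if)\b
    assert re.match(pattern, 'elif')
    assert not re.match(pattern, 'elsewhere')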
lib/python3.11/site-packages/pip/_vendor/pygments/scanner.py (new file, 104 lines)
| @ -0,0 +1,104 @@ | ||||
| """ | ||||
|     pygments.scanner | ||||
|     ~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     This library implements a regex based scanner. Some languages | ||||
|     like Pascal are easy to parse but have some keywords that | ||||
|     depend on the context. Because of this it's impossible to lex | ||||
|     that just by using a regular expression lexer like the | ||||
|     `RegexLexer`. | ||||
|  | ||||
|     Have a look at the `DelphiLexer` to get an idea of how to use | ||||
|     this scanner. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
| import re | ||||
|  | ||||
|  | ||||
| class EndOfText(RuntimeError): | ||||
|     """ | ||||
|     Raised if the end of the text is reached and the user | ||||
|     tries to call a match function. | ||||
|     """ | ||||
|  | ||||
|  | ||||
| class Scanner: | ||||
|     """ | ||||
|     Simple scanner | ||||
|  | ||||
|     All method patterns are regular expression strings (not | ||||
|     compiled expressions!) | ||||
|     """ | ||||
|  | ||||
|     def __init__(self, text, flags=0): | ||||
|         """ | ||||
|         :param text:    The text which should be scanned | ||||
|         :param flags:   default regular expression flags | ||||
|         """ | ||||
|         self.data = text | ||||
|         self.data_length = len(text) | ||||
|         self.start_pos = 0 | ||||
|         self.pos = 0 | ||||
|         self.flags = flags | ||||
|         self.last = None | ||||
|         self.match = None | ||||
|         self._re_cache = {} | ||||
|  | ||||
|     def eos(self): | ||||
|         """`True` if the scanner reached the end of text.""" | ||||
|         return self.pos >= self.data_length | ||||
|     eos = property(eos, doc=eos.__doc__) | ||||
|  | ||||
|     def check(self, pattern): | ||||
|         """ | ||||
|         Apply `pattern` on the current position and return | ||||
|         the match object. (Doesn't touch pos). Use this for | ||||
|         lookahead. | ||||
|         """ | ||||
|         if self.eos: | ||||
|             raise EndOfText() | ||||
|         if pattern not in self._re_cache: | ||||
|             self._re_cache[pattern] = re.compile(pattern, self.flags) | ||||
|         return self._re_cache[pattern].match(self.data, self.pos) | ||||
|  | ||||
|     def test(self, pattern): | ||||
|         """Apply a pattern on the current position and check | ||||
|         if it patches. Doesn't touch pos. | ||||
|         """ | ||||
|         return self.check(pattern) is not None | ||||
|  | ||||
|     def scan(self, pattern): | ||||
|         """ | ||||
|         Scan the text for the given pattern and update pos/match | ||||
|         and related fields. The return value is a boolean that | ||||
|         indicates if the pattern matched. The matched value is | ||||
|         stored on the instance as ``match``, the last value is | ||||
|         stored as ``last``. ``start_pos`` is the position of the | ||||
|         pointer before the pattern was matched, ``pos`` is the | ||||
|         end position. | ||||
|         """ | ||||
|         if self.eos: | ||||
|             raise EndOfText() | ||||
|         if pattern not in self._re_cache: | ||||
|             self._re_cache[pattern] = re.compile(pattern, self.flags) | ||||
|         self.last = self.match | ||||
|         m = self._re_cache[pattern].match(self.data, self.pos) | ||||
|         if m is None: | ||||
|             return False | ||||
|         self.start_pos = m.start() | ||||
|         self.pos = m.end() | ||||
|         self.match = m.group() | ||||
|         return True | ||||
|  | ||||
|     def get_char(self): | ||||
|         """Scan exactly one char.""" | ||||
|         self.scan('.') | ||||
|  | ||||
|     def __repr__(self): | ||||
|         return '<%s %d/%d>' % ( | ||||
|             self.__class__.__name__, | ||||
|             self.pos, | ||||
|             self.data_length | ||||
|         ) | ||||
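A short usage sketch of the API above; patterns are plain strings, compiled on first use and cached internally:

    from pip._vendor.pygments.scanner import Scanner

    s = Scanner('program Demo;')
    if s.scan(r'\w+'):                  # consume one word
        print(s.match, s.pos)           # program 7
    s.scan(r'\s+')                      # skip whitespace
    print(s.check(r'\w+').group())      # lookahead only: Demo (pos unchanged)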
lib/python3.11/site-packages/pip/_vendor/pygments/sphinxext.py (new file, 155 lines)
| @ -0,0 +1,155 @@ | ||||
| """ | ||||
|     pygments.sphinxext | ||||
|     ~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Sphinx extension to generate automatic documentation of lexers, | ||||
|     formatters and filters. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| import sys | ||||
|  | ||||
| from docutils import nodes | ||||
| from docutils.statemachine import ViewList | ||||
| from docutils.parsers.rst import Directive | ||||
| from sphinx.util.nodes import nested_parse_with_titles | ||||
|  | ||||
|  | ||||
| MODULEDOC = ''' | ||||
| .. module:: %s | ||||
|  | ||||
| %s | ||||
| %s | ||||
| ''' | ||||
|  | ||||
| LEXERDOC = ''' | ||||
| .. class:: %s | ||||
|  | ||||
|     :Short names: %s | ||||
|     :Filenames:   %s | ||||
|     :MIME types:  %s | ||||
|  | ||||
|     %s | ||||
|  | ||||
| ''' | ||||
|  | ||||
| FMTERDOC = ''' | ||||
| .. class:: %s | ||||
|  | ||||
|     :Short names: %s | ||||
|     :Filenames: %s | ||||
|  | ||||
|     %s | ||||
|  | ||||
| ''' | ||||
|  | ||||
| FILTERDOC = ''' | ||||
| .. class:: %s | ||||
|  | ||||
|     :Name: %s | ||||
|  | ||||
|     %s | ||||
|  | ||||
| ''' | ||||
|  | ||||
|  | ||||
| class PygmentsDoc(Directive): | ||||
|     """ | ||||
|     A directive to collect all lexers/formatters/filters and generate | ||||
|     autoclass directives for them. | ||||
|     """ | ||||
|     has_content = False | ||||
|     required_arguments = 1 | ||||
|     optional_arguments = 0 | ||||
|     final_argument_whitespace = False | ||||
|     option_spec = {} | ||||
|  | ||||
|     def run(self): | ||||
|         self.filenames = set() | ||||
|         if self.arguments[0] == 'lexers': | ||||
|             out = self.document_lexers() | ||||
|         elif self.arguments[0] == 'formatters': | ||||
|             out = self.document_formatters() | ||||
|         elif self.arguments[0] == 'filters': | ||||
|             out = self.document_filters() | ||||
|         else: | ||||
|             raise Exception('invalid argument for "pygmentsdoc" directive') | ||||
|         node = nodes.compound() | ||||
|         vl = ViewList(out.split('\n'), source='') | ||||
|         nested_parse_with_titles(self.state, vl, node) | ||||
|         for fn in self.filenames: | ||||
|             self.state.document.settings.record_dependencies.add(fn) | ||||
|         return node.children | ||||
|  | ||||
|     def document_lexers(self): | ||||
|         from pip._vendor.pygments.lexers._mapping import LEXERS | ||||
|         out = [] | ||||
|         modules = {} | ||||
|         moduledocstrings = {} | ||||
|         for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]): | ||||
|             module = data[0] | ||||
|             mod = __import__(module, None, None, [classname]) | ||||
|             self.filenames.add(mod.__file__) | ||||
|             cls = getattr(mod, classname) | ||||
|             if not cls.__doc__: | ||||
|                 print("Warning: %s does not have a docstring." % classname) | ||||
|             docstring = cls.__doc__ | ||||
|             if isinstance(docstring, bytes): | ||||
|                 docstring = docstring.decode('utf8') | ||||
|             modules.setdefault(module, []).append(( | ||||
|                 classname, | ||||
|                 ', '.join(data[2]) or 'None', | ||||
|                 ', '.join(data[3]).replace('*', '\\*').replace('_', '\\_') or 'None', | ||||
|                 ', '.join(data[4]) or 'None', | ||||
|                 docstring)) | ||||
|             if module not in moduledocstrings: | ||||
|                 moddoc = mod.__doc__ | ||||
|                 if isinstance(moddoc, bytes): | ||||
|                     moddoc = moddoc.decode('utf8') | ||||
|                 moduledocstrings[module] = moddoc | ||||
|  | ||||
|         for module, lexers in sorted(modules.items(), key=lambda x: x[0]): | ||||
|             if moduledocstrings[module] is None: | ||||
|                 raise Exception("Missing docstring for %s" % (module,)) | ||||
|             heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.') | ||||
|             out.append(MODULEDOC % (module, heading, '-'*len(heading))) | ||||
|             for data in lexers: | ||||
|                 out.append(LEXERDOC % data) | ||||
|  | ||||
|         return ''.join(out) | ||||
|  | ||||
|     def document_formatters(self): | ||||
|         from pip._vendor.pygments.formatters import FORMATTERS | ||||
|  | ||||
|         out = [] | ||||
|         for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]): | ||||
|             module = data[0] | ||||
|             mod = __import__(module, None, None, [classname]) | ||||
|             self.filenames.add(mod.__file__) | ||||
|             cls = getattr(mod, classname) | ||||
|             docstring = cls.__doc__ | ||||
|             if isinstance(docstring, bytes): | ||||
|                 docstring = docstring.decode('utf8') | ||||
|             heading = cls.__name__ | ||||
|             out.append(FMTERDOC % (heading, ', '.join(data[2]) or 'None', | ||||
|                                    ', '.join(data[3]).replace('*', '\\*') or 'None', | ||||
|                                    docstring)) | ||||
|         return ''.join(out) | ||||
|  | ||||
|     def document_filters(self): | ||||
|         from pip._vendor.pygments.filters import FILTERS | ||||
|  | ||||
|         out = [] | ||||
|         for name, cls in FILTERS.items(): | ||||
|             self.filenames.add(sys.modules[cls.__module__].__file__) | ||||
|             docstring = cls.__doc__ | ||||
|             if isinstance(docstring, bytes): | ||||
|                 docstring = docstring.decode('utf8') | ||||
|             out.append(FILTERDOC % (cls.__name__, name, docstring)) | ||||
|         return ''.join(out) | ||||
|  | ||||
|  | ||||
| def setup(app): | ||||
|     app.add_directive('pygmentsdoc', PygmentsDoc) | ||||
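For context, upstream Pygments wires this in like any other Sphinx extension; a minimal ``conf.py`` fragment (assuming the package is importable as plain ``pygments`` rather than under the vendored path) might look like:

    # conf.py (sketch)
    extensions = ['pygments.sphinxext']

    # then, inside a .rst document:
    #     .. pygmentsdoc:: lexers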
lib/python3.11/site-packages/pip/_vendor/pygments/style.py (new file, 197 lines)
| @ -0,0 +1,197 @@ | ||||
| """ | ||||
|     pygments.style | ||||
|     ~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Basic style object. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| from pip._vendor.pygments.token import Token, STANDARD_TYPES | ||||
|  | ||||
| # Default mapping of ansixxx to RGB colors. | ||||
| _ansimap = { | ||||
|     # dark | ||||
|     'ansiblack': '000000', | ||||
|     'ansired': '7f0000', | ||||
|     'ansigreen': '007f00', | ||||
|     'ansiyellow': '7f7fe0', | ||||
|     'ansiblue': '00007f', | ||||
|     'ansimagenta': '7f007f', | ||||
|     'ansicyan': '007f7f', | ||||
|     'ansigray': 'e5e5e5', | ||||
|     # normal | ||||
|     'ansibrightblack': '555555', | ||||
|     'ansibrightred': 'ff0000', | ||||
|     'ansibrightgreen': '00ff00', | ||||
|     'ansibrightyellow': 'ffff00', | ||||
|     'ansibrightblue': '0000ff', | ||||
|     'ansibrightmagenta': 'ff00ff', | ||||
|     'ansibrightcyan': '00ffff', | ||||
|     'ansiwhite': 'ffffff', | ||||
| } | ||||
| # mapping of deprecated #ansixxx colors to new color names | ||||
| _deprecated_ansicolors = { | ||||
|     # dark | ||||
|     '#ansiblack': 'ansiblack', | ||||
|     '#ansidarkred': 'ansired', | ||||
|     '#ansidarkgreen': 'ansigreen', | ||||
|     '#ansibrown': 'ansiyellow', | ||||
|     '#ansidarkblue': 'ansiblue', | ||||
|     '#ansipurple': 'ansimagenta', | ||||
|     '#ansiteal': 'ansicyan', | ||||
|     '#ansilightgray': 'ansigray', | ||||
|     # normal | ||||
|     '#ansidarkgray': 'ansibrightblack', | ||||
|     '#ansired': 'ansibrightred', | ||||
|     '#ansigreen': 'ansibrightgreen', | ||||
|     '#ansiyellow': 'ansibrightyellow', | ||||
|     '#ansiblue': 'ansibrightblue', | ||||
|     '#ansifuchsia': 'ansibrightmagenta', | ||||
|     '#ansiturquoise': 'ansibrightcyan', | ||||
|     '#ansiwhite': 'ansiwhite', | ||||
| } | ||||
| ansicolors = set(_ansimap) | ||||
|  | ||||
|  | ||||
| class StyleMeta(type): | ||||
|  | ||||
|     def __new__(mcs, name, bases, dct): | ||||
|         obj = type.__new__(mcs, name, bases, dct) | ||||
|         for token in STANDARD_TYPES: | ||||
|             if token not in obj.styles: | ||||
|                 obj.styles[token] = '' | ||||
|  | ||||
|         def colorformat(text): | ||||
|             if text in ansicolors: | ||||
|                 return text | ||||
|             if text[0:1] == '#': | ||||
|                 col = text[1:] | ||||
|                 if len(col) == 6: | ||||
|                     return col | ||||
|                 elif len(col) == 3: | ||||
|                     return col[0] * 2 + col[1] * 2 + col[2] * 2 | ||||
|             elif text == '': | ||||
|                 return '' | ||||
|             elif text.startswith('var') or text.startswith('calc'): | ||||
|                 return text | ||||
|             assert False, "wrong color format %r" % text | ||||
|  | ||||
|         _styles = obj._styles = {} | ||||
|  | ||||
|         for ttype in obj.styles: | ||||
|             for token in ttype.split(): | ||||
|                 if token in _styles: | ||||
|                     continue | ||||
|                 ndef = _styles.get(token.parent, None) | ||||
|                 styledefs = obj.styles.get(token, '').split() | ||||
|                 if not ndef or token is None: | ||||
|                     ndef = ['', 0, 0, 0, '', '', 0, 0, 0] | ||||
|                 elif 'noinherit' in styledefs and token is not Token: | ||||
|                     ndef = _styles[Token][:] | ||||
|                 else: | ||||
|                     ndef = ndef[:] | ||||
|                 _styles[token] = ndef | ||||
|                 for styledef in obj.styles.get(token, '').split(): | ||||
|                     if styledef == 'noinherit': | ||||
|                         pass | ||||
|                     elif styledef == 'bold': | ||||
|                         ndef[1] = 1 | ||||
|                     elif styledef == 'nobold': | ||||
|                         ndef[1] = 0 | ||||
|                     elif styledef == 'italic': | ||||
|                         ndef[2] = 1 | ||||
|                     elif styledef == 'noitalic': | ||||
|                         ndef[2] = 0 | ||||
|                     elif styledef == 'underline': | ||||
|                         ndef[3] = 1 | ||||
|                     elif styledef == 'nounderline': | ||||
|                         ndef[3] = 0 | ||||
|                     elif styledef[:3] == 'bg:': | ||||
|                         ndef[4] = colorformat(styledef[3:]) | ||||
|                     elif styledef[:7] == 'border:': | ||||
|                         ndef[5] = colorformat(styledef[7:]) | ||||
|                     elif styledef == 'roman': | ||||
|                         ndef[6] = 1 | ||||
|                     elif styledef == 'sans': | ||||
|                         ndef[7] = 1 | ||||
|                     elif styledef == 'mono': | ||||
|                         ndef[8] = 1 | ||||
|                     else: | ||||
|                         ndef[0] = colorformat(styledef) | ||||
|  | ||||
|         return obj | ||||
|  | ||||
|     def style_for_token(cls, token): | ||||
|         t = cls._styles[token] | ||||
|         ansicolor = bgansicolor = None | ||||
|         color = t[0] | ||||
|         if color in _deprecated_ansicolors: | ||||
|             color = _deprecated_ansicolors[color] | ||||
|         if color in ansicolors: | ||||
|             ansicolor = color | ||||
|             color = _ansimap[color] | ||||
|         bgcolor = t[4] | ||||
|         if bgcolor in _deprecated_ansicolors: | ||||
|             bgcolor = _deprecated_ansicolors[bgcolor] | ||||
|         if bgcolor in ansicolors: | ||||
|             bgansicolor = bgcolor | ||||
|             bgcolor = _ansimap[bgcolor] | ||||
|  | ||||
|         return { | ||||
|             'color':        color or None, | ||||
|             'bold':         bool(t[1]), | ||||
|             'italic':       bool(t[2]), | ||||
|             'underline':    bool(t[3]), | ||||
|             'bgcolor':      bgcolor or None, | ||||
|             'border':       t[5] or None, | ||||
|             'roman':        bool(t[6]) or None, | ||||
|             'sans':         bool(t[7]) or None, | ||||
|             'mono':         bool(t[8]) or None, | ||||
|             'ansicolor':    ansicolor, | ||||
|             'bgansicolor':  bgansicolor, | ||||
|         } | ||||
|  | ||||
|     def list_styles(cls): | ||||
|         return list(cls) | ||||
|  | ||||
|     def styles_token(cls, ttype): | ||||
|         return ttype in cls._styles | ||||
|  | ||||
|     def __iter__(cls): | ||||
|         for token in cls._styles: | ||||
|             yield token, cls.style_for_token(token) | ||||
|  | ||||
|     def __len__(cls): | ||||
|         return len(cls._styles) | ||||
|  | ||||
|  | ||||
| class Style(metaclass=StyleMeta): | ||||
|  | ||||
|     #: overall background color (``None`` means transparent) | ||||
|     background_color = '#ffffff' | ||||
|  | ||||
|     #: highlight background color | ||||
|     highlight_color = '#ffffcc' | ||||
|  | ||||
|     #: line number font color | ||||
|     line_number_color = 'inherit' | ||||
|  | ||||
|     #: line number background color | ||||
|     line_number_background_color = 'transparent' | ||||
|  | ||||
|     #: special line number font color | ||||
|     line_number_special_color = '#000000' | ||||
|  | ||||
|     #: special line number background color | ||||
|     line_number_special_background_color = '#ffffc0' | ||||
|  | ||||
|     #: Style definitions for individual token types. | ||||
|     styles = {} | ||||
|  | ||||
|     # Attribute for styles defined within Pygments. If set | ||||
|     # to True, the style is not shown in the style gallery | ||||
|     # on the website. This is intended for language-specific | ||||
|     # styles. | ||||
|     web_style_gallery_exclude = False | ||||
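To illustrate the definition syntax that ``StyleMeta`` parses, here is a hypothetical minimal style (the class name and colors are invented for the example):

    from pip._vendor.pygments.style import Style
    from pip._vendor.pygments.token import Comment, Keyword

    class MyStyle(Style):
        background_color = '#f8f8f8'
        styles = {
            Keyword: 'bold #005',       # 3-digit color expands to 000055
            Comment: 'italic #888',
        }

    info = MyStyle.style_for_token(Keyword)
    assert info['bold'] and info['color'] == '000055'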
lib/python3.11/site-packages/pip/_vendor/pygments/styles/__init__.py (new file, 97 lines)
| @ -0,0 +1,97 @@ | ||||
| """ | ||||
|     pygments.styles | ||||
|     ~~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Contains built-in styles. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| from pip._vendor.pygments.plugin import find_plugin_styles | ||||
| from pip._vendor.pygments.util import ClassNotFound | ||||
|  | ||||
|  | ||||
| #: Maps style names to 'submodule::classname'. | ||||
| STYLE_MAP = { | ||||
|     'default':  'default::DefaultStyle', | ||||
|     'emacs':    'emacs::EmacsStyle', | ||||
|     'friendly': 'friendly::FriendlyStyle', | ||||
|     'friendly_grayscale': 'friendly_grayscale::FriendlyGrayscaleStyle', | ||||
|     'colorful': 'colorful::ColorfulStyle', | ||||
|     'autumn':   'autumn::AutumnStyle', | ||||
|     'murphy':   'murphy::MurphyStyle', | ||||
|     'manni':    'manni::ManniStyle', | ||||
|     'material': 'material::MaterialStyle', | ||||
|     'monokai':  'monokai::MonokaiStyle', | ||||
|     'perldoc':  'perldoc::PerldocStyle', | ||||
|     'pastie':   'pastie::PastieStyle', | ||||
|     'borland':  'borland::BorlandStyle', | ||||
|     'trac':     'trac::TracStyle', | ||||
|     'native':   'native::NativeStyle', | ||||
|     'fruity':   'fruity::FruityStyle', | ||||
|     'bw':       'bw::BlackWhiteStyle', | ||||
|     'vim':      'vim::VimStyle', | ||||
|     'vs':       'vs::VisualStudioStyle', | ||||
|     'tango':    'tango::TangoStyle', | ||||
|     'rrt':      'rrt::RrtStyle', | ||||
|     'xcode':    'xcode::XcodeStyle', | ||||
|     'igor':     'igor::IgorStyle', | ||||
|     'paraiso-light': 'paraiso_light::ParaisoLightStyle', | ||||
|     'paraiso-dark': 'paraiso_dark::ParaisoDarkStyle', | ||||
|     'lovelace': 'lovelace::LovelaceStyle', | ||||
|     'algol':    'algol::AlgolStyle', | ||||
|     'algol_nu': 'algol_nu::Algol_NuStyle', | ||||
|     'arduino':  'arduino::ArduinoStyle', | ||||
|     'rainbow_dash': 'rainbow_dash::RainbowDashStyle', | ||||
|     'abap':     'abap::AbapStyle', | ||||
|     'solarized-dark': 'solarized::SolarizedDarkStyle', | ||||
|     'solarized-light': 'solarized::SolarizedLightStyle', | ||||
|     'sas':         'sas::SasStyle', | ||||
|     'staroffice' : 'staroffice::StarofficeStyle', | ||||
|     'stata':       'stata_light::StataLightStyle', | ||||
|     'stata-light': 'stata_light::StataLightStyle', | ||||
|     'stata-dark':  'stata_dark::StataDarkStyle', | ||||
|     'inkpot':      'inkpot::InkPotStyle', | ||||
|     'zenburn': 'zenburn::ZenburnStyle', | ||||
|     'gruvbox-dark': 'gruvbox::GruvboxDarkStyle', | ||||
|     'gruvbox-light': 'gruvbox::GruvboxLightStyle', | ||||
|     'dracula': 'dracula::DraculaStyle', | ||||
|     'one-dark': 'onedark::OneDarkStyle', | ||||
|     'lilypond' : 'lilypond::LilyPondStyle', | ||||
|     'nord': 'nord::NordStyle', | ||||
|     'nord-darker': 'nord::NordDarkerStyle', | ||||
|     'github-dark': 'gh_dark::GhDarkStyle' | ||||
| } | ||||
|  | ||||
|  | ||||
| def get_style_by_name(name): | ||||
|     if name in STYLE_MAP: | ||||
|         mod, cls = STYLE_MAP[name].split('::') | ||||
|         builtin = "yes" | ||||
|     else: | ||||
|         for found_name, style in find_plugin_styles(): | ||||
|             if name == found_name: | ||||
|                 return style | ||||
|         # perhaps it got dropped into our styles package | ||||
|         builtin = "" | ||||
|         mod = name | ||||
|         cls = name.title() + "Style" | ||||
|  | ||||
|     try: | ||||
|         mod = __import__('pygments.styles.' + mod, None, None, [cls]) | ||||
|     except ImportError: | ||||
|         raise ClassNotFound("Could not find style module %r" % mod + | ||||
|                          (builtin and ", though it should be builtin") + ".") | ||||
|     try: | ||||
|         return getattr(mod, cls) | ||||
|     except AttributeError: | ||||
|         raise ClassNotFound("Could not find style class %r in style module." % cls) | ||||
|  | ||||
|  | ||||
| def get_all_styles(): | ||||
|     """Return a generator for all styles by name, | ||||
|     both builtin and plugin.""" | ||||
|     yield from STYLE_MAP | ||||
|     for name, _ in find_plugin_styles(): | ||||
|         yield name | ||||
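Usage sketch for the two lookup helpers (output depends on which plugin styles are installed):

    from pip._vendor.pygments.styles import get_all_styles, get_style_by_name

    print(sorted(get_all_styles())[:3])   # e.g. ['abap', 'algol', 'algol_nu']
    monokai = get_style_by_name('monokai')
    print(monokai.background_color)       # '#272822' in current releases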
lib/python3.11/site-packages/pip/_vendor/pygments/token.py (new file, 213 lines)
| @ -0,0 +1,213 @@ | ||||
| """ | ||||
|     pygments.token | ||||
|     ~~~~~~~~~~~~~~ | ||||
|  | ||||
|     Basic token types and the standard tokens. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
|  | ||||
| class _TokenType(tuple): | ||||
|     parent = None | ||||
|  | ||||
|     def split(self): | ||||
|         buf = [] | ||||
|         node = self | ||||
|         while node is not None: | ||||
|             buf.append(node) | ||||
|             node = node.parent | ||||
|         buf.reverse() | ||||
|         return buf | ||||
|  | ||||
|     def __init__(self, *args): | ||||
|         # no need to call super.__init__ | ||||
|         self.subtypes = set() | ||||
|  | ||||
|     def __contains__(self, val): | ||||
|         return self is val or ( | ||||
|             type(val) is self.__class__ and | ||||
|             val[:len(self)] == self | ||||
|         ) | ||||
|  | ||||
|     def __getattr__(self, val): | ||||
|         if not val or not val[0].isupper(): | ||||
|             return tuple.__getattribute__(self, val) | ||||
|         new = _TokenType(self + (val,)) | ||||
|         setattr(self, val, new) | ||||
|         self.subtypes.add(new) | ||||
|         new.parent = self | ||||
|         return new | ||||
|  | ||||
|     def __repr__(self): | ||||
|         return 'Token' + (self and '.' or '') + '.'.join(self) | ||||
|  | ||||
|     def __copy__(self): | ||||
|         # These instances are supposed to be singletons | ||||
|         return self | ||||
|  | ||||
|     def __deepcopy__(self, memo): | ||||
|         # These instances are supposed to be singletons | ||||
|         return self | ||||
|  | ||||
|  | ||||
| Token = _TokenType() | ||||
|  | ||||
| # Special token types | ||||
| Text = Token.Text | ||||
| Whitespace = Text.Whitespace | ||||
| Escape = Token.Escape | ||||
| Error = Token.Error | ||||
| # Text that doesn't belong to this lexer (e.g. HTML in PHP) | ||||
| Other = Token.Other | ||||
|  | ||||
| # Common token types for source code | ||||
| Keyword = Token.Keyword | ||||
| Name = Token.Name | ||||
| Literal = Token.Literal | ||||
| String = Literal.String | ||||
| Number = Literal.Number | ||||
| Punctuation = Token.Punctuation | ||||
| Operator = Token.Operator | ||||
| Comment = Token.Comment | ||||
|  | ||||
| # Generic types for non-source code | ||||
| Generic = Token.Generic | ||||
|  | ||||
| # String and some others are not direct children of Token; | ||||
| # alias them: | ||||
| Token.Token = Token | ||||
| Token.String = String | ||||
| Token.Number = Number | ||||
|  | ||||
|  | ||||
| def is_token_subtype(ttype, other): | ||||
|     """ | ||||
|     Return True if ``ttype`` is a subtype of ``other``. | ||||
|  | ||||
|     Exists for backwards compatibility; use ``ttype in other`` instead. | ||||
|     """ | ||||
|     return ttype in other | ||||
|  | ||||
|  | ||||
| def string_to_tokentype(s): | ||||
|     """ | ||||
|     Convert a string into a token type:: | ||||
|  | ||||
|         >>> string_to_tokentype('String.Double') | ||||
|         Token.Literal.String.Double | ||||
|         >>> string_to_tokentype('Token.Literal.Number') | ||||
|         Token.Literal.Number | ||||
|         >>> string_to_tokentype('') | ||||
|         Token | ||||
|  | ||||
|     Tokens that are already tokens are returned unchanged: | ||||
|  | ||||
|         >>> string_to_tokentype(String) | ||||
|         Token.Literal.String | ||||
|     """ | ||||
|     if isinstance(s, _TokenType): | ||||
|         return s | ||||
|     if not s: | ||||
|         return Token | ||||
|     node = Token | ||||
|     for item in s.split('.'): | ||||
|         node = getattr(node, item) | ||||
|     return node | ||||
|  | ||||
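The singleton hierarchy supports containment tests and string lookup; a short sketch:

    from pip._vendor.pygments.token import String, Token, string_to_tokentype

    assert String.Double in String        # subtype containment
    assert string_to_tokentype('Literal.String.Double') is String.Double
    print(Token.Name.Function)            # Token.Name.Function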
|  | ||||
| # Map standard token types to short names, used in CSS class naming. | ||||
| # If you add a new item, please be sure to run this file to perform | ||||
| # a consistency check for duplicate values. | ||||
| STANDARD_TYPES = { | ||||
|     Token:                         '', | ||||
|  | ||||
|     Text:                          '', | ||||
|     Whitespace:                    'w', | ||||
|     Escape:                        'esc', | ||||
|     Error:                         'err', | ||||
|     Other:                         'x', | ||||
|  | ||||
|     Keyword:                       'k', | ||||
|     Keyword.Constant:              'kc', | ||||
|     Keyword.Declaration:           'kd', | ||||
|     Keyword.Namespace:             'kn', | ||||
|     Keyword.Pseudo:                'kp', | ||||
|     Keyword.Reserved:              'kr', | ||||
|     Keyword.Type:                  'kt', | ||||
|  | ||||
|     Name:                          'n', | ||||
|     Name.Attribute:                'na', | ||||
|     Name.Builtin:                  'nb', | ||||
|     Name.Builtin.Pseudo:           'bp', | ||||
|     Name.Class:                    'nc', | ||||
|     Name.Constant:                 'no', | ||||
|     Name.Decorator:                'nd', | ||||
|     Name.Entity:                   'ni', | ||||
|     Name.Exception:                'ne', | ||||
|     Name.Function:                 'nf', | ||||
|     Name.Function.Magic:           'fm', | ||||
|     Name.Property:                 'py', | ||||
|     Name.Label:                    'nl', | ||||
|     Name.Namespace:                'nn', | ||||
|     Name.Other:                    'nx', | ||||
|     Name.Tag:                      'nt', | ||||
|     Name.Variable:                 'nv', | ||||
|     Name.Variable.Class:           'vc', | ||||
|     Name.Variable.Global:          'vg', | ||||
|     Name.Variable.Instance:        'vi', | ||||
|     Name.Variable.Magic:           'vm', | ||||
|  | ||||
|     Literal:                       'l', | ||||
|     Literal.Date:                  'ld', | ||||
|  | ||||
|     String:                        's', | ||||
|     String.Affix:                  'sa', | ||||
|     String.Backtick:               'sb', | ||||
|     String.Char:                   'sc', | ||||
|     String.Delimiter:              'dl', | ||||
|     String.Doc:                    'sd', | ||||
|     String.Double:                 's2', | ||||
|     String.Escape:                 'se', | ||||
|     String.Heredoc:                'sh', | ||||
|     String.Interpol:               'si', | ||||
|     String.Other:                  'sx', | ||||
|     String.Regex:                  'sr', | ||||
|     String.Single:                 's1', | ||||
|     String.Symbol:                 'ss', | ||||
|  | ||||
|     Number:                        'm', | ||||
|     Number.Bin:                    'mb', | ||||
|     Number.Float:                  'mf', | ||||
|     Number.Hex:                    'mh', | ||||
|     Number.Integer:                'mi', | ||||
|     Number.Integer.Long:           'il', | ||||
|     Number.Oct:                    'mo', | ||||
|  | ||||
|     Operator:                      'o', | ||||
|     Operator.Word:                 'ow', | ||||
|  | ||||
|     Punctuation:                   'p', | ||||
|     Punctuation.Marker:            'pm', | ||||
|  | ||||
|     Comment:                       'c', | ||||
|     Comment.Hashbang:              'ch', | ||||
|     Comment.Multiline:             'cm', | ||||
|     Comment.Preproc:               'cp', | ||||
|     Comment.PreprocFile:           'cpf', | ||||
|     Comment.Single:                'c1', | ||||
|     Comment.Special:               'cs', | ||||
|  | ||||
|     Generic:                       'g', | ||||
|     Generic.Deleted:               'gd', | ||||
|     Generic.Emph:                  'ge', | ||||
|     Generic.Error:                 'gr', | ||||
|     Generic.Heading:               'gh', | ||||
|     Generic.Inserted:              'gi', | ||||
|     Generic.Output:                'go', | ||||
|     Generic.Prompt:                'gp', | ||||
|     Generic.Strong:                'gs', | ||||
|     Generic.Subheading:            'gu', | ||||
|     Generic.Traceback:             'gt', | ||||
| } | ||||
lib/python3.11/site-packages/pip/_vendor/pygments/unistring.py (new file, 153 lines; diff suppressed because one or more lines are too long)
lib/python3.11/site-packages/pip/_vendor/pygments/util.py (new file, 308 lines)
| @ -0,0 +1,308 @@ | ||||
| """ | ||||
|     pygments.util | ||||
|     ~~~~~~~~~~~~~ | ||||
|  | ||||
|     Utility functions. | ||||
|  | ||||
|     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. | ||||
|     :license: BSD, see LICENSE for details. | ||||
| """ | ||||
|  | ||||
| import re | ||||
| from io import TextIOWrapper | ||||
|  | ||||
|  | ||||
| split_path_re = re.compile(r'[/\\ ]') | ||||
| doctype_lookup_re = re.compile(r''' | ||||
|     <!DOCTYPE\s+( | ||||
|      [a-zA-Z_][a-zA-Z0-9]* | ||||
|      (?: \s+      # optional in HTML5 | ||||
|      [a-zA-Z_][a-zA-Z0-9]*\s+ | ||||
|      "[^"]*")? | ||||
|      ) | ||||
|      [^>]*> | ||||
| ''', re.DOTALL | re.MULTILINE | re.VERBOSE) | ||||
| tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>', | ||||
|                     re.IGNORECASE | re.DOTALL | re.MULTILINE) | ||||
| xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I) | ||||
|  | ||||
|  | ||||
| class ClassNotFound(ValueError): | ||||
|     """Raised if one of the lookup functions didn't find a matching class.""" | ||||
|  | ||||
|  | ||||
| class OptionError(Exception): | ||||
|     pass | ||||
|  | ||||
|  | ||||
| def get_choice_opt(options, optname, allowed, default=None, normcase=False): | ||||
|     string = options.get(optname, default) | ||||
|     if normcase: | ||||
|         string = string.lower() | ||||
|     if string not in allowed: | ||||
|         raise OptionError('Value for option %s must be one of %s' % | ||||
|                           (optname, ', '.join(map(str, allowed)))) | ||||
|     return string | ||||
|  | ||||
|  | ||||
| def get_bool_opt(options, optname, default=None): | ||||
|     string = options.get(optname, default) | ||||
|     if isinstance(string, bool): | ||||
|         return string | ||||
|     elif isinstance(string, int): | ||||
|         return bool(string) | ||||
|     elif not isinstance(string, str): | ||||
|         raise OptionError('Invalid type %r for option %s; use ' | ||||
|                           '1/0, yes/no, true/false, on/off' % ( | ||||
|                               string, optname)) | ||||
|     elif string.lower() in ('1', 'yes', 'true', 'on'): | ||||
|         return True | ||||
|     elif string.lower() in ('0', 'no', 'false', 'off'): | ||||
|         return False | ||||
|     else: | ||||
|         raise OptionError('Invalid value %r for option %s; use ' | ||||
|                           '1/0, yes/no, true/false, on/off' % ( | ||||
|                               string, optname)) | ||||
|  | ||||
|  | ||||
| def get_int_opt(options, optname, default=None): | ||||
|     string = options.get(optname, default) | ||||
|     try: | ||||
|         return int(string) | ||||
|     except TypeError: | ||||
|         raise OptionError('Invalid type %r for option %s; you ' | ||||
|                           'must give an integer value' % ( | ||||
|                               string, optname)) | ||||
|     except ValueError: | ||||
|         raise OptionError('Invalid value %r for option %s; you ' | ||||
|                           'must give an integer value' % ( | ||||
|                               string, optname)) | ||||
|  | ||||
|  | ||||
| def get_list_opt(options, optname, default=None): | ||||
|     val = options.get(optname, default) | ||||
|     if isinstance(val, str): | ||||
|         return val.split() | ||||
|     elif isinstance(val, (list, tuple)): | ||||
|         return list(val) | ||||
|     else: | ||||
|         raise OptionError('Invalid type %r for option %s; you ' | ||||
|                           'must give a list value' % ( | ||||
|                               val, optname)) | ||||
|  | ||||
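These getters normalize the loosely typed ``options`` dicts that lexers and formatters receive, e.g.:

    from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt

    opts = {'linenos': 'yes', 'tabsize': '8', 'extra': 'a b c'}
    assert get_bool_opt(opts, 'linenos', False) is True
    assert get_int_opt(opts, 'tabsize', 4) == 8
    assert get_list_opt(opts, 'extra', []) == ['a', 'b', 'c']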
|  | ||||
| def docstring_headline(obj): | ||||
|     if not obj.__doc__: | ||||
|         return '' | ||||
|     res = [] | ||||
|     for line in obj.__doc__.strip().splitlines(): | ||||
|         if line.strip(): | ||||
|             res.append(" " + line.strip()) | ||||
|         else: | ||||
|             break | ||||
|     return ''.join(res).lstrip() | ||||
|  | ||||
|  | ||||
| def make_analysator(f): | ||||
|     """Return a static text analyser function that returns float values.""" | ||||
|     def text_analyse(text): | ||||
|         try: | ||||
|             rv = f(text) | ||||
|         except Exception: | ||||
|             return 0.0 | ||||
|         if not rv: | ||||
|             return 0.0 | ||||
|         try: | ||||
|             return min(1.0, max(0.0, float(rv))) | ||||
|         except (ValueError, TypeError): | ||||
|             return 0.0 | ||||
|     text_analyse.__doc__ = f.__doc__ | ||||
|     return staticmethod(text_analyse) | ||||
|  | ||||
|  | ||||
| def shebang_matches(text, regex): | ||||
|     r"""Check if the given regular expression matches the last part of the | ||||
|     shebang if one exists. | ||||
|  | ||||
|         >>> from pygments.util import shebang_matches | ||||
|         >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?') | ||||
|         True | ||||
|         >>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?') | ||||
|         True | ||||
|         >>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?') | ||||
|         False | ||||
|         >>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?') | ||||
|         False | ||||
|         >>> shebang_matches('#!/usr/bin/startsomethingwith python', | ||||
|         ...                 r'python(2\.\d)?') | ||||
|         True | ||||
|  | ||||
|     It also checks for common windows executable file extensions:: | ||||
|  | ||||
|         >>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?') | ||||
|         True | ||||
|  | ||||
|     Shebang parameters (e.g. ``'-f'`` or ``'--foo'``) are ignored, so | ||||
|     ``'perl'`` matches the same as ``'perl -e'``. | ||||
|  | ||||
|     Note that this method automatically anchors the match against the | ||||
|     whole string (i.e. the regular expression is wrapped in ``'^...$'``). | ||||
|     """ | ||||
|     index = text.find('\n') | ||||
|     if index >= 0: | ||||
|         first_line = text[:index].lower() | ||||
|     else: | ||||
|         first_line = text.lower() | ||||
|     if first_line.startswith('#!'): | ||||
|         try: | ||||
|             found = [x for x in split_path_re.split(first_line[2:].strip()) | ||||
|                      if x and not x.startswith('-')][-1] | ||||
|         except IndexError: | ||||
|             return False | ||||
|         regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE) | ||||
|         if regex.search(found) is not None: | ||||
|             return True | ||||
|     return False | ||||
|  | ||||
|  | ||||
| def doctype_matches(text, regex): | ||||
|     """Check if the doctype matches a regular expression (if present). | ||||
|  | ||||
|     Note that this method only checks the first part of a DOCTYPE, | ||||
|     e.g. 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'. | ||||
|     """ | ||||
|     m = doctype_lookup_re.search(text) | ||||
|     if m is None: | ||||
|         return False | ||||
|     doctype = m.group(1) | ||||
|     return re.compile(regex, re.I).match(doctype.strip()) is not None | ||||
|  | ||||
|  | ||||
| def html_doctype_matches(text): | ||||
|     """Check if the file looks like it has a html doctype.""" | ||||
|     return doctype_matches(text, r'html') | ||||
|  | ||||
|  | ||||
| _looks_like_xml_cache = {} | ||||
|  | ||||
|  | ||||
| def looks_like_xml(text): | ||||
|     """Check if a doctype exists or if we have some tags.""" | ||||
|     if xml_decl_re.match(text): | ||||
|         return True | ||||
|     key = hash(text) | ||||
|     try: | ||||
|         return _looks_like_xml_cache[key] | ||||
|     except KeyError: | ||||
|         m = doctype_lookup_re.search(text) | ||||
|         if m is not None: | ||||
|             return True | ||||
|         rv = tag_re.search(text[:1000]) is not None | ||||
|         _looks_like_xml_cache[key] = rv | ||||
|         return rv | ||||
|  | ||||
|  | ||||
| def surrogatepair(c): | ||||
|     """Given a unicode character code with length greater than 16 bits, | ||||
|     return the two 16 bit surrogate pair. | ||||
|     """ | ||||
|     # From example D28 of: | ||||
|     # http://www.unicode.org/book/ch03.pdf | ||||
|     return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff))) | ||||
|  | ||||
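A quick check of the arithmetic with U+1F600 (the "grinning face" emoji):

    from pip._vendor.pygments.util import surrogatepair

    hi, lo = surrogatepair(0x1F600)
    assert (hi, lo) == (0xD83D, 0xDE00)   # its UTF-16 surrogate pair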
|  | ||||
| def format_lines(var_name, seq, raw=False, indent_level=0): | ||||
|     """Formats a sequence of strings for output.""" | ||||
|     lines = [] | ||||
|     base_indent = ' ' * indent_level * 4 | ||||
|     inner_indent = ' ' * (indent_level + 1) * 4 | ||||
|     lines.append(base_indent + var_name + ' = (') | ||||
|     if raw: | ||||
|         # These should be preformatted reprs of, say, tuples. | ||||
|         for i in seq: | ||||
|             lines.append(inner_indent + i + ',') | ||||
|     else: | ||||
|         for i in seq: | ||||
|             # Force use of single quotes | ||||
|             r = repr(i + '"') | ||||
|             lines.append(inner_indent + r[:-2] + r[-1] + ',') | ||||
|     lines.append(base_indent + ')') | ||||
|     return '\n'.join(lines) | ||||
|  | ||||
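``format_lines`` is used when generating Python source such as lexer mapping files; a sketch of its output:

    from pip._vendor.pygments.util import format_lines

    print(format_lines('ALIASES', ['python', 'py3']))
    # ALIASES = (
    #     'python',
    #     'py3',
    # )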
|  | ||||
| def duplicates_removed(it, already_seen=()): | ||||
|     """ | ||||
|     Returns a list with duplicates removed from the iterable `it`. | ||||
|  | ||||
|     Order is preserved. | ||||
|     """ | ||||
|     lst = [] | ||||
|     seen = set() | ||||
|     for i in it: | ||||
|         if i in seen or i in already_seen: | ||||
|             continue | ||||
|         lst.append(i) | ||||
|         seen.add(i) | ||||
|     return lst | ||||
|  | ||||
|  | ||||
| class Future: | ||||
|     """Generic class to defer some work. | ||||
|  | ||||
|     Handled specially in RegexLexerMeta, to support regex string construction at | ||||
|     first use. | ||||
|     """ | ||||
|     def get(self): | ||||
|         raise NotImplementedError | ||||
|  | ||||
|  | ||||
| def guess_decode(text): | ||||
|     """Decode *text* with guessed encoding. | ||||
|  | ||||
|     First try UTF-8; this should fail for non-UTF-8 encodings. | ||||
|     Then try the preferred locale encoding. | ||||
|     Fall back to latin-1, which always works. | ||||
|     """ | ||||
|     try: | ||||
|         text = text.decode('utf-8') | ||||
|         return text, 'utf-8' | ||||
|     except UnicodeDecodeError: | ||||
|         try: | ||||
|             import locale | ||||
|             prefencoding = locale.getpreferredencoding() | ||||
|             text = text.decode(prefencoding) | ||||
|             return text, prefencoding | ||||
|         except (UnicodeDecodeError, LookupError): | ||||
|             text = text.decode('latin1') | ||||
|             return text, 'latin1' | ||||
|  | ||||
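A sketch of the fallback chain; the second result depends on the machine's preferred locale encoding:

    from pip._vendor.pygments.util import guess_decode

    assert guess_decode(b'caf\xc3\xa9') == ('café', 'utf-8')  # valid UTF-8
    text, enc = guess_decode(b'caf\xe9')                      # not UTF-8
    print(enc)  # the locale encoding if it could decode, else 'latin1'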
|  | ||||
| def guess_decode_from_terminal(text, term): | ||||
|     """Decode *text* coming from terminal *term*. | ||||
|  | ||||
|     First try the terminal encoding, if given. | ||||
|     Then try UTF-8.  Then try the preferred locale encoding. | ||||
|     Fall back to latin-1, which always works. | ||||
|     """ | ||||
|     if getattr(term, 'encoding', None): | ||||
|         try: | ||||
|             text = text.decode(term.encoding) | ||||
|         except UnicodeDecodeError: | ||||
|             pass | ||||
|         else: | ||||
|             return text, term.encoding | ||||
|     return guess_decode(text) | ||||
|  | ||||
|  | ||||
| def terminal_encoding(term): | ||||
|     """Return our best guess of encoding for the given *term*.""" | ||||
|     if getattr(term, 'encoding', None): | ||||
|         return term.encoding | ||||
|     import locale | ||||
|     return locale.getpreferredencoding() | ||||
|  | ||||
|  | ||||
| class UnclosingTextIOWrapper(TextIOWrapper): | ||||
|     # Don't close underlying buffer on destruction. | ||||
|     def close(self): | ||||
|         self.flush() | ||||