- """
- pygments.lexers.configs
- ~~~~~~~~~~~~~~~~~~~~~~~
- Lexers for configuration file formats.
- :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
- """
- import re
- from pygments.lexer import ExtendedRegexLexer, RegexLexer, default, words, \
- bygroups, include, using, line_re
- from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
- Number, Punctuation, Whitespace, Literal, Error, Generic
- from pygments.lexers.shell import BashLexer
- from pygments.lexers.data import JsonLexer
- __all__ = ['IniLexer', 'SystemdLexer', 'DesktopLexer', 'RegeditLexer', 'PropertiesLexer',
- 'KconfigLexer', 'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer',
- 'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer',
- 'TerraformLexer', 'TermcapLexer', 'TerminfoLexer',
- 'PkgConfigLexer', 'PacmanConfLexer', 'AugeasLexer', 'TOMLLexer',
- 'NestedTextLexer', 'SingularityLexer', 'UnixConfigLexer']


class IniLexer(RegexLexer):
    """
    Lexer for configuration files in INI style.
    """

    name = 'INI'
    aliases = ['ini', 'cfg', 'dosini']
    filenames = [
        '*.ini', '*.cfg', '*.inf', '.editorconfig',
    ]
    mimetypes = ['text/x-ini', 'text/inf']

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'[;#].*', Comment.Single),
            (r'(\[.*?\])([ \t]*)$', bygroups(Keyword, Whitespace)),
            (r'(.*?)([ \t]*)([=:])([ \t]*)([^;#\n]*)(\\)(\s+)',
             bygroups(Name.Attribute, Whitespace, Operator, Whitespace, String,
                      Text, Whitespace),
             "value"),
            (r'(.*?)([ \t]*)([=:])([ \t]*)([^ ;#\n]*(?: +[^ ;#\n]+)*)',
             bygroups(Name.Attribute, Whitespace, Operator, Whitespace, String)),
            # standalone option, supported by some INI parsers
            (r'(.+?)$', Name.Attribute),
        ],
        'value': [  # line continuation
            (r'\s+', Whitespace),
            (r'(\s*)(.*)(\\)([ \t]*)',
             bygroups(Whitespace, String, Text, Whitespace)),
            (r'.*$', String, "#pop"),
        ],
    }

    def analyse_text(text):
        npos = text.find('\n')
        if npos < 3:
            return False
        if text[0] == '[' and text[npos-1] == ']':
            return 0.8
        return False
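

# Illustrative usage sketch (not part of the upstream module; kept in a
# comment so importing this file stays side-effect free):
#
#     from pygments.token import Keyword, Name
#     toks = list(IniLexer().get_tokens('[core]\nkey = value\n'))
#     assert (Keyword, '[core]') in toks        # section header
#     assert (Name.Attribute, 'key') in toks    # option name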


class DesktopLexer(RegexLexer):
    """
    Lexer for .desktop files.

    .. versionadded:: 2.16
    """

    name = 'Desktop file'
    url = "https://specifications.freedesktop.org/desktop-entry-spec/desktop-entry-spec-latest.html"
    aliases = ['desktop']
    filenames = ['*.desktop']

    tokens = {
        'root': [
            (r'^[ \t]*\n', Whitespace),
            (r'^(#.*)(\n)', bygroups(Comment.Single, Whitespace)),
            (r'(\[[^\]\n]+\])(\n)', bygroups(Keyword, Whitespace)),
            (r'([-A-Za-z0-9]+)(\[[^\] \t=]+\])?([ \t]*)(=)([ \t]*)([^\n]*)([ \t\n]*\n)',
             bygroups(Name.Attribute, Name.Namespace, Whitespace, Operator,
                      Whitespace, String, Whitespace)),
        ],
    }

    def analyse_text(text):
        if text.startswith("[Desktop Entry]"):
            return 1.0
        if re.search(r"^\[Desktop Entry\][ \t]*$", text[:500], re.MULTILINE) is not None:
            return 0.9
        return 0.0
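

# Guessing sketch (illustrative): analyse_text is what lets guess_lexer()
# rank this lexer; a leading "[Desktop Entry]" group is treated as certain,
# one appearing later in the first 500 characters as merely very likely.
#
#     DesktopLexer.analyse_text('[Desktop Entry]\nType=Application\n')  # 1.0
#     DesktopLexer.analyse_text('# comment\n[Desktop Entry]\n')         # 0.9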


class SystemdLexer(RegexLexer):
    """
    Lexer for systemd unit files.

    .. versionadded:: 2.16
    """

    name = 'Systemd'
    url = "https://www.freedesktop.org/software/systemd/man/systemd.syntax.html"
    aliases = ['systemd']
    filenames = [
        '*.service', '*.socket', '*.device', '*.mount', '*.automount',
        '*.swap', '*.target', '*.path', '*.timer', '*.slice', '*.scope',
    ]

    tokens = {
        'root': [
            (r'^[ \t]*\n', Whitespace),
            (r'^([;#].*)(\n)', bygroups(Comment.Single, Whitespace)),
            (r'(\[[^\]\n]+\])(\n)', bygroups(Keyword, Whitespace)),
            (r'([^=]+)([ \t]*)(=)([ \t]*)([^\n]*)(\\)(\n)',
             bygroups(Name.Attribute, Whitespace, Operator, Whitespace, String,
                      Text, Whitespace),
             "value"),
            (r'([^=]+)([ \t]*)(=)([ \t]*)([^\n]*)(\n)',
             bygroups(Name.Attribute, Whitespace, Operator, Whitespace, String,
                      Whitespace)),
        ],
        'value': [
            # line continuation
            (r'^([;#].*)(\n)', bygroups(Comment.Single, Whitespace)),
            (r'([ \t]*)([^\n]*)(\\)(\n)',
             bygroups(Whitespace, String, Text, Whitespace)),
            (r'([ \t]*)([^\n]*)(\n)',
             bygroups(Whitespace, String, Whitespace), "#pop"),
        ],
    }

    def analyse_text(text):
        if text.startswith("[Unit]"):
            return 1.0
        if re.search(r"^\[Unit\][ \t]*$", text[:500], re.MULTILINE) is not None:
            return 0.9
        return 0.0


class RegeditLexer(RegexLexer):
    """
    Lexer for Windows Registry files produced by regedit.

    .. versionadded:: 1.6
    """

    name = 'reg'
    url = 'http://en.wikipedia.org/wiki/Windows_Registry#.REG_files'
    aliases = ['registry']
    filenames = ['*.reg']
    mimetypes = ['text/x-windows-registry']

    tokens = {
        'root': [
            (r'Windows Registry Editor.*', Text),
            (r'\s+', Whitespace),
            (r'[;#].*', Comment.Single),
            (r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$',
             bygroups(Keyword, Operator, Name.Builtin, Keyword)),
            # String keys, which obey somewhat normal escaping
            (r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)',
             bygroups(Name.Attribute, Whitespace, Operator, Whitespace),
             'value'),
            # Bare keys (includes @)
            (r'(.*?)([ \t]*)(=)([ \t]*)',
             bygroups(Name.Attribute, Whitespace, Operator, Whitespace),
             'value'),
        ],
        'value': [
            (r'-', Operator, '#pop'),  # delete value
            (r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)',
             bygroups(Name.Variable, Punctuation, Number), '#pop'),
            # As far as I know, .reg files do not support line continuation.
            (r'.+', String, '#pop'),
            default('#pop'),
        ]
    }

    def analyse_text(text):
        return text.startswith('Windows Registry Editor')


class PropertiesLexer(RegexLexer):
    """
    Lexer for configuration files in Java's properties format.

    Note: trailing whitespace counts as part of the value as per spec

    .. versionadded:: 1.4
    """

    name = 'Properties'
    aliases = ['properties', 'jproperties']
    filenames = ['*.properties']
    mimetypes = ['text/x-java-properties']

    tokens = {
        'root': [
            # comments
            (r'[!#].*|/{2}.*', Comment.Single),
            # ending a comment or whitespace-only line
            (r'\n', Whitespace),
            # eat whitespace at the beginning of a line
            (r'^[^\S\n]+', Whitespace),
            # start lexing a key
            default('key'),
        ],
        'key': [
            # non-escaped key characters
            (r'[^\\:=\s]+', Name.Attribute),
            # escapes
            include('escapes'),
            # separator is the first non-escaped whitespace or colon or '=' on the line;
            # if it's whitespace, = and : are gobbled after it
            (r'([^\S\n]*)([:=])([^\S\n]*)',
             bygroups(Whitespace, Operator, Whitespace),
             ('#pop', 'value')),
            (r'[^\S\n]+', Whitespace, ('#pop', 'value')),
            # maybe we got no value after all
            (r'\n', Whitespace, '#pop'),
        ],
        'value': [
            # non-escaped value characters
            (r'[^\\\n]+', String),
            # escapes
            include('escapes'),
            # end the value on an unescaped newline
            (r'\n', Whitespace, '#pop'),
        ],
        'escapes': [
            # line continuations; these gobble whitespace at the beginning of the next line
            (r'(\\\n)([^\S\n]*)', bygroups(String.Escape, Whitespace)),
            # other escapes
            (r'\\(.|\n)', String.Escape),
        ],
    }
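

# Continuation sketch (illustrative): a backslash at end of line joins
# logical lines, and the 'escapes' state swallows the indentation of the
# continuation line as Whitespace.
#
#     from pygments.token import String
#     text = 'key = first \\\n    second\n'
#     toks = list(PropertiesLexer().get_tokens(text))
#     assert (String.Escape, '\\\n') in toks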


def _rx_indent(level):
    # Kconfig *always* interprets a tab as 8 spaces, so this is the default.
    # Edit this if you are in an environment where KconfigLexer gets expanded
    # input (tabs expanded to spaces) and the expansion tab width is != 8,
    # e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width).
    # Value range here is 2 <= {tab_width} <= 8.
    tab_width = 8
    # Regex matching a given indentation {level}, assuming that indentation is
    # a multiple of {tab_width}. In other cases there might be problems.
    if tab_width == 2:
        space_repeat = '+'
    else:
        space_repeat = '{1,%d}' % (tab_width - 1)
    if level == 1:
        level_repeat = ''
    else:
        level_repeat = '{%s}' % level
    return r'(?:\t| %s\t| {%s})%s.*\n' % (space_repeat, tab_width, level_repeat)
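

# For example, with the default tab_width of 8 the helper expands to
# (values derived mechanically from the code above):
#
#     _rx_indent(1) == r'(?:\t| {1,7}\t| {8}).*\n'
#     _rx_indent(2) == r'(?:\t| {1,7}\t| {8}){2}.*\n'
#
# i.e. one indentation unit is a tab, up to seven spaces followed by a tab,
# or exactly eight spaces.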


class KconfigLexer(RegexLexer):
    """
    For Linux-style Kconfig files.

    .. versionadded:: 1.6
    """

    name = 'Kconfig'
    aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config']
    # Adjust this if new kconfig file names appear in your environment
    filenames = ['Kconfig*', '*Config.in*', 'external.in*',
                 'standard-modules.in']
    mimetypes = ['text/x-kconfig']
    # No re.MULTILINE, indentation-aware help text needs line-by-line handling
    flags = 0

    def call_indent(level):
        # If indentation >= {level} is detected, enter state 'indent{level}'
        return (_rx_indent(level), String.Doc, 'indent%s' % level)

    def do_indent(level):
        # Print paragraphs of indentation level >= {level} as String.Doc,
        # ignoring blank lines. Then return to 'root' state.
        return [
            (_rx_indent(level), String.Doc),
            (r'\s*\n', Text),
            default('#pop:2')
        ]

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'#.*?\n', Comment.Single),
            (words((
                'mainmenu', 'config', 'menuconfig', 'choice', 'endchoice',
                'comment', 'menu', 'endmenu', 'visible if', 'if', 'endif',
                'source', 'prompt', 'select', 'depends on', 'default',
                'range', 'option'), suffix=r'\b'),
             Keyword),
            (r'(---help---|help)[\t ]*\n', Keyword, 'help'),
            (r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b',
             Name.Builtin),
            (r'[!=&|]', Operator),
            (r'[()]', Punctuation),
            (r'[0-9]+', Number.Integer),
            (r"'(''|[^'])*'", String.Single),
            (r'"(""|[^"])*"', String.Double),
            (r'\S+', Text),
        ],
        # Help text is indented, multi-line and ends when a lower indentation
        # level is detected.
        'help': [
            # Skip blank lines after help token, if any
            (r'\s*\n', Text),
            # Determine the first help line's indentation level heuristically(!).
            # Attention: this is not perfect, but works for 99% of "normal"
            # indentation schemes up to a max. indentation level of 7.
            call_indent(7),
            call_indent(6),
            call_indent(5),
            call_indent(4),
            call_indent(3),
            call_indent(2),
            call_indent(1),
            default('#pop'),  # for incomplete help sections without text
        ],
        # Handle text for indentation levels 7 to 1
        'indent7': do_indent(7),
        'indent6': do_indent(6),
        'indent5': do_indent(5),
        'indent4': do_indent(4),
        'indent3': do_indent(3),
        'indent2': do_indent(2),
        'indent1': do_indent(1),
    }
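

# Help-text sketch (illustrative): the 'help' state probes indentation
# levels 7 down to 1 and locks onto the first one that matches, so both
# indented lines below come out as String.Doc.
#
#     kc = 'config FOO\n\tbool "foo"\n\thelp\n\t  Two lines\n\t  of help.\n'
#     toks = list(KconfigLexer().get_tokens(kc))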


class Cfengine3Lexer(RegexLexer):
    """
    Lexer for CFEngine3 policy files.

    .. versionadded:: 1.5
    """

    name = 'CFEngine3'
    url = 'http://cfengine.org'
    aliases = ['cfengine3', 'cf3']
    filenames = ['*.cf']
    mimetypes = []

    tokens = {
        'root': [
            (r'#.*?\n', Comment),
            (r'(body)(\s+)(\S+)(\s+)(control)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword)),
            (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Function,
                      Punctuation),
             'arglist'),
            (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Function)),
            (r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)',
             bygroups(Punctuation, Name.Variable, Punctuation,
                      Whitespace, Keyword.Type, Whitespace, Operator,
                      Whitespace)),
            (r'(\S+)(\s*)(=>)(\s*)',
             bygroups(Keyword.Reserved, Whitespace, Operator, Text)),
            (r'"', String, 'string'),
            (r'(\w+)(\()', bygroups(Name.Function, Punctuation)),
            (r'([\w.!&|()]+)(::)', bygroups(Name.Class, Punctuation)),
            (r'(\w+)(:)', bygroups(Keyword.Declaration, Punctuation)),
            (r'@[{(][^)}]+[})]', Name.Variable),
            (r'[(){},;]', Punctuation),
            (r'=>', Operator),
            (r'->', Operator),
            (r'\d+\.\d+', Number.Float),
            (r'\d+', Number.Integer),
            (r'\w+', Name.Function),
            (r'\s+', Whitespace),
        ],
        'string': [
            (r'\$[{(]', String.Interpol, 'interpol'),
            (r'\\.', String.Escape),
            (r'"', String, '#pop'),
            (r'\n', String),
            (r'.', String),
        ],
        'interpol': [
            (r'\$[{(]', String.Interpol, '#push'),
            (r'[})]', String.Interpol, '#pop'),
            (r'[^${()}]+', String.Interpol),
        ],
        'arglist': [
            (r'\)', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'\w+', Name.Variable),
            (r'\s+', Whitespace),
        ],
    }


class ApacheConfLexer(RegexLexer):
    """
    Lexer for configuration files following the Apache config file
    format.

    .. versionadded:: 0.6
    """

    name = 'ApacheConf'
    aliases = ['apacheconf', 'aconf', 'apache']
    filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
    mimetypes = ['text/x-apacheconf']
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'#(.*\\\n)+.*$|(#.*?)$', Comment),
            (r'(<[^\s>/][^\s>]*)(?:(\s+)(.*))?(>)',
             bygroups(Name.Tag, Whitespace, String, Name.Tag)),
            (r'(</[^\s>]+)(>)',
             bygroups(Name.Tag, Name.Tag)),
            (r'[a-z]\w*', Name.Builtin, 'value'),
            (r'\.+', Text),
        ],
        'value': [
            (r'\\\n', Text),
            (r'\n+', Whitespace, '#pop'),
            (r'\\', Text),
            (r'[^\S\n]+', Whitespace),
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
            (r'\d+', Number),
            (r'/([*a-z0-9][*\w./-]+)', String.Other),
            (r'(on|off|none|any|all|double|email|dns|min|minimal|'
             r'os|productonly|full|emerg|alert|crit|error|warn|'
             r'notice|info|debug|registry|script|inetd|standalone|'
             r'user|group)\b', Keyword),
            (r'"([^"\\]*(?:\\(.|\n)[^"\\]*)*)"', String.Double),
            (r'[^\s"\\]+', Text)
        ],
    }


class SquidConfLexer(RegexLexer):
    """
    Lexer for squid configuration files.

    .. versionadded:: 0.9
    """

    name = 'SquidConf'
    url = 'http://www.squid-cache.org/'
    aliases = ['squidconf', 'squid.conf', 'squid']
    filenames = ['squid.conf']
    mimetypes = ['text/x-squidconf']
    flags = re.IGNORECASE

    keywords = (
        "access_log", "acl", "always_direct", "announce_host",
        "announce_period", "announce_port", "announce_to", "anonymize_headers",
        "append_domain", "as_whois_server", "auth_param_basic",
        "authenticate_children", "authenticate_program", "authenticate_ttl",
        "broken_posts", "buffered_logs", "cache_access_log", "cache_announce",
        "cache_dir", "cache_dns_program", "cache_effective_group",
        "cache_effective_user", "cache_host", "cache_host_acl",
        "cache_host_domain", "cache_log", "cache_mem", "cache_mem_high",
        "cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer",
        "cache_peer_access", "cache_replacement_policy", "cache_stoplist",
        "cache_stoplist_pattern", "cache_store_log", "cache_swap",
        "cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db",
        "client_lifetime", "client_netmask", "connect_timeout", "coredump_dir",
        "dead_peer_timeout", "debug_options", "delay_access", "delay_class",
        "delay_initial_bucket_level", "delay_parameters", "delay_pools",
        "deny_info", "dns_children", "dns_defnames", "dns_nameservers",
        "dns_testnames", "emulate_httpd_log", "err_html_text",
        "fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port",
        "fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width",
        "ftp_passive", "ftp_user", "half_closed_clients", "header_access",
        "header_replace", "hierarchy_stoplist", "high_response_time_warning",
        "high_page_fault_warning", "hosts_file", "htcp_port", "http_access",
        "http_anonymizer", "httpd_accel", "httpd_accel_host",
        "httpd_accel_port", "httpd_accel_uses_host_header",
        "httpd_accel_with_proxy", "http_port", "http_reply_access",
        "icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout",
        "ident_lookup", "ident_lookup_access", "ident_timeout",
        "incoming_http_average", "incoming_icp_average", "inside_firewall",
        "ipcache_high", "ipcache_low", "ipcache_size", "local_domain",
        "local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries",
        "log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries",
        "mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
        "mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
        "memory_pools_limit", "memory_replacement_policy", "mime_table",
        "min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops",
        "minimum_object_size", "minimum_retry_timeout", "miss_access",
        "negative_dns_ttl", "negative_ttl", "neighbor_timeout",
        "neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period",
        "netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy",
        "pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl",
        "prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp",
        "quick_abort", "quick_abort_max", "quick_abort_min",
        "quick_abort_pct", "range_offset_limit", "read_timeout",
        "redirect_children", "redirect_program",
        "redirect_rewrites_host_header", "reference_age",
        "refresh_pattern", "reload_into_ims", "request_body_max_size",
        "request_size", "request_timeout", "shutdown_lifetime",
        "single_parent_bypass", "siteselect_timeout", "snmp_access",
        "snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy",
        "store_avg_object_size", "store_objects_per_bucket",
        "strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
        "tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize",
        "test_reachability", "udp_hit_obj", "udp_hit_obj_size",
        "udp_incoming_address", "udp_outgoing_address", "unique_hostname",
        "unlinkd_program", "uri_whitespace", "useragent_log",
        "visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port",
    )

    opts = (
        "proxy-only", "weight", "ttl", "no-query", "default", "round-robin",
        "multicast-responder", "on", "off", "all", "deny", "allow", "via",
        "parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2",
        "credentialsttl", "none", "disable", "offline_toggle", "diskd",
    )

    actions = (
        "shutdown", "info", "parameter", "server_list", "client_list",
        "squid.conf",
    )

    actions_stats = (
        "objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns",
        "redirector", "io", "reply_headers", "filedescriptors", "netdb",
    )

    actions_log = ("status", "enable", "disable", "clear")

    acls = (
        "url_regex", "urlpath_regex", "referer_regex", "port", "proto",
        "req_mime_type", "rep_mime_type", "method", "browser", "user", "src",
        "dst", "time", "dstdomain", "ident", "snmp_community",
    )

    ip_re = (
        r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|'
        r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|'
        r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|'
        r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}'
        r'(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|'
        r'(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|'
        r'[1-9]?\d)){3}))'
    )

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'#', Comment, 'comment'),
            (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(opts, prefix=r'\b', suffix=r'\b'), Name.Constant),
            # Actions
            (words(actions, prefix=r'\b', suffix=r'\b'), String),
            (words(actions_stats, prefix=r'stats/', suffix=r'\b'), String),
            (words(actions_log, prefix=r'log/', suffix=r'='), String),
            (words(acls, prefix=r'\b', suffix=r'\b'), Keyword),
            (ip_re + r'(?:/(?:' + ip_re + r'|\b\d+\b))?', Number.Float),
            (r'(?:\b\d+\b(?:-\b\d+|%)?)', Number),
            (r'\S+', Text),
        ],
        'comment': [
            (r'\s*TAG:.*', String.Escape, '#pop'),
            (r'.+', Comment, '#pop'),
            default('#pop'),
        ],
    }


class NginxConfLexer(RegexLexer):
    """
    Lexer for Nginx configuration files.

    .. versionadded:: 0.11
    """

    name = 'Nginx configuration file'
    url = 'http://nginx.net/'
    aliases = ['nginx']
    filenames = ['nginx.conf']
    mimetypes = ['text/x-nginx-conf']

    tokens = {
        'root': [
            (r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Whitespace, Name)),
            (r'[^\s;#]+', Keyword, 'stmt'),
            include('base'),
        ],
        'block': [
            (r'\}', Punctuation, '#pop:2'),
            (r'[^\s;#]+', Keyword.Namespace, 'stmt'),
            include('base'),
        ],
        'stmt': [
            (r'\{', Punctuation, 'block'),
            (r';', Punctuation, '#pop'),
            include('base'),
        ],
        'base': [
            (r'#.*\n', Comment.Single),
            (r'on|off', Name.Constant),
            (r'\$[^\s;#()]+', Name.Variable),
            (r'([a-z0-9.-]+)(:)([0-9]+)',
             bygroups(Name, Punctuation, Number.Integer)),
            (r'[a-z-]+/[a-z-+]+', String),  # mimetype
            # (r'[a-zA-Z._-]+', Keyword),
            (r'[0-9]+[km]?\b', Number.Integer),
            (r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Whitespace, String.Regex)),
            (r'[:=~]', Punctuation),
            (r'[^\s;#{}$]+', String),  # catch all
            (r'/[^\s;#]*', Name),  # pathname
            (r'\s+', Whitespace),
            (r'[$;]', Text),  # leftover characters
        ],
    }


class LighttpdConfLexer(RegexLexer):
    """
    Lexer for Lighttpd configuration files.

    .. versionadded:: 0.11
    """

    name = 'Lighttpd configuration file'
    url = 'http://lighttpd.net/'
    aliases = ['lighttpd', 'lighty']
    filenames = ['lighttpd.conf']
    mimetypes = ['text/x-lighttpd-conf']

    tokens = {
        'root': [
            (r'#.*\n', Comment.Single),
            (r'/\S*', Name),  # pathname
            (r'[a-zA-Z._-]+', Keyword),
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
            (r'[0-9]+', Number),
            (r'=>|=~|\+=|==|=|\+', Operator),
            (r'\$[A-Z]+', Name.Builtin),
            (r'[(){}\[\],]', Punctuation),
            (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
            (r'\s+', Whitespace),
        ],
    }


class DockerLexer(RegexLexer):
    """
    Lexer for Docker configuration files.

    .. versionadded:: 2.0
    """

    name = 'Docker'
    url = 'http://docker.io'
    aliases = ['docker', 'dockerfile']
    filenames = ['Dockerfile', '*.docker']
    mimetypes = ['text/x-dockerfile-config']

    _keywords = r'(?:MAINTAINER|EXPOSE|WORKDIR|USER|STOPSIGNAL)'
    _bash_keywords = r'(?:RUN|CMD|ENTRYPOINT|ENV|ARG|LABEL|ADD|COPY)'
    _lb = r'(?:\s*\\?\s*)'  # dockerfile line break regex
    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        'root': [
            (r'#.*', Comment),
            (r'(FROM)([ \t]*)(\S*)([ \t]*)(?:(AS)([ \t]*)(\S*))?',
             bygroups(Keyword, Whitespace, String, Whitespace, Keyword,
                      Whitespace, String)),
            (r'(ONBUILD)(\s+)(%s)' % (_lb,),
             bygroups(Keyword, Whitespace, using(BashLexer))),
            (r'(HEALTHCHECK)(\s+)((%s--\w+=\w+%s)*)' % (_lb, _lb),
             bygroups(Keyword, Whitespace, using(BashLexer))),
            (r'(VOLUME|ENTRYPOINT|CMD|SHELL)(\s+)(%s)(\[.*?\])' % (_lb,),
             bygroups(Keyword, Whitespace, using(BashLexer), using(JsonLexer))),
            (r'(LABEL|ENV|ARG)(\s+)((%s\w+=\w+%s)*)' % (_lb, _lb),
             bygroups(Keyword, Whitespace, using(BashLexer))),
            (r'(%s|VOLUME)\b(\s+)(.*)' % (_keywords,),
             bygroups(Keyword, Whitespace, String)),
            (r'(%s)(\s+)' % (_bash_keywords,), bygroups(Keyword, Whitespace)),
            (r'(.*\\\n)*.+', using(BashLexer)),
        ]
    }
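

# Illustrative sketch: instruction keywords match case-insensitively, and
# RUN/CMD bodies are delegated to BashLexer.
#
#     from pygments.token import Keyword
#     toks = list(DockerLexer().get_tokens('FROM python:3 AS base\nRUN make\n'))
#     assert (Keyword, 'FROM') in toks and (Keyword, 'RUN') in toks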


class TerraformLexer(ExtendedRegexLexer):
    """
    Lexer for Terraform ``.tf`` files.

    .. versionadded:: 2.1
    """

    name = 'Terraform'
    url = 'https://www.terraform.io/'
    aliases = ['terraform', 'tf', 'hcl']
    filenames = ['*.tf', '*.hcl']
    mimetypes = ['application/x-tf', 'application/x-terraform']

    classes = ('backend', 'data', 'module', 'output', 'provider',
               'provisioner', 'resource', 'variable')
    classes_re = "({})".format(('|').join(classes))

    types = ('string', 'number', 'bool', 'list', 'tuple', 'map', 'set',
             'object', 'null')

    numeric_functions = ('abs', 'ceil', 'floor', 'log', 'max',
                         'min', 'parseint', 'pow', 'signum')
    string_functions = ('chomp', 'format', 'formatlist', 'indent',
                        'join', 'lower', 'regex', 'regexall', 'replace',
                        'split', 'strrev', 'substr', 'title', 'trim',
                        'trimprefix', 'trimsuffix', 'trimspace', 'upper'
                        )
    collection_functions = ('alltrue', 'anytrue', 'chunklist', 'coalesce',
                            'coalescelist', 'compact', 'concat', 'contains',
                            'distinct', 'element', 'flatten', 'index', 'keys',
                            'length', 'list', 'lookup', 'map', 'matchkeys',
                            'merge', 'range', 'reverse', 'setintersection',
                            'setproduct', 'setsubtract', 'setunion', 'slice',
                            'sort', 'sum', 'transpose', 'values', 'zipmap'
                            )
    encoding_functions = ('base64decode', 'base64encode', 'base64gzip',
                          'csvdecode', 'jsondecode', 'jsonencode',
                          'textdecodebase64', 'textencodebase64', 'urlencode',
                          'yamldecode', 'yamlencode')
    filesystem_functions = ('abspath', 'dirname', 'pathexpand', 'basename',
                            'file', 'fileexists', 'fileset', 'filebase64',
                            'templatefile')
    date_time_functions = ('formatdate', 'timeadd', 'timestamp')
    hash_crypto_functions = ('base64sha256', 'base64sha512', 'bcrypt',
                             'filebase64sha256', 'filebase64sha512', 'filemd5',
                             'filesha1', 'filesha256', 'filesha512', 'md5',
                             'rsadecrypt', 'sha1', 'sha256', 'sha512', 'uuid',
                             'uuidv5')
    ip_network_functions = ('cidrhost', 'cidrnetmask', 'cidrsubnet',
                            'cidrsubnets')
    type_conversion_functions = ('can', 'defaults', 'tobool', 'tolist',
                                 'tomap', 'tonumber', 'toset', 'tostring',
                                 'try')

    builtins = numeric_functions + string_functions + collection_functions + \
        encoding_functions + filesystem_functions + date_time_functions + \
        hash_crypto_functions + ip_network_functions + \
        type_conversion_functions
    builtins_re = "({})".format(('|').join(builtins))

    def heredoc_callback(self, match, ctx):
        # Parse a Terraform heredoc.
        # match: 1 = <<[-]?, 2 = name, 3 = rest of line
        start = match.start(1)
        yield start, Operator, match.group(1)  # <<[-]?
        yield match.start(2), String.Delimiter, match.group(2)  # heredoc name

        ctx.pos = match.start(3)
        ctx.end = match.end(3)
        yield ctx.pos, String.Heredoc, match.group(3)
        ctx.pos = match.end()

        hdname = match.group(2)
        tolerant = True  # leading whitespace is always accepted

        lines = []
        for match in line_re.finditer(ctx.text, ctx.pos):
            if tolerant:
                check = match.group().strip()
            else:
                check = match.group().rstrip()
            if check == hdname:
                for amatch in lines:
                    yield amatch.start(), String.Heredoc, amatch.group()
                yield match.start(), String.Delimiter, match.group()
                ctx.pos = match.end()
                break
            else:
                lines.append(match)
        else:
            # end of heredoc not found -- error!
            for amatch in lines:
                yield amatch.start(), Error, amatch.group()
        ctx.end = len(ctx.text)

    tokens = {
        'root': [
            include('basic'),
            include('whitespace'),
            # Strings
            (r'(".*")', bygroups(String.Double)),
            # Constants
            (words(('true', 'false'), prefix=r'\b', suffix=r'\b'),
             Name.Constant),
            # Types
            (words(types, prefix=r'\b', suffix=r'\b'), Keyword.Type),
            include('identifier'),
            include('punctuation'),
            (r'[0-9]+', Number),
        ],
        'basic': [
            (r'\s*/\*', Comment.Multiline, 'comment'),
            (r'\s*(#|//).*\n', Comment.Single),
            include('whitespace'),
            # e.g. terraform {
            # e.g. egress {
            (r'(\s*)([0-9a-zA-Z-_]+)(\s*)(=?)(\s*)(\{)',
             bygroups(Whitespace, Name.Builtin, Whitespace, Operator,
                      Whitespace, Punctuation)),
            # Assignment with attributes, e.g. something = ...
            (r'(\s*)([0-9a-zA-Z-_]+)(\s*)(=)(\s*)',
             bygroups(Whitespace, Name.Attribute, Whitespace, Operator,
                      Whitespace)),
            # Assignment with environment variables and similar, e.g. "something" = ...
            # or key value assignment, e.g. "SlotName" : ...
            (r'(\s*)("\S+")(\s*)([=:])(\s*)',
             bygroups(Whitespace, Literal.String.Double, Whitespace, Operator,
                      Whitespace)),
            # Functions, e.g. jsonencode(element("value"))
            (builtins_re + r'(\()', bygroups(Name.Function, Punctuation)),
            # List of attributes, e.g. ignore_changes = [last_modified, filename]
            (r'(\[)([a-z_,\s]+)(\])',
             bygroups(Punctuation, Name.Builtin, Punctuation)),
            # e.g. resource "aws_security_group" "allow_tls" {
            # e.g. backend "consul" {
            (classes_re + r'(\s+)("[0-9a-zA-Z-_]+")?(\s*)("[0-9a-zA-Z-_]+")(\s+)(\{)',
             bygroups(Keyword.Reserved, Whitespace, Name.Class, Whitespace,
                      Name.Variable, Whitespace, Punctuation)),
            # here-doc style delimited strings
            (r'(<<-?)\s*([a-zA-Z_]\w*)(.*?\n)', heredoc_callback),
        ],
        'identifier': [
            (r'\b(var\.[0-9a-zA-Z-_\.\[\]]+)\b', bygroups(Name.Variable)),
            (r'\b([0-9a-zA-Z-_\[\]]+\.[0-9a-zA-Z-_\.\[\]]+)\b',
             bygroups(Name.Variable)),
        ],
        'punctuation': [
            (r'[\[\]()\{\},.?:!=]', Punctuation),
        ],
        'comment': [
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        'whitespace': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'(\\)(\n)', bygroups(Text, Whitespace)),
        ],
    }
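

# Heredoc sketch (illustrative): heredoc_callback emits the '<<'/'<<-'
# introducer as Operator, the delimiter name as String.Delimiter, and every
# body line as String.Heredoc until a line equal to the name closes the
# block (leading whitespace before the terminator is tolerated).
#
#     hcl = 'user_data = <<EOF\nhello\nEOF\n'
#     toks = list(TerraformLexer().get_tokens(hcl))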


class TermcapLexer(RegexLexer):
    """
    Lexer for termcap database source.

    This is very simple and minimal.

    .. versionadded:: 2.1
    """

    name = 'Termcap'
    aliases = ['termcap']
    filenames = ['termcap', 'termcap.src']
    mimetypes = []

    # NOTE:
    #   * multiline with trailing backslash
    #   * separator is ':'
    #   * to embed colon as data, we must use \072
    #   * space after separator is not allowed (maybe)
    tokens = {
        'root': [
            (r'^#.*', Comment),
            (r'^[^\s#:|]+', Name.Tag, 'names'),
            (r'\s+', Whitespace),
        ],
        'names': [
            (r'\n', Whitespace, '#pop'),
            (r':', Punctuation, 'defs'),
            (r'\|', Punctuation),
            (r'[^:|]+', Name.Attribute),
        ],
        'defs': [
            (r'(\\)(\n[ \t]*)', bygroups(Text, Whitespace)),
            (r'\n[ \t]*', Whitespace, '#pop:2'),
            (r'(#)([0-9]+)', bygroups(Operator, Number)),
            (r'=', Operator, 'data'),
            (r':', Punctuation),
            (r'[^\s:=#]+', Name.Class),
        ],
        'data': [
            (r'\\072', Literal),
            (r':', Punctuation, '#pop'),
            (r'[^:\\]+', Literal),  # for performance
            (r'.', Literal),
        ],
    }
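

# Escaping sketch (illustrative): inside a capability value a literal colon
# must be written as \072, which the 'data' state keeps as Literal instead
# of treating it as the capability separator.
#
#     tc = 'xx|dummy entry:if=/etc/motd:\n'
#     toks = list(TermcapLexer().get_tokens(tc))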


class TerminfoLexer(RegexLexer):
    """
    Lexer for terminfo database source.

    This is very simple and minimal.

    .. versionadded:: 2.1
    """

    name = 'Terminfo'
    aliases = ['terminfo']
    filenames = ['terminfo', 'terminfo.src']
    mimetypes = []

    # NOTE:
    #   * multiline with leading whitespace
    #   * separator is ','
    #   * to embed comma as data, we can use \,
    #   * space after separator is allowed
    tokens = {
        'root': [
            (r'^#.*$', Comment),
            (r'^[^\s#,|]+', Name.Tag, 'names'),
            (r'\s+', Whitespace),
        ],
        'names': [
            (r'\n', Whitespace, '#pop'),
            (r'(,)([ \t]*)', bygroups(Punctuation, Whitespace), 'defs'),
            (r'\|', Punctuation),
            (r'[^,|]+', Name.Attribute),
        ],
        'defs': [
            (r'\n[ \t]+', Whitespace),
            (r'\n', Whitespace, '#pop:2'),
            (r'(#)([0-9]+)', bygroups(Operator, Number)),
            (r'=', Operator, 'data'),
            (r'(,)([ \t]*)', bygroups(Punctuation, Whitespace)),
            (r'[^\s,=#]+', Name.Class),
        ],
        'data': [
            (r'\\[,\\]', Literal),
            (r'(,)([ \t]*)', bygroups(Punctuation, Whitespace), '#pop'),
            (r'[^\\,]+', Literal),  # for performance
            (r'.', Literal),
        ],
    }


class PkgConfigLexer(RegexLexer):
    """
    Lexer for pkg-config
    (see also `manual page <http://linux.die.net/man/1/pkg-config>`_).

    .. versionadded:: 2.1
    """

    name = 'PkgConfig'
    url = 'http://www.freedesktop.org/wiki/Software/pkg-config/'
    aliases = ['pkgconfig']
    filenames = ['*.pc']
    mimetypes = []

    tokens = {
        'root': [
            (r'#.*$', Comment.Single),
            # variable definitions
            (r'^(\w+)(=)', bygroups(Name.Attribute, Operator)),
            # keyword lines
            (r'^([\w.]+)(:)',
             bygroups(Name.Tag, Punctuation), 'spvalue'),
            # variable references
            include('interp'),
            # fallback
            (r'\s+', Whitespace),
            (r'[^${}#=:\n.]+', Text),
            (r'.', Text),
        ],
        'interp': [
            # you can escape literal "$" as "$$"
            (r'\$\$', Text),
            # variable references
            (r'\$\{', String.Interpol, 'curly'),
        ],
        'curly': [
            (r'\}', String.Interpol, '#pop'),
            (r'\w+', Name.Attribute),
        ],
        'spvalue': [
            include('interp'),
            (r'#.*$', Comment.Single, '#pop'),
            (r'\n', Whitespace, '#pop'),
            # fallback
            (r'\s+', Whitespace),
            (r'[^${}#\n\s]+', Text),
            (r'.', Text),
        ],
    }


class PacmanConfLexer(RegexLexer):
    """
    Lexer for pacman.conf.

    Actually, IniLexer works almost fine for this format,
    but it yields error tokens, because pacman.conf allows
    options without an assignment, like::

        UseSyslog
        Color
        TotalDownload
        CheckSpace
        VerbosePkgLists

    These are flags to switch on.

    .. versionadded:: 2.1
    """

    name = 'PacmanConf'
    url = 'https://www.archlinux.org/pacman/pacman.conf.5.html'
    aliases = ['pacmanconf']
    filenames = ['pacman.conf']
    mimetypes = []

    tokens = {
        'root': [
            # comment
            (r'#.*$', Comment.Single),
            # section header
            (r'^(\s*)(\[.*?\])(\s*)$', bygroups(Whitespace, Keyword, Whitespace)),
            # variable definitions
            # (Leading space is allowed...)
            (r'(\w+)(\s*)(=)',
             bygroups(Name.Attribute, Whitespace, Operator)),
            # flags to on
            (r'^(\s*)(\w+)(\s*)$',
             bygroups(Whitespace, Name.Attribute, Whitespace)),
            # built-in special values
            (words((
                '$repo',  # repository
                '$arch',  # architecture
                '%o',     # outfile
                '%u',     # url
                ), suffix=r'\b'),
             Name.Variable),
            # fallback
            (r'\s+', Whitespace),
            (r'.', Text),
        ],
    }
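

# Flag sketch (illustrative): a bare word on its own line is a flag being
# switched on; the dedicated rule above tokenizes it as Name.Attribute.
#
#     from pygments.token import Name
#     toks = list(PacmanConfLexer().get_tokens('Color\n'))
#     assert (Name.Attribute, 'Color') in toks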


class AugeasLexer(RegexLexer):
    """
    Lexer for Augeas.

    .. versionadded:: 2.4
    """

    name = 'Augeas'
    url = 'http://augeas.net'
    aliases = ['augeas']
    filenames = ['*.aug']

    tokens = {
        'root': [
            (r'(module)(\s*)([^\s=]+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
            (r'(let)(\s*)([^\s=]+)',
             bygroups(Keyword.Declaration, Whitespace, Name.Variable)),
            (r'(del|store|value|counter|seq|key|label|autoload|incl|excl|transform|test|get|put)(\s+)',
             bygroups(Name.Builtin, Whitespace)),
            (r'(\()([^:]+)(\:)(unit|string|regexp|lens|tree|filter)(\))',
             bygroups(Punctuation, Name.Variable, Punctuation, Keyword.Type,
                      Punctuation)),
            (r'\(\*', Comment.Multiline, 'comment'),
            (r'[*+\-.;=?|]', Operator),
            (r'[()\[\]{}]', Operator),
            (r'"', String.Double, 'string'),
            (r'\/', String.Regex, 'regex'),
            (r'([A-Z]\w*)(\.)(\w+)',
             bygroups(Name.Namespace, Punctuation, Name.Variable)),
            (r'.', Name.Variable),
            (r'\s+', Whitespace),
        ],
        'string': [
            (r'\\.', String.Escape),
            (r'[^"]', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        'regex': [
            (r'\\.', String.Escape),
            (r'[^/]', String.Regex),
            (r'\/', String.Regex, '#pop'),
        ],
        'comment': [
            (r'[^*)]', Comment.Multiline),
            (r'\(\*', Comment.Multiline, '#push'),
            (r'\*\)', Comment.Multiline, '#pop'),
            (r'[)*]', Comment.Multiline)
        ],
    }


class TOMLLexer(RegexLexer):
    """
    Lexer for TOML, a simple language
    for config files.

    .. versionadded:: 2.4
    """

    name = 'TOML'
    url = 'https://github.com/toml-lang/toml'
    aliases = ['toml']
    filenames = ['*.toml', 'Pipfile', 'poetry.lock']

    tokens = {
        'root': [
            # Table
            (r'^(\s*)(\[.*?\])$', bygroups(Whitespace, Keyword)),
            # Basics, comments, strings
            (r'[ \t]+', Whitespace),
            (r'\n', Whitespace),
            (r'#.*?$', Comment.Single),
            # Basic string
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            # Literal string
            (r'\'\'\'(.*)\'\'\'', String),
            (r'\'[^\']*\'', String),
            (r'(true|false)$', Keyword.Constant),
            (r'[a-zA-Z_][\w\-]*', Name),
            # Datetime
            # TODO this needs to be expanded, as TOML is rather flexible:
            # https://github.com/toml-lang/toml#offset-date-time
            (r'\d{4}-\d{2}-\d{2}(?:T| )\d{2}:\d{2}:\d{2}(?:Z|[-+]\d{2}:\d{2})',
             Number.Integer),
            # Numbers
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
            # Handle +-inf, +-infinity, +-nan
            (r'[+-]?(?:(inf(?:inity)?)|nan)', Number.Float),
            (r'[+-]?\d+', Number.Integer),
            # Punctuation
            (r'[]{}:(),;[]', Punctuation),
            (r'\.', Punctuation),
            # Operators
            (r'=', Operator)
        ]
    }


class NestedTextLexer(RegexLexer):
    """
    Lexer for *NestedText*, a human-friendly data format.

    .. versionadded:: 2.9

    .. versionchanged:: 2.16
        Added support for *NestedText* v3.0.
    """

    name = 'NestedText'
    url = 'https://nestedtext.org'
    aliases = ['nestedtext', 'nt']
    filenames = ['*.nt']

    tokens = {
        'root': [
            # Comment: # ...
            (r'^([ ]*)(#.*)$', bygroups(Whitespace, Comment)),
            # Inline dictionary: {...}
            (r'^([ ]*)(\{)', bygroups(Whitespace, Punctuation), 'inline_dict'),
            # Inline list: [...]
            (r'^([ ]*)(\[)', bygroups(Whitespace, Punctuation), 'inline_list'),
            # empty multiline string item: >
            (r'^([ ]*)(>)$', bygroups(Whitespace, Punctuation)),
            # multiline string item: > ...
            (r'^([ ]*)(>)( )(.*?)([ \t]*)$',
             bygroups(Whitespace, Punctuation, Whitespace, Text, Whitespace)),
            # empty list item: -
            (r'^([ ]*)(-)$', bygroups(Whitespace, Punctuation)),
            # list item: - ...
            (r'^([ ]*)(-)( )(.*?)([ \t]*)$',
             bygroups(Whitespace, Punctuation, Whitespace, Text, Whitespace)),
            # empty multiline key item: :
            (r'^([ ]*)(:)$', bygroups(Whitespace, Punctuation)),
            # multiline key item: : ...
            (r'^([ ]*)(:)( )([^\n]*?)([ \t]*)$',
             bygroups(Whitespace, Punctuation, Whitespace, Name.Tag,
                      Whitespace)),
            # empty dict key item: ...:
            (r'^([ ]*)([^\{\[\s].*?)(:)$',
             bygroups(Whitespace, Name.Tag, Punctuation)),
            # dict key item: ...: ...
            (r'^([ ]*)([^\{\[\s].*?)(:)( )(.*?)([ \t]*)$',
             bygroups(Whitespace, Name.Tag, Punctuation, Whitespace, Text,
                      Whitespace)),
        ],
        'inline_list': [
            include('whitespace'),
            (r'[^\{\}\[\],\s]+', Text),
            include('inline_value'),
            (r',', Punctuation),
            (r'\]', Punctuation, '#pop'),
            (r'\n', Error, '#pop'),
        ],
        'inline_dict': [
            include('whitespace'),
            (r'[^\{\}\[\],:\s]+', Name.Tag),
            (r':', Punctuation, 'inline_dict_value'),
            (r'\}', Punctuation, '#pop'),
            (r'\n', Error, '#pop'),
        ],
        'inline_dict_value': [
            include('whitespace'),
            (r'[^\{\}\[\],:\s]+', Text),
            include('inline_value'),
            (r',', Punctuation, '#pop'),
            (r'\}', Punctuation, '#pop:2'),
        ],
        'inline_value': [
            include('whitespace'),
            (r'\{', Punctuation, 'inline_dict'),
            (r'\[', Punctuation, 'inline_list'),
        ],
        'whitespace': [
            (r'[ \t]+', Whitespace),
        ],
    }
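

# Inline-collection sketch (illustrative): inline dicts and lists nest
# through the shared 'inline_value' helper state.
#
#     nt = '{a: 1, b: [2, 3]}\n'
#     toks = list(NestedTextLexer().get_tokens(nt))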


class SingularityLexer(RegexLexer):
    """
    Lexer for Singularity definition files.

    .. versionadded:: 2.6
    """

    name = 'Singularity'
    url = 'https://www.sylabs.io/guides/3.0/user-guide/definition_files.html'
    aliases = ['singularity']
    filenames = ['*.def', 'Singularity']
    flags = re.IGNORECASE | re.MULTILINE | re.DOTALL

    _headers = r'^(\s*)(bootstrap|from|osversion|mirrorurl|include|registry|namespace|includecmd)(:)'
    _section = r'^(%(?:pre|post|setup|environment|help|labels|test|runscript|files|startscript))(\s*)'
    _appsect = r'^(%app(?:install|help|run|labels|env|test|files))(\s*)'

    tokens = {
        'root': [
            (_section, bygroups(Generic.Heading, Whitespace), 'script'),
            (_appsect, bygroups(Generic.Heading, Whitespace), 'script'),
            (_headers, bygroups(Whitespace, Keyword, Text)),
            (r'\s*#.*?\n', Comment),
            (r'\b(([0-9]+\.?[0-9]*)|(\.[0-9]+))\b', Number),
            (r'[ \t]+', Whitespace),
            (r'(?!^\s*%).', Text),
        ],
        'script': [
            (r'(.+?(?=^\s*%))|(.*)', using(BashLexer), '#pop'),
        ],
    }

    def analyse_text(text):
        """This is a quite simple script file, but there are a few keywords
        which seem unique to this language."""
        result = 0
        if re.search(r'\b(?:osversion|includecmd|mirrorurl)\b', text,
                     re.IGNORECASE):
            result += 0.5

        if re.search(SingularityLexer._section[1:], text):
            result += 0.49

        return result
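

# Scoring sketch (illustrative): the two signals are additive, so a file
# with both a rare header keyword and a %section scores 0.99.
#
#     SingularityLexer.analyse_text('MirrorURL: http://example.org\n%post\n')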


class UnixConfigLexer(RegexLexer):
    """
    Lexer for Unix/Linux config files using colon-separated values, e.g.

    * ``/etc/group``
    * ``/etc/passwd``
    * ``/etc/shadow``

    .. versionadded:: 2.12
    """

    name = 'Unix/Linux config files'
    aliases = ['unixconfig', 'linuxconfig']
    filenames = []

    tokens = {
        'root': [
            (r'^#.*', Comment),
            (r'\n', Whitespace),
            (r':', Punctuation),
            (r'[0-9]+', Number),
            (r'((?!\n)[a-zA-Z0-9\_\-\s\(\),]){2,}', Text),
            (r'[^:\n]+', String),
        ],
    }
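

# Small self-check sketch (illustrative, not part of the upstream module):
# running this file directly dumps the tokens a couple of these lexers
# produce, without adding any side effects at import time.
if __name__ == '__main__':
    _samples = [
        (IniLexer(), '[core]\nautocrlf = input\n'),
        (UnixConfigLexer(), 'root:x:0:0:root:/root:/bin/bash\n'),
    ]
    for _lexer, _text in _samples:
        print('---', _lexer.name)
        for _token, _value in _lexer.get_tokens(_text):
            print(_token, repr(_value))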