# -*- coding: utf-8 -*-
"""
    Lexers for non-source code file types.

    :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re
from bisect import bisect

from pygments.lexer import Lexer, LexerContext, RegexLexer, ExtendedRegexLexer, \
     bygroups, include, using, this, do_insertions
from pygments.token import Punctuation, Text, Comment, Keyword, Name, String, \
     Generic, Operator, Number, Whitespace, Literal
from pygments.util import get_bool_opt
from pygments.lexers.other import BashLexer

__all__ = ['IniLexer', 'SourcesListLexer', 'BaseMakefileLexer',
           'MakefileLexer', 'DiffLexer', 'IrcLogsLexer', 'TexLexer',
           'GroffLexer', 'ApacheConfLexer', 'BBCodeLexer', 'MoinWikiLexer',
           'RstLexer', 'VimLexer', 'GettextLexer', 'SquidConfLexer',
           'DebianControlLexer', 'DarcsPatchLexer', 'YamlLexer',
           'LighttpdConfLexer', 'NginxConfLexer', 'CMakeLexer']


class IniLexer(RegexLexer):
    """
    Lexer for configuration files in INI style.
    """

    name = 'INI'
    aliases = ['ini', 'cfg']
    filenames = ['*.ini', '*.cfg', '*.properties']
    mimetypes = ['text/x-ini']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'[;#].*?$', Comment),
            (r'\[.*?\]$', Keyword),
            (r'(.*?)([ \t]*)(=)([ \t]*)(.*?)$',
             bygroups(Name.Attribute, Text, Operator, Text, String))
        ]
    }

    def analyse_text(text):
        npos = text.find('\n')
        if npos < 3:
            return False
        return text[0] == '[' and text[npos-1] == ']'

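
# A minimal usage sketch (the helper below is ours, not part of Pygments):
# the ``analyse_text`` heuristic above scores text whose first line looks
# like ``[section]``, which is what ``pygments.lexers.guess_lexer`` consults
# when no filename is available.
def _demo_guess_ini():
    from pygments.lexers import guess_lexer
    lexer = guess_lexer('[general]\nkey = value\n')
    return lexer.name  # the INI lexer should win the guess for this input
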

class SourcesListLexer(RegexLexer):
    """
    Lexer that highlights debian sources.list files.

    *New in Pygments 0.7.*
    """

    name = 'Debian Sourcelist'
    aliases = ['sourceslist', 'sources.list']
    filenames = ['sources.list']
    mimetypes = ['application/x-debian-sourceslist']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#.*?$', Comment),
            (r'^(deb(?:-src)?)(\s+)',
             bygroups(Keyword, Text), 'distribution')
        ],
        'distribution': [
            (r'#.*?$', Comment, '#pop'),
            (r'\$\(ARCH\)', Name.Variable),
            (r'[^\s$[]+', String),
            (r'\[', String.Other, 'escaped-distribution'),
            (r'\$', String),
            (r'\s+', Text, 'components')
        ],
        'escaped-distribution': [
            (r'\]', String.Other, '#pop'),
            (r'\$\(ARCH\)', Name.Variable),
            (r'[^\]$]+', String.Other),
            (r'\$', String.Other)
        ],
        'components': [
            (r'#.*?$', Comment, '#pop:2'),
            (r'$', Text, '#pop:2'),
            (r'\s+', Text),
            (r'\S+', Keyword.Pseudo),
        ]
    }

    def analyse_text(text):
        for line in text.split('\n'):
            line = line.strip()
            if not (line.startswith('#') or line.startswith('deb ') or
                    line.startswith('deb-src ') or not line):
                return False
        return True

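
# A hedged guessing demo (the helper below is ours): ``analyse_text`` above
# only accepts text whose non-empty lines are comments or deb/deb-src
# entries, which lets ``guess_lexer`` pick this lexer for apt source lists.
def _demo_guess_sources_list():
    from pygments.lexers import guess_lexer
    text = 'deb http://archive.ubuntu.com/ubuntu lucid main\n'
    return guess_lexer(text).name
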

class MakefileLexer(Lexer):
    """
    Lexer for BSD and GNU make extensions (lenient enough to handle both in
    the same file even).

    *Rewritten in Pygments 0.10.*
    """

    name = 'Makefile'
    aliases = ['make', 'makefile', 'mf', 'bsdmake']
    filenames = ['*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile']
    mimetypes = ['text/x-makefile']

    r_special = re.compile(r'^(?:'
        # BSD Make
        r'\.\s*(include|undef|error|warning|if|else|elif|endif|for|endfor)|'
        # GNU Make
        r'\s*(ifeq|ifneq|ifdef|ifndef|else|endif|-?include|define|endef|:))(?=\s)')
    r_comment = re.compile(r'^\s*@?#')

    def get_tokens_unprocessed(self, text):
        ins = []
        lines = text.splitlines(True)
        done = ''
        lex = BaseMakefileLexer(**self.options)
        backslashflag = False
        for line in lines:
            if self.r_special.match(line) or backslashflag:
                ins.append((len(done), [(0, Comment.Preproc, line)]))
                backslashflag = line.strip().endswith('\\')
            elif self.r_comment.match(line):
                ins.append((len(done), [(0, Comment, line)]))
            else:
                done += line
        for item in do_insertions(ins, lex.get_tokens_unprocessed(done)):
            yield item

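
# A hedged sketch of the ``do_insertions`` technique used above (the demo
# helper is ours): preprocessor-style lines are pulled out of the text,
# pre-tokenized, and spliced back into the base lexer's token stream at the
# offsets where they were removed, so token positions still line up.
def _demo_makefile_tokens():
    sample = 'ifeq ($(CC),gcc)\nall:\n\techo hi\nendif\n'
    return list(MakefileLexer().get_tokens(sample))
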

class BaseMakefileLexer(RegexLexer):
    """
    Lexer for simple Makefiles (no preprocessing).

    *New in Pygments 0.10.*
    """

    name = 'Base Makefile'
    aliases = ['basemake']
    filenames = []
    mimetypes = []

    tokens = {
        'root': [
            (r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)),
            (r'\$\((?:.*\\\n|.*\n)+', using(BashLexer)),
            (r'\s+', Text),
            (r'#.*?\n', Comment),
            (r'(export)(\s+)(?=[a-zA-Z0-9_${}\t -]+\n)',
             bygroups(Keyword, Text), 'export'),
            (r'export\s+', Keyword),
            # assignment
            (r'([a-zA-Z0-9_${}.-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n|.*\n)+)',
             bygroups(Name.Variable, Text, Operator, Text, using(BashLexer))),
            # strings
            (r'(?s)"(\\\\|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\.|[^'\\])*'", String.Single),
            # targets
            (r'([^\n:]+)(:+)([ \t]*)', bygroups(Name.Function, Operator, Text),
             'block-header'),
            # TODO: add paren handling (grr)
        ],
        'export': [
            (r'[a-zA-Z0-9_${}-]+', Name.Variable),
            (r'\n', Text, '#pop'),
            (r'\s+', Text),
        ],
        'block-header': [
            (r'[^,\\\n#]+', Number),
            (r',', Punctuation),
            (r'#.*?\n', Comment),
            (r'\\\n', Text), # line continuation
            (r'\\.', Text),
            (r'(?:[\t ]+.*\n|\n)+', using(BashLexer), '#pop'),
        ],
    }


class DiffLexer(RegexLexer):
    """
    Lexer for unified or context-style diffs or patches.
    """

    name = 'Diff'
    aliases = ['diff', 'udiff']
    filenames = ['*.diff', '*.patch']
    mimetypes = ['text/x-diff', 'text/x-patch']

    tokens = {
        'root': [
            (r' .*\n', Text),
            (r'\+.*\n', Generic.Inserted),
            (r'-.*\n', Generic.Deleted),
            (r'!.*\n', Generic.Strong),
            (r'@.*\n', Generic.Subheading),
            (r'([Ii]ndex|diff).*\n', Generic.Heading),
            (r'=.*\n', Generic.Heading),
            (r'.*\n', Text),
        ]
    }

    def analyse_text(text):
        if text[:7] == 'Index: ':
            return True
        if text[:5] == 'diff ':
            return True
        if text[:4] == '--- ':
            return 0.9

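
# A hedged guessing demo (the helper below is ours): the scores returned by
# ``analyse_text`` above are compared across registered lexers, so input
# starting with ``--- `` scores 0.9 and normally selects this lexer.
def _demo_guess_diff():
    from pygments.lexers import guess_lexer
    patch = '--- a/file\n+++ b/file\n@@ -1 +1 @@\n-old\n+new\n'
    return guess_lexer(patch).name
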

DPATCH_KEYWORDS = ['hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
    'replace']

class DarcsPatchLexer(RegexLexer):
    """
    DarcsPatchLexer is a lexer for the various versions of the darcs patch
    format. Examples of this format are derived by commands such as
    ``darcs annotate --patch`` and ``darcs send``.

    *New in Pygments 0.10.*
    """
    name = 'Darcs Patch'
    aliases = ['dpatch']
    filenames = ['*.dpatch', '*.darcspatch']

    tokens = {
        'root': [
            (r'<', Operator),
            (r'>', Operator),
            (r'{', Operator),
            (r'}', Operator),
            (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
             bygroups(Operator, Keyword, Name, Text, Name, Operator,
                      Literal.Date, Text, Operator)),
            (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
             bygroups(Operator, Keyword, Name, Text, Name, Operator,
                      Literal.Date, Text), 'comment'),
            (r'New patches:', Generic.Heading),
            (r'Context:', Generic.Heading),
            (r'Patch bundle hash:', Generic.Heading),
            (r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS),
             bygroups(Text, Keyword, Text)),
            (r'\+', Generic.Inserted, "insert"),
            (r'-', Generic.Deleted, "delete"),
            (r'.*\n', Text),
        ],
        'comment': [
            (r'[^\]].*\n', Comment),
            (r'\]', Operator, "#pop"),
        ],
        'specialText': [ # darcs add [_CODE_] special operators for clarity
            (r'\n', Text, "#pop"), # line-based
            (r'\[_[^_]*_]', Operator),
        ],
        'insert': [
            include('specialText'),
            (r'\[', Generic.Inserted),
            (r'[^\n\[]*', Generic.Inserted),
        ],
        'delete': [
            include('specialText'),
            (r'\[', Generic.Deleted),
            (r'[^\n\[]*', Generic.Deleted),
        ],
    }


class IrcLogsLexer(RegexLexer):
    """
    Lexer for IRC logs in *irssi*, *xchat* or *weechat* style.
    """

    name = 'IRC logs'
    aliases = ['irc']
    filenames = ['*.weechatlog']
    mimetypes = ['text/x-irclog']

    flags = re.VERBOSE | re.MULTILINE
    timestamp = r"""
        (
          # irssi / xchat and others
          (?: \[|\()?                  # Opening bracket or paren for the timestamp
            (?:                        # Timestamp
                (?: (?:\d{1,4} [-/]?)+ # Date as - or /-separated groups of digits
                 [T ])?                # Date/time separator: T or space
                (?: \d?\d [:.]?)+      # Time as :/.-separated groups of 1 or 2 digits
            )
          (?: \]|\))?\s+               # Closing bracket or paren for the timestamp
        |
          # weechat
          \d{4}\s\w{3}\s\d{2}\s        # Date
          \d{2}:\d{2}:\d{2}\s+         # Time + Whitespace
        |
          # xchat
          \w{3}\s\d{2}\s               # Date
          \d{2}:\d{2}:\d{2}\s+         # Time + Whitespace
        )?
    """

    tokens = {
        'root': [
            # log start/end
            (r'^\*\*\*\*(.*)\*\*\*\*$', Comment),
            # hack
            ("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)),
            # normal msgs
            ("^" + timestamp + r"""
                (\s*<.*?>\s*)          # Nick """,
             bygroups(Comment.Preproc, Name.Tag), 'msg'),
            # /me msgs
            ("^" + timestamp + r"""
                (\s*[*]\s+)            # Star
                ([^\s]+\s+.*?\n)       # Nick + rest of message """,
             bygroups(Comment.Preproc, Keyword, Generic.Inserted)),
            # join/part msgs
            ("^" + timestamp + r"""
                (\s*(?:\*{3}|<?-[!@=P]?->?)\s*)  # Star(s) or symbols
                ([^\s]+\s+)                      # Nick + Space
                (.*?\n)                          # Rest of message """,
             bygroups(Comment.Preproc, Keyword, String, Comment)),
            (r"^.*?\n", Text),
        ],
        'msg': [
            (r"[^\s]+:(?!//)", Name.Attribute),  # Prefix
            (r".*\n", Text, '#pop'),
        ],
    }

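
# A hedged illustration (the helper below is ours) of the re.VERBOSE
# ``timestamp`` pattern above: whitespace and ``#`` comments inside the
# pattern are ignored by the regex engine, so it can be documented inline.
def _demo_irc_timestamp():
    ts = re.compile(IrcLogsLexer.timestamp, re.VERBOSE)
    return ts.match('[10:45] <nick> hello').group(0)  # matches '[10:45] '
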

class BBCodeLexer(RegexLexer):
    """
    A lexer that highlights BBCode(-like) syntax.

    *New in Pygments 0.6.*
    """

    name = 'BBCode'
    aliases = ['bbcode']
    mimetypes = ['text/x-bbcode']

    tokens = {
        'root': [
            (r'[^[]+', Text),
            # tag/end tag begin
            (r'\[/?\w+', Keyword, 'tag'),
            # stray bracket
            (r'\[', Text),
        ],
        'tag': [
            (r'\s+', Text),
            # attribute with value
            (r'(\w+)(=)("?[^\s"\]]+"?)',
             bygroups(Name.Attribute, Operator, String)),
            # tag argument (a la [color=green])
            (r'(=)("?[^\s"\]]+"?)',
             bygroups(Operator, String)),
            # tag end
            (r'\]', Keyword, '#pop'),
        ],
    }


class TexLexer(RegexLexer):
    """
    Lexer for the TeX and LaTeX typesetting languages.
    """

    name = 'TeX'
    aliases = ['tex', 'latex']
    filenames = ['*.tex', '*.aux', '*.toc']
    mimetypes = ['text/x-tex', 'text/x-latex']

    tokens = {
        'general': [
            (r'%.*?\n', Comment),
            (r'[{}]', Name.Builtin),
            (r'[&_^]', Name.Builtin),
        ],
        'root': [
            (r'\\\[', String.Backtick, 'displaymath'),
            (r'\\\(', String, 'inlinemath'),
            (r'\$\$', String.Backtick, 'displaymath'),
            (r'\$', String, 'inlinemath'),
            (r'\\([a-zA-Z]+|.)', Keyword, 'command'),
            include('general'),
            (r'[^\\$%&_^{}]+', Text),
        ],
        'math': [
            (r'\\([a-zA-Z]+|.)', Name.Variable),
            include('general'),
            (r'[0-9]+', Number),
            (r'[-=!+*/()\[\]]', Operator),
            (r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
        ],
        'inlinemath': [
            (r'\\\)', String, '#pop'),
            (r'\$', String, '#pop'),
            include('math'),
        ],
        'displaymath': [
            (r'\\\]', String, '#pop'),
            (r'\$\$', String, '#pop'),
            (r'\$', Name.Builtin),
            include('math'),
        ],
        'command': [
            (r'\[.*?\]', Name.Attribute),
            (r'\*', Keyword),
            (r'', Text, '#pop'),
        ],
    }

    def analyse_text(text):
        for start in ("\\documentclass", "\\input", "\\documentstyle",
                      "\\relax"):
            if text[:len(start)] == start:
                return True

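
# A hedged sketch (the helper below is ours): the 'root' state above pushes
# 'inlinemath' on ``$``, so the span between the dollar signs comes back as
# math tokens (Name.Builtin, Operator) instead of plain Text.
def _demo_tex_math_tokens():
    toks = list(TexLexer().get_tokens(r'$x+y$'))
    return [(tok, val) for tok, val in toks if tok is Name.Builtin]
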

class GroffLexer(RegexLexer):
    """
    Lexer for the (g)roff typesetting language, supporting groff
    extensions. Mainly useful for highlighting manpage sources.

    *New in Pygments 0.6.*
    """

    name = 'Groff'
    aliases = ['groff', 'nroff', 'man']
    filenames = ['*.[1234567]', '*.man']
    mimetypes = ['application/x-troff', 'text/troff']

    tokens = {
        'root': [
            (r'(?i)(\.)(\w+)', bygroups(Text, Keyword), 'request'),
            (r'\.', Punctuation, 'request'),
            # Regular characters, slurp till we find a backslash or newline
            (r'[^\\\n]*', Text, 'textline'),
        ],
        'textline': [
            include('escapes'),
            (r'[^\\\n]+', Text),
            (r'\n', Text, '#pop'),
        ],
        'escapes': [
            # groff has many ways to write escapes.
            (r'\\"[^\n]*', Comment),
            (r'\\[fn]\w', String.Escape),
            (r'\\\(..', String.Escape),
            (r'\\.\[.*\]', String.Escape),
            (r'\\.', String.Escape),
            (r'\\\n', Text, 'request'),
        ],
        'request': [
            (r'\n', Text, '#pop'),
            include('escapes'),
            (r'"[^\n"]+"', String.Double),
            (r'\d+', Number),
            (r'\S+', String),
            (r'\s+', Text),
        ],
    }

    def analyse_text(text):
        if text[:1] != '.':
            return False
        if text[:3] == '.\\"':
            return True
        if text[:4] == '.TH ':
            return True
        if text[1:3].isalnum() and text[3].isspace():
            return 0.9

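
# A hedged guessing demo (the helper below is ours): ``analyse_text`` above
# recognizes manpage sources by the conventional opening comment ``.\"`` or
# a leading request like ``.TH``.
def _demo_guess_groff():
    from pygments.lexers import guess_lexer
    return guess_lexer('.\\" my manpage\n.TH LS 1\n').name
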

class ApacheConfLexer(RegexLexer):
    """
    Lexer for configuration files following the Apache config file
    format.

    *New in Pygments 0.6.*
    """

    name = 'ApacheConf'
    aliases = ['apacheconf', 'aconf', 'apache']
    filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
    mimetypes = ['text/x-apacheconf']
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(#.*?)$', Comment),
            (r'(<[^\s>]+)(?:(\s+)(.*?))?(>)',
             bygroups(Name.Tag, Text, String, Name.Tag)),
            (r'([a-zA-Z][a-zA-Z0-9]*)(\s+)',
             bygroups(Name.Builtin, Text), 'value'),
            (r'\.+', Text),
        ],
        'value': [
            (r'$', Text, '#pop'),
            (r'[^\S\n]+', Text),
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
            (r'\d+', Number),
            (r'/([a-zA-Z0-9][a-zA-Z0-9_./-]+)', String.Other),
            (r'(on|off|none|any|all|double|email|dns|min|minimal|'
             r'os|productonly|full|emerg|alert|crit|error|warn|'
             r'notice|info|debug|registry|script|inetd|standalone|'
             r'user|group)\b', Keyword),
            (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
            (r'[^\s"]+', Text),
        ],
    }


class MoinWikiLexer(RegexLexer):
    """
    For MoinMoin (and Trac) Wiki markup.

    *New in Pygments 0.7.*
    """

    name = 'MoinMoin/Trac Wiki markup'
    aliases = ['trac-wiki', 'moin']
    filenames = []
    mimetypes = ['text/x-trac-wiki']
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'^#.*$', Comment),
            (r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next
            # Titles
            (r'^(=+)([^=]+)(=+)(\s*#.+)?$',
             bygroups(Generic.Heading, using(this), Generic.Heading, String)),
            # Literal code blocks, with optional shebang
            (r'({{{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
            (r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting
            # Lists
            (r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
            (r'^( +)([a-zivx]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
            # Other Formatting
            (r'\[\[\w+.*?\]\]', Keyword), # Macro
            (r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
             bygroups(Keyword, String, Keyword)), # Link
            (r'^----+$', Keyword), # Horizontal rules
            (r'[^\n\'\[{!_~^,|]+', Text),
            (r'\n', Text),
            (r'.', Text),
        ],
        'codeblock': [
            (r'}}}', Name.Builtin, '#pop'),
            # these blocks are allowed to be nested in Trac, but not MoinMoin
            (r'{{{', Text, '#push'),
            (r'[^{}]+', Comment.Preproc), # slurp boring text
            (r'.', Comment.Preproc), # allow loose { or }
        ],
    }


class RstLexer(RegexLexer):
    """
    For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.

    *New in Pygments 0.7.*

    Additional options accepted:

    `handlecodeblocks`
        Highlight the contents of ``.. sourcecode:: language`` and
        ``.. code:: language`` directives with a lexer for the given
        language (default: ``True``). *New in Pygments 0.8.*
    """
    name = 'reStructuredText'
    aliases = ['rst', 'rest', 'restructuredtext']
    filenames = ['*.rst', '*.rest']
    mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
    flags = re.MULTILINE

    def _handle_sourcecode(self, match):
        from pygments.lexers import get_lexer_by_name
        from pygments.util import ClassNotFound

        yield match.start(1), Punctuation, match.group(1)
        yield match.start(2), Text, match.group(2)
        yield match.start(3), Operator.Word, match.group(3)
        yield match.start(4), Punctuation, match.group(4)
        yield match.start(5), Text, match.group(5)
        yield match.start(6), Keyword, match.group(6)
        yield match.start(7), Text, match.group(7)

        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name(match.group(6).strip())
            except ClassNotFound:
                pass
        indention = match.group(8)
        indention_size = len(indention)
        code = (indention + match.group(9) + match.group(10) + match.group(11))

        # no lexer for this language. handle it like it was a code block
        if lexer is None:
            yield match.start(8), String, code
            return

        # highlight the lines with the lexer.
        ins = []
        codelines = code.splitlines(True)
        code = ''
        for line in codelines:
            if len(line) > indention_size:
                ins.append((len(code), [(0, Text, line[:indention_size])]))
                code += line[indention_size:]
            else:
                code += line
        for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)):
            yield item

    tokens = {
        'root': [
            # Heading with overline
            (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
             r'(.+)(\n)(\1)(\n)',
             bygroups(Generic.Heading, Text, Generic.Heading,
                      Text, Generic.Heading, Text)),
            # Plain heading
            (r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
             r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
             bygroups(Generic.Heading, Text, Generic.Heading, Text)),
            # Bulleted lists
            (r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Numbered lists
            (r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            (r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Numbered, but keep words at BOL from becoming lists
            (r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)',
             bygroups(Text, Number, using(this, state='inline'))),
            (r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Line blocks
            (r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
             bygroups(Text, Operator, using(this, state='inline'))),
            # Sourcecode directives
            (r'^( *\.\.)(\s*)((?:source)?code)(::)([ \t]*)([^\n]+)'
             r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
             _handle_sourcecode),
            # A directive
            (r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
             bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
                      using(this, state='inline'))),
            # A reference target
            (r'^( *\.\.)(\s*)([\w\t ]+:)(.*?)$',
             bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
            # A footnote target
            (r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
             bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
            # A substitution definition
            (r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
             bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
                      Punctuation, Text, using(this, state='inline'))),
            # Comments
            (r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
            # Field lists
            (r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)),
            (r'^( *)(:.*?:)([ \t]+)(.*?)$',
             bygroups(Text, Name.Class, Text, Name.Function)),
            # Definition lists
            (r'^([^ ].*(?<!::)\n)((?:(?: +.*)\n)+)',
             bygroups(using(this, state='inline'), using(this, state='inline'))),
            # Code blocks
            (r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*|)\n)+)',
             bygroups(String.Escape, Text, String, String, Text, String)),
            include('inline'),
        ],
        'inline': [
            (r'\\.', Text), # escape
            (r'``', String, 'literal'), # code
            (r'(`.+?)(<.+?>)(`__?)', # reference with inline target
             bygroups(String, String.Interpol, String)),
            (r'`.+?`__?', String), # reference
            (r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
             bygroups(Name.Variable, Name.Attribute)), # role
            (r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
             bygroups(Name.Attribute, Name.Variable)), # role (content first)
            (r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
            (r'\*.+?\*', Generic.Emph), # Emphasis
            (r'\[.*?\]_', String), # Footnote or citation
            (r'<.+?>', Name.Tag), # Hyperlink
            (r'[^\\\n\[*`:]+', Text),
            (r'.', Text),
        ],
        'literal': [
            (r'[^`\\]+', String),
            (r'\\.', String),
            (r'``', String, '#pop'),
            (r'[`\\]', String),
        ],
    }

    def __init__(self, **options):
        self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
        RegexLexer.__init__(self, **options)

    def analyse_text(text):
        if text[:2] == '..' and text[2:3] != '.':
            return 0.3
        p1 = text.find("\n")
        p2 = text.find("\n", p1 + 1)
        if (p2 > -1 and                # has two lines
            p1 * 2 + 1 == p2 and       # they are the same length
            text[p1+1] in '-=' and     # the next line both starts and ends with
            text[p1+1] == text[p2-1]): # ...a sufficiently high header
            return 0.5

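
# A hedged usage sketch (the helper below is ours): with ``handlecodeblocks``
# left at its default of True, the body of a ``.. sourcecode:: python``
# directive is re-lexed by the named language's lexer via
# ``_handle_sourcecode`` above.
def _demo_rst_codeblock():
    text = '.. sourcecode:: python\n\n   print "hi"\n\n'
    return list(RstLexer().get_tokens(text))
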

class VimLexer(RegexLexer):
    """
    Lexer for VimL script files.

    *New in Pygments 0.8.*
    """
    name = 'VimL'
    aliases = ['vim']
    filenames = ['*.vim', '.vimrc']
    mimetypes = ['text/x-vim']

    tokens = {
        'root': [
            # Who decided that doublequote was a good comment character??
            (r'^\s*".*', Comment),
            (r'(?<=\s)"[^\-:.%#=*].*', Comment),

            (r'[ \t]+', Text),
            # TODO: regexes can have other delims
            (r'/(\\\\|\\/|[^\n/])*/', String.Regex),
            (r'"(\\\\|\\"|[^\n"])*"', String.Double),
            (r"'(\\\\|\\'|[^\n'])*'", String.Single),
            (r'-?\d+', Number),
            (r'#[0-9a-f]{6}', Number.Hex),
            (r'^:', Punctuation),
            (r'[()<>+=!|,~-]', Punctuation), # Inexact list. Looks decent.
            (r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b',
             Keyword),
            (r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin),
            (r'\b\w+\b', Name.Other), # These are postprocessed below
            (r'.', Text),
        ],
    }

    def __init__(self, **options):
        from pygments.lexers._vimbuiltins import command, option, auto
        self._cmd = command
        self._opt = option
        self._aut = auto
        RegexLexer.__init__(self, **options)

    def is_in(self, w, mapping):
        r"""
        It's kind of difficult to decide if something might be a keyword
        in VimL because it allows you to abbreviate them. In fact,
        'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are
        valid ways to call it so rather than making really awful regexps
        like::

            \bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b

        we match `\b\w+\b` and then call is_in() on those tokens. See
        `scripts/get_vimkw.py` for how the lists are extracted.
        """
        p = bisect(mapping, (w,))
        if p > 0:
            if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \
               mapping[p-1][1][:len(w)] == w: return True
        if p < len(mapping):
            return mapping[p][0] == w[:len(mapping[p][0])] and \
                   mapping[p][1][:len(w)] == w
        return False

    def get_tokens_unprocessed(self, text):
        # TODO: builtins are only subsequent tokens on lines
        #       and 'keywords' only happen at the beginning except
        #       for :au ones
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name.Other:
                if self.is_in(value, self._cmd):
                    yield index, Keyword, value
                elif self.is_in(value, self._opt) or \
                     self.is_in(value, self._aut):
                    yield index, Name.Builtin, value
                else:
                    yield index, Text, value
            else:
                yield index, token, value

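
# A hedged sketch of the bisect trick documented in ``is_in`` above (the
# mapping here is made up, not taken from _vimbuiltins): entries are sorted
# (shortest_form, full_name) pairs, and the neighbours of the insertion
# point are prefix-checked.
def _demo_is_in():
    mapping = [('ab', 'abbreviate'), ('se', 'set')]
    lexer = VimLexer()
    return (lexer.is_in('abbre', mapping),  # True: prefix of 'abbreviate'
            lexer.is_in('abz', mapping))    # False: matches no entry
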

class GettextLexer(RegexLexer):
    """
    Lexer for Gettext catalog files.

    *New in Pygments 0.9.*
    """
    name = 'Gettext Catalog'
    aliases = ['pot', 'po']
    filenames = ['*.pot', '*.po']
    mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext']

    tokens = {
        'root': [
            (r'^#,\s.*?$', Keyword.Type),
            (r'^#:\s.*?$', Keyword.Declaration),
            #(r'^#$', Comment),
            (r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single),
            (r'^(")([\w-]*:)(.*")$',
             bygroups(String, Name.Property, String)),
            (r'^".*"$', String),
            (r'^(msgid|msgid_plural|msgstr)(\s+)(".*")$',
             bygroups(Name.Variable, Text, String)),
            (r'^(msgstr\[)(\d)(\])(\s+)(".*")$',
             bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)),
        ]
    }


class SquidConfLexer(RegexLexer):
    """
    Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.

    *New in Pygments 0.9.*
    """

    name = 'SquidConf'
    aliases = ['squidconf', 'squid.conf', 'squid']
    filenames = ['squid.conf']
    mimetypes = ['text/x-squidconf']
    flags = re.IGNORECASE

    keywords = [ "acl", "always_direct", "announce_host",
                 "announce_period", "announce_port", "announce_to",
                 "anonymize_headers", "append_domain", "as_whois_server",
                 "auth_param_basic", "authenticate_children",
                 "authenticate_program", "authenticate_ttl", "broken_posts",
                 "buffered_logs", "cache_access_log", "cache_announce",
                 "cache_dir", "cache_dns_program", "cache_effective_group",
                 "cache_effective_user", "cache_host", "cache_host_acl",
                 "cache_host_domain", "cache_log", "cache_mem",
                 "cache_mem_high", "cache_mem_low", "cache_mgr",
                 "cachemgr_passwd", "cache_peer", "cache_peer_access",
                 "cache_replacement_policy", "cache_stoplist",
                 "cache_stoplist_pattern", "cache_store_log", "cache_swap",
                 "cache_swap_high", "cache_swap_log", "cache_swap_low",
                 "client_db", "client_lifetime", "client_netmask",
                 "connect_timeout", "coredump_dir", "dead_peer_timeout",
                 "debug_options", "delay_access", "delay_class",
                 "delay_initial_bucket_level", "delay_parameters",
                 "delay_pools", "deny_info", "dns_children", "dns_defnames",
                 "dns_nameservers", "dns_testnames", "emulate_httpd_log",
                 "err_html_text", "fake_user_agent", "firewall_ip",
                 "forwarded_for", "forward_snmpd_port", "fqdncache_size",
                 "ftpget_options", "ftpget_program", "ftp_list_width",
                 "ftp_passive", "ftp_user", "half_closed_clients",
                 "header_access", "header_replace", "hierarchy_stoplist",
                 "high_response_time_warning", "high_page_fault_warning",
                 "htcp_port", "http_access", "http_anonymizer", "httpd_accel",
                 "httpd_accel_host", "httpd_accel_port",
                 "httpd_accel_uses_host_header", "httpd_accel_with_proxy",
                 "http_port", "http_reply_access", "icp_access",
                 "icp_hit_stale", "icp_port", "icp_query_timeout",
                 "ident_lookup", "ident_lookup_access", "ident_timeout",
                 "incoming_http_average", "incoming_icp_average",
                 "inside_firewall", "ipcache_high", "ipcache_low",
                 "ipcache_size", "local_domain", "local_ip", "logfile_rotate",
                 "log_fqdn", "log_icp_queries", "log_mime_hdrs",
                 "maximum_object_size", "maximum_single_addr_tries",
                 "mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
                 "mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
                 "memory_pools_limit", "memory_replacement_policy",
                 "mime_table", "min_http_poll_cnt", "min_icp_poll_cnt",
                 "minimum_direct_hops", "minimum_object_size",
                 "minimum_retry_timeout", "miss_access", "negative_dns_ttl",
                 "negative_ttl", "neighbor_timeout", "neighbor_type_domain",
                 "netdb_high", "netdb_low", "netdb_ping_period",
                 "netdb_ping_rate", "never_direct", "no_cache",
                 "passthrough_proxy", "pconn_timeout", "pid_filename",
                 "pinger_program", "positive_dns_ttl", "prefer_direct",
                 "proxy_auth", "proxy_auth_realm", "query_icmp", "quick_abort",
                 "quick_abort", "quick_abort_max", "quick_abort_min",
                 "quick_abort_pct", "range_offset_limit", "read_timeout",
                 "redirect_children", "redirect_program",
                 "redirect_rewrites_host_header", "reference_age",
                 "reference_age", "refresh_pattern", "reload_into_ims",
                 "request_body_max_size", "request_size", "request_timeout",
                 "shutdown_lifetime", "single_parent_bypass",
                 "siteselect_timeout", "snmp_access", "snmp_incoming_address",
                 "snmp_port", "source_ping", "ssl_proxy",
                 "store_avg_object_size", "store_objects_per_bucket",
                 "strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
                 "tcp_incoming_address", "tcp_outgoing_address",
                 "tcp_recv_bufsize", "test_reachability", "udp_hit_obj",
                 "udp_hit_obj_size", "udp_incoming_address",
                 "udp_outgoing_address", "unique_hostname", "unlinkd_program",
                 "uri_whitespace", "useragent_log", "visible_hostname",
                 "wais_relay", "wais_relay_host", "wais_relay_port",
                 ]

    opts = [ "proxy-only", "weight", "ttl", "no-query", "default",
             "round-robin", "multicast-responder", "on", "off", "all",
             "deny", "allow", "via", "parent", "no-digest", "heap", "lru",
             "realm", "children", "credentialsttl", "none", "disable",
             "offline_toggle", "diskd", "q1", "q2",
             ]

    actions = [ "shutdown", "info", "parameter", "server_list",
                "client_list", r'squid\.conf',
                ]

    actions_stats = [ "objects", "vm_objects", "utilization",
                      "ipcache", "fqdncache", "dns", "redirector", "io",
                      "reply_headers", "filedescriptors", "netdb",
                      ]

    actions_log = [ "status", "enable", "disable", "clear"]

    acls = [ "url_regex", "urlpath_regex", "referer_regex", "port",
             "proto", "req_mime_type", "rep_mime_type", "method",
             "browser", "user", "src", "dst", "time", "dstdomain", "ident",
             ]

    ip_re = r'\b(?:\d{1,3}\.){3}\d{1,3}\b'

    def makelistre(list):
        return r'\b(?:'+'|'.join(list)+r')\b'

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'#', Comment, 'comment'),
            (makelistre(keywords), Keyword),
            (makelistre(opts), Name.Constant),
            # Actions
            (makelistre(actions), String),
            (r'stats/'+makelistre(actions_stats), String),
            (r'log/'+makelistre(actions_log)+r'=', String),
            (makelistre(acls), Keyword),
            (ip_re+r'(?:/(?:'+ip_re+r')|\d+)?', Number),
            (r'\b\d+\b', Number),
            (r'\S+', Text),
        ],
        'comment': [
            (r'\s*TAG:.*', String.Escape, '#pop'),
            (r'.*', Comment, '#pop'),
        ],
    }

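
# A hedged illustration (the helper below is ours): ``makelistre`` above
# simply joins a word list into a single alternation wrapped in word
# boundaries, so whole directives match but longer words do not.
def _demo_makelistre():
    pattern = re.compile(r'\b(?:' + '|'.join(['acl', 'http_access']) + r')\b')
    return bool(pattern.match('acl')), bool(pattern.match('aclx'))
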

class DebianControlLexer(RegexLexer):
    """
    Lexer for Debian ``control`` files and ``apt-cache show <pkg>`` outputs.

    *New in Pygments 0.9.*
    """
    name = 'Debian Control file'
    aliases = ['control']
    filenames = ['control']

    tokens = {
        'root': [
            (r'^(Description)', Keyword, 'description'),
            (r'^(Maintainer)(:\s*)', bygroups(Keyword, Text), 'maintainer'),
            (r'^((Build-)?Depends)', Keyword, 'depends'),
            (r'^((?:Python-)?Version)(:\s*)([^\s]+)$',
             bygroups(Keyword, Text, Number)),
            (r'^((?:Installed-)?Size)(:\s*)([^\s]+)$',
             bygroups(Keyword, Text, Number)),
            (r'^(MD5Sum|SHA1|SHA256)(:\s*)([^\s]+)$',
             bygroups(Keyword, Text, Number)),
            (r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$',
             bygroups(Keyword, Whitespace, String)),
        ],
        'maintainer': [
            (r'<[^>]+>', Generic.Strong),
            (r'<[^>]+>$', Generic.Strong, '#pop'),
            (r',\n?', Text),
            (r'.', Text),
        ],
        'description': [
            (r'(.*)(Homepage)(: )([^\s]+)', bygroups(Text, String, Name, Name.Class)),
            (r':.*\n', Generic.Strong),
            (r' .*\n', Text),
            ('', Text, '#pop'),
        ],
        'depends': [
            (r':\s*', Text),
            (r'(\$)(\{)(\w+\s*:\s*\w+)', bygroups(Operator, Text, Name.Entity)),
            (r'\(', Text, 'depend_vers'),
            (r',', Text),
            (r'\|', Text),
            (r'[\s]+', Text),
            (r'[}\)]\s*$', Text, '#pop'),
            (r'[}]', Text),
            (r'[^,]$', Name.Function, '#pop'),
            (r'([\+\.a-zA-Z0-9-][\s\n]*)', Name.Function),
            (r'\[.*?\]', Name.Entity),
        ],
        'depend_vers': [
            (r'\),', Text, '#pop'),
            (r'\)[^,]', Text, '#pop:2'),
            (r'([><=]+)(\s*)([^\)]+)', bygroups(Operator, Text, Number)),
        ],
    }


class YamlLexerContext(LexerContext):
    """Indentation context for the YAML lexer."""

    def __init__(self, *args, **kwds):
        super(YamlLexerContext, self).__init__(*args, **kwds)
        self.indent_stack = []
        self.indent = -1
        self.next_indent = 0
        self.block_scalar_indent = None


class YamlLexer(ExtendedRegexLexer):
    """
    Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
    language.

    *New in Pygments 0.11.*
    """

    name = 'YAML'
    aliases = ['yaml']
    filenames = ['*.yaml', '*.yml']
    mimetypes = ['text/x-yaml']

    def something(token_class):
        """Do not produce empty tokens."""
        def callback(lexer, match, context):
            text = match.group()
            if not text:
                return
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def reset_indent(token_class):
        """Reset the indentation levels."""
        def callback(lexer, match, context):
            text = match.group()
            context.indent_stack = []
            context.indent = -1
            context.next_indent = 0
            context.block_scalar_indent = None
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def save_indent(token_class, start=False):
        """Save a possible indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            extra = ''
            if start:
                context.next_indent = len(text)
                if context.next_indent < context.indent:
                    while context.next_indent < context.indent:
                        context.indent = context.indent_stack.pop()
                    if context.next_indent > context.indent:
                        extra = text[context.indent:]
                        text = text[:context.indent]
            else:
                context.next_indent += len(text)
            if text:
                yield match.start(), token_class, text
            if extra:
                yield match.start()+len(text), token_class.Error, extra
            context.pos = match.end()
        return callback

    def set_indent(token_class, implicit=False):
        """Set the previously saved indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            if context.indent < context.next_indent:
                context.indent_stack.append(context.indent)
                context.indent = context.next_indent
            if not implicit:
                context.next_indent += len(text)
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def set_block_scalar_indent(token_class):
        """Set an explicit indentation level for a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            context.block_scalar_indent = None
            if not text:
                return
            increment = match.group(1)
            if increment:
                current_indent = max(context.indent, 0)
                increment = int(increment)
                context.block_scalar_indent = current_indent + increment
            if text:
                yield match.start(), token_class, text
                context.pos = match.end()
        return callback

    def parse_block_scalar_empty_line(indent_token_class, content_token_class):
        """Process an empty line in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if (context.block_scalar_indent is None or
                    len(text) <= context.block_scalar_indent):
                if text:
                    yield match.start(), indent_token_class, text
            else:
                indentation = text[:context.block_scalar_indent]
                content = text[context.block_scalar_indent:]
                yield match.start(), indent_token_class, indentation
                yield (match.start()+context.block_scalar_indent,
                       content_token_class, content)
            context.pos = match.end()
        return callback

    def parse_block_scalar_indent(token_class):
        """Process indentation spaces in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if context.block_scalar_indent is None:
                if len(text) <= max(context.indent, 0):
                    context.stack.pop()
                    context.stack.pop()
                    return
                context.block_scalar_indent = len(text)
            else:
                if len(text) < context.block_scalar_indent:
                    context.stack.pop()
                    context.stack.pop()
                    return
            if text:
                yield match.start(), token_class, text
                context.pos = match.end()
        return callback

    def parse_plain_scalar_indent(token_class):
        """Process indentation spaces in a plain scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if len(text) <= context.indent:
                context.stack.pop()
                context.stack.pop()
                return
            if text:
                yield match.start(), token_class, text
                context.pos = match.end()
        return callback

    tokens = {
        'root': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Text),
            # line breaks
            (r'\n+', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # the '%YAML' directive
            (r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
            # the %TAG directive
            (r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
            # document start and document end indicators
            (r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
             'block-line'),
            # indentation spaces
            (r'[ ]*(?![ \t\n\r\f\v]|$)', save_indent(Text, start=True),
             ('block-line', 'indentation')),
        ],

        # trailing whitespaces after directives or a block scalar indicator
        'ignored-line': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # line break
            (r'\n', Text, '#pop:2'),
        ],

        # the %YAML directive
        'yaml-directive': [
            # the version number
            (r'([ ]+)([0-9]+\.[0-9]+)',
             bygroups(Text, Number), 'ignored-line'),
        ],

        # the %TAG directive
        'tag-directive': [
            # a tag handle and the corresponding prefix
            (r'([ ]+)(!|![0-9A-Za-z_-]*!)'
             r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)',
             bygroups(Text, Keyword.Type, Text, Keyword.Type),
             'ignored-line'),
        ],

        # block scalar indicators and indentation spaces
        'indentation': [
            # trailing whitespaces are ignored
            (r'[ ]*$', something(Text), '#pop:2'),
            # whitespaces preceding block collection indicators
            (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
            # block collection indicators
            (r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
            # the beginning of a block line
            (r'[ ]*', save_indent(Text), '#pop'),
        ],

        # an indented line in the block context
        'block-line': [
            # the line end
            (r'[ ]*(?=#|$)', something(Text), '#pop'),
            # whitespaces separating tokens
            (r'[ ]+', Text),
            # tags, anchors and aliases,
            include('descriptors'),
            # block collections and scalars
            include('block-nodes'),
            # flow collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])',
             something(Name.Variable),
             'plain-scalar-in-block-context'),
        ],

        # tags, anchors, aliases
        'descriptors': [
            # a full-form tag
            (r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Keyword.Type),
            # a tag in the form '!', '!suffix' or '!handle!suffix'
            (r'!(?:[0-9A-Za-z_-]+)?'
             r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Keyword.Type),
            # an anchor
            (r'&[0-9A-Za-z_-]+', Name.Label),
            # an alias
            (r'\*[0-9A-Za-z_-]+', Name.Variable),
        ],

        # block collections and scalars
        'block-nodes': [
            # implicit key
            (r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
            # literal and folded scalars
            (r'[|>]', Punctuation.Indicator,
             ('block-scalar-content', 'block-scalar-header')),
        ],

        # flow collections and quoted scalars
        'flow-nodes': [
            # a flow sequence
            (r'\[', Punctuation.Indicator, 'flow-sequence'),
            # a flow mapping
            (r'\{', Punctuation.Indicator, 'flow-mapping'),
            # a single-quoted scalar
            (r'\'', String, 'single-quoted-scalar'),
            # a double-quoted scalar
            (r'\"', String, 'double-quoted-scalar'),
        ],

        # the content of a flow collection
        'flow-collection': [
            # whitespaces
            (r'[ ]+', Text),
            # line breaks
            (r'\n+', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # simple indicators
            (r'[?:,]', Punctuation.Indicator),
            # tags, anchors and aliases
            include('descriptors'),
            # nested collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])',
             something(Name.Variable),
             'plain-scalar-in-flow-context'),
        ],

        # a flow sequence indicated by '[' and ']'
        'flow-sequence': [
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\]', Punctuation.Indicator, '#pop'),
        ],

        # a flow mapping indicated by '{' and '}'
        'flow-mapping': [
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\}', Punctuation.Indicator, '#pop'),
        ],

        # block scalar lines
        'block-scalar-content': [
            # line break
            (r'\n', Text),
            # empty line
            (r'^[ ]+$',
             parse_block_scalar_empty_line(Text, Name.Constant)),
            # indentation spaces (we may leave the state here)
            (r'^[ ]*', parse_block_scalar_indent(Text)),
            # line content
            (r'[^\n\r\f\v]+', Name.Constant),
        ],

        # the content of a literal or folded scalar
        'block-scalar-header': [
            # indentation indicator followed by chomping flag
            (r'([1-9])?[+-]?(?=[ ]|$)',
             set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
            # chomping flag followed by indentation indicator
            (r'[+-]?([1-9])?(?=[ ]|$)',
             set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
        ],

        # ignored and regular whitespaces in quoted scalars
        'quoted-scalar-whitespaces': [
            # leading and trailing whitespaces are ignored
            (r'^[ ]+|[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
        ],

        # single-quoted scalars
        'single-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of the quote character
            (r'\'\'', String.Escape),
            # regular non-whitespace characters
            (r'[^ \t\n\r\f\v\']+', String),
            # the closing quote
            (r'\'', String, '#pop'),
        ],

        # double-quoted scalars
        'double-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of special characters
            (r'\\[0abt\tn\nvfre "\\N_LP]', String),
            # escape codes
            (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
             String.Escape),
            # regular non-whitespace characters
            (r'[^ \t\n\r\f\v\"\\]+', String),
            # the closing quote
            (r'"', String, '#pop'),
        ],

        # the beginning of a new line while scanning a plain scalar
        'plain-scalar-in-block-context-new-line': [
            # empty lines
            (r'^[ ]+$', Text),
            # line breaks
            (r'\n+', Text),
            # document start and document end indicators
            (r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
            # indentation spaces (we may leave the block line state here)
            (r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
        ],

        # a plain scalar in the block context
        'plain-scalar-in-block-context': [
            # the scalar ends with the ':' indicator
            (r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
            # the scalar ends with whitespaces followed by a comment
            (r'[ ]+(?=#)', Text, '#pop'),
            # trailing whitespaces are ignored
            (r'[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
            # other whitespaces are a part of the value
            (r'[ ]+', Literal.Scalar.Plain),
            # regular non-whitespace characters
            (r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+', Literal.Scalar.Plain),
        ],

        # a plain scalar in the flow context
        'plain-scalar-in-flow-context': [
            # the scalar ends with an indicator character
            (r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
            # the scalar ends with a comment
            (r'[ ]+(?=#)', Text, '#pop'),
            # leading and trailing whitespaces are ignored
            (r'^[ ]+|[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
            # regular non-whitespace characters
            (r'[^ \t\n\r\f\v,:?\[\]{}]+', Name.Variable),
        ],
    }

    def get_tokens_unprocessed(self, text=None, context=None):
        if context is None:
            context = YamlLexerContext(text, 0)
        return super(YamlLexer, self).get_tokens_unprocessed(text, context)

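
# A hedged usage sketch (the helper below is ours): each call builds a fresh
# YamlLexerContext, which carries the indentation stack that the callbacks
# above manipulate while tokenizing.
def _demo_yaml_tokens():
    sample = 'foo:\n  - bar\n  - baz\n'
    return list(YamlLexer().get_tokens(sample))
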

class LighttpdConfLexer(RegexLexer):
    """
    Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.

    *New in Pygments 0.11.*
    """
    name = 'Lighttpd configuration file'
    aliases = ['lighty', 'lighttpd']
    filenames = []
    mimetypes = ['text/x-lighttpd-conf']

    tokens = {
        'root': [
            (r'#.*\n', Comment.Single),
            (r'/\S*', Name), # pathname
            (r'[a-zA-Z._-]+', Keyword),
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
            (r'[0-9]+', Number),
            (r'=>|=~|\+=|==|=|\+', Operator),
            (r'\$[A-Z]+', Name.Builtin),
            (r'[(){}\[\],]', Punctuation),
            (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
            (r'\s+', Text),
        ],
    }


class NginxConfLexer(RegexLexer):
    """
    Lexer for `Nginx <http://nginx.net/>`_ configuration files.

    *New in Pygments 0.11.*
    """
    name = 'Nginx configuration file'
    aliases = ['nginx']
    filenames = []
    mimetypes = ['text/x-nginx-conf']

    tokens = {
        'root': [
            (r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
            (r'[^\s;#]+', Keyword, 'stmt'),
            include('base'),
        ],
        'block': [
            (r'}', Punctuation, '#pop:2'),
            (r'[^\s;#]+', Keyword.Namespace, 'stmt'),
            include('base'),
        ],
        'stmt': [
            (r'{', Punctuation, 'block'),
            (r';', Punctuation, '#pop'),
            include('base'),
        ],
        'base': [
            (r'#.*\n', Comment.Single),
            (r'on|off', Name.Constant),
            (r'\$[^\s;#()]+', Name.Variable),
            (r'([a-z0-9.-]+)(:)([0-9]+)',
             bygroups(Name, Punctuation, Number.Integer)),
            (r'[a-z-]+/[a-z-+]+', String), # mimetype
            #(r'[a-zA-Z._-]+', Keyword),
            (r'[0-9]+[km]?\b', Number.Integer),
            (r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
            (r'[:=~]', Punctuation),
            (r'[^\s;#{}$]+', String), # catch all
            (r'/[^\s;#]*', Name), # pathname
            (r'\s+', Text),
            (r'[$;]', Text),  # leftover characters
        ],
    }


class CMakeLexer(RegexLexer):
    """
    Lexer for `CMake <http://cmake.org/Wiki/CMake>`_ files.

    *New in Pygments 1.2.*
    """
    name = 'CMake'
    aliases = ['cmake']
    filenames = ['*.cmake']
    mimetypes = ['text/x-cmake']

    tokens = {
        'root': [
            #(r'(ADD_CUSTOM_COMMAND|ADD_CUSTOM_TARGET|ADD_DEFINITIONS|'
            # r'ADD_DEPENDENCIES|ADD_EXECUTABLE|ADD_LIBRARY|ADD_SUBDIRECTORY|'
            # r'ADD_TEST|AUX_SOURCE_DIRECTORY|BUILD_COMMAND|BUILD_NAME|'
            # r'CMAKE_MINIMUM_REQUIRED|CONFIGURE_FILE|CREATE_TEST_SOURCELIST|'
            # r'ELSE|ELSEIF|ENABLE_LANGUAGE|ENABLE_TESTING|ENDFOREACH|'
            # r'ENDFUNCTION|ENDIF|ENDMACRO|ENDWHILE|EXEC_PROGRAM|'
            # r'EXECUTE_PROCESS|EXPORT_LIBRARY_DEPENDENCIES|FILE|FIND_FILE|'
            # r'FIND_LIBRARY|FIND_PACKAGE|FIND_PATH|FIND_PROGRAM|FLTK_WRAP_UI|'
            # r'FOREACH|FUNCTION|GET_CMAKE_PROPERTY|GET_DIRECTORY_PROPERTY|'
            # r'GET_FILENAME_COMPONENT|GET_SOURCE_FILE_PROPERTY|'
            # r'GET_TARGET_PROPERTY|GET_TEST_PROPERTY|IF|INCLUDE|'
            # r'INCLUDE_DIRECTORIES|INCLUDE_EXTERNAL_MSPROJECT|'
            # r'INCLUDE_REGULAR_EXPRESSION|INSTALL|INSTALL_FILES|'
            # r'INSTALL_PROGRAMS|INSTALL_TARGETS|LINK_DIRECTORIES|'
            # r'LINK_LIBRARIES|LIST|LOAD_CACHE|LOAD_COMMAND|MACRO|'
            # r'MAKE_DIRECTORY|MARK_AS_ADVANCED|MATH|MESSAGE|OPTION|'
            # r'OUTPUT_REQUIRED_FILES|PROJECT|QT_WRAP_CPP|QT_WRAP_UI|REMOVE|'
            # r'REMOVE_DEFINITIONS|SEPARATE_ARGUMENTS|SET|'
            # r'SET_DIRECTORY_PROPERTIES|SET_SOURCE_FILES_PROPERTIES|'
            # r'SET_TARGET_PROPERTIES|SET_TESTS_PROPERTIES|SITE_NAME|'
            # r'SOURCE_GROUP|STRING|SUBDIR_DEPENDS|SUBDIRS|'
            # r'TARGET_LINK_LIBRARIES|TRY_COMPILE|TRY_RUN|UNSET|'
            # r'USE_MANGLED_MESA|UTILITY_SOURCE|VARIABLE_REQUIRES|'
            # r'VTK_MAKE_INSTANTIATOR|VTK_WRAP_JAVA|VTK_WRAP_PYTHON|'
            # r'VTK_WRAP_TCL|WHILE|WRITE_FILE|'
            # r'COUNTARGS)\b', Name.Builtin, 'args'),
            (r'\b([A-Za-z_]+)([ \t]*)(\()', bygroups(Name.Builtin, Text,
                                                     Punctuation), 'args'),
            include('keywords'),
            include('ws'),
        ],
        'args': [
            (r'\(', Punctuation, '#push'),
            (r'\)', Punctuation, '#pop'),
            (r'(\${)(.+?)(})', bygroups(Operator, Name.Variable, Operator)),
            (r'(?s)".*?"', String.Double),
            (r'[^\)$"# \t\n]+', String),
            (r'\n', Text), # explicitly legal
            include('keywords'),
            include('ws'),
        ],
        'keywords': [
            (r'\b(WIN32|UNIX|APPLE|CYGWIN|BORLAND|MINGW|MSVC|MSVC_IDE|MSVC60|'
             r'MSVC70|MSVC71|MSVC80|MSVC90)\b', Keyword),
        ],
        'ws': [
            (r'[ \t]+', Text),
            (r'#.+\n', Comment),
        ],
    }