pygments/style.py
# -*- coding: utf-8 -*-
"""
    pygments.style
    ~~~~~~~~~~~~~~

    Basic style object.

    :copyright: 2006 by Georg Brandl.
    :license: GNU LGPL, see LICENSE for more details.
"""

from pygments.token import Token, STANDARD_TYPES


class StyleMeta(type):

    def __new__(mcs, name, bases, dct):
        obj = type.__new__(mcs, name, bases, dct)
        for token in STANDARD_TYPES:
            if token not in obj.styles:
                obj.styles[token] = ''

        def colorformat(text):
            if text[0:1] == '#':
                col = text[1:]
                if len(col) == 6:
                    return col
                elif len(col) == 3:
                    return col[0]+'0'+col[1]+'0'+col[2]+'0'
            elif text == '':
                return ''
            assert False, "wrong color format %r" % text

        _styles = obj._styles = {}

        for ttype in obj.styles:
            for token in ttype.split():
                if token in _styles:
                    continue
                ndef = _styles.get(token.parent, None)
                styledefs = obj.styles.get(token, '').split()
                if not ndef or token is None:
                    ndef = ['', 0, 0, 0, '', '']
                elif 'noinherit' in styledefs and token is not Token:
                    ndef = _styles[Token][:]
                else:
                    ndef = ndef[:]
                _styles[token] = ndef
                for styledef in obj.styles.get(token, '').split():
                    if styledef == 'noinherit':
                        pass
                    elif styledef == 'bold':
                        ndef[1] = 1
                    elif styledef == 'nobold':
                        ndef[1] = 0
                    elif styledef == 'italic':
                        ndef[2] = 1
                    elif styledef == 'noitalic':
                        ndef[2] = 0
                    elif styledef == 'underline':
                        ndef[3] = 1
                    elif styledef == 'nounderline':
                        ndef[3] = 0
                    elif styledef[:3] == 'bg:':
                        ndef[4] = colorformat(styledef[3:])
                    elif styledef[:7] == 'border:':
                        ndef[5] = colorformat(styledef[7:])
                    else:
                        ndef[0] = colorformat(styledef)
        return obj

    def style_for_token(cls, token):
        t = cls._styles[token]
        return {
            'color': t[0] or None,
            'bold': bool(t[1]),
            'italic': bool(t[2]),
            'underline': bool(t[3]),
            'bgcolor': t[4] or None,
            'border': t[5] or None
        }

    def list_styles(cls):
        return list(cls)

    def __iter__(cls):
        for token in cls._styles:
            yield token, cls.style_for_token(token)

    def __len__(cls):
        return len(cls._styles)


class Style(object):
    __metaclass__ = StyleMeta

    #: overall background color (``None`` means transparent)
    background_color = '#ffffff'

    #: Style definitions for individual token types.
    styles = {}


pygments/lexer.py (partial: the module header, LexerMeta and the first part of
the Lexer base class are lost in the compiled data that preceded this fragment)

    def __repr__(self):
        if self.options:
            return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
                                                     self.options)
        else:
            return '<pygments.lexers.%s>' % self.__class__.__name__

    def analyse_text(text):
        """
        Has to return a float between ``0`` and ``1`` that indicates
        if a lexer wants to highlight this text. Used by ``guess_lexer``.
        If this method returns ``0`` it won't highlight it in any case, if
        it returns ``1`` highlighting with this lexer is guaranteed.

        The `LexerMeta` metaclass automatically wraps this function so
        that it works like a static method (no ``self`` or ``cls``
        parameter) and the return value is automatically converted to
        `float`. If the return value is an object that is boolean `False`
        it's the same as if the return value was ``0.0``.
        """

    def get_tokens(self, text):
        """
        Return an iterable of (tokentype, value) pairs generated from ``text``.

        Also preprocess the text, i.e. expand tabs and strip it if wanted.
""" text = type(text)('\n').join(text.splitlines()) if self.stripall: text = text.strip() elif self.stripnl: text = text.strip('\n') if self.tabsize > 0: text = text.expandtabs(self.tabsize) if not text.endswith('\n'): text += '\n' for i, t, v in self.get_tokens_unprocessed(text): yield t, v def get_tokens_unprocessed(self, text): """ Return an iterable of (tokentype, value) pairs. In subclasses, implement this method as a generator to maximize effectiveness. """ raise NotImplementedError class DelegatingLexer(Lexer): """ This lexer takes two lexer as arguments. A root lexer and a language lexer. First everything is scanned using the language lexer, afterwards all ``Other`` tokens are lexed using the root lexer. The lexers from the ``template`` lexer package use this base lexer. """ def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options): self.root_lexer = _root_lexer(**options) self.language_lexer = _language_lexer(**options) self.needle = _needle Lexer.__init__(self, **options) def get_tokens_unprocessed(self, text): buffered = '' insertions = [] lng_buffer = [] for i, t, v in self.language_lexer.get_tokens_unprocessed(text): if t is self.needle: if lng_buffer: insertions.append((len(buffered), lng_buffer)) lng_buffer = [] buffered += v else: lng_buffer.append((i, t, v)) if lng_buffer: insertions.append((len(buffered), lng_buffer)) return do_insertions(insertions, self.root_lexer.get_tokens_unprocessed(buffered)) #------------------------------------------------------------------------------- # RegexLexer and ExtendedRegexLexer # class include(str): """ Indicates that a state should include rules from another state. """ pass class combined(tuple): """ Indicates a state combined from multiple states. """ def __new__(cls, *args): return tuple.__new__(cls, args) def __init__(self, *args): tuple.__init__(self, args) class _PseudoMatch(object): """ A pseudo match object constructed from a string. """ def __init__(self, start, text): self._text = text self._start = start def start(self, arg=None): return self._start def end(self, arg=None): return self._start + len(self._text) def group(self, arg=None): if arg: raise IndexError('No such group') return self._text def groups(self): return (self._text,) def groupdict(self): return {} def bygroups(*args): """ Callback that yields multiple actions for each group in the match. """ def callback(lexer, match, ctx=None): for i, action in enumerate(args): if type(action) is _TokenType: data = match.group(i + 1) if data: yield match.start(i + 1), action, data else: if ctx: ctx.pos = match.start(i + 1) for item in action(lexer, _PseudoMatch(match.start(i + 1), match.group(i + 1)), ctx): if item: yield item if ctx: ctx.pos = match.end() return callback class _This(object): """ Special singleton used for indicating the caller class. Used by ``using``. """ this = _This() def using(_other, **kwargs): """ Callback that processes the match with a different lexer. The keyword arguments are forwarded to the lexer. 
""" if _other is this: def callback(lexer, match, ctx=None): s = match.start() for i, t, v in lexer.get_tokens_unprocessed(match.group()): yield i + s, t, v if ctx: ctx.pos = match.end() else: def callback(lexer, match, ctx=None): # XXX: cache that somehow kwargs.update(lexer.options) lx = _other(**kwargs) s = match.start() for i, t, v in lx.get_tokens_unprocessed(match.group()): yield i + s, t, v if ctx: ctx.pos = match.end() return callback class RegexLexerMeta(LexerMeta): """ Metaclass for RegexLexer, creates the self._tokens attribute from self.tokens on the first instantiation. """ def _process_state(cls, state): assert type(state) is str, "wrong state name %r" % state assert state[0] != '#', "invalid state name %r" % state if state in cls._tokens: return cls._tokens[state] tokens = cls._tokens[state] = [] rflags = cls.flags for tdef in cls.tokens[state]: if isinstance(tdef, include): # it's a state reference assert tdef != state, "circular state reference %r" % state tokens.extend(cls._process_state(str(tdef))) continue assert type(tdef) is tuple, "wrong rule def %r" % tdef rex = re.compile(tdef[0], rflags) assert type(tdef[1]) is _TokenType or callable(tdef[1]), \ 'token type must be simple type or callable, not %r' % tdef[1] if len(tdef) == 2: new_state = None else: tdef2 = tdef[2] if isinstance(tdef2, str): # an existing state if tdef2 == '#pop': new_state = -1 elif tdef2 in cls.tokens: new_state = (tdef2,) elif tdef2 == '#push': new_state = tdef2 elif tdef2[:5] == '#pop:': new_state = -int(tdef2[5:]) else: assert False, 'unknown new state %r' % tdef2 elif isinstance(tdef2, combined): # combine a new state from existing ones new_state = '_tmp_%d' % cls._tmpname cls._tmpname += 1 itokens = [] for istate in tdef2: assert istate != state, 'circular state ref %r' % istate itokens.extend(cls._process_state(istate)) cls._tokens[new_state] = itokens new_state = (new_state,) elif isinstance(tdef2, tuple): # push more than one state for state in tdef2: assert state in cls.tokens, \ 'unknown new state ' + state new_state = tdef2 else: assert False, 'unknown new state def %r' % tdef2 tokens.append((rex, tdef[1], new_state)) return tokens def __call__(cls, *args, **kwds): if not hasattr(cls, '_tokens'): cls._tokens = {} cls._tmpname = 0 for state in cls.tokens.keys(): cls._process_state(state) return type.__call__(cls, *args, **kwds) class RegexLexer(Lexer): """ Base for simple stateful regular expression-based lexers. Simplifies the lexing process so that you need only provide a list of states and regular expressions. """ __metaclass__ = RegexLexerMeta #: Flags for compiling the regular expressions. #: Defaults to MULTILINE. flags = re.MULTILINE #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}`` #: #: The initial state is 'root'. #: ``new_state`` can be omitted to signify no state transition. #: If it is a string, the state is pushed on the stack and changed. #: If it is a tuple of strings, all states are pushed on the stack and #: the current state will be the topmost. #: It can also be ``combined('state1', 'state2', ...)`` #: to signify a new, anonymous state combined from the rules of two #: or more existing ones. #: Furthermore, it can be '#pop' to signify going back one step in #: the state stack, or '#push' to push the current state on the stack #: again. #: #: The tuple can also be replaced with ``include('state')``, in which #: case the rules from the state named by the string are included in the #: current one. 
tokens = {} def get_tokens_unprocessed(self, text, stack=('root',)): """ Split ``text`` into (tokentype, text) pairs. ``stack`` is the inital stack (default: ``['root']``) """ pos = 0 statestack = list(stack) statetokens = self._tokens[statestack[-1]] while 1: for rex, action, new_state in statetokens: m = rex.match(text, pos) if m: if type(action) is _TokenType: yield pos, action, m.group() else: for item in action(self, m): yield item pos = m.end() if new_state is not None: # state transition if isinstance(new_state, tuple): statestack.extend(new_state) elif isinstance(new_state, int): # pop del statestack[new_state:] elif new_state == '#push': statestack.append(statestack[-1]) else: assert False, "wrong state def: %r" % new_state statetokens = self._tokens[statestack[-1]] break else: try: if text[pos] == '\n': # at EOL, reset state to "root" pos += 1 statestack = ['root'] statetokens = self._tokens['root'] yield pos, Text, '\n' continue yield pos, Error, text[pos] pos += 1 except IndexError: break class LexerContext(object): """ A helper object that holds lexer position data. """ def __init__(self, text, pos, stack=None, end=None): self.text = text self.pos = pos self.end = end or len(text) # end=0 not supported ;-) self.stack = stack or ['root'] def __repr__(self): return 'LexerContext(%r, %r, %r)' % ( self.text, self.pos, self.stack) class ExtendedRegexLexer(RegexLexer): """ A RegexLexer that uses a context object to store its state. """ def get_tokens_unprocessed(self, text=None, context=None): """ Split ``text`` into (tokentype, text) pairs. If ``context`` is given, use this lexer context instead. """ if not context: ctx = LexerContext(text, 0) statetokens = self._tokens['root'] else: ctx = context statetokens = self._tokens[ctx.stack[-1]] text = ctx.text while 1: for rex, action, new_state in statetokens: m = rex.match(text, ctx.pos, ctx.end) if m: if type(action) is _TokenType: yield ctx.pos, action, m.group() ctx.pos = m.end() else: for item in action(self, m, ctx): yield item if not new_state: # altered the state stack? statetokens = self._tokens[ctx.stack[-1]] # CAUTION: callback must set ctx.pos! if new_state is not None: # state transition if isinstance(new_state, tuple): ctx.stack.extend(new_state) elif isinstance(new_state, int): # pop del ctx.stack[new_state:] elif new_state == '#push': ctx.stack.append(ctx.stack[-1]) else: assert False, "wrong state def: %r" % new_state statetokens = self._tokens[ctx.stack[-1]] break else: try: if ctx.pos >= ctx.end: break if text[ctx.pos] == '\n': # at EOL, reset state to "root" ctx.pos += 1 ctx.stack = ['root'] statetokens = self._tokens['root'] yield ctx.pos, Text, '\n' continue yield ctx.pos, Error, text[ctx.pos] ctx.pos += 1 except IndexError: break def do_insertions(insertions, tokens): """ Helper for lexers which must combine the results of several sublexers. ``insertions`` is a list of ``(index, itokens)`` pairs. Each ``itokens`` iterable should be inserted at position ``index`` into the token stream given by the ``tokens`` argument. The result is a combined token stream. XXX: The indices yielded by this function are not correct! 
""" insertions = iter(insertions) try: index, itokens = insertions.next() except StopIteration: # no insertions for item in tokens: yield item return insleft = True for i, t, v in tokens: oldi = 0 while insleft and i + len(v) >= index: yield i, t, v[oldi:index-i] for item in itokens: yield item oldi = index-i try: index, itokens = insertions.next() except StopIteration: insleft = False break # not strictly necessary yield i, t, v[oldi:] PKz[^55LZ""pygments/__init__.pyc; >EEc@sdZdZdZdZdZdZdddgZd kZd kZd k l Z d k l Z d k l Z lZlZd klZlZlZlZdZedZedZdZedjoeieeind S(s2 Pygments ~~~~~~~~ Pygments is a syntax highlighting package written in Python. It aims to be a generic syntax highlighter for general use in all kinds of software such as forum systems, wikis or other applications that need to prettify source code. Highlights are: * a wide range of common languages and markup formats is supported * special attention is paid to details, increasing quality by a fair amount * support for new languages and formats are added easily * a number of output formats, presently HTML, LaTeX and ANSI sequences * it is usable as a command-line tool and as a library * ... and it highlights even Brainfuck! :copyright: 2006 by Georg Brandl, Armin Ronacher, Lukas Meuser and others. :license: GNU LGPL, see LICENSE for more details. s0.5srestructuredtexts(GNU Lesser General Public License (LGPL)sGeorg Brandl shttp://pygments.pocoo.org/slexsformats highlightN(sStringIO(s OptionError(sLEXERSsget_lexer_by_namesget_lexer_for_filename(s FORMATTERSsget_formatter_by_namesget_formatter_for_filenamesTerminalFormattercCs|i|SdS(sG Lex ``code`` with ``lexer`` and return an iterable of tokens. N(slexers get_tokensscode(scodeslexer((s/build/bdist.linux-i686/egg/pygments/__init__.pyslex,scCs:|pt}|i||| o|iSndS(s Format a tokenlist ``tokens`` with the formatter ``formatter``. If ``outfile`` is given and a valid file object (an object with a ``write`` method), the result will be written to it, otherwise it is returned as a string. N(soutfilesStringIOs realoutfiles formattersformatstokenssgetvalue(stokenss formattersoutfiles realoutfile((s/build/bdist.linux-i686/egg/pygments/__init__.pysformat3s cCstt||||SdS(s Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``. If ``outfile`` is given and a valid file object (an object with a ``write`` method), the result will be written to it, otherwise it is returned as a string. N(sformatslexscodeslexers formattersoutfile(scodeslexers formattersoutfile((s/build/bdist.linux-i686/egg/pygments/__init__.pys highlightAscCs`dk} d|dfd}y | i|dd\}}Wn'| ij oti|IJdSnXt|}| o| o |GHdSn|idt t j o |GHdSn|id t t j od t t fGHdSn|id t }|t j o|p|oti|IJdSnt d d gHd GHdGHg}d}xtiD]\}}}}di|d||oddi|dpd f}|i|t|d|jot|d}q]q]W|ix%|D]}dt|d|GHqWHdGHdGHg}d}xtiD]\}}}}di|d||oddi|dpd f}|i|t|d|jot|d}q<q<W|ix%|D]}dt|d|GHqWdSnh}|idt }|onyC|i!d} x-| D]%} | i!d\} }||| ||||Wn,t4j o }tid&I|IJdSnXdSdS('s9 Make pygments usable as a command line utility. Ns7Usage: %s [-l ] [-f ] [-O ] [-o ] [] %s -S
pygments/formatters/terminal.py
# -*- coding: utf-8 -*-
"""
    pygments.formatters.terminal
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Formatter for terminal output with ANSI sequences.

    :copyright: 2006 by Georg Brandl.
    :license: GNU LGPL, see LICENSE for more details.
"""

from pygments.formatter import Formatter
from pygments.token import Keyword, Name, Comment, String, Error, \
     Number, Operator, Generic, Token
from pygments.console import ansiformat
from pygments.util import get_bool_opt

__all__ = ['TerminalFormatter']


#: Map token types to a tuple of color values for light and dark
#: backgrounds.
TERMINAL_COLORS = {
    Token: ('', ''),
    Comment: ('lightgray', 'darkgray'),
    Keyword: ('darkblue', 'blue'),
    Keyword.Type: ('teal', 'turquoise'),
    Operator.Word: ('purple', 'fuchsia'),
    Name.Builtin: ('teal', 'turquoise'),
    Name.Function: ('darkgreen', 'green'),
    Name.Namespace: ('_teal_', '_turquoise_'),
    Name.Class: ('_darkgreen_', '_green_'),
    Name.Exception: ('teal', 'turquoise'),
    Name.Decorator: ('darkgray', 'lightgray'),
    Name.Variable: ('darkred', 'red'),
    Name.Constant: ('darkred', 'red'),
    Name.Attribute: ('teal', 'turquoise'),
    Name.Tag: ('blue', 'blue'),
    String: ('brown', 'brown'),
    Number: ('darkblue', 'blue'),
    Generic.Deleted: ('red', 'red'),
    Generic.Inserted: ('darkgreen', 'green'),
    Generic.Heading: ('**', '**'),
    Generic.Subheading: ('*purple*', '*fuchsia*'),
    Generic.Error: ('red', 'red'),
    Error: ('_red_', '_red_'),
}


class TerminalFormatter(Formatter):
    """
    Output plain text with coloring ANSI sequences.
    """

    def __init__(self, **options):
        """
        Accepted options:

        ``bg``
            Set to ``'light'`` or ``'dark'`` depending on the terminal's
            background.

        ``colorscheme``
            ``None`` or a dictionary mapping token types to
            ``(lightbg, darkbg)`` color names.

        ``debug``
            If true, output "<>" after each error token.
        """
        Formatter.__init__(self, **options)
        self.darkbg = options.get('bg', 'light') == 'dark'
        self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
        self.debug = get_bool_opt(options, 'debug', False)

    def format(self, tokensource, outfile):
        dbg = self.debug
        for ttype, value in tokensource:
            color = self.colorscheme.get(ttype)
            while color is None:
                ttype = ttype[:-1]
                color = self.colorscheme.get(ttype)
            if color:
                color = color[self.darkbg]
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write(ansiformat(color, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write(ansiformat(color, spl[-1]))
            else:
                outfile.write(value)
            if dbg and ttype is Error:
                outfile.write('<>')
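The color table above is all this formatter needs; everything else goes
through the generic ``format(tokensource, outfile)`` interface. A minimal
sketch of sending highlighted output to a terminal follows; it assumes the
top-level helpers re-export as indicated in the package module and that a
'python' lexer alias exists.

# illustrative snippet, not part of the package
import sys
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters.terminal import TerminalFormatter

code = "def greet(name):\n    return 'Hello, ' + name\n"
# bg='dark' selects the second entry of each TERMINAL_COLORS tuple
highlight(code, get_lexer_by_name('python'),
          TerminalFormatter(bg='dark'), sys.stdout)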
""" Formatter.__init__(self, **options) self.darkbg = options.get('bg', 'light') == 'dark' self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS self.debug = get_bool_opt(options, 'debug', False) def format(self, tokensource, outfile): dbg = self.debug for ttype, value in tokensource: color = self.colorscheme.get(ttype) while color is None: ttype = ttype[:-1] color = self.colorscheme.get(ttype) if color: color = color[self.darkbg] spl = value.split('\n') for line in spl[:-1]: if line: outfile.write(ansiformat(color, line)) outfile.write('\n') if spl[-1]: outfile.write(ansiformat(color, spl[-1])) else: outfile.write(value) if dbg and ttype is Error: outfile.write('<>') PKz[^5& H[ [ pygments/formatters/other.pyc; BEEc@sOdZdklZddgZdefdYZdefdYZdS(s pygments.formatters.other ~~~~~~~~~~~~~~~~~~~~~~~~~ Other formatters: NullFormatter, RawTokenFormatter. :copyright: 2006 by Georg Brandl, Armin Ronacher. :license: GNU LGPL, see LICENSE for more details. (s Formatters NullFormattersRawTokenFormattercBstZdZdZRS(s; Output the text unchanged without any formatting. cCs(x!|D]\}}|i|qWdS(N(s tokensourcesttypesvaluesoutfileswrite(sselfs tokensourcesoutfilesvaluesttype((s7build/bdist.linux-i686/egg/pygments/formatters/other.pysformats (s__name__s __module__s__doc__sformat(((s7build/bdist.linux-i686/egg/pygments/formatters/other.pys NullFormatters cBs tZdZdZdZRS(s( Output a raw token representation for storing token streams. The format is ``tokentyperepr(tokenstring)`` Additional options accepted: ``compress`` If set to "gz" or "bz2", compress the token stream with the given compression algorithm (default: ''). cKs)ti|||idd|_dS(Nscompresss(s Formatters__init__sselfsoptionssgetscompress(sselfsoptions((s7build/bdist.linux-i686/egg/pygments/formatters/other.pys__init__(sc s+|idjo7dk} | idddi}i} n]|idjo:dk}|idd}d} ni}i} t }d}xZ|D]R\} }| |jo||7}q|o|d||fn|}| }qW|d||f| dS( Nsgzsswbi sbz2csii|dS(N(soutfileswrites compressorscompressstext(stext(s compressorsoutfile(s7build/bdist.linux-i686/egg/pygments/formatters/other.pyswrite5scs!iiidS(N(soutfileswrites compressorsflush((soutfiles compressor(s7build/bdist.linux-i686/egg/pygments/formatters/other.pysflush7ss%s %r (sselfscompresssgzipsGzipFilesoutfileswritesflushsbz2s BZ2Compressors compressorsNoneslasttypeslastvals tokensourcesttypesvalue( sselfs tokensourcesoutfileslasttypesbz2s compressorslastvalsvalueswritesflushsgzipsttype((soutfiles compressors7build/bdist.linux-i686/egg/pygments/formatters/other.pysformat,s0         (s__name__s __module__s__doc__s__init__sformat(((s7build/bdist.linux-i686/egg/pygments/formatters/other.pysRawTokenFormatters  N(s__doc__spygments.formatters Formatters__all__s NullFormattersRawTokenFormatter(s NullFormatters FormattersRawTokenFormatters__all__((s7build/bdist.linux-i686/egg/pygments/formatters/other.pys? s   PKU^5ϓpygments/formatters/other.py# -*- coding: utf-8 -*- """ pygments.formatters.other ~~~~~~~~~~~~~~~~~~~~~~~~~ Other formatters: NullFormatter, RawTokenFormatter. :copyright: 2006 by Georg Brandl, Armin Ronacher. :license: GNU LGPL, see LICENSE for more details. """ from pygments.formatter import Formatter __all__ = ['NullFormatter', 'RawTokenFormatter'] class NullFormatter(Formatter): """ Output the text unchanged without any formatting. """ def format(self, tokensource, outfile): for ttype, value in tokensource: outfile.write(value) class RawTokenFormatter(Formatter): """ Output a raw token representation for storing token streams. 
pygments/formatters/bbcode.py
# -*- coding: utf-8 -*-
"""
    pygments.formatters.bbcode
    ~~~~~~~~~~~~~~~~~~~~~~~~~~

    BBcode formatter.

    :copyright: 2006 by Lukas Meuser.
    :license: GNU LGPL, see LICENSE for more details.
"""

from pygments.formatter import Formatter
from pygments.util import get_bool_opt

__all__ = ['BBCodeFormatter']


class BBCodeFormatter(Formatter):
    """
    Output BBCode tags with appropriate colors and formatting.

    This formatter doesn't support background colors and borders, as there
    are no common BBcodes for that.

    Some board systems (e.g. phpBB) don't support markup in their [code] tag,
    so you can't use the highlighting together with that tag.
    Text in a [code] tag usually is shown with a monospace font (which this
    formatter can do with the ``monofont`` option) and no spaces (which you
    need for indentation) are removed.

    Additional options accepted:

    ``codetag``
        If set to true, put the output into [code] tags (default: false).

    ``monofont``
        If set to true, add a tag to show the code with a monospace font
        (default: false).
    """
    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self._code = get_bool_opt(options, 'codetag', False)
        self._mono = get_bool_opt(options, 'monofont', False)

        self.styles = {}
        self._make_styles()

    def _make_styles(self):
        for ttype, ndef in self.style:
            start = end = ''
            if ndef['color']:
                start += '[color=#%s]' % ndef['color']
                end = '[/color]' + end
            if ndef['bold']:
                start += '[b]'
                end = '[/b]' + end
            if ndef['italic']:
                start += '[i]'
                end = '[/i]' + end
            if ndef['underline']:
                start += '[u]'
                end = '[/u]' + end
            # there are no common BBcodes for background-color and border;
            # key the table by the token type of this iteration
            self.styles[ttype] = start, end

    def format(self, tokensource, outfile):
        if self._code:
            outfile.write('[code]')
        if self._mono:
            outfile.write('[font=monospace]')

        lastval = ''
        lasttype = None
        for ttype, value in tokensource:
            while ttype not in self.styles:
                ttype = ttype.parent
            if ttype == lasttype:
                lastval += value
            else:
                if lastval:
                    start, end = self.styles[lasttype]
                    outfile.write(''.join((start, lastval, end)))
                lastval = value
                lasttype = ttype

        if lastval:
            start, end = self.styles[lasttype]
            outfile.write(''.join((start, lastval, end)))

        if self._mono:
            outfile.write('[/font]')
        if self._code:
            outfile.write('[/code]')

        if self._code or self._mono:
            outfile.write('\n')
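Because the BBCode output is plain text, it is convenient to let ``format()``
return it as a string instead of writing to a file. A short illustrative
driver; the 'python' lexer alias is an assumption.

# illustrative snippet, not part of the package
from pygments import lex, format
from pygments.lexers import get_lexer_by_name
from pygments.formatters.bbcode import BBCodeFormatter

code = "print 'hello board'"
tokens = lex(code, get_lexer_by_name('python'))
# no output file given, so format() returns the [code]...[/code] markup
markup = format(tokens, BBCodeFormatter(codetag=True, monofont=True))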
pygments/formatters/html.py (the module docstring is lost; the imports below
are reconstructed from the name table of the compiled module above)
# -*- coding: utf-8 -*-
import StringIO

from pygments.formatter import Formatter
from pygments.token import Token, Text, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt

__all__ = ['HtmlFormatter']


def escape_html(text):
    """Return escaped text as well as single and double quotes for HTML."""
    return text.replace('&', '&amp;'). \
                replace('<', '&lt;'). \
                replace('>', '&gt;'). \
                replace('"', '&quot;'). \
                replace("'", '&#39;')


def get_random_id():
    """Return a random id for javascript fields."""
    from random import random
    from time import time
    try:
        from hashlib import sha1 as sha
    except ImportError:
        import sha
        sha = sha.new
    return sha('%s|%s' % (random(), time())).hexdigest()


def _get_ttype_class(ttype):
    fname = STANDARD_TYPES.get(ttype)
    if fname:
        return fname
    aname = ''
    while fname is None:
        aname = '-' + ttype[-1] + aname
        ttype = ttype.parent
        fname = STANDARD_TYPES.get(ttype)
    return fname + aname


DOC_TEMPLATE = '''\
<html>
<head>
  <title>%(title)s</title>
  <style type="text/css">
%(styledefs)s
  </style>
</head>
<body>
<h2>%(title)s</h2>
%(code)s
</body>
</html>
'''


class HtmlFormatter(Formatter):
    """
    Output HTML tags with appropriate classes.

    Additional options accepted:

    ``nowrap``
        If set to true, don't wrap the tokens at all. This disables
        all other options (default: False).

    ``noclasses``
        If set to true, token ``<span>``s will not use CSS classes, but
        inline styles.

    ``classprefix``
        Prefix for token CSS classes, is prepended to all token style
        classes (e.g. class="o" -> class="_o" if classprefix == '_')
        (default: '').

    ``cssclass``
        CSS class for the wrapping ``<div>`` (default: 'highlight').

    ``cssstyles``
        Inline CSS styles for the wrapping ``<div>``. (default: '').

    ``linenos``
        If set to ``True``, output line numbers (default: False).

    ``linenostart``
        The line number for the first line (default: 1).

    ``linenostep``
        If set to a number n > 1, only every nth line number is printed
        (default: 1).

    ``linenospecial``
        If set to a number n > 0, every nth line number is given a special
        CSS class ``special`` (default: 0).

    ``nobackground``
        If set to ``True`` the formatter won't output the background color
        for the overall element (this automatically defaults to ``False``
        when there is no overall element [eg: no argument for the
        `get_syntax_defs` method given]) (default: ``False``)
    """

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.noclasses = get_bool_opt(options, 'noclasses', False)
        self.classprefix = options.get('classprefix', '')
        self.cssclass = options.get('cssclass', 'highlight')
        self.cssstyles = options.get('cssstyles', '')
        self.linenos = get_bool_opt(options, 'linenos', False)
        self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
        self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
        self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
        self.nobackground = get_bool_opt(options, 'nobackground', False)

        self._class_cache = {}
        self._create_stylesheet()

    def _get_css_class(self, ttype):
        """Return the css class of this token type prefixed
        with the classprefix option."""
        if ttype in self._class_cache:
            return self._class_cache[ttype]
        return self.classprefix + STANDARD_TYPES.get(ttype) or _get_ttype_class(ttype)

    def _create_stylesheet(self):
        t2c = self.ttype2class = {Token: ''}
        c2s = self.class2style = {}
        cp = self.classprefix
        for ttype, ndef in self.style:
            name = cp + _get_ttype_class(ttype)
            style = ''
            if ndef['color']:
                style += 'color: #%s; ' % ndef['color']
            if ndef['bold']:
                style += 'font-weight: bold; '
            if ndef['italic']:
                style += 'font-style: italic; '
            if ndef['underline']:
                style += 'text-decoration: underline; '
            if ndef['bgcolor']:
                style += 'background-color: #%s; ' % ndef['bgcolor']
            if ndef['border']:
                style += 'border: 1px solid #%s; ' % ndef['border']
            if style:
                t2c[ttype] = name
                # save len(ttype) to enable ordering the styles by
                # hierarchy (necessary for CSS cascading rules!)
                c2s[name] = (style[:-2], ttype, len(ttype))

    def get_style_defs(self, arg=''):
        """
        Return CSS style definitions for the classes produced by the
        current highlighting style. ``arg`` can be a string of selectors
        to insert before the token type classes.
""" if arg: arg += ' ' styles = [(level, ttype, cls, style) for cls, (style, ttype, level) in self.class2style.iteritems() if cls and style] styles.sort() lines = ['%s.%s { %s } /* %s */' % (arg, cls, style, repr(ttype)[6:]) for level, ttype, cls, style in styles] if arg and not self.nobackground and \ self.style.background_color is not None: text_style = '' if Text in self.ttype2class: text_style = ' ' + self.class2style[self.ttype2class[Text]][0] lines.insert(0, '%s{ background: %s;%s }' % (arg, self.style.background_color, text_style)) return '\n'.join(lines) def _format_nowrap(self, tokensource, outfile, lnos=False): lncount = 0 nocls = self.noclasses # for lookup only getcls = self.ttype2class.get c2s = self.class2style write = outfile.write lspan = '' for ttype, value in tokensource: htmlvalue = escape_html(value) if lnos: lncount += value.count("\n") if nocls: cclass = getcls(ttype) while cclass is None: ttype = ttype.parent cclass = getcls(ttype) cspan = cclass and '' % c2s[cclass][0] else: cls = self._get_css_class(ttype) cspan = cls and '' % cls if cspan == lspan: if not cspan: write(htmlvalue) else: write(htmlvalue.replace('\n', '\n' + cspan)) elif htmlvalue: # if no value, leave old span open if lspan: write('') lspan = cspan if cspan: htmlvalue = htmlvalue.replace('\n', '\n' + cspan) write(cspan + htmlvalue) else: write(htmlvalue) if lspan: write('') return lncount def format(self, tokensource, outfile): if self.nowrap: self._format_nowrap(tokensource, outfile) return realoutfile = outfile lnos = self.linenos full = self.full div = ('') if full or lnos: outfile = StringIO.StringIO() else: outfile.write(div) outfile.write('
')
        lncount = self._format_nowrap(tokensource, outfile, lnos)
        outfile.write('
') ret = '' if lnos: fl = self.linenostart mw = len(str(lncount + fl - 1)) sp = self.linenospecial st = self.linenostep if sp: ls = '\n'.join([(i%st == 0 and (i%sp == 0 and '%*d' or '%*d') % (mw, i) or '') for i in range(fl, fl + lncount)]) else: ls = '\n'.join([(i%st == 0 and ('%*d' % (mw, i)) or '') for i in range(fl, fl + lncount)]) ret = div + ('' '
'''
                   + ls + '
') ret += outfile.getvalue() ret += '
' if full: if not ret: ret = div + outfile.getvalue() + '
\n' realoutfile.write(DOC_TEMPLATE % dict(title = self.title, styledefs = self.get_style_defs('body'), code = ret)) elif lnos: realoutfile.write(ret + '
\n') else: realoutfile.write('
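In practice the formatter is driven through the top-level ``highlight()``
helper, with ``get_style_defs()`` supplying the matching stylesheet. The
sketch below is illustrative; the 'python' lexer alias and the CSS selector
are assumptions rather than anything defined in this archive.

# illustrative snippet, not part of the package
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters.html import HtmlFormatter

formatter = HtmlFormatter(linenos=True, cssclass='highlight')
# highlight() returns the <div><pre>...</pre></div> markup as a string
# because no output file is passed
html = highlight("print 'hello'", get_lexer_by_name('python'), formatter)
# CSS rules for the chosen style, scoped to the wrapping div
css = formatter.get_style_defs('.highlight')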
pygments/formatters/latex.py (the start of the file is lost; the header below
is reconstructed from the readable strings of the compiled module)
# -*- coding: utf-8 -*-
"""
    pygments.formatters.latex
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Formatter for LaTeX fancyvrb output.

    :copyright: 2006 by Georg Brandl.
    :license: GNU LGPL, see LICENSE for more details.
"""
import StringIO

from pygments.formatter import Formatter
from pygments.token import Token
from pygments.util import get_bool_opt, get_int_opt

__all__ = ['LatexFormatter']


def escape_tex(text):
    return text.replace('@', '\x00').    \
                replace('[', '\x01').    \
                replace(']', '\x02').    \
                replace('\x00', '@at[]').\
                replace('\x01', '@lb[]').\
                replace('\x02', '@rb[]')


DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''


class LatexFormatter(Formatter):
    """
    Output LaTeX "color" and "fancyvrb" control sequences.
    """
    def __init__(self, **options):
        """
        Additional options accepted:

        ``docclass``
            If ``full`` is true, this is the document class to use
            (default: 'article').

        ``preamble``
            If ``full`` is true, this can be further preamble commands
            (default: '').

        ``linenos``
            If true, output line numbers (default: False).

        ``linenostart``
            The line number for the first line (default: 1).

        ``linenostep``
            If set to a number n > 1, only every nth line number is printed
            (default: 1).

        ``verboptions``
            Additional options given to the Verbatim environment
            (default: '').

        ``nobackground``
            If set to ``True`` the formatter won't output the background
            color for the overall element (default: ``False``).
            Note that light colors on dark background with this option
            disabled won't be very readable.
""" Formatter.__init__(self, **options) self.docclass = options.get('docclass', 'article') self.preamble = options.get('preamble', '') self.linenos = get_bool_opt(options, 'linenos', False) self.linenostart = abs(get_int_opt(options, 'linenostart', 1)) self.linenostep = abs(get_int_opt(options, 'linenostep', 1)) self.verboptions = options.get('verboptions', '') self.nobackground = get_bool_opt(options, 'nobackground', False) self._create_stylecmds() def _create_stylecmds(self): t2c = self.ttype2cmd = {Token: ''} c2d = self.cmd2def = {} letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' first = iter(letters) second = iter(letters) firstl = first.next() def rgbcolor(col): if col: return ','.join(['%.2f' %(int(col[i] + col[i + 1], 16) / 255.0) for i in (0, 2, 4)]) else: return '1,1,1' for ttype, ndef in self.style: cmndef = '#1' if ndef['bold']: cmndef = r'\textbf{' + cmndef + '}' if ndef['italic']: cmndef = r'\textit{' + cmndef + '}' if ndef['underline']: cmndef = r'\underline{' + cmndef + '}' if ndef['color']: cmndef = r'\textcolor[rgb]{%s}{%s}' % ( rgbcolor(ndef['color']), cmndef ) if ndef['border']: cmndef = r'\fcolorbox[rgb]{%s}{%s}{%s}' % ( rgbcolor(ndef['border']), rgbcolor(ndef['bgcolor']), cmndef ) elif ndef['bgcolor']: cmndef = r'\colorbox[rgb]{%s}{%s}' % ( rgbcolor(ndef['bgcolor']), cmndef ) if cmndef == '#1': continue try: alias = 'C' + firstl + second.next() except StopIteration: firstl = first.next() second = iter(letters) alias = 'C' + firstl + second.next() t2c[ttype] = alias c2d[alias] = cmndef def get_style_defs(self, arg=''): """ Return the \\newcommand sequences needed to define the commands used to format text in the verbatim environment. If ``arg`` is given and true, use \\renewcommand instead. """ nc = (arg and r'\renewcommand' or r'\newcommand') return '%s\\at{@}\n%s\\lb{[}\n%s\\rb{]}\n' % (nc, nc, nc) + \ '\n'.join(['%s\\%s[1]{%s}' % (nc, alias, cmndef) for alias, cmndef in self.cmd2def.iteritems() if cmndef != '#1']) def format(self, tokensource, outfile): # TODO: add support for background colors if self.full: realoutfile = outfile outfile = StringIO.StringIO() outfile.write(r'\begin{Verbatim}[commandchars=@\[\]') if self.linenos: start, step = self.linenostart, self.linenostep outfile.write(',numbers=left' + (start and ',firstnumber=%d' % start or '') + (step and ',stepnumber=%d' % step or '')) if self.verboptions: outfile.write(',' + self.verboptions) outfile.write(']\n') for ttype, value in tokensource: value = escape_tex(value) cmd = self.ttype2cmd.get(ttype) while cmd is None: ttype = ttype.parent cmd = self.ttype2cmd.get(ttype) if cmd: spl = value.split('\n') for line in spl[:-1]: if line: outfile.write("@%s[%s]" % (cmd, line)) outfile.write('\n') if spl[-1]: outfile.write("@%s[%s]" % (cmd, spl[-1])) else: outfile.write(value) outfile.write('\n\\end{Verbatim}\n') if self.full: realoutfile.write(DOC_TEMPLATE % dict(docclass = self.docclass, preamble = self.preamble, title = self.title, styledefs = self.get_style_defs(), code = outfile.getvalue())) PKYV^53Hpygments/styles/native.py# -*- coding: utf-8 -*- """ pygments.styles.native ~~~~~~~~~~~~~~~~~~~~~~ pygments version of my "native" vim theme. :copyright: 2006 by Armin Ronacher. :license: GNU LGPL, see LICENSE for more details. 
""" from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Token class NativeStyle(Style): background_color = '#202020' styles = { Token: '#d0d0d0', Comment: 'italic #999999', Comment.Preproc: 'noitalic bold #cd2828', Keyword: 'bold #6ab825', Keyword.Pseudo: 'nobold', Operator.Word: 'bold #6ab825', String: '#ed9d13', String.Other: '#ffa500', Number: '#3677a9', Name.Builtin: '#24909d', Name.Variable: '#40ffff', Name.Constant: '#40ffff', Name.Class: 'underline #447fcf', Name.Function: '#447fcf', Name.Namespace: 'underline #447fcf', Name.Exception: '#bbbbbb', Name.Tag: 'bold #6ab825', Name.Attribute: '#bbbbbb', Name.Decorator: '#ffa500', Generic.Heading: 'bold #ffffff', Generic.Subheading: 'underline #ffffff', Generic.Deleted: '#d22323', Generic.Inserted: '#589819', Generic.Error: '#d22323', Generic.Emph: 'italic', Generic.Strong: 'bold', Generic.Prompt: '#aaaaaa', Generic.Output: '#cccccc', Generic.Traceback: '#d22323', Error: 'bg:#e3d2d2 #a61717' } PKz[^5|ߤ pygments/styles/default.pyc; %];Ec@sddZdklZdklZlZlZlZlZl Z l Z l Z defdYZ dS(s pygments.styles.default ~~~~~~~~~~~~~~~~~~~~~~~ The default highlighting style for Pygments. :copyright: 2006 by Georg Brandl. :license: GNU LGPL, see LICENSE for more details. (sStyle(sKeywordsNamesCommentsStringsErrorsNumbersOperatorsGenerics DefaultStylecBstZdZdZdZhed<eid<ed<eid<ei d<e d<e i d<e i d<e id <e id <e id <e id <e id <e id<e id<e id<e id<e id<e id<ed<eid<eid<eid<eid<eid <eid<e d<e!i"d<e!i#d<e!i$d<e!i%d <e!i&d<e!i'd<e!i(d<e!i)d<e!i*d<e!i+d<e&d. :license: GNU LGPL, see LICENSE for more details. """ from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic class ManniStyle(Style): background_color = '#f0f3f3' styles = { Comment: 'italic #0099FF', Comment.Preproc: 'noitalic #009999', Keyword: 'bold #006699', Keyword.Pseudo: 'nobold', Keyword.Type: '#007788', Operator: '#555555', Operator.Word: 'bold #000000', Name.Builtin: '#336666', Name.Function: '#CC00FF', Name.Class: 'bold #00AA88', Name.Namespace: 'bold #00CCFF', Name.Exception: 'bold #CC0000', Name.Variable: '#003333', Name.Constant: '#336600', Name.Label: '#9999FF', Name.Entity: 'bold #999999', Name.Attribute: '#330099', Name.Tag: 'bold #330099', Name.Decorator: '#9999FF', String: '#CC3300', String.Doc: 'italic', String.Interpol: '#AA0000', String.Escape: 'bold #CC3300', String.Regex: '#33AAAA', String.Symbol: '#FFCC33', String.Other: '#CC3300', Number: '#FF6600', Generic.Heading: 'bold #003300', Generic.Subheading: 'bold #003300', Generic.Deleted: 'border:#CC0000 bg:#FFCCCC', Generic.Inserted: 'border:#00CC00 bg:#CCFFCC', Generic.Error: '#FF0000', Generic.Emph: 'italic', Generic.Strong: 'bold', Generic.Prompt: 'bold #000099', Generic.Output: '#AAAAAA', Generic.Traceback: '#99CC66', Error: 'bg:#FFAAAA #AA0000' } PKgoV5[\pygments/styles/borland.py# -*- coding: utf-8 -*- """ pygments.styles.borland ~~~~~~~~~~~~~~~~~~~~~~~ Style similar to the style used in the borland ides. :copyright: 2006 by Armin Ronacher. :license: GNU LGPL, see LICENSE for more details. 
""" from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic class BorlandStyle(Style): default_style = '' styles = { Comment: 'italic #008800', Comment.Preproc: 'noitalic', String: '#0000FF', Number: '#0000FF', Keyword: 'bold', Operator.Word: 'bold', Name.Tag: 'bold', Name.Attribute: 'italic', Generic.Heading: '#999999', Generic.Subheading: '#aaaaaa', Generic.Deleted: 'bg:#ffdddd #000000', Generic.Inserted: 'bg:#ddffdd #000000', Generic.Error: '#aa0000', Generic.Emph: 'italic', Generic.Strong: 'bold', Generic.Prompt: '#555555', Generic.Output: '#888888', Generic.Traceback: '#aa0000', Error: 'bg:#e3d2d2 #a61717' } PKz[^5oaapygments/styles/native.pyc; EEc@sjdZdklZdklZlZlZlZlZl Z l Z l Z l Z defdYZ dS(s pygments.styles.native ~~~~~~~~~~~~~~~~~~~~~~ pygments version of my "native" vim theme. :copyright: 2006 by Armin Ronacher. :license: GNU LGPL, see LICENSE for more details. (sStyle( sKeywordsNamesCommentsStringsErrorsNumbersOperatorsGenericsTokens NativeStylecBsjtZdZhed<ed<eid<ed<eid<ei d<e d<e i d<e d <e id <e id <e id <e id <e id <e id <e id<e id<e id<e id<eid<eid<eid<eid<eid<eid<eid<ei d<ei!d<ei"d<ed. :license: GNU LGPL, see LICENSE for more details. (sStyle(sKeywordsNamesCommentsStringsErrorsNumbersOperatorsGenerics ManniStylecBstZdZhed<eid<ed<eid<eid<ed<ei d<e i d <e i d <e i d <e id <e id <e id<e id<e id<e id<e id<e id<e id<ed<eid<eid<eid<eid<eid<eid<ed<ei d<ei!d<ei"d<ei#d<ei$d<ei%d<ei&d<ei'd <ei(d!<ei)d"<e$d#ggrpygments/lexers/text.pyPKz[^5 >xpygments/lexers/text.pycPKz[^5+`* * fpygments/lexers/dotnet.pycPKU^5pygments/lexers/_phpbuiltins.pyPKz[^5apygments/lexers/__init__.pycPKz[^5H9iipygments/lexers/agile.pycPKz[^5N8oopygments/lexers/special.pycPKU^5D;;apygments/lexers/web.pyPKU^5zsLL4pygments/lexers/templates.pyPKz[^5q!! pygments/lexers/_luabuiltins.pycPKV^5W7##ףpygments/lexers/dotnet.pyPKoV5 oopygments/lexers/_luabuiltins.pyPKz[^5&;;dpygments/lexers/web.pycPKU^5D2pygments/lexers/__init__.pyPKz[^59Olhh5pygments/lexers/_mapping.pycPKz[^5 Ivv Spygments/formatters/terminal.pycPKz[^5H 3x*x*Wcpygments/formatters/html.pycPK"oV5f  pygments/formatters/terminal.pyPKz[^5& H[ [ >pygments/formatters/other.pycPKU^5ϓԨpygments/formatters/other.pyPKAV^5, pygments/formatters/bbcode.pyPKz[^5 pygments/formatters/bbcode.pycPKz[^52RR pygments/formatters/__init__.pycPKHV^5bM&&pygments/formatters/html.pyPKz[^5(*11pygments/formatters/latex.pycPKwjV5~< < L pygments/formatters/__init__.pyPKRV^5if& pygments/formatters/latex.pyPKYV^53H@ pygments/styles/native.pyPKz[^5|ߤ F pygments/styles/default.pycPKloV5*  P pygments/styles/colorful.pyPKz[^5m +[ pygments/styles/colorful.pycPKnoV5E e pygments/styles/pastie.pyPKboV5}¼ n pygments/styles/murphy.pyPKz[^5( x pygments/styles/friendly.pycPKz[^5XS++˂ pygments/styles/autumn.pycPK]oV55F.. pygments/styles/perldoc.pyPKz[^5GWf f  pygments/styles/murphy.pycPK nV56?  pygments/styles/manni.pyPKgoV5[\ pygments/styles/borland.pyPKz[^5oaaج pygments/styles/native.pycPKV^5Saq pygments/styles/trac.pyPKz[^5z/D88 pygments/styles/__init__.pycPKz[^5W۝J J  pygments/styles/manni.pycPKpoV5.p" " s pygments/styles/default.pyPKz[^52 pygments/styles/borland.pycPKeoV5u' pygments/styles/autumn.pyPKwjV5H pygments/styles/__init__.pyPKz[^5%I^ ^ @ pygments/styles/pastie.pycPKz[^5\k_ pygments/styles/perldoc.pycPKsoV5QDT  pygments/styles/friendly.pyPKz[^5