Python pyparsing 模块,ZeroOrMore() 实例源码
我们从Python开源项目中,提取了以下28个代码示例,用于说明如何使用pyparsing.ZeroOrMore()。
def __init__(self, calc=SimpleCalculator()):
    """Build an infix arithmetic parser for +, -, *, / over integers.

    Parsing pushes operands and operators onto ``self.exprStack`` in
    postfix order via the ``pushStack`` parse action; ``self.opfun``
    maps each operator symbol to the matching calculator method.

    Args:
        calc: object providing add/sub/mul/div.  NOTE(review): the
            default instance is created once and shared across calls
            (mutable-default pattern) -- kept for interface compatibility.
    """
    self.exprStack = []

    def pushStack(s, l, t):
        # pyparsing parse action (string, location, tokens): record the
        # matched token so the expression ends up in postfix order.
        self.exprStack.append(t[0])

    integer = Word(nums).addParseAction(pushStack)
    addop = Literal('+') | Literal('-')
    mulop = Literal('*') | Literal('/')
    lpar = Literal('(')
    rpar = Literal(')')
    expr = Forward()
    atom = integer | lpar + expr + rpar
    # Standard precedence: '*'/'/' bind tighter than '+'/'-'.
    term = atom + ZeroOrMore((mulop + atom).addParseAction(pushStack))
    expr << term + ZeroOrMore((addop + term).addParseAction(pushStack))
    self.expr = expr + StringEnd()
    # FIX: the '-' and '*' entries were truncated ("calc.sub(a," with no
    # closing arguments); restored the complete two-argument lambdas.
    self.opfun = {
        '+': (lambda a, b: calc.add(a, b)),
        '-': (lambda a, b: calc.sub(a, b)),
        '*': (lambda a, b: calc.mul(a, b)),
        '/': (lambda a, b: calc.div(a, b)),
    }
def anything_beetween(opener_and_closer):
    """Builds a (pyparsing) parser for the content inside delimiters.

    Args:
        opener_and_closer: a string containing two elements: opener and closer

    Returns:
        A (pyparsing) parser for the content inside delimiters.
    """
    opener = pyparsing.Literal(opener_and_closer[0])
    closer = pyparsing.Literal(opener_and_closer[1])
    # FIX: the original built the delimiter-free alphabet with the
    # Python 2-only `unicode` builtin (NameError on Python 3).  Filtering
    # the printable characters directly yields the same alphabet portably.
    other_chars = ''.join(
        c for c in string.printable if c not in opener_and_closer)
    word_without_delimiters = pyparsing.Word(other_chars).setName(
        "other_chars")
    anything = pyparsing.Forward()
    # Delimited blocks may nest arbitrarily, e.g. "(a (b) c)".
    delimited_block = opener + anything + closer
    # pylint: disable=expression-not-assigned
    anything << pyparsing.ZeroOrMore(
        word_without_delimiters.setName("word_without_delimiters")
        | delimited_block.setName("delimited_block")
    )
    # Combine all the parts into a single string.
    return pyparsing.Combine(anything)
def anything_beetween(opener_and_closer):
    """Builds a (pyparsing) parser for the content inside delimiters.

    Args:
        opener_and_closer: a string containing two elements: opener and closer

    Returns:
        A (pyparsing) parser for the content inside delimiters.
    """
    opener = pyparsing.Literal(opener_and_closer[0])
    closer = pyparsing.Literal(opener_and_closer[1])
    # FIX: the original built the delimiter-free alphabet with the
    # Python 2-only `unicode` builtin (NameError on Python 3).  Filtering
    # the printable characters directly yields the same alphabet portably.
    other_chars = ''.join(
        c for c in string.printable if c not in opener_and_closer)
    word_without_delimiters = pyparsing.Word(other_chars).setName(
        "other_chars")
    anything = pyparsing.Forward()
    # Delimited blocks may nest arbitrarily, e.g. "(a (b) c)".
    delimited_block = opener + anything + closer
    # pylint: disable=expression-not-assigned
    anything << pyparsing.ZeroOrMore(
        word_without_delimiters.setName("word_without_delimiters")
        | delimited_block.setName("delimited_block")
    )
    # Combine all the parts into a single string.
    return pyparsing.Combine(anything)
def _struct_deFinition_possibly_with_fields(self):
    """Detect a struct/enum/union deFinition.

    e.g.
        struct foobar {
            int v[100];
        } __attribute__((packed))

    Returns:
        A pyparsing parser whose parse action builds the struct node.
    """
    return (
        (_STRUCT | _UNION)("type")
        # The name is optional: anonymous structs/unions are legal C.
        + pyparsing.Optional(self._identifier())("type_name")
        + _OPEN_CURLY
        + pyparsing.ZeroOrMore(
            self.element
        )("fields")
        + _CLOSE_CURLY
        # Trailing attributes, e.g. __attribute__((packed)).
        + self._maybe_attributes()("attributes")
    ).setParseAction(self._process_struct_deFinition)
def _type_reference(self):
    """A reference to a type.

    The type may be already defined in place or just refered by name.

    Returns:
        A pyparsing parser whose parse action builds the type reference.
    """
    identifier = (
        self._typeof_expression()
        # Inline struct deFinition.
        # e.g. struct { int x; } foo;
        | self._struct_deFinition_possibly_with_fields()
        | self._enum_deFinition()
        | self._numeric_type_identifier()
        | self._compound_type_identifier()
        | self._identifier()
    )
    return (
        # Any number of leading 'volatile' qualifiers is accepted.
        pyparsing.ZeroOrMore(_VOLATILE)
        + identifier
    ).setParseAction(self._create_type_reference)
def _create_simple_statements():
    """Build the shared grammar for all simple (non-block) statements.

    Populates the module-level `simple_statement` and `generic_statement`
    parsers.  Idempotent: returns immediately when already built.
    """
    global binary, ident, rvalue, simple_statement, semi, comp, number, slot_id, callrpc_stmt, generic_statement, streamer_stmt, stream, selector

    if simple_statement is not None:
        return

    Meta_stmt = Group(Literal('Meta').suppress() + ident + Literal('=').suppress() + rvalue + semi).setResultsName('Meta_statement')
    require_stmt = Group(Literal('require').suppress() + ident + comp + rvalue + semi).setResultsName('require_statement')
    set_stmt = Group(Literal('set').suppress() - (ident | number) - Literal("to").suppress() - (rvalue | binary) - Optional(Literal('as').suppress() + config_type) + semi).setResultsName('set_statement')
    callrpc_stmt = Group(Literal("call").suppress() + (ident | number) + Literal("on").suppress() + slot_id + Optional(Literal("=>").suppress() + stream('explicit_stream')) + semi).setResultsName('call_statement')
    streamer_stmt = Group(Optional(Literal("manual")('manual')) + Optional(oneOf(u'encrypted signed')('security')) + Optional(Literal(u'realtime')('realtime')) + Literal('streamer').suppress() -
                          Literal('on').suppress() - selector('selector') - Optional(Literal('to').suppress() - slot_id('explicit_tile')) - Optional(Literal('with').suppress() - Literal('streamer').suppress() - number('with_other')) - semi).setResultsName('streamer_statement')
    copy_stmt = Group(Literal("copy").suppress() - Optional(oneOf("all count average")('modifier')) - Optional(stream('explicit_input') | number('constant_input')) - Literal("=>") - stream("output") - semi).setResultsName('copy_statement')
    trigger_stmt = Group(Literal("trigger") - Literal("streamer") - number('index') - semi).setResultsName('trigger_statement')

    simple_statement = Meta_stmt | require_stmt | set_stmt | callrpc_stmt | streamer_stmt | trigger_stmt | copy_stmt

    # In generic statements, keep track of the location where the match
    # started for error handling.
    # FIX: pyparsing parse actions are called as (string, loc, tokens); the
    # original `lambda s, t: l` referenced an undefined name `l` and took
    # the wrong number of arguments.
    locator = Empty().setParseAction(lambda s, l, t: l)('location')
    generic_statement = Group(locator + Group(ZeroOrMore(Regex(u"[^{};]+")) + Literal(u';'))('match')).setResultsName('unparsed_statement')
def _create_block_bnf():
    """Build the grammar for block statements (every/when/config/on...).

    Populates the module-level `block_bnf` and `statement` parsers.
    Idempotent: returns immediately when already built.  Relies on
    parsers created by the sibling _create_* functions.
    """
    global block_bnf, time_interval, statement, block_id, stream

    if block_bnf is not None:
        return

    # A trigger is either a stream trigger, a bare stream, or an identifier.
    trigger_clause = Group(stream_trigger | Group(stream).setResultsName('stream_always') | Group(ident).setResultsName('identifier'))

    every_block_id = Group(Literal(u'every').suppress() - time_interval).setResultsName('every_block')
    when_block_id = Group(Literal(u'when').suppress() + Literal("connected").suppress() - Literal("to").suppress() - slot_id).setResultsName('when_block')
    latch_block_id = Group(Literal(u'when').suppress() - stream_trigger).setResultsName('latch_block')
    config_block_id = Group(Literal(u'config').suppress() - slot_id).setResultsName('config_block')
    on_block_id = Group(Literal(u'on').suppress() - trigger_clause.setResultsName('triggerA') - Optional((Literal("and") | Literal("or")) - trigger_clause.setResultsName('triggerB'))).setResultsName('on_block')
    block_id = every_block_id | when_block_id | latch_block_id | config_block_id | on_block_id

    block_bnf = Forward()
    statement = generic_statement | block_bnf
    # Blocks nest: a block body is zero or more statements inside braces.
    block_bnf << Group(block_id + Group(Literal(u'{').suppress() + ZeroOrMore(statement) + Literal(u'}').suppress())).setResultsName('block')
def get_language():
    """Create or retrieve the parse tree for defining a sensor graph.

    Builds the grammar lazily on first use and caches it in the
    module-level `sensor_graph`; subsequent calls return the cache.
    """
    global sensor_graph, statement

    if sensor_graph is None:
        # First call: assemble every layer of the grammar, then cache it.
        _create_primitives()
        _create_simple_statements()
        _create_block_bnf()
        sensor_graph = ZeroOrMore(statement) + StringEnd()
        sensor_graph.ignore(comment)

    return sensor_graph
def __init__(self, ffilter, queue_out):
    """Filter thread: builds a boolean filter grammar over c/l/w/h fields.

    Args:
        ffilter: filter parameters; a mapping with at least a 'codes' list.
        queue_out: queue that receives items passing the filter.
    """
    FuzzQueue.__init__(self, queue_out)
    Thread.__init__(self)
    self.setName('filter_thread')
    self.queue_out = queue_out

    # Only build the grammar when pyparsing imported successfully.
    if PYPARSING:
        element = oneOf("c l w h")
        # 'X' and 'B' act as wildcard/baseline digits in filter codes.
        digits = "XB0123456789"
        integer = Word( digits )#.setParseAction( self.__convertIntegers )
        elementRef = Group(element + oneOf("= != < > >= <=") + integer)
        operator = oneOf("and or")
        deFinition = elementRef + ZeroOrMore( operator + elementRef)
        # Parentheses around a sub-formula are optional and suppressed.
        nestedformula = Group(Suppress(Optional(Literal("("))) + deFinition + Suppress(Optional(Literal(")"))))
        self.finalformula = nestedformula + ZeroOrMore( operator + nestedformula)
        elementRef.setParseAction(self.__compute_element)
        nestedformula.setParseAction(self.__compute_formula)
        self.finalformula.setParseAction(self.__myreduce)

    self.res = None
    self.hideparams = ffilter
    # "XXX" is a wildcard status code; also treat code "0" as matching.
    if "XXX" in self.hideparams['codes']:
        self.hideparams['codes'].append("0")
    self.baseline = None
def __init__(self):
    """Build a boolean filter grammar over category names.

    Supports "and"/"or" (and comma) combinators, optional "not"
    negation, and optional parentheses around each sub-formula.
    """
    if PYPARSING:
        # Category names may include '*' wildcards, dashes and underscores.
        category = Word( alphas + "_-*", alphanums + "_-*" )
        operator = oneOf("and or,")
        neg_operator = "not"
        elementRef = category
        deFinition = elementRef + ZeroOrMore( operator + elementRef)
        # Parentheses around a sub-formula are optional and suppressed.
        nestedformula = Group(Suppress(Optional(Literal("("))) + deFinition + Suppress(Optional(Literal(")"))))
        neg_nestedformula = Optional(neg_operator) + nestedformula
        self.finalformula = neg_nestedformula + ZeroOrMore( operator + neg_nestedformula)
        elementRef.setParseAction(self.__compute_element)
        neg_nestedformula.setParseAction(self.__compute_neg_formula)
        nestedformula.setParseAction(self.__compute_formula)
        self.finalformula.setParseAction(self.__myreduce)
def __init__(self):
    """Build pyparsing patterns that detect reference-style numbers in text.

    Exposes four matchers as attributes: single_number,
    single_number_parens, number_then_punctuation and
    punctuation_then_number.
    """
    # Words that may contain internal dashes, e.g. "well-known".
    real_word_dashes = Word(pyparsing.alphas + '-')
    punctuation = Word('.!?:,;-')
    punctuation_no_dash = Word('.!?:,;')
    punctuation_reference_letter = Word('.:,;-')
    printable = Word(pyparsing.printables, exact=1)
    letter = Word(pyparsing.alphas, exact=1)
    letter_reference = punctuation_reference_letter + letter
    # A number optionally followed by reference letters, e.g. "12a,b".
    nums = Word(pyparsing.nums) + Optional(letter) + \
        ZeroOrMore(letter_reference)
    # Allow trailing closing brackets before the word boundary.
    word_end = pyparsing.ZeroOrMore(Word(')') | Word('}') | Word(']')) + \
        WordEnd()
    # A word immediately followed by a number, e.g. "word12".
    self.single_number = (
        WordStart() +
        real_word_dashes +
        nums +
        word_end
    )
    # Bracketed reference numbers after a word, e.g. "foo(12)" or "x[3-5]".
    self.single_number_parens = (
        printable +
        letter +
        Optional(punctuation_no_dash) +
        pyparsing.OneOrMore(
            Word('([{', exact=1) +
            pyparsing.OneOrMore(nums | Word('-')) +
            Word(')]}', exact=1)
        ) +
        word_end
    )
    # A number directly followed by punctuation, e.g. "foo12,34".
    self.number_then_punctuation = (
        printable +
        letter +
        nums +
        punctuation +
        pyparsing.ZeroOrMore(nums | punctuation) +
        word_end
    )
    # Punctuation directly followed by a number, e.g. "foo,12".
    self.punctuation_then_number = (
        printable +
        letter +
        punctuation_no_dash +
        nums +
        pyparsing.ZeroOrMore(punctuation | nums) +
        word_end
    )
def __init__(self, ffilter, queue_out):
    """Filter thread: builds a boolean filter grammar over c/l/w/h fields.

    FIX: the original header `def __init__(self, queue_out)` was missing
    the closing colon (syntax error) and the `ffilter` parameter, even
    though the body reads `ffilter` below; restored to match the intact
    sibling definition earlier in this file.

    Args:
        ffilter: filter parameters; a mapping with at least a 'codes' list.
        queue_out: queue that receives items passing the filter.
    """
    Thread.__init__(self)
    self.setName('filter_thread')
    self.queue_out = queue_out

    # Only build the grammar when pyparsing imported successfully.
    if PYPARSING:
        element = oneOf("c l w h")
        # 'X' and 'B' act as wildcard/baseline digits in filter codes.
        digits = "XB0123456789"
        integer = Word( digits )#.setParseAction( self.__convertIntegers )
        elementRef = Group(element + oneOf("= != < > >= <=") + integer)
        operator = oneOf("and or")
        deFinition = elementRef + ZeroOrMore( operator + elementRef)
        nestedformula = Group(Suppress(Optional(Literal("("))) + deFinition + Suppress(Optional(Literal(")"))))
        self.finalformula = nestedformula + ZeroOrMore( operator + nestedformula)
        elementRef.setParseAction(self.__compute_element)
        nestedformula.setParseAction(self.__compute_formula)
        self.finalformula.setParseAction(self.__myreduce)

    self.res = None
    self.hideparams = ffilter
    # "XXX" is a wildcard status code; also treat code "0" as matching.
    if "XXX" in self.hideparams['codes']:
        self.hideparams['codes'].append("0")
    self.baseline = None
def __init__(self):
    """Build a boolean filter grammar over category names.

    FIX: the original was truncated -- the `category` Word call had a
    broken string literal (`Word( alphas + "_-*",")`) and `operator` was
    used below without ever being defined.  Restored both lines from the
    intact sibling definition earlier in this file.
    """
    if PYPARSING:
        # Category names may include '*' wildcards, dashes and underscores.
        category = Word( alphas + "_-*", alphanums + "_-*" )
        operator = oneOf("and or,")
        neg_operator = "not"
        elementRef = category
        deFinition = elementRef + ZeroOrMore( operator + elementRef)
        # Parentheses around a sub-formula are optional and suppressed.
        nestedformula = Group(Suppress(Optional(Literal("("))) + deFinition + Suppress(Optional(Literal(")"))))
        neg_nestedformula = Optional(neg_operator) + nestedformula
        self.finalformula = neg_nestedformula + ZeroOrMore( operator + neg_nestedformula)
        elementRef.setParseAction(self.__compute_element)
        neg_nestedformula.setParseAction(self.__compute_neg_formula)
        nestedformula.setParseAction(self.__compute_formula)
        self.finalformula.setParseAction(self.__myreduce)
def statement():
    """Return a parser for one `lhs = rhs` assignment statement.

    The right-hand side is a curly block, a quoted string or a regex,
    optionally followed by keywords; it is joined into a single
    space-separated string under the "rhs" results name.
    """
    rhs_first = (
        anything_in_curly()
        | pyparsing.QuotedString("'", escChar="\\", unquoteResults=False)
        | pyparsing.QuotedString("\"", unquoteResults=False)
        | _REGEX
    )
    rhs = pyparsing.Combine(
        rhs_first + pyparsing.ZeroOrMore(_KEYWORD),
        adjacent=False,
        joinString=" ",
    ).setResultsName("rhs")
    return pyparsing.Group(
        _IDENTIFIER.setResultsName("lhs") + _EQUALS + rhs
    )
def _base_or_array_expression(self):
    """Parse a base expression with any number of [index] subscripts."""
    # One array subscript: an expression between square brackets.
    subscript = _OPEN_BRACKETS + self.expression + _CLOSE_BRACKETS
    parser = (
        self._base_expression()
        + pyparsing.Group(pyparsing.ZeroOrMore(subscript))
    )
    return parser.setParseAction(self._create_base_or_array_expression)
def XXXX_cast_expression(self):
    """A function returning a parser for parsing cast expressions.

    Matches `(type) expression` and `(typeof(...)) expression` forms,
    using self.expression for the operand being cast.

    Returns:
        A (pyparsing) parser for parsing cast expressions.
    """
    word = pyparsing.Word(pyparsing.alphanums + '_*[]')
    # `nested` matches a balanced parenthesised type spec, recursively,
    # with the outermost parentheses suppressed.
    nested = pyparsing.Forward().setName("nested")
    nested << pyparsing.Combine(
        pyparsing.Literal('(').suppress()
        + pyparsing.Combine(
            pyparsing.ZeroOrMore(self._integer() | word | nested))
        + pyparsing.Literal(')').suppress()
    )
    typeof_expression = (
        _OPEN_PARENTHESIS
        + pyparsing.Keyword('typeof')
        + nested("typeof_arg")
        + _CLOSE_PARENTHESIS
    )
    type_expression = (
        typeof_expression
        | nested("simple_type")
    )
    return (
        type_expression
        # Reject a following +/-: "(x) - y" is arithmetic, not a cast.
        + ~(_PLUS | _MINUS)
        + self.expression("expression")
    ).setParseAction(self._create_cast_expression)
def _program(self):
    """Parse a whole translation unit: any mix of elements and typedefs."""
    top_level_item = self._element() | self._typedef()
    parser = pyparsing.ZeroOrMore(top_level_item)
    return parser.setParseAction(self._make_prog)
def _enum_deFinition(self):
    """Detect an enum deFinition.

    e.g.
        enum foo {
            OPTION_1: 1 + 2,
            OPTION_2
        }

    Returns:
        A pyparsing parser whose parse action builds the enum node.
    """
    return (
        _ENUM
        # The name is optional: anonymous enums are legal.
        + pyparsing.Optional(self._identifier())("enum_name")
        + _OPEN_CURLY
        + pyparsing.ZeroOrMore(
            pyparsing.Group(
                self._identifier()("name")
                # Each enumerator may carry an explicit value expression.
                + pyparsing.Optional(
                    _EQUALS
                    # This allows us to get even invalid expressions.
                    + pyparsing.SkipTo(pyparsing.Word(",}"))("expression")
                )
                + pyparsing.Optional(_COMMA)
            )
        )("fields")
        + _CLOSE_CURLY
        + self._maybe_attributes()("attributes")
    ).setParseAction(self._process_enum_deFinition)
def grammar():
    """Define the query grammar.

    Backus-Naur form (BNF) of the grammar::

        <grammar> ::= <item> | <item> <boolean> <grammar>
        <item>    ::= <hosts> | "(" <grammar> ")"
        <boolean> ::= "and not" | "and" | "xor" | "or"

    Given that the pyparsing library defines the grammar in a BNF-like
    style, for the details of the tokens not specified above check
    directly the source code.

    Returns:
        pyparsing.ParserElement: the grammar parser.
    """
    # Boolean operators.  'and not' must come first so it is not
    # swallowed by the plain 'and' alternative.
    boolean = (pp.CaselessKeyword('and not').leaveWhitespace() | pp.CaselessKeyword('and') |
               pp.CaselessKeyword('xor') | pp.CaselessKeyword('or'))('bool')

    # Parentheses
    lpar = pp.Literal('(')('open_subgroup')
    rpar = pp.Literal(')')('close_subgroup')

    # Hosts selection: clustershell (,!&^[]) syntax is allowed, e.g.
    # host10[10-42].domain.  The ~(boolean) lookahead keeps operator
    # keywords from being consumed as host names.
    hosts = (~(boolean) + pp.Word(pp.alphanums + '-_.,!&^[]'))('hosts')

    # Final grammar, see the docstring for its BNF based on the tokens
    # defined above.  Groups are used to split the parsed results for an
    # easy access.
    full_grammar = pp.Forward()
    item = hosts | lpar + full_grammar + rpar
    full_grammar << pp.Group(item) + pp.ZeroOrMore(pp.Group(boolean + item))  # pylint: disable=expression-not-assigned
    return full_grammar
def grammar():
    """Define the query grammar for the external backend used for testing."""
    # Host selection with clustershell (,!&^[]) syntax allowed,
    # e.g. host10[10-42].domain; grouped for easy result access.
    host_group = pp.Group(pp.Word(pp.alphanums + '-_.,!&^[]')('hosts'))

    full_grammar = pp.Forward()
    full_grammar << host_group + pp.ZeroOrMore(host_group)  # pylint: disable=expression-not-assigned
    return full_grammar
def compute(self):
    """Substitute @attr / !attr references in self.config with raw values.

    Scans self.config for attribute markers, resolves each referenced
    attribute on self.targetobject into self.__attribs__, and -- once
    every reference resolved -- rewrites the config string via
    transformString into self.__result__.
    """
    def getname(obj, name):
        # Resolve attribute `name` on obj; None when missing or unset.
        _val = None
        if hasattr(obj, name):
            _val = getattr(obj, name, None)
        if _val is None:
            return _val
        try:
            if _val.isdynamic: #Todo make this work for non-attributes,non-dynamics (use .issingleton? - what about a concat mode?)
                raise ValueError('Combine plugin cannot process %s because it contains a dynamic class' % name)
        except AttributeError:
            # The value has no .isdynamic, so it is not an attribute object.
            raise TypeError('Expected an attribute but got a %s' % type(_val))
        # Single values render bare; multi-values render comma-joined.
        if _val.issingleton():
            _ret = '%s' % _val[0].raw()
        else:
            _ret = ','.join(['%s' % v.raw() for v in _val])
        return _ret

    # Both '@' and '!' mark an attribute reference; the marker itself
    # is suppressed so only the name is captured.
    attrmarker = (p.Literal('@') | p.Literal('!'))
    attrmatch = attrmarker.suppress() + p.Word(p.alphanums)
    for i in attrmatch.scanString(self.config):
        x = i[0][0]
        self.__attribs__[x] = getname(self.targetobject, x)

    # Computable only when every referenced attribute resolved.
    if all(v is not None for v in self.__attribs__.values()):
        self.computable = True

    if self.computable:
        # NOTE(review): only '@' markers are substituted here even though
        # the scan above also accepted '!' -- confirm this is intended.
        attrmatch = p.Literal('@').suppress() + p.Word(p.alphanums)
        attrmatch.setParseAction(self.substitute)
        attrlist = p.ZeroOrMore(p.Optional(p.White()) + attrmatch + p.Optional(p.White()))
        self.__result__ = attrlist.transformString(self.config)
def parseTerms():
    """Build (once) and return the arithmetic expression parser.

    expop   :: '^'
    multop  :: '*' | '/'
    addop   :: '+' | '-'
    integer :: ['+' | '-'] '0'..'9'+
    atom    :: PI | E | real | fn '(' expr ')' | '(' expr ')'
    factor  :: atom [ expop factor ]*
    term    :: factor [ multop factor ]*
    expr    :: term [ addop term ]*
    """
    global terms
    # Lazily construct the grammar and cache it in the module-level
    # `terms`.  NOTE(review): pushFirst/pushUMinus parse actions are
    # defined elsewhere in this module.
    if not terms:
        point = Literal( "." )
        e = CaselessLiteral( "E" )
        # Real number with optional sign, fraction and exponent.
        fnumber = Combine( Word( "+-"+nums, nums ) +
                           Optional( point + Optional( Word( nums ) ) ) +
                           Optional( e + Word( "+-"+nums, nums ) ) )
        ident = Word(alphas, alphas+nums+"_$")
        plus = Literal( "+" )
        minus = Literal( "-" )
        mult = Literal( "*" )
        div = Literal( "/" )
        lpar = Literal( "(" ).suppress()
        rpar = Literal( ")" ).suppress()
        addop = plus | minus
        multop = mult | div
        expop = Literal( "^" )
        pi = CaselessLiteral( "PI" )
        expr = Forward()
        atom = (Optional("-") + ( pi | e | fnumber | ident + lpar + expr + rpar ).setParseAction( pushFirst ) | ( lpar + expr.suppress() + rpar )).setParseAction(pushUMinus)
        # by defining exponentiation as "atom [ ^ factor ]..." instead of
        # "atom [ ^ atom ]...", we get right-to-left exponents, instead of
        # left-to-right; that is, 2^3^2 = 2^(3^2), not (2^3)^2.
        factor = Forward()
        factor << atom + ZeroOrMore( ( expop + factor ).setParseAction( pushFirst ) )
        term = factor + ZeroOrMore( ( multop + factor ).setParseAction( pushFirst ) )
        expr << term + ZeroOrMore( ( addop + term ).setParseAction( pushFirst ) )
        terms = expr
    return terms
def __init__(self):
    """Build pyparsing patterns that detect reference-style numbers in text.

    FIX: the original was severely truncated -- the `punctuation` line had
    a broken string literal and the definitions of most names used below
    (printable, letter, nums, word_end, ...) were missing.  Reconstructed
    from the intact sibling definition earlier in this file.
    """
    # Words that may contain internal dashes, e.g. "well-known".
    real_word_dashes = Word(pyparsing.alphas + '-')
    punctuation = Word('.!?:,;-')
    punctuation_no_dash = Word('.!?:,;')
    punctuation_reference_letter = Word('.:,;-')
    printable = Word(pyparsing.printables, exact=1)
    letter = Word(pyparsing.alphas, exact=1)
    letter_reference = punctuation_reference_letter + letter
    # A number optionally followed by reference letters, e.g. "12a,b".
    nums = Word(pyparsing.nums) + Optional(letter) + \
        ZeroOrMore(letter_reference)
    # Allow trailing closing brackets before the word boundary.
    word_end = pyparsing.ZeroOrMore(Word(')') | Word('}') | Word(']')) + \
        WordEnd()
    # A word immediately followed by a number, e.g. "word12".
    self.single_number = (
        WordStart() +
        real_word_dashes +
        nums +
        word_end
    )
    # Bracketed reference numbers after a word, e.g. "foo(12)".
    self.single_number_parens = (
        printable +
        letter +
        Optional(punctuation_no_dash) +
        pyparsing.OneOrMore(
            Word('([{', exact=1) +
            pyparsing.OneOrMore(nums | Word('-')) +
            Word(')]}', exact=1)
        ) +
        word_end
    )
    # A number directly followed by punctuation, e.g. "foo12,34".
    self.number_then_punctuation = (
        printable +
        letter +
        nums +
        punctuation +
        pyparsing.ZeroOrMore(nums | punctuation) +
        word_end
    )
    # Punctuation directly followed by a number, e.g. "foo,12".
    self.punctuation_then_number = (
        printable +
        letter +
        punctuation_no_dash +
        nums +
        pyparsing.ZeroOrMore(punctuation | nums) +
        word_end
    )
def parse(cls, search=False):
    """Parse the main query text. This method will also set the
    class attribute `parsed_search` to the parsed query, and it will
    return it too.

    :param cls: The class object, since it is a static method
    :type cls: object
    :param search: Search text string if a custom search string is to be
        used. False if the `cls.search` class attribute is to be used.
    :type search: str
    :returns: Parsed query
    :rtype: list

    >>> print(DocMatcher.parse('hello author = einstein'))
    [['hello'],['author','=','einstein']]
    >>> print(DocMatcher.parse(''))
    []
    >>> print(\
            DocMatcher.parse(\
                '"hello world whatever =" tags = \\\'hello ====\\\''))
    [['hello world whatever ='],['tags','hello ====']]
    >>> print(DocMatcher.parse('hello'))
    [['hello']]
    """
    import pyparsing
    cls.logger.debug('Parsing search')
    # Fall back to the stored class-level search string.
    search = search or cls.search
    # NOTE(review): computed but never used below -- presumably legacy.
    papis_alphas = pyparsing.printables.replace('=', '')
    papis_key = pyparsing.Word(pyparsing.alphanums + '-')
    # A value is a double-quoted, single-quoted or bare token.
    papis_value = pyparsing.QuotedString(
        quoteChar='"', escChar='\\', escQuote='\\'
    ) ^ pyparsing.QuotedString(
        quoteChar="'", escQuote='\\'
    ) ^ papis_key
    # '=' with optional surrounding spaces.
    equal = pyparsing.ZeroOrMore(" ") + \
        pyparsing.Literal('=') + \
        pyparsing.ZeroOrMore(" ")
    # A query is a sequence of groups, each "key = ... value" or a value.
    papis_query = pyparsing.ZeroOrMore(
        pyparsing.Group(
            pyparsing.ZeroOrMore(
                papis_key + equal
            ) + papis_value
        )
    )
    parsed = papis_query.parseString(search)
    cls.logger.debug('Parsed search = %s' % parsed)
    # Cache the result on the class before returning it.
    cls.parsed_search = parsed
    return cls.parsed_search
def _type_instance(self):
    """A type declaration.

    The modifiers of a typedef:
        struct s *P[];
                 ^^^^<- The type instance.

    Matches, in order of preference: a function pointer, a function
    object, or a simple (possibly pointer/array/bitfield) instance.
    """
    type_instance = (
        # Function pointer  (*f)(int foobar)
        pyparsing.ZeroOrMore(_STAR)
        + _OPEN_PARENTHESIS
        + pyparsing.Optional(_STAR("function_pointer"))
        + self._identifier()("type_instance_name")
        + _CLOSE_PARENTHESIS
        + parsers.anything_in_parentheses()("function_args")
    ) | (
        # Function object  f(foo bar *)
        pyparsing.ZeroOrMore(_STAR)
        + self._identifier()("type_instance_name")
        + parsers.anything_in_parentheses()("function_args")
    ) | (
        # Simple form: *foo[10];
        pyparsing.ZeroOrMore(_STAR)("type_pointer")
        + self._identifier()("type_instance_name")

        # Possibly array: [], [][]
        + pyparsing.ZeroOrMore(
            _OPEN_BRACKET
            # The '*' suffix collects one result per bracket pair.
            + pyparsing.SkipTo(_CLOSE_BRACKET)(
                "brackets_with_expression_inside*")
            + _CLOSE_BRACKET)

        # Bitfields: int x: 7;
        + pyparsing.Optional(
            _COLON
            + pyparsing.SkipTo(
                _SEMICOLON | _COMMA)("bitfield")
        )
    )
    return pyparsing.Group(
        type_instance
        + self._maybe_attributes()
    )
def grammar():
    """Define the query grammar.

    Backus-Naur form (BNF) of the grammar::

        <grammar>     ::= <item> | <item> <and_or> <grammar>
        <item>        ::= [<neg>] <query-token> | [<neg>] "(" <grammar> ")"
        <query-token> ::= <token> | <hosts>
        <token>       ::= <category>:<key> [<operator> <value>]

    Given that the pyparsing library defines the grammar in a BNF-like
    style, for the details of the tokens not specified above check
    directly the source code.

    Returns:
        pyparsing.ParserElement: the grammar parser.
    """
    # Boolean operators
    and_or = (pp.CaselessKeyword('and') | pp.CaselessKeyword('or'))('bool')
    # 'neg' is used as label to allow the use of dot notation, 'not' is a
    # reserved word in Python.
    neg = pp.CaselessKeyword('not')('neg')

    operator = pp.oneOf(OPERATORS, caseless=True)('operator')  # Comparison operators
    quoted_string = pp.quotedString.copy().addParseAction(pp.removeQuotes)  # Both single and double quotes are allowed

    # Parentheses
    lpar = pp.Literal('(')('open_subgroup')
    rpar = pp.Literal(')')('close_subgroup')

    # Hosts selection: glob (*) and clustershell (,!&^[]) syntaxes are
    # allowed, i.e. host10[10-42].*.domain.  The lookahead keeps boolean
    # keywords from being consumed as host names.
    hosts = quoted_string | (~(and_or | neg) + pp.Word(pp.alphanums + '-_.*,!&^[]'))

    # Key-value token for allowed categories using the available
    # comparison operators, i.e. F:key = value
    category = pp.oneOf(CATEGORIES, caseless=True)('category')
    key = pp.Word(pp.alphanums + '-_.%@:')('key')
    selector = pp.Combine(category + ':' + key)  # i.e. F:key

    # All printable characters except the parentheses that are part of
    # this or the global grammar.
    all_but_par = ''.join([c for c in pp.printables if c not in ('(', ')', '{', '}')])
    value = (quoted_string | pp.Word(all_but_par))('value')
    token = selector + pp.Optional(operator + value)

    # Final grammar, see the docstring for its BNF based on the tokens
    # defined above.  Groups are used to split the parsed results for an
    # easy access.
    full_grammar = pp.Forward()
    item = pp.Group(pp.Optional(neg) + (token | hosts('hosts'))) | pp.Group(
        pp.Optional(neg) + lpar + full_grammar + rpar)
    full_grammar << item + pp.ZeroOrMore(pp.Group(and_or) + full_grammar)  # pylint: disable=expression-not-assigned
    return full_grammar
def grammar(backend_keys):
    """Define the main multi-query grammar.

    Cumin provides a user-friendly generic query language that allows to
    combine the results of subqueries for multiple backends:

    * Each query part can be composed with the others using boolean
      operators ``and``, ``or``, ``and not``, ``xor``.
    * Multiple query parts can be grouped together with parentheses
      ``(``, ``)``.
    * Specific backend query ``I{backend-specific query syntax}``, where
      ``I`` is an identifier for the specific backend.
    * Alias replacement, according to aliases defined in the
      configuration file ``A:group1``.
    * The identifier ``A`` is reserved for the aliases replacement and
      cannot be used to identify a backend.
    * A complex query example:
      ``(D{host1 or host2} and (P{R:Class = Role::MyClass} and not A:group1)) or D{host3}``

    Backus-Naur form (BNF) of the grammar::

        <grammar>       ::= <item> | <item> <boolean> <grammar>
        <item>          ::= <backend_query> | <alias> | "(" <grammar> ")"
        <backend_query> ::= <backend> "{" <query> "}"
        <alias>         ::= A:<alias_name>
        <boolean>       ::= "and not" | "and" | "xor" | "or"

    Given that the pyparsing library defines the grammar in a BNF-like
    style, for the details of the tokens not specified above check
    directly the source code.

    Arguments:
        backend_keys (list): list of the GRAMMAR_PREFIX for each
            registered backend.

    Returns:
        pyparsing.ParserElement: the grammar parser.
    """
    # Boolean operators.  'and not' must precede plain 'and'.
    boolean = (pp.CaselessKeyword('and not').leaveWhitespace() | pp.CaselessKeyword('and') |
               pp.CaselessKeyword('xor') | pp.CaselessKeyword('or'))('bool')

    # Parentheses
    lpar = pp.Literal('(')('open_subgroup')
    rpar = pp.Literal(')')('close_subgroup')

    # Backend query: P{PuppetDB specific query}
    query_start = pp.Combine(pp.oneOf(backend_keys, caseless=True)('backend') + pp.Literal('{'))
    query_end = pp.Literal('}')
    # Allow the backend specific query to use the end_query token as
    # well, as long as it's in a quoted string, and fail if there is a
    # query_start token before the first query_end is reached.
    query = pp.SkipTo(query_end, ignore=pp.quotedString, failOn=query_start)('query')
    backend_query = pp.Combine(query_start + query + query_end)

    # Alias
    alias = pp.Combine(pp.CaselessKeyword('A') + ':' + pp.Word(pp.alphanums + '-_.+')('alias'))

    # Final grammar, see the docstring for its BNF based on the tokens
    # defined above.  Groups are used to have an easy dictionary access
    # to the parsed results.
    full_grammar = pp.Forward()
    item = backend_query | alias | lpar + full_grammar + rpar
    full_grammar << pp.Group(item) + pp.ZeroOrMore(pp.Group(boolean + item))  # pylint: disable=expression-not-assigned
    return full_grammar
def __init__(self, comm_file_path):
    """Parse a command file into self.beam_data_model.

    The file is a sequence of paragraphs of the form
    ``[lhs =] operator(kw1=expr, kw2=expr, ...)[;]``.

    Args:
        comm_file_path: path of the command file to parse.
    """
    expression_spaced = Forward()
    expression = Forward()
    # NOTE(review): declared but never used below -- presumably legacy.
    args_spaced = Forward()

    cb = Optional(',') + ')'  # closing bracket might include a ','
    ob = Optional(' ') + '(' + Optional(' ')  # opening bracket might include surrounding ' '

    # A value is an identifier, a number or a single-quoted string.
    value = (Or([pyparsing_common.identifier.copy().setResultsName('id'),
                 pyparsing_common.number.copy().setResultsName('number'),
                 QuotedString("'").setResultsName('string')])).setParseAction(Value).setResultsName('value')
    values = (ZeroOrMore(value.setResultsName('valueList', listAllMatches=True) + Optional(','))).setParseAction(
        Values)

    keyword = pyparsing_common.identifier.copy()
    # keyword = expression, e.g. energy=1.5
    keyword_argument = (
        keyword.setResultsName('keyword') + '=' + expression_spaced.setResultsName('expression')
    ).setParseAction(Keyword_argument)
    keyword_arguments = (
        keyword_argument.setResultsName('keyword_argument', listAllMatches=True) +
        ZeroOrMore(',' + keyword_argument.setResultsName('keyword_argument', listAllMatches=True))
    ).setParseAction(Keyword_arguments)

    # An expression is a value, a value list, an _F(...) call, or a
    # parenthesised expression.
    expression << (Or([
        value, (ob + values.setResultsName('values') + cb),
        '_F' + ob + keyword_arguments.setResultsName('keyword_arguments') + cb,
        ob + expression.setResultsName('expression') + cb
    ])).setParseAction(Expression)
    expression_spaced << (Or([expression, ob + expression_spaced + cb]))

    left_side = pyparsing_common.identifier.setResultsName('left_side')
    operator_name = pyparsing_common.identifier.setResultsName('operator_name')
    paragraph = (Optional(left_side + "=") + operator_name + ob + Optional(keyword_arguments
                                                                           .setResultsName(
        'keyword_arguments')) + cb + Optional(';')).setParseAction(Paragraph)
    file = OneOrMore(paragraph).setResultsName('paragraphs').setParseAction(File)

    self.beam_data_model = file.parseFile(comm_file_path)
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。