# Robot Framework -- statementlexers.py
# (Listing recovered from generated documentation; see robotframework sources.)
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
from robot.utils import normalize_whitespace
from robot.variables import is_assign

from .tokens import Token


class Lexer:
    """Base class for all statement lexers.

    Subclasses override :meth:`handles` to claim a statement and implement
    :meth:`accepts_more`, :meth:`input` and :meth:`lex`.
    """

    def __init__(self, ctx):
        # Lexing context object; gives subclasses access to file-level
        # helpers such as ``lex_setting`` and ``add_language``.
        self.ctx = ctx

    @classmethod
    def handles(cls, statement, ctx):
        """Return True if this lexer can lex ``statement``.

        The base implementation accepts everything; subclasses narrow it.
        """
        return True

    def accepts_more(self, statement):
        """Return True if ``statement`` continues the current block."""
        raise NotImplementedError

    def input(self, statement):
        """Feed a statement (a sequence of tokens) to the lexer."""
        raise NotImplementedError

    def lex(self):
        """Assign token types to the previously fed statement."""
        raise NotImplementedError
class StatementLexer(Lexer):
    """Base class for lexers handling a single statement at a time."""

    # Token type subclasses assign; None means the type is decided in lex().
    token_type = None

    def __init__(self, ctx):
        super().__init__(ctx)
        # The statement (sequence of tokens) fed via input().
        self.statement = None

    def accepts_more(self, statement):
        # One statement per lexer instance: never accept a continuation.
        return False

    def input(self, statement):
        self.statement = statement

    def lex(self):
        raise NotImplementedError
class SingleType(StatementLexer):
    """Lexer giving every token of the statement the same ``token_type``."""

    def lex(self):
        for token in self.statement:
            token.type = self.token_type
class TypeAndArguments(StatementLexer):
    """Lexer typing the first token as ``token_type`` and the rest as arguments."""

    def lex(self):
        self.statement[0].type = self.token_type
        for token in self.statement[1:]:
            token.type = Token.ARGUMENT
class SectionHeaderLexer(SingleType):
    """Base class for section header lexers (``*** Settings ***`` etc.)."""

    @classmethod
    def handles(cls, statement, ctx):
        # Section headers are recognized by their leading asterisk.
        return statement[0].value.startswith('*')
# Concrete section header lexers: each only selects the header token type.
# NOTE(review): the class header lines were lost in the generated listing;
# names and bases reconstructed from the Robot Framework sources -- confirm.

class SettingSectionHeaderLexer(SectionHeaderLexer):
    token_type = Token.SETTING_HEADER


class VariableSectionHeaderLexer(SectionHeaderLexer):
    token_type = Token.VARIABLE_HEADER


class TestCaseSectionHeaderLexer(SectionHeaderLexer):
    token_type = Token.TESTCASE_HEADER


class TaskSectionHeaderLexer(SectionHeaderLexer):
    token_type = Token.TASK_HEADER


class KeywordSectionHeaderLexer(SectionHeaderLexer):
    token_type = Token.KEYWORD_HEADER


class CommentSectionHeaderLexer(SectionHeaderLexer):
    token_type = Token.COMMENT_HEADER
class InvalidSectionHeaderLexer(SectionHeaderLexer):
    """Lexer for unrecognized section headers; error handling is in the context."""

    def lex(self):
        self.ctx.lex_invalid_section(self.statement)
class CommentLexer(SingleType):
    """Lexer typing a whole statement as a comment."""

    token_type = Token.COMMENT
class ImplicitCommentLexer(CommentLexer):
    """Comment lexer that also recognizes the ``language:`` configuration line."""

    def input(self, statement):
        super().input(statement)
        # A single-token statement like 'language: fi' selects the parsing
        # language. Prefix match is case-insensitive.
        if len(statement) == 1 and statement[0].value.lower().startswith('language:'):
            lang = statement[0].value.split(':', 1)[1].strip()
            try:
                self.ctx.add_language(lang)
            except DataError:
                statement[0].set_error(
                    f"Invalid language configuration: "
                    f"Language '{lang}' not found nor importable as a language module."
                )
            else:
                statement[0].type = Token.CONFIG

    def lex(self):
        for token in self.statement:
            # Tokens already typed in input() (e.g. CONFIG) keep their type;
            # everything else becomes a comment.
            if not token.type:
                token.type = self.token_type
class SettingLexer(StatementLexer):
    """Lexer for settings; the actual lexing is delegated to the context."""

    def lex(self):
        self.ctx.lex_setting(self.statement)
class TestOrKeywordSettingLexer(SettingLexer):
    """Setting lexer for bracketed settings such as ``[Setup]``."""

    @classmethod
    def handles(cls, statement, ctx):
        marker = statement[0].value
        # Truthiness guard keeps ''[0] from raising on an empty marker.
        return marker and marker[0] == '[' and marker[-1] == ']'
class VariableLexer(TypeAndArguments):
    """Lexer for variable table entries: name token plus value arguments."""

    token_type = Token.VARIABLE
class KeywordCallLexer(StatementLexer):
    """Lexer for keyword calls inside tests and keywords.

    When a template is active, every token is an argument to the template;
    otherwise the statement is assignments, a keyword name, and arguments.
    """

    def lex(self):
        if self.ctx.template_set:
            self._lex_as_template()
        else:
            self._lex_as_keyword_call()

    def _lex_as_template(self):
        # With a template set, all tokens are arguments to the template.
        for token in self.statement:
            token.type = Token.ARGUMENT

    def _lex_as_keyword_call(self):
        keyword_seen = False
        for token in self.statement:
            if keyword_seen:
                token.type = Token.ARGUMENT
            elif is_assign(token.value, allow_assign_mark=True):
                # Leading '${var} =' style tokens are assignments.
                token.type = Token.ASSIGN
            else:
                # First non-assignment token is the keyword name.
                token.type = Token.KEYWORD
                keyword_seen = True
class ForLexer(StatementLexer):
    """Lexer for FOR loop headers: FOR, loop variables, separator, values."""

    # Valid loop separators; compared after whitespace normalization.
    separators = ('IN', 'IN RANGE', 'IN ENUMERATE', 'IN ZIP')

    @classmethod
    def handles(cls, statement, ctx):
        return statement[0].value == 'FOR'

    def lex(self):
        self.statement[0].type = Token.FOR
        separator_seen = False
        for token in self.statement[1:]:
            if separator_seen:
                token.type = Token.ARGUMENT
            elif normalize_whitespace(token.value) in self.separators:
                token.type = Token.FOR_SEPARATOR
                separator_seen = True
            else:
                # Everything before the separator is a loop variable.
                token.type = Token.VARIABLE
class IfHeaderLexer(TypeAndArguments):
    """Lexer for block IF headers: 'IF' plus at most one condition token."""

    token_type = Token.IF

    @classmethod
    def handles(cls, statement, ctx):
        return statement[0].value == 'IF' and len(statement) <= 2
class InlineIfHeaderLexer(StatementLexer):
    """Lexer for inline IF: optional assignments followed by 'IF' and a call."""

    token_type = Token.INLINE_IF

    @classmethod
    def handles(cls, statement, ctx):
        # Accept only if 'IF' appears after zero or more assignment tokens.
        for token in statement:
            if token.value == 'IF':
                return True
            if not is_assign(token.value, allow_assign_mark=True):
                return False
        return False

    def lex(self):
        if_seen = False
        for token in self.statement:
            if if_seen:
                token.type = Token.ARGUMENT
            elif token.value == 'IF':
                token.type = Token.INLINE_IF
                if_seen = True
            else:
                token.type = Token.ASSIGN
class ElseIfHeaderLexer(TypeAndArguments):
    """Lexer for ELSE IF headers; tolerates extra whitespace in the marker."""

    token_type = Token.ELSE_IF

    @classmethod
    def handles(cls, statement, ctx):
        return normalize_whitespace(statement[0].value) == 'ELSE IF'
class ElseHeaderLexer(TypeAndArguments):
    """Lexer for ELSE headers."""

    token_type = Token.ELSE

    @classmethod
    def handles(cls, statement, ctx):
        return statement[0].value == 'ELSE'
class TryHeaderLexer(TypeAndArguments):
    """Lexer for TRY headers."""

    token_type = Token.TRY

    @classmethod
    def handles(cls, statement, ctx):
        return statement[0].value == 'TRY'
class ExceptHeaderLexer(StatementLexer):
    """Lexer for EXCEPT headers: patterns, 'AS' variable, and 'type=' option."""

    token_type = Token.EXCEPT

    @classmethod
    def handles(cls, statement, ctx):
        return statement[0].value == 'EXCEPT'

    def lex(self):
        self.statement[0].type = Token.EXCEPT
        last_pattern = None
        as_seen = False
        for token in self.statement[1:]:
            if token.value == 'AS':
                token.type = Token.AS
                as_seen = True
            elif as_seen:
                # Tokens after 'AS' name the variable receiving the error.
                token.type = Token.VARIABLE
            else:
                token.type = Token.ARGUMENT
                last_pattern = token
        # A final 'type=' pattern is actually the match-type option.
        if last_pattern and last_pattern.value.startswith('type='):
            last_pattern.type = Token.OPTION
class FinallyHeaderLexer(TypeAndArguments):
    """Lexer for FINALLY headers."""

    token_type = Token.FINALLY

    @classmethod
    def handles(cls, statement, ctx):
        return statement[0].value == 'FINALLY'
class WhileHeaderLexer(StatementLexer):
    """Lexer for WHILE headers: condition arguments plus optional 'limit='."""

    token_type = Token.WHILE

    @classmethod
    def handles(cls, statement, ctx):
        return statement[0].value == 'WHILE'

    def lex(self):
        self.statement[0].type = Token.WHILE
        for token in self.statement[1:]:
            token.type = Token.ARGUMENT
        # A trailing 'limit=' argument configures the loop iteration limit.
        if self.statement[-1].value.startswith('limit='):
            self.statement[-1].type = Token.OPTION
class EndLexer(TypeAndArguments):
    """Lexer for END markers closing FOR/IF/TRY/WHILE blocks."""

    token_type = Token.END

    @classmethod
    def handles(cls, statement, ctx):
        return statement[0].value == 'END'
class ReturnLexer(TypeAndArguments):
    """Lexer for RETURN statements."""

    token_type = Token.RETURN_STATEMENT

    @classmethod
    def handles(cls, statement, ctx):
        return statement[0].value == 'RETURN'
class ContinueLexer(TypeAndArguments):
    """Lexer for CONTINUE statements."""

    token_type = Token.CONTINUE

    @classmethod
    def handles(cls, statement, ctx):
        return statement[0].value == 'CONTINUE'
class BreakLexer(TypeAndArguments):
    """Lexer for BREAK statements."""

    token_type = Token.BREAK

    @classmethod
    def handles(cls, statement, ctx):
        return statement[0].value == 'BREAK'
# Helper signatures used above (defined in robot.utils.normalizing and
# robot.variables.search):
#   normalize_whitespace(string)
#   is_assign(string, identifiers='$@&', allow_assign_mark=False)