Coverage for src/robotide/lib/compat/pygments/robotframework.py: 81%
570 statements
« prev ^ index » next coverage.py v7.8.0, created at 2025-05-06 10:40 +0100
1"""
2 pygments.lexers.robotframework
3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5 Lexer for Robot Framework.
7 :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
8 :license: BSD, see LICENSE for details.
9"""
10from __future__ import annotations
12# Copyright 2012 Nokia Siemens Networks Oyj
13#
14# Licensed under the Apache License, Version 2.0 (the "License");
15# you may not use this file except in compliance with the License.
16# You may obtain a copy of the License at
17#
18# http://www.apache.org/licenses/LICENSE-2.0
19#
20# Unless required by applicable law or agreed to in writing, software
21# distributed under the License is distributed on an "AS IS" BASIS,
22# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
23# See the License for the specific language governing permissions and
24# limitations under the License.
26# Copyright 2012-2015 Nokia Networks
27# Copyright 2023- Robot Framework Foundation
28#
29# Licensed under the Apache License, Version 2.0 (the "License");
30# you may not use this file except in compliance with the License.
31# You may obtain a copy of the License at
32#
33# http://www.apache.org/licenses/LICENSE-2.0
34#
35# Unless required by applicable law or agreed to in writing, software
36# distributed under the License is distributed on an "AS IS" BASIS,
37# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
38# See the License for the specific language governing permissions and
39# limitations under the License.
40#
41# This is a modified copy from Pygments 2.17.0, by Hélio Guilherme 2023-12-09
42# The changes in this file were done with the purpose to add colorization
43# support for test suites using languages other than English and supported
44# by Robot Framework 6.0 or higher (at the time, version is 6.1.1)
47import re
49from pygments.lexer import Lexer
50from pygments.token import Token
51from multiprocessing import shared_memory
52from robotide.lib.compat.parsing.language import Language
53from robotide.utils import normalize_lc, normalize_dict, normalize_pipe_list
# Guard: the localized colorization in this module requires RIDE's Language
# support; without it, fail the import so the original Pygments lexer is used.
if not Language:  # Let's import original
    raise ImportError
58__all__ = ['RobotFrameworkLexer']
# Pygments token aliases for Robot Framework syntax elements. Mapping onto
# generic Pygments token types keeps standard styling themes applicable.
HEADING = Token.Generic.Heading        # *** Section *** header rows
SETTING = Token.Keyword.Namespace      # setting names (e.g. Documentation)
IMPORT = Token.Name.Namespace          # Library/Resource/Variables imports
TC_KW_NAME = Token.Generic.Subheading  # test case / keyword names
KEYWORD = Token.Name.Function          # keyword calls
ARGUMENT = Token.String                # keyword arguments
VARIABLE = Token.Name.Variable         # ${...}, @{...}, &{...}, %{...}
COMMENT = Token.Comment                # comment cells
SEPARATOR = Token.Punctuation          # cell separators
SYNTAX = Token.Punctuation             # syntax markers ('{', '}', '[', ']', '...')
GHERKIN = Token.Generic.Emph           # Given/When/Then/And/But prefixes
ERROR = Token.Error                    # invalid constructs
def get_key_by_value(table: dict, value: str) -> str:
    """Return the first key in *table* whose value equals *value*.

    Used to reverse-translate a normalized English marker (e.g. 'settings')
    into the corresponding localized label. If no entry matches, *value*
    itself is returned so deprecated/old labels keep working unchanged.
    """
    # next() with a default replaces the manual search loop; dict order is
    # preserved, so the first matching key wins exactly as before.
    return next((k for k, v in table.items() if v == value), value)
class RobotFrameworkLexer(Lexer):
    """
    For Robot Framework test data.

    Supports both space and pipe separated plain text formats.

    .. versionadded:: 1.6
    """
    name = 'RobotFramework'
    url = 'http://robotframework.org'
    aliases = ['robotframework']
    filenames = ['*.robot', '*.resource']
    mimetypes = ['text/x-robotframework']

    def __init__(self, **options):
        # Robot Framework data uses two-space cell separation and is UTF-8.
        options['tabsize'] = 2
        options['encoding'] = 'UTF-8'
        Lexer.__init__(self, **options)
        self.language = options.get('doc language', None)
        # The active UI language is published by RIDE via a shared-memory
        # list named "language"; entries use '_' where Language expects '-'.
        set_lang = shared_memory.ShareableList(name="language")
        if not self.language:
            shared_lang = set_lang[0].replace('_', '-') or 'en'
            self.new_lang = Language.from_name(shared_lang)
        else:
            # Only a single language is considered here.
            self.new_lang = Language.from_name(self.language[0].replace('_', '-'))

    def get_tokens_unprocessed(self, text):
        """Yield ``(index, token, value)`` triples for *text*, row by row.

        Rows are first split into cells/separators by RowTokenizer, then
        each cell is further split into variable parts by VariableTokenizer.
        """
        row_tokenizer = RowTokenizer(self.new_lang)
        var_tokenizer = VariableTokenizer()
        index = 0
        for row in text.splitlines():
            for val, tokn in row_tokenizer.tokenize(row):
                for value, token in var_tokenizer.tokenize(val, tokn):
                    if value:
                        yield index, token, str(value)
                        index += len(value)
class VariableTokenizer:
    """Splits a cell's text into variable and non-variable tokens.

    Handles nested variables in the base name, an optional item access
    (``[...]``) part, and any trailing text — all recursively.
    """

    def tokenize(self, string, token):
        splitter = VariableSplitter(string, identifiers='$@%&')
        # Comments and errors are never split further, nor is text
        # containing no variable at all.
        if splitter.start < 0 or token in (COMMENT, ERROR):
            yield string, token
            return
        for chunk, chunk_token in self._tokenize(splitter, string, token):
            if chunk:
                yield chunk, chunk_token

    def _tokenize(self, splitter, string, orig_token):
        yield string[:splitter.start], orig_token
        yield splitter.identifier + '{', SYNTAX
        yield from self.tokenize(splitter.base, VARIABLE)
        yield '}', SYNTAX
        if splitter.index is not None:
            yield '[', SYNTAX
            yield from self.tokenize(splitter.index, VARIABLE)
            yield ']', SYNTAX
        # Text after the variable may itself contain more variables.
        yield from self.tokenize(string[splitter.end:], orig_token)
class RowTokenizer:
    # Class-level cache of the resolved language; None until first resolved.
    new_lang = None

    def __init__(self, new_lang):
        if self.new_lang is None:
            if new_lang is None:
                # Fall back to the language published via shared memory.
                set_lang = shared_memory.ShareableList(name="language")
                self.new_lang = Language.from_name(set_lang[0].replace('_', '-'))
            else:
                self.new_lang = new_lang
        self._table = UnknownTable()
        self._splitter = RowSplitter()
        testcases = TestCaseTable(new_lang=self.new_lang)
        settings = SettingTable(testcases.set_default_template, new_lang=self.new_lang)
        variables = VariableTable(new_lang=self.new_lang)
        keywords = KeywordTable(new_lang=self.new_lang)
        comments = CommentsTable(new_lang=self.new_lang)
        # Map localized (normalized) section header names to their table
        # tokenizers; keys come from the active language's headers dict.
        normalized_headers = normalize_dict(self.new_lang.headers)
        self._tables = {get_key_by_value(normalized_headers, 'settings'): settings,
                        get_key_by_value(normalized_headers, 'metadata'): settings,
                        get_key_by_value(normalized_headers, 'variables'): variables,
                        get_key_by_value(normalized_headers, 'testcases'): testcases,
                        get_key_by_value(normalized_headers, 'tasks'): testcases,
                        get_key_by_value(normalized_headers, 'keywords'): keywords,
                        get_key_by_value(normalized_headers, 'comments'): comments}

    def tokenize(self, row):
        """Yield ``(value, token)`` pairs for one raw data row."""
        commented = False
        heading = False
        for index, value in enumerate(self._splitter.split(row)):
            # First value, and every second after that, is a separator.
            index, separator = divmod(index-1, 2)
            if value.startswith('#'):
                # Everything from '#' to end of row is a comment.
                commented = True
            elif index == 0 and value.startswith('*'):
                # A '*'-prefixed first cell starts a new section table.
                self._table = self._start_table(value)
                heading = True
            yield from self._tokenize(value, index, commented,
                                      separator, heading)
        self._table.end_row()

    def _start_table(self, header):
        # Unknown/localized-mismatch headers fall back to UnknownTable.
        name = normalize_lc(header, remove='*')
        return self._tables.get(name, UnknownTable())

    def _tokenize(self, value, index, commented, separator, heading):
        if commented:
            yield value, COMMENT
        elif separator:
            yield value, SEPARATOR
        elif heading:
            yield value, HEADING
        else:
            # Delegate normal cells to the current section's table.
            yield from self._table.tokenize(value, index)
class RowSplitter:
    """Splits a raw data row into alternating separator and cell values.

    Supports both multi-space and pipe separated formats. The generated
    sequence always starts with a (possibly empty) separator and ends with
    a newline, which RowTokenizer relies on for its index bookkeeping.
    """
    _space_splitter = re.compile('( {2,})')
    _pipe_splitter = re.compile(r'((?:^| +)\|(?: +|$))')

    def split(self, row):
        # Pipe format is recognized by a leading '| '. A conditional
        # expression replaces the fragile `cond and a or b` idiom.
        splitter = (self._split_from_pipes if row.startswith('| ')
                    else self._split_from_spaces)
        yield from splitter(row)
        yield '\n'

    def _split_from_spaces(self, row):
        yield ''  # Start with (pseudo)separator similarly as with pipes
        yield from self._space_splitter.split(row)

    def _split_from_pipes(self, row):
        # First split consumes the leading '| ' separator.
        _, separator, rest = self._pipe_splitter.split(row, 1)
        yield separator
        while self._pipe_splitter.search(rest):
            cell, separator, rest = self._pipe_splitter.split(rest, 1)
            yield cell
            yield separator
        yield rest
class Tokenizer:
    # Tuple of token types; the cell index selects the token (set by subclasses).
    _tokens = None
    new_lang = None

    def __init__(self, new_lang=None):
        if self.new_lang is None:
            if new_lang is None:
                # Resolve language from the shared-memory list published by RIDE.
                set_lang = shared_memory.ShareableList(name="language")
                try:
                    self.new_lang = Language.from_name(set_lang[0].replace('_', '-'))
                except ValueError:
                    # NOTE(review): this assigns a plain list rather than a
                    # Language instance; sibling classes fall back to
                    # Language.from_name('En') instead — confirm intended.
                    self.new_lang = ['En']
            else:
                self.new_lang = new_lang
        self._index = 0

    def tokenize(self, value):
        """Return ``[(value, token), ...]`` for *value* at the current cell index."""
        values_and_tokens = self._tokenize(value, self._index)
        self._index += 1
        # _tokenize may return either a bare token type or a list of pairs.
        if isinstance(values_and_tokens, type(Token)):
            values_and_tokens = [(value, values_and_tokens)]
        return values_and_tokens

    def _tokenize(self, value, index):
        _ = value
        # Clamp so cells beyond the configured tokens reuse the last token type.
        index = min(index, len(self._tokens) - 1)
        return self._tokens[index]

    @staticmethod
    def _is_assign(value):
        # A trailing '=' is allowed in assignments such as '${var} ='.
        if value.endswith('='):
            value = value[:-1].strip()
        var = VariableSplitter(value, identifiers='$@&')
        return var.start == 0 and var.end == len(value)
class Comment(Tokenizer):
    # Every cell of a comment row is a comment token.
    _tokens = (COMMENT,)
    new_lang = None
class Setting(Tokenizer):
    # First cell is the setting name, remaining cells are its arguments.
    _tokens = (SETTING, ARGUMENT)
    # Original English-only name tuples, kept inert for reference:
    """
    _keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown',
        'suitepostcondition', 'testsetup', 'tasksetup', 'testprecondition',
        'testteardown', 'taskteardown', 'testpostcondition', 'testtemplate', 'tasktemplate')
    _import_settings = ('library', 'resource', 'variables')
    _other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags',
        'testtimeout', 'tasktimeout')
    """
    _custom_tokenizer = None
    new_lang = None

    def __init__(self, template_setter=None, new_lang=None):
        # template_setter, when given, receives the value of the second cell
        # (see _tokenize) — used to record a default template.
        self._template_setter = template_setter
        if self.new_lang is None:
            if new_lang is None:
                set_lang = shared_memory.ShareableList(name="language")
                try:
                    self.new_lang = Language.from_name(set_lang[0].replace('_', '-'))
                except ValueError:
                    pass
            else:
                self.new_lang = new_lang
        if self.new_lang is None:
            # Last-resort fallback to English.
            self.new_lang = Language.from_name('En')
        # Build localized name tuples by reverse-lookup of the normalized
        # English markers in the active language's settings dict.
        self.normalized_settings = normalize_dict(self.new_lang.settings)
        self._keyword_settings = (get_key_by_value(self.normalized_settings, 'suitesetup'),
                                  get_key_by_value(self.normalized_settings, 'suiteprecondition'),
                                  get_key_by_value(self.normalized_settings, 'suiteteardown'),
                                  get_key_by_value(self.normalized_settings, 'suitepostcondition'),
                                  get_key_by_value(self.normalized_settings, 'arguments'),  # Keyword setting
                                  get_key_by_value(self.normalized_settings, 'teardown'),  # Keyword setting
                                  get_key_by_value(self.normalized_settings, 'testsetup'),
                                  get_key_by_value(self.normalized_settings, 'tasksetup'),
                                  get_key_by_value(self.normalized_settings, 'testprecondition'),
                                  get_key_by_value(self.normalized_settings, 'testteardown'),
                                  get_key_by_value(self.normalized_settings, 'taskteardown'),
                                  get_key_by_value(self.normalized_settings, 'testpostcondition'),
                                  get_key_by_value(self.normalized_settings, 'testtemplate'),
                                  get_key_by_value(self.normalized_settings, 'tasktemplate'),
                                  get_key_by_value(self.normalized_settings, 'setup'),
                                  get_key_by_value(self.normalized_settings, 'precondition'),  # obsolete
                                  get_key_by_value(self.normalized_settings, 'postcondition'),  # obsolete
                                  get_key_by_value(self.normalized_settings, 'template'))
        self._import_settings = (get_key_by_value(self.normalized_settings, 'library'),
                                 get_key_by_value(self.normalized_settings, 'resource'),
                                 get_key_by_value(self.normalized_settings, 'variables'))
        self._other_settings = (get_key_by_value(self.normalized_settings, 'documentation'),
                                get_key_by_value(self.normalized_settings, 'metadata'),
                                get_key_by_value(self.normalized_settings, 'keywordtags'),  # New
                                get_key_by_value(self.normalized_settings, 'testtags'),  # New
                                get_key_by_value(self.normalized_settings, 'tasktags'),  # New
                                get_key_by_value(self.normalized_settings, 'tags'),  # New
                                get_key_by_value(self.normalized_settings, 'forcetags'),  # Non-existing
                                get_key_by_value(self.normalized_settings, 'defaulttags'),  # Non-existing
                                get_key_by_value(self.normalized_settings, 'testtimeout'),
                                get_key_by_value(self.normalized_settings, 'tasktimeout'),
                                get_key_by_value(self.normalized_settings, 'timeout'))
        Tokenizer.__init__(self, new_lang=self.new_lang)

    def _tokenize(self, value, index):
        if index == 1 and self._template_setter:
            # Second cell names the template keyword; record it.
            self._template_setter(value)
        if index == 0:
            normalized = normalize_lc(value)
            if normalized in self._keyword_settings:
                # Following cells are a keyword call without assignment.
                self._custom_tokenizer = KeywordCall(support_assign=False)
            elif normalized in self._import_settings:
                self._custom_tokenizer = ImportSetting()
                return IMPORT
        elif self._custom_tokenizer:
            return self._custom_tokenizer.tokenize(value)
        return Tokenizer._tokenize(self, value, index)
class ImportSetting(Tokenizer):
    # Import name cell, then argument cells.
    _tokens = (IMPORT, ARGUMENT)
    new_lang = None

    def __init__(self, new_lang=None):
        if self.new_lang is None:
            if new_lang is None:
                # Resolve language from shared memory when not passed in.
                set_lang = shared_memory.ShareableList(name="language")
                try:
                    self.new_lang = Language.from_name(set_lang[0].replace('_', '-'))
                except ValueError:
                    self.new_lang = None
            else:
                self.new_lang = new_lang
        if self.new_lang is None:
            self.new_lang = Language.from_name('En')
        # Localized names of the three import settings.
        self.normalized_settings = normalize_dict(self.new_lang.settings)
        self._import_settings = (get_key_by_value(self.normalized_settings, 'library'),
                                 get_key_by_value(self.normalized_settings, 'resource'),
                                 get_key_by_value(self.normalized_settings, 'variables'))
        Tokenizer.__init__(self, new_lang=self.new_lang)
class TestCaseSetting(Setting):
    # Original English-only name tuples, kept inert for reference:
    """
    _keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition',
        'template')
    _import_settings = ()
    _other_settings = ('documentation', 'tags', 'timeout')
    """

    def __init__(self, template_setter=None, new_lang=None):
        if self.new_lang is None:
            if new_lang is None:
                set_lang = shared_memory.ShareableList(name="language")
                try:
                    self.new_lang = Language.from_name(set_lang[0].replace('_', '-'))
                except ValueError:
                    self.new_lang = None
            else:
                self.new_lang = new_lang
        if self.new_lang is None:
            self.new_lang = Language.from_name('En')
        # Localized per-test-case setting names (subset of the suite ones).
        self.normalized_settings = normalize_dict(self.new_lang.settings)
        self._keyword_settings = (get_key_by_value(self.normalized_settings, 'setup'),
                                  get_key_by_value(self.normalized_settings, 'precondition'),  # obsolete
                                  get_key_by_value(self.normalized_settings, 'testsetup'),
                                  get_key_by_value(self.normalized_settings, 'teardown'),
                                  get_key_by_value(self.normalized_settings, 'postcondition'),  # obsolete
                                  get_key_by_value(self.normalized_settings, 'template'))
        self._other_settings = (get_key_by_value(self.normalized_settings, 'documentation'),
                                get_key_by_value(self.normalized_settings, 'timeout'),
                                get_key_by_value(self.normalized_settings, 'keywordtags'),  # New
                                get_key_by_value(self.normalized_settings, 'tags'))
        Setting.__init__(self, template_setter=template_setter, new_lang=new_lang)

    def _tokenize(self, value, index):
        normalized = normalize_lc(value)
        if normalized in self._keyword_settings:
            self._custom_tokenizer = KeywordCall(support_assign=False)
        if index == 0:
            # Test-level settings are bracketed, e.g. '[Setup]'; tokenize the
            # inner name with the plain Setting tokenizer.
            stype = Setting(new_lang=self.new_lang)._tokenize(value[1:-1], index)
            return [('[', SYNTAX), (value[1:-1], stype), (']', SYNTAX)]
        elif self._custom_tokenizer:
            return self._custom_tokenizer.tokenize(value)
        return Setting(new_lang=self.new_lang)._tokenize(value, index)
class KeywordSetting(TestCaseSetting):
    # Original English-only name tuples, kept inert for reference:
    """
    _keyword_settings = ('teardown', )
    _other_settings = ('documentation', 'arguments', 'return', 'timeout', 'tags')
    """

    def __init__(self, template_setter=None, new_lang=None):
        if self.new_lang is None:
            if new_lang is None:
                set_lang = shared_memory.ShareableList(name="language")
                try:
                    self.new_lang = Language.from_name(set_lang[0].replace('_', '-'))
                except ValueError:
                    self.new_lang = None
            else:
                self.new_lang = new_lang
        if self.new_lang is None:
            self.new_lang = Language.from_name('En')
        # Localized per-keyword setting names.
        self.normalized_settings = normalize_dict(self.new_lang.settings)
        self._keyword_settings = (get_key_by_value(self.normalized_settings, 'setup'),
                                  get_key_by_value(self.normalized_settings, 'precondition'),  # obsolete
                                  get_key_by_value(self.normalized_settings, 'testsetup'),
                                  get_key_by_value(self.normalized_settings, 'teardown'),
                                  get_key_by_value(self.normalized_settings, 'return'),  # Non-existing
                                  get_key_by_value(self.normalized_settings, 'postcondition'),  # obsolete
                                  get_key_by_value(self.normalized_settings, 'template'))
        self._other_settings = (get_key_by_value(self.normalized_settings, 'documentation'),
                                get_key_by_value(self.normalized_settings, 'timeout'),
                                get_key_by_value(self.normalized_settings, 'keywordtags'),  # New
                                get_key_by_value(self.normalized_settings, 'tags'))
        TestCaseSetting.__init__(self, template_setter=template_setter, new_lang=new_lang)
class Variable(Tokenizer):
    """Tokenizes Variables-section rows: name cell, then value cells."""
    _tokens = (SYNTAX, ARGUMENT)
    new_lang = None

    def _tokenize(self, value, index):
        first_cell = index == 0
        if first_cell and not self._is_assign(value):
            # The first cell must be a variable assignment like '${name}'.
            return ERROR
        return Tokenizer._tokenize(self, value, index)
class KeywordCall(Tokenizer):
    # Keyword name cell, then argument cells.
    _tokens = (KEYWORD, ARGUMENT)
    new_lang = None

    def __init__(self, support_assign=True, new_lang=None):
        # support_assign: allow leading '${var} =' assignment cells before
        # the keyword name (disabled for setting values like [Setup]).
        if self.new_lang is None:
            if new_lang is None:
                set_lang = shared_memory.ShareableList(name="language")
                try:
                    self.new_lang = Language.from_name(set_lang[0].replace('_', '-'))
                except ValueError:
                    self.new_lang = None
            else:
                self.new_lang = new_lang
        if self.new_lang is None:
            self.new_lang = Language.from_name('En')
        Tokenizer.__init__(self, new_lang=self.new_lang)
        self._keyword_found = not support_assign
        self._assigns = 0

    def _tokenize(self, value, index):
        if not self._keyword_found and self._is_assign(value):
            self._assigns += 1
            return SYNTAX  # VariableTokenizer tokenizes this later.
        if self._keyword_found:
            # Shift the index so assignment cells don't count as arguments.
            return Tokenizer._tokenize(self, value, index - self._assigns)
        self._keyword_found = True
        # The keyword name may start with a localized BDD (Gherkin) prefix.
        return GherkinTokenizer(new_lang=self.new_lang).tokenize(value, KEYWORD)
class GherkinTokenizer(object):
    # Original English-only pattern for reference:
    # _gherkin_prefix = re.compile('^(Given|When|Then|And|But) ', re.IGNORECASE)
    new_lang = None

    def __init__(self, new_lang=None):
        if self.new_lang is None:
            if new_lang is None:
                set_lang = shared_memory.ShareableList(name="language")
                try:
                    self.new_lang = Language.from_name(set_lang[0].replace('_', '-'))
                except ValueError:
                    self.new_lang = None
            else:
                self.new_lang = new_lang
        if self.new_lang is None:
            self.new_lang = Language.from_name('En')
        # Sort prefixes by descending length so that the longest ones are matched first
        prefixes = sorted(self.new_lang.bdd_prefixes, key=len, reverse=True)
        self.normalized_bdd_prefixes = normalize_pipe_list(list(prefixes), spaces=False)
        # Build '^(prefix1|prefix2|...) ' from the localized BDD prefixes.
        self._gherkin_prefix = re.compile(fr'^({self.normalized_bdd_prefixes}) ', re.IGNORECASE)

    def tokenize(self, value, token):
        """Split a leading BDD prefix off *value*; rest keeps *token*."""
        match = self._gherkin_prefix.match(value)
        if not match:
            return [(value, token)]
        end = match.end()
        return [(value[:end], GHERKIN), (value[end:], token)]
class TemplatedKeywordCall(Tokenizer):
    # With a template in effect, every cell is an argument to the template.
    _tokens = (ARGUMENT,)
    new_lang = None
class ForLoop(Tokenizer):
    """Tokenizes the cells of a FOR loop header row.

    Cells before the 'IN' marker (the loop variables) are SYNTAX; once a
    marker is seen, subsequent cells are ARGUMENT.
    """
    new_lang = None

    def __init__(self, new_lang=None):
        if self.new_lang is None:
            if new_lang is None:
                set_lang = shared_memory.ShareableList(name="language")
                try:
                    self.new_lang = Language.from_name(set_lang[0].replace('_', '-'))
                except ValueError:
                    self.new_lang = None
            else:
                self.new_lang = new_lang
        if self.new_lang is None:
            self.new_lang = Language.from_name('En')
        Tokenizer.__init__(self, new_lang=self.new_lang)
        self._in_arguments = False

    def _tokenize(self, value, index):
        # Conditional expression instead of the fragile `and/or` idiom.
        token = ARGUMENT if self._in_arguments else SYNTAX
        # The marker cell itself is still SYNTAX; only later cells change.
        if value.upper() in ('IN', 'IN ENUMERATE', 'IN RANGE', 'IN ZIP'):
            self._in_arguments = True
        return token
class _Table:
    # Tokenizer class used for this section's rows (set by subclasses).
    _tokenizer_class = None
    new_lang = None

    def __init__(self, prev_tokenizer=None, new_lang=None):
        if self.new_lang is None:
            if new_lang is None:
                try:
                    set_lang = shared_memory.ShareableList(name="language")
                except FileNotFoundError:
                    # No shared-memory list published; default to English.
                    set_lang = ['En']
                try:
                    self.new_lang = Language.from_name(set_lang[0].replace('_','-'))
                except ValueError:
                    self.new_lang = None
            else:
                self.new_lang = new_lang
        if self.new_lang is None:
            self.new_lang = Language.from_name('En')
        self._tokenizer = self._tokenizer_class()
        # Kept so '...' continuation rows can resume the previous tokenizer.
        self._prev_tokenizer = prev_tokenizer
        self._prev_values_on_row = []

    def tokenize(self, value, index):
        if self._continues(value, index):
            # Continuation row: restore the tokenizer of the previous row.
            self._tokenizer = self._prev_tokenizer
            yield value, SYNTAX
        else:
            yield from self._tokenize(value, index)
        self._prev_values_on_row.append(value)

    def _continues(self, value, index):
        _ = index
        # '...' continues the previous row only if all earlier cells are empty.
        return value == '...' and all(self._is_empty(t) for t in self._prev_values_on_row)

    @staticmethod
    def _is_empty(value):
        return value in ('', '\\')

    def _tokenize(self, value, index):
        _ = index
        return self._tokenizer.tokenize(value)

    def end_row(self):
        # Re-initialize for the next row, remembering the current tokenizer.
        self.__init__(prev_tokenizer=self._tokenizer)
class CommentsTable(_Table):
    # In a Comments section every cell is a comment.
    _tokenizer_class = Comment

    def _continues(self, value, index):
        # Comment rows never continue with '...'.
        return False
class UnknownTable(_Table):
    # Data before any recognized section header is treated as comments.
    _tokenizer_class = Comment

    def _continues(self, value, index):
        # Unknown-section rows never continue with '...'.
        return False
class VariableTable(_Table):
    # Variables-section rows are variable assignments.
    _tokenizer_class = Variable
class SettingTable(_Table):
    _tokenizer_class = Setting
    new_lang = None

    def __init__(self, template_setter, prev_tokenizer=None, new_lang=None):
        # template_setter records a default template (from TestCaseTable).
        self._template_setter = template_setter
        if self.new_lang is None:
            if new_lang is None:
                set_lang = shared_memory.ShareableList(name="language")
                try:
                    self.new_lang = Language.from_name(set_lang[0].replace('_', '-'))
                except ValueError:
                    self.new_lang = None
            else:
                self.new_lang = new_lang
        if self.new_lang is None:
            self.new_lang = Language.from_name('En')
        self.normalized_settings = normalize_dict(self.new_lang.settings)
        _Table.__init__(self, prev_tokenizer, new_lang=self.new_lang)

    def _tokenize(self, value, index):
        # When the first cell names one of these settings, switch to a fresh
        # Setting tokenizer carrying the template setter.
        if index == 0 and normalize_lc(value) in (
                get_key_by_value(self.normalized_settings, 'metadata'),
                get_key_by_value(self.normalized_settings, 'template'),
                get_key_by_value(self.normalized_settings, 'documentation'),
                get_key_by_value(self.normalized_settings, 'suitesetup'),
                get_key_by_value(self.normalized_settings, 'suiteteardown'),
                get_key_by_value(self.normalized_settings, 'testsetup'),
                get_key_by_value(self.normalized_settings, 'tasksetup'),
                get_key_by_value(self.normalized_settings, 'testteardown'),
                get_key_by_value(self.normalized_settings, 'taskteardown'),
                get_key_by_value(self.normalized_settings, 'library'),
                get_key_by_value(self.normalized_settings, 'resource'),
                get_key_by_value(self.normalized_settings, 'variables'),
                get_key_by_value(self.normalized_settings, 'testtemplate'),
                get_key_by_value(self.normalized_settings, 'tasktemplate')):
            self._tokenizer = Setting(template_setter=self._template_setter, new_lang=self.new_lang)
        return _Table._tokenize(self, value, index)

    def end_row(self):
        # Re-initialize for the next row, preserving setter and language.
        self.__init__(self._template_setter, prev_tokenizer=self._tokenizer, new_lang=self.new_lang)
class TestCaseTable(_Table):
    """Table for ``*** Test Cases ***`` / ``*** Tasks ***`` sections,
    resolving localized setting names through the active language."""

    _setting_class = TestCaseSetting
    _test_template = None
    _default_template = None
    new_lang = None

    # Localized bracketed settings whose value is a template/setup/teardown
    # keyword; used by _is_template().
    _TEMPLATE_SETTINGS = ('template', 'testsetup', 'tasksetup',
                          'testteardown', 'taskteardown', 'testtemplate',
                          'tasktemplate', 'setup', 'teardown')

    def __init__(self, prev_tokenizer=None, new_lang=None):
        if self.new_lang is None:
            if new_lang is None:
                # Language not passed explicitly: read it from the shared
                # memory block written by the application.
                set_lang = shared_memory.ShareableList(name="language")
                try:
                    self.new_lang = Language.from_name(set_lang[0].replace('_', '-'))
                except ValueError:
                    self.new_lang = None
                finally:
                    # Detach from the shared-memory segment; the original
                    # code leaked this handle on every instantiation.
                    set_lang.shm.close()
            else:
                self.new_lang = new_lang
            if self.new_lang is None:
                self.new_lang = Language.from_name('En')
        self.normalized_settings = normalize_dict(self.new_lang.settings)
        _Table.__init__(self, prev_tokenizer)

    @property
    def _tokenizer_class(self):
        # Data rows of templated tests are tokenized as template arguments.
        if self._test_template or (self._default_template and self._test_template is not False):
            return TemplatedKeywordCall
        return KeywordCall

    def _continues(self, value, index):
        # The first column holds the test name, so '...' continuation is
        # only possible from the second column on.
        return index > 0 and _Table._continues(self, value, index)

    def _tokenize(self, value, index):
        if index == 2 and self._test_template and self._is_template_set(value):
            # Templated test with an inline keyword in the third column.
            return KeywordCall(new_lang=self.new_lang).tokenize(value)
        if index == 0 and value:
            # A non-empty first column starts a new test case.
            self._test_template = None
            return GherkinTokenizer(new_lang=self.new_lang).tokenize(value, TC_KW_NAME)
        if index == 1 and self._is_setting(value):
            if self._is_template(value):
                self._test_template = True
                self._tokenizer = self._setting_class(template_setter=self.set_test_template, new_lang=self.new_lang)
            else:
                self._test_template = None
                self._tokenizer = self._setting_class(new_lang=self.new_lang)
        if index == 1 and self._is_for_loop(value):
            self._tokenizer = ForLoop()
        if index == 1 and self._is_empty(value):
            return [(value, SYNTAX)]
        if index == 1 and not self._is_setting(value):
            test_gherkin = GherkinTokenizer(new_lang=self.new_lang).tokenize(value, KEYWORD)
            if test_gherkin[0][1] in (GHERKIN, KEYWORD):
                return test_gherkin
        return _Table._tokenize(self, value, index)

    @staticmethod
    def _is_setting(value):
        return value.startswith('[') and value.endswith(']')

    def _is_template(self, value):
        """Return True when *value* is one of the localized, bracketed
        template/setup/teardown settings."""
        localized = tuple(f"[{get_key_by_value(self.normalized_settings, name)}]"
                          for name in self._TEMPLATE_SETTINGS)
        return normalize_lc(value) in localized

    @staticmethod
    def _is_for_loop(value):
        return normalize_lc(value, remove=':') == 'for'

    def set_test_template(self, template):
        # Callback handed to the [Template] setting tokenizer.
        self._test_template = self._is_template_set(template)

    def set_default_template(self, template):
        # Callback for a suite-level Test Template setting.
        self._default_template = self._is_template_set(template)

    @staticmethod
    def _is_template_set(template):
        # 'NONE' and ${EMPTY} explicitly disable an inherited template.
        return normalize_lc(template) not in ('', '\\', 'none', '${empty}')
class KeywordTable(TestCaseTable):
    """Table for ``*** Keywords ***`` sections; behaves like a test case
    table except that user keywords never have a template."""

    _tokenizer_class = KeywordCall
    _setting_class = KeywordSetting

    def _is_template(self, value):
        """User keywords cannot define a [Template] setting."""
        _ = value
        return False
# Following code copied directly from Robot Framework 2.7.5.
class VariableSplitter:
    """Locate and split the first Robot Framework variable (for example
    ``${var}`` or ``@{list}[0]``) inside *string*.

    After construction, ``identifier`` holds the variable marker character
    (``$``, ``@``, ...), ``base`` the name between the curly braces,
    ``index`` an optional list/dict item index, and ``start``/``end`` the
    character span of the match.  If no variable is found, ``identifier``,
    ``base`` and ``index`` stay ``None`` and ``start``/``end`` stay ``-1``.
    """

    def __init__(self, string, identifiers):
        self.identifier = None
        self.base = None
        self.index = None
        self.start = -1
        self.end = -1
        self._identifiers = identifiers
        self._may_have_internal_variables = False
        try:
            self._split(string)
        except ValueError:
            # No (complete) variable in the string; keep the defaults.
            pass
        else:
            self._finalize()

    def get_replaced_base(self, variables):
        """Return the base name, resolving internal variables when present."""
        if self._may_have_internal_variables:
            return variables.replace_string(self.base)
        return self.base

    def _finalize(self):
        # _variable_chars looks like ['$', '{', 'v', 'a', 'r', '}'].
        self.identifier = self._variable_chars[0]
        self.base = ''.join(self._variable_chars[2:-1])
        self.end = self.start + len(self._variable_chars)
        if self._has_list_or_dict_variable_index():
            # Strip the surrounding '[' and ']' from the collected index.
            self.index = ''.join(self._list_and_dict_variable_index_chars[1:-1])
            self.end += len(self._list_and_dict_variable_index_chars)

    def _has_list_or_dict_variable_index(self):
        return self._list_and_dict_variable_index_chars and self._list_and_dict_variable_index_chars[-1] == ']'

    def _split(self, string):
        start_index, max_index = self._find_variable(string)
        self.start = start_index
        self._open_curly = 1
        self._state = self._variable_state
        self._variable_chars = [string[start_index], '{']
        self._list_and_dict_variable_index_chars = []
        self._string = string
        start_index += 2
        # Character-by-character state machine: each state consumes one
        # character and may switch states or raise StopIteration to finish.
        # (Original code added start_index manually — a Python 2.5 relic.)
        for index, char in enumerate(string[start_index:], start=start_index):
            try:
                self._state(char, index)
            except StopIteration:
                return
            if index == max_index and not self._scanning_list_variable_index():
                return

    def _scanning_list_variable_index(self):
        return self._state in [self._waiting_list_variable_index_state,
                               self._list_variable_index_state]

    def _find_variable(self, string):
        """Return (start, max_end) for the candidate variable or raise
        ValueError when *string* cannot contain one."""
        max_end_index = string.rfind('}')
        if max_end_index == -1:
            raise ValueError('No variable end found')
        if self._is_escaped(string, max_end_index):
            return self._find_variable(string[:max_end_index])
        start_index = self._find_start_index(string, 1, max_end_index)
        if start_index == -1:
            raise ValueError('No variable start found')
        return start_index, max_end_index

    def _find_start_index(self, string, start, end):
        # A variable starts at an identifier char immediately before '{'.
        index = string.find('{', start, end) - 1
        if index < 0:
            return -1
        if self._start_index_is_ok(string, index):
            return index
        return self._find_start_index(string, index + 2, end)

    def _start_index_is_ok(self, string, index):
        return string[index] in self._identifiers and not self._is_escaped(string, index)

    @staticmethod
    def _is_escaped(string, index):
        # An odd number of preceding backslashes means the char is escaped.
        escaped = False
        while index > 0 and string[index-1] == '\\':
            index -= 1
            escaped = not escaped
        return escaped

    def _variable_state(self, char, index):
        self._variable_chars.append(char)
        if char == '}' and not self._is_escaped(self._string, index):
            self._open_curly -= 1
            if self._open_curly == 0:
                if not self._is_list_or_dict_variable():
                    raise StopIteration
                # '@'/'&' variables may be followed by an '[index]' part.
                self._state = self._waiting_list_variable_index_state
        elif char in self._identifiers:
            # Possible nested variable, e.g. ${outer${inner}}.
            self._state = self._internal_variable_start_state

    def _is_list_or_dict_variable(self):
        return self._variable_chars[0] in ('@', '&')

    def _internal_variable_start_state(self, char, index):
        self._state = self._variable_state
        if char == '{':
            self._variable_chars.append(char)
            self._open_curly += 1
            self._may_have_internal_variables = True
        else:
            self._variable_state(char, index)

    def _waiting_list_variable_index_state(self, char, index):
        if char != '[':
            raise StopIteration
        self._list_and_dict_variable_index_chars.append(char)
        self._state = self._list_variable_index_state

    def _list_variable_index_state(self, char, index):
        self._list_and_dict_variable_index_chars.append(char)
        if char == ']':
            raise StopIteration