]>
Commit | Line | Data |
---|---|---|
53e6db90 DC |
1 | """ |
2 | Functions inferring the syntax tree. | |
3 | """ | |
4 | import copy | |
5 | import itertools | |
6 | ||
7 | from parso.python import tree | |
8 | ||
9 | from jedi import debug | |
10 | from jedi import parser_utils | |
11 | from jedi.inference.base_value import ValueSet, NO_VALUES, ContextualizedNode, \ | |
12 | iterator_to_value_set, iterate_values | |
13 | from jedi.inference.lazy_value import LazyTreeValue | |
14 | from jedi.inference import compiled | |
15 | from jedi.inference import recursion | |
16 | from jedi.inference import analysis | |
17 | from jedi.inference import imports | |
18 | from jedi.inference import arguments | |
19 | from jedi.inference.value import ClassValue, FunctionValue | |
20 | from jedi.inference.value import iterable | |
21 | from jedi.inference.value.dynamic_arrays import ListModification, DictModification | |
22 | from jedi.inference.value import TreeInstance | |
23 | from jedi.inference.helpers import is_string, is_literal, is_number, \ | |
24 | get_names_of_node, is_big_annoying_library | |
25 | from jedi.inference.compiled.access import COMPARISON_OPERATORS | |
26 | from jedi.inference.cache import inference_state_method_cache | |
27 | from jedi.inference.gradual.stub_value import VersionInfo | |
28 | from jedi.inference.gradual import annotation | |
29 | from jedi.inference.names import TreeNameDefinition | |
30 | from jedi.inference.context import CompForContext | |
31 | from jedi.inference.value.decorator import Decoratee | |
32 | from jedi.plugins import plugin_manager | |
33 | ||
# Maps a binary operator to the magic method that implements it on the
# left-hand operand (e.g. ``a + b`` -> ``a.__add__(b)``).
operator_to_magic_method = {
    '+': '__add__',
    '-': '__sub__',
    '*': '__mul__',
    '@': '__matmul__',
    '/': '__truediv__',
    '//': '__floordiv__',
    '%': '__mod__',
    '**': '__pow__',
    '<<': '__lshift__',
    '>>': '__rshift__',
    '&': '__and__',
    '|': '__or__',
    '^': '__xor__',
}

# The reflected variants, tried on the right-hand operand when the left one
# provides no magic method (e.g. ``b.__radd__(a)``).
reverse_operator_to_magic_method = {
    k: '__r' + v[2:] for k, v in operator_to_magic_method.items()
}
53 | ||
54 | ||
def _limit_value_infers(func):
    """
    This is for now the way how we limit type inference going wild. There are
    other ways to ensure recursion limits as well. This is mostly necessary
    because of instance (self) access that can be quite tricky to limit.

    I'm still not sure this is the way to go, but it looks okay for now and we
    can still go anther way in the future. Tests are there. ~ dave
    """
    def wrapper(context, *args, **kwargs):
        n = context.tree_node
        inference_state = context.inference_state
        try:
            # Count how often this exact tree node has been inferred.
            inference_state.inferred_element_counts[n] += 1
            maximum = 300
            if context.parent_context is None \
                    and context.get_value() is inference_state.builtins_module:
                # Builtins should have a more generous inference limit.
                # It is important that builtins can be executed, otherwise some
                # functions that depend on certain builtins features would be
                # broken, see e.g. GH #1432
                maximum *= 100

            if inference_state.inferred_element_counts[n] > maximum:
                debug.warning('In value %s there were too many inferences.', n)
                return NO_VALUES
        except KeyError:
            # First time this node is inferred: initialize the counter. The
            # limit check is deliberately skipped on this first visit.
            inference_state.inferred_element_counts[n] = 1
        return func(context, *args, **kwargs)

    return wrapper
86 | ||
87 | ||
def infer_node(context, element):
    """
    Infer the values of a parse tree node within ``context``.

    During static analysis, code below an ``if`` statement may be inferred
    once per combination of possible definitions of the names used in the
    test (via ``context.predefine_names``), and the results are merged.
    Otherwise inference is delegated to the (possibly cached) ``_infer_node``.
    """
    if isinstance(context, CompForContext):
        # Comprehension contexts are never cached or branch-split.
        return _infer_node(context, element)

    # Find the closest enclosing if/for statement (stop at scope borders).
    if_stmt = element
    while if_stmt is not None:
        if_stmt = if_stmt.parent
        if if_stmt.type in ('if_stmt', 'for_stmt'):
            break
        if parser_utils.is_scope(if_stmt):
            if_stmt = None
            break
    predefined_if_name_dict = context.predefined_names.get(if_stmt)
    # TODO there's a lot of issues with this one. We actually should do
    # this in a different way. Caching should only be active in certain
    # cases and this all sucks.
    if predefined_if_name_dict is None and if_stmt \
            and if_stmt.type == 'if_stmt' and context.inference_state.is_analysis:
        if_stmt_test = if_stmt.children[1]
        name_dicts = [{}]
        # If we already did a check, we don't want to do it again -> If
        # value.predefined_names is filled, we stop.
        # We don't want to check the if stmt itself, it's just about
        # the content.
        if element.start_pos > if_stmt_test.end_pos:
            # Now we need to check if the names in the if_stmt match the
            # names in the suite.
            if_names = get_names_of_node(if_stmt_test)
            element_names = get_names_of_node(element)
            str_element_names = [e.value for e in element_names]
            if any(i.value in str_element_names for i in if_names):
                for if_name in if_names:
                    definitions = context.inference_state.infer(context, if_name)
                    # Every name that has multiple different definitions
                    # causes the complexity to rise. The complexity should
                    # never fall below 1.
                    if len(definitions) > 1:
                        if len(name_dicts) * len(definitions) > 16:
                            debug.dbg('Too many options for if branch inference %s.', if_stmt)
                            # There's only a certain amount of branches
                            # Jedi can infer, otherwise it will take to
                            # long.
                            name_dicts = [{}]
                            break

                        # Cartesian product: each existing dict is copied once
                        # per possible definition of this name.
                        original_name_dicts = list(name_dicts)
                        name_dicts = []
                        for definition in definitions:
                            new_name_dicts = list(original_name_dicts)
                            for i, name_dict in enumerate(new_name_dicts):
                                new_name_dicts[i] = name_dict.copy()
                                new_name_dicts[i][if_name.value] = ValueSet([definition])

                            name_dicts += new_name_dicts
                    else:
                        for name_dict in name_dicts:
                            name_dict[if_name.value] = definitions
        if len(name_dicts) > 1:
            # Infer once per branch combination and merge the results.
            result = NO_VALUES
            for name_dict in name_dicts:
                with context.predefine_names(if_stmt, name_dict):
                    result |= _infer_node(context, element)
            return result
        else:
            return _infer_node_if_inferred(context, element)
    else:
        if predefined_if_name_dict:
            return _infer_node(context, element)
        else:
            return _infer_node_if_inferred(context, element)
158 | ||
159 | ||
def _infer_node_if_inferred(context, element):
    """
    TODO This function is temporary: Merge with infer_node.
    """
    # If any ancestor of `element` currently carries predefined
    # (flow-sensitive) names, the cache would return stale results, so the
    # node is inferred without caching in that case.
    node = element
    while node is not None:
        node = node.parent
        if context.predefined_names.get(node) is not None:
            return _infer_node(context, element)
    return _infer_node_cached(context, element)
171 | ||
172 | ||
@inference_state_method_cache(default=NO_VALUES)
def _infer_node_cached(context, element):
    # Cached variant of _infer_node; only used when no predefined
    # (flow-sensitive) names are active, see _infer_node_if_inferred.
    return _infer_node(context, element)
176 | ||
177 | ||
@debug.increase_indent
@_limit_value_infers
def _infer_node(context, element):
    """
    The central dispatch of inference: map a parse tree node type to the
    ValueSet it can evaluate to. Falls back to ``infer_or_test`` for
    operator expressions not handled explicitly here.
    """
    debug.dbg('infer_node %s@%s in %s', element, element.start_pos, context)
    inference_state = context.inference_state
    typ = element.type
    if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword', 'fstring'):
        return infer_atom(context, element)
    elif typ == 'lambdef':
        return ValueSet([FunctionValue.from_context(context, element)])
    elif typ == 'expr_stmt':
        return infer_expr_stmt(context, element)
    elif typ in ('power', 'atom_expr'):
        first_child = element.children[0]
        children = element.children[1:]
        had_await = False
        if first_child.type == 'keyword' and first_child.value == 'await':
            had_await = True
            first_child = children.pop(0)

        value_set = context.infer_node(first_child)
        for (i, trailer) in enumerate(children):
            if trailer == '**':  # has a power operation.
                right = context.infer_node(children[i + 1])
                value_set = _infer_comparison(
                    context,
                    value_set,
                    trailer,
                    right
                )
                break
            value_set = infer_trailer(context, value_set, trailer)

        if had_await:
            return value_set.py__await__().py__stop_iteration_returns()
        return value_set
    elif typ in ('testlist_star_expr', 'testlist',):
        # The implicit tuple in statements.
        return ValueSet([iterable.SequenceLiteralValue(inference_state, context, element)])
    elif typ in ('not_test', 'factor'):
        # Unary prefixes (`not`, `-`, `+`, `~`), applied right-to-left.
        value_set = context.infer_node(element.children[-1])
        for operator in element.children[:-1]:
            value_set = infer_factor(value_set, operator)
        return value_set
    elif typ == 'test':
        # `x if foo else y` case.
        return (context.infer_node(element.children[0])
                | context.infer_node(element.children[-1]))
    elif typ == 'operator':
        # Must be an ellipsis, other operators are not inferred.
        if element.value != '...':
            origin = element.parent
            raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin))
        return ValueSet([compiled.builtin_from_name(inference_state, 'Ellipsis')])
    elif typ == 'dotted_name':
        # e.g. `a.b.c`: resolve attribute by attribute.
        value_set = infer_atom(context, element.children[0])
        for next_name in element.children[2::2]:
            value_set = value_set.py__getattribute__(next_name, name_context=context)
        return value_set
    elif typ == 'eval_input':
        return context.infer_node(element.children[0])
    elif typ == 'annassign':
        return annotation.infer_annotation(context, element.children[1]) \
            .execute_annotation()
    elif typ == 'yield_expr':
        # NOTE(review): a bare `yield` presumably never reaches here as a
        # `yield_expr` node (it would be a plain keyword) — confirm with parso.
        if len(element.children) and element.children[1].type == 'yield_arg':
            # Implies that it's a yield from.
            element = element.children[1].children[1]
            generators = context.infer_node(element) \
                .py__getattribute__('__iter__').execute_with_values()
            return generators.py__stop_iteration_returns()

        # Generator.send() is not implemented.
        return NO_VALUES
    elif typ == 'namedexpr_test':
        # `(x := expr)` evaluates to `expr`.
        return context.infer_node(element.children[2])
    else:
        return infer_or_test(context, element)
256 | ||
257 | ||
def infer_trailer(context, atom_values, trailer):
    """
    Apply a single trailer — subscript ``[...]``, attribute access ``.name``
    or call ``(...)`` — to the values inferred so far.
    """
    trailer_op, node = trailer.children[:2]
    if node == ')':  # An empty call: `arglist` is optional.
        node = None

    if trailer_op == '[':
        trailer_op, node, _ = trailer.children
        return atom_values.get_item(
            _infer_subscript_list(context, node),
            ContextualizedNode(context, trailer)
        )

    debug.dbg('infer_trailer: %s in %s', trailer, atom_values)
    if trailer_op == '.':
        return atom_values.py__getattribute__(
            name_context=context,
            name_or_str=node
        )
    assert trailer_op == '(', 'trailer_op is actually %s' % trailer_op
    args = arguments.TreeArguments(context.inference_state, context, node, trailer)
    return atom_values.execute(args)
280 | ||
281 | ||
def infer_atom(context, atom):
    """
    Basically to process ``atom`` nodes. The parser sometimes doesn't
    generate the node (because it has just one child). In that case an atom
    might be a name or a literal as well.
    """
    state = context.inference_state
    if atom.type == 'name':
        # This is the first global lookup.
        stmt = tree.search_ancestor(atom, 'expr_stmt', 'lambdef', 'if_stmt') or atom
        if stmt.type == 'if_stmt':
            # Only restrict the lookup position if the name is inside one of
            # the if statement's test expressions.
            if not any(n.start_pos <= atom.start_pos < n.end_pos for n in stmt.get_test_nodes()):
                stmt = atom
        elif stmt.type == 'lambdef':
            stmt = atom
        position = stmt.start_pos
        if _is_annotation_name(atom):
            # Since Python 3.7 (with from __future__ import annotations),
            # annotations are essentially strings and can reference objects
            # that are defined further down in code. Therefore just set the
            # position to None, so the finder will not try to stop at a certain
            # position in the module.
            position = None
        return context.py__getattribute__(atom, position=position)
    elif atom.type == 'keyword':
        # For False/True/None
        if atom.value in ('False', 'True', 'None'):
            return ValueSet([compiled.builtin_from_name(state, atom.value)])
        elif atom.value == 'yield':
            # Contrary to yield from, yield can just appear alone to return a
            # value when used with `.send()`.
            return NO_VALUES
        assert False, 'Cannot infer the keyword %s' % atom

    elif isinstance(atom, tree.Literal):
        string = state.compiled_subprocess.safe_literal_eval(atom.value)
        return ValueSet([compiled.create_simple_object(state, string)])
    elif atom.type == 'strings':
        # Will be multiple string.
        # Implicit concatenation is modeled as repeated `+`.
        value_set = infer_atom(context, atom.children[0])
        for string in atom.children[1:]:
            right = infer_atom(context, string)
            value_set = _infer_comparison(context, value_set, '+', right)
        return value_set
    elif atom.type == 'fstring':
        return compiled.get_string_value_set(state)
    else:
        c = atom.children
        # Parentheses without commas are not tuples.
        if c[0] == '(' and not len(c) == 2 \
                and not (c[1].type == 'testlist_comp'
                         and len(c[1].children) > 1):
            return context.infer_node(c[1])

        try:
            comp_for = c[1].children[1]
        except (IndexError, AttributeError):
            pass
        else:
            if comp_for == ':':
                # Dict comprehensions have a colon at the 3rd index.
                try:
                    comp_for = c[1].children[3]
                except IndexError:
                    pass

            if comp_for.type in ('comp_for', 'sync_comp_for'):
                return ValueSet([iterable.comprehension_from_atom(
                    state, context, atom
                )])

        # It's a dict/list/tuple literal.
        array_node = c[1]
        try:
            array_node_c = array_node.children
        except AttributeError:
            array_node_c = []
        # `{}` alone, a colon, or `**` unpacking all indicate a dict.
        if c[0] == '{' and (array_node == '}' or ':' in array_node_c
                            or '**' in array_node_c):
            new_value = iterable.DictLiteralValue(state, context, atom)
        else:
            new_value = iterable.SequenceLiteralValue(state, context, atom)
        return ValueSet([new_value])
365 | ||
366 | ||
@_limit_value_infers
def infer_expr_stmt(context, stmt, seek_name=None):
    """
    Infer an expression statement while guarding against runaway recursion.

    If `seek_name` is given, a matching PEP 484 assignment comment hint
    takes precedence over regular inference of the statement.
    """
    with recursion.execution_allowed(context.inference_state, stmt) as allowed:
        if not allowed:
            return NO_VALUES

        if seek_name is not None:
            pep0484_values = annotation.find_type_from_comment_hint_assign(
                context, stmt, seek_name)
            if pep0484_values:
                return pep0484_values

        return _infer_expr_stmt(context, stmt, seek_name)
379 | ||
380 | ||
@debug.increase_indent
def _infer_expr_stmt(context, stmt, seek_name=None):
    """
    The starting point of the completion. A statement always owns a call
    list, which are the calls, that a statement does. In case multiple
    names are defined in the statement, `seek_name` returns the result for
    this name.

    expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
                     ('=' (yield_expr|testlist_star_expr))*)
    annassign: ':' test ['=' test]
    augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
                '<<=' | '>>=' | '**=' | '//=')

    :param stmt: A `tree.ExprStmt`.
    """
    def check_setitem(stmt):
        # Detect `name[...] = value` and return (is_setitem, subscript node).
        atom_expr = stmt.children[0]
        if atom_expr.type not in ('atom_expr', 'power'):
            return False, None
        name = atom_expr.children[0]
        if name.type != 'name' or len(atom_expr.children) != 2:
            return False, None
        trailer = atom_expr.children[-1]
        return trailer.children[0] == '[', trailer.children[1]

    debug.dbg('infer_expr_stmt %s (%s)', stmt, seek_name)
    rhs = stmt.get_rhs()

    value_set = context.infer_node(rhs)

    if seek_name:
        n = TreeNameDefinition(context, seek_name)
        value_set = check_tuple_assignments(n, value_set)

    first_operator = next(stmt.yield_operators(), None)
    is_setitem, subscriptlist = check_setitem(stmt)
    # True for augmented assignments like `x += 1` (operator is not plain `=`).
    is_annassign = first_operator not in ('=', None) and first_operator.type == 'operator'
    if is_annassign or is_setitem:
        # `=` is always the last character in aug assignments -> -1
        name = stmt.get_defined_names(include_setitem=True)[0].value
        left_values = context.py__getattribute__(name, position=stmt.start_pos)

        if is_setitem:
            def to_mod(v):
                # NOTE(review): ContextualizedSubscriptListNode is expected to
                # be defined further down in this module — confirm.
                c = ContextualizedSubscriptListNode(context, subscriptlist)
                if v.array_type == 'dict':
                    return DictModification(v, value_set, c)
                elif v.array_type == 'list':
                    return ListModification(v, value_set, c)
                return v

            value_set = ValueSet(to_mod(v) for v in left_values)
        else:
            # Strip the trailing `=` from e.g. `+=` to get the operator.
            operator = copy.copy(first_operator)
            operator.value = operator.value[:-1]
            for_stmt = tree.search_ancestor(stmt, 'for_stmt')
            if for_stmt is not None and for_stmt.type == 'for_stmt' and value_set \
                    and parser_utils.for_stmt_defines_one_name(for_stmt):
                # Iterate through result and add the values, that's possible
                # only in for loops without clutter, because they are
                # predictable. Also only do it, if the variable is not a tuple.
                node = for_stmt.get_testlist()
                cn = ContextualizedNode(context, node)
                ordered = list(cn.infer().iterate(cn))

                for lazy_value in ordered:
                    dct = {for_stmt.children[1].value: lazy_value.infer()}
                    with context.predefine_names(for_stmt, dct):
                        t = context.infer_node(rhs)
                        left_values = _infer_comparison(context, left_values, operator, t)
                value_set = left_values
            else:
                value_set = _infer_comparison(context, left_values, operator, value_set)
    debug.dbg('infer_expr_stmt result %s', value_set)
    return value_set
457 | ||
458 | ||
def infer_or_test(context, or_test):
    """
    Infer a chain like ``a <op> b <op> c`` left to right, including the
    short-circuiting behaviour of ``and``/``or``.
    """
    children = or_test.children
    types = context.infer_node(children[0])
    # Children alternate: operand, operator, operand, operator, ...
    for index in range(1, len(children), 2):
        operator = children[index]
        right = children[index + 1]
        if operator.type == 'comp_op':  # not in / is not
            operator = ' '.join(c.value for c in operator.children)

        # handle type inference of and/or here.
        if operator in ('and', 'or'):
            left_bools = {left.py__bool__() for left in types}
            if left_bools == {True}:
                if operator == 'and':
                    types = context.infer_node(right)
            elif left_bools == {False}:
                if operator != 'and':
                    types = context.infer_node(right)
            # Otherwise continue, because of uncertainty.
        else:
            types = _infer_comparison(context, types, operator,
                                      context.infer_node(right))
    debug.dbg('infer_or_test types %s', types)
    return types
482 | ||
483 | ||
@iterator_to_value_set
def infer_factor(value_set, operator):
    """
    Calculates `+`, `-`, `~` and `not` prefixes.
    """
    for value in value_set:
        if operator == 'not':
            truthiness = value.py__bool__()
            if truthiness is None:
                # Uncertain truthiness: stop and infer nothing at all.
                return
            yield compiled.create_simple_object(value.inference_state, not truthiness)
        elif operator == '-':
            # Only numbers can be negated; other values are dropped.
            if is_number(value):
                yield value.negate()
        else:
            # `+`, `~` (and anything else) leave the inferred value unchanged.
            yield value
500 | ||
501 | ||
def _literals_to_types(inference_state, result):
    """
    Replace literal values ('a', 1, 1.0, etc.) by plain instances of their
    type (str(), int(), float(), etc.); non-literals pass through unchanged.
    """
    new_result = NO_VALUES
    for value in result:
        if not is_literal(value):
            new_result |= ValueSet([value])
            continue
        # Literals are only valid as long as the operations are correct.
        # Otherwise add a value-free instance.
        cls = compiled.builtin_from_name(inference_state, value.name.string_name)
        new_result |= cls.execute_with_values()
    return new_result
515 | ||
516 | ||
def _infer_comparison(context, left_values, operator, right_values):
    """
    Infer ``left <operator> right`` over two ValueSets, pairing up every
    left/right combination (with a cap to avoid combinatorial blowup).
    """
    state = context.inference_state
    # `operator` may be a plain string or a parse tree leaf.
    operator_str = operator if isinstance(operator, str) else str(operator.value)

    if not left_values or not right_values:
        # illegal slices e.g. cause left/right_result to be None
        result = (left_values or NO_VALUES) | (right_values or NO_VALUES)
        return _literals_to_types(state, result)

    if operator_str == "|" and all(
        value.is_class() or value.is_compiled()
        for value in itertools.chain(left_values, right_values)
    ):
        # ^^^ A naive hack for PEP 604
        return ValueSet.from_sets((left_values, right_values))

    # I don't think there's a reasonable chance that a string
    # operation is still correct, once we pass something like six
    # objects.
    if len(left_values) * len(right_values) > 6:
        return _literals_to_types(state, left_values | right_values)

    return ValueSet.from_sets(
        _infer_comparison_part(state, context, left, operator, right)
        for left in left_values
        for right in right_values
    )
545 | ||
546 | ||
def _is_annotation_name(name):
    """
    Return True if `name` is positioned inside the annotation part of a
    parameter, function definition or annotated assignment.
    """
    ancestor = tree.search_ancestor(name, 'param', 'funcdef', 'expr_stmt')
    if ancestor is None:
        return False

    if ancestor.type in ('param', 'funcdef'):
        ann = ancestor.annotation
        return ann is not None and ann.start_pos <= name.start_pos < ann.end_pos

    # The remaining case is an expr_stmt; check for an `: annotation` part.
    children = ancestor.children
    if len(children) > 1 and children[1].type == 'annassign':
        return children[1].start_pos <= name.start_pos < children[1].end_pos
    return False
561 | ||
562 | ||
def _is_list(value):
    # True when the value models a `list` (see iterable array values).
    return value.array_type == 'list'
565 | ||
566 | ||
def _is_tuple(value):
    # True when the value models a `tuple` (see iterable array values).
    return value.array_type == 'tuple'
569 | ||
570 | ||
def _bool_to_value(inference_state, bool_):
    # Convert a Python bool into the builtin `True`/`False` value.
    return compiled.builtin_from_name(inference_state, str(bool_))
573 | ||
574 | ||
def _get_tuple_ints(value):
    """
    Try to convert a sequence literal like ``(3, 7)`` into a list of ints.

    Returns None as soon as the value is not a plain sequence literal or any
    element is not an integer number literal.
    """
    if not isinstance(value, iterable.SequenceLiteralValue):
        return None

    numbers = []
    for lazy_value in value.py__iter__():
        if not isinstance(lazy_value, LazyTreeValue):
            return None
        leaf = lazy_value.data
        if leaf.type != 'number':
            return None
        try:
            numbers.append(int(leaf.value))
        except ValueError:
            # e.g. float or complex literals cannot be used here.
            return None
    return numbers
590 | ||
591 | ||
def _infer_comparison_part(inference_state, context, left, operator, right):
    """
    Infer the result of ``left <operator> right`` for a single pair of
    values. Falls back to magic-method lookup (``__add__``/``__radd__`` etc.)
    and, as a last resort, returns both operands.

    :param operator: either a plain string or a parse tree leaf; all lookups
        below use its normalized string form `str_operator`.
    """
    l_is_num = is_number(left)
    r_is_num = is_number(right)
    if isinstance(operator, str):
        str_operator = operator
    else:
        str_operator = str(operator.value)

    if str_operator == '*':
        # for iterables, ignore * operations
        if isinstance(left, iterable.Sequence) or is_string(left):
            return ValueSet([left])
        elif isinstance(right, iterable.Sequence) or is_string(right):
            return ValueSet([right])
    elif str_operator == '+':
        if l_is_num and r_is_num or is_string(left) and is_string(right):
            return left.execute_operation(right, str_operator)
        elif _is_list(left) and _is_list(right) or _is_tuple(left) and _is_tuple(right):
            return ValueSet([iterable.MergedArray(inference_state, (left, right))])
    elif str_operator == '-':
        if l_is_num and r_is_num:
            return left.execute_operation(right, str_operator)
    elif str_operator == '%':
        # With strings and numbers the left type typically remains. Except for
        # `int() % float()`.
        return ValueSet([left])
    elif str_operator in COMPARISON_OPERATORS:
        if left.is_compiled() and right.is_compiled():
            # Possible, because the return is not an option. Just compare.
            result = left.execute_operation(right, str_operator)
            if result:
                return result
        else:
            if str_operator in ('is', '!=', '==', 'is not'):
                operation = COMPARISON_OPERATORS[str_operator]
                bool_ = operation(left, right)
                # Only if == returns True or != returns False, we can continue.
                # There's no guarantee that they are not equal. This can help
                # in some cases, but does not cover everything.
                if (str_operator in ('is', '==')) == bool_:
                    return ValueSet([_bool_to_value(inference_state, bool_)])

            if isinstance(left, VersionInfo):
                version_info = _get_tuple_ints(right)
                if version_info is not None:
                    # Use str_operator here: `operator` may be a parse tree
                    # leaf, which is not a valid key for COMPARISON_OPERATORS.
                    bool_result = compiled.access.COMPARISON_OPERATORS[str_operator](
                        inference_state.environment.version_info,
                        tuple(version_info)
                    )
                    return ValueSet([_bool_to_value(inference_state, bool_result)])

        # Unknown comparison result: it can be either True or False.
        return ValueSet([
            _bool_to_value(inference_state, True),
            _bool_to_value(inference_state, False)
        ])
    elif str_operator in ('in', 'not in'):
        return NO_VALUES

    def check(obj):
        """Checks if a Jedi object is either a float or an int."""
        return isinstance(obj, TreeInstance) and \
            obj.name.string_name in ('int', 'float')

    # Static analysis, one is a number, the other one is not.
    if str_operator in ('+', '-') and l_is_num != r_is_num \
            and not (check(left) or check(right)):
        # Interpolate the actual operator instead of hard-coding `+`, so the
        # message is correct for `-` as well.
        message = "TypeError: unsupported operand type(s) for %s: %s and %s"
        analysis.add(context, 'type-error-operation', operator,
                     message % (str_operator, left, right))

    if left.is_class() or right.is_class():
        return NO_VALUES

    method_name = operator_to_magic_method[str_operator]
    magic_methods = left.py__getattribute__(method_name)
    if magic_methods:
        result = magic_methods.execute_with_values(right)
        if result:
            return result

    if not magic_methods:
        # Try the reflected operation on the right-hand operand.
        reverse_method_name = reverse_operator_to_magic_method[str_operator]
        magic_methods = right.py__getattribute__(reverse_method_name)

        result = magic_methods.execute_with_values(left)
        if result:
            return result

    result = ValueSet([left, right])
    debug.dbg('Used operator %s resulting in %s', operator, result)
    return result
683 | ||
684 | ||
@plugin_manager.decorate()
def tree_name_to_values(inference_state, context, tree_name):
    """
    Resolve a tree name to the values it can stand for, depending on the
    kind of node that defines it (annotated assignment, for loop, with
    statement, import, def/class, except clause, ...).
    """
    value_set = NO_VALUES
    module_node = context.get_root_context().tree_node
    # First check for annotations, like: `foo: int = 3`
    if module_node is not None:
        names = module_node.get_used_names().get(tree_name.value, [])
        found_annotation = False
        for name in names:
            expr_stmt = name.parent

            if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign":
                correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
                if correct_scope:
                    found_annotation = True
                    value_set |= annotation.infer_annotation(
                        context, expr_stmt.children[1].children[1]
                    ).execute_annotation()
        if found_annotation:
            # Annotations win over all other definitions of the name.
            return value_set

    types = []
    node = tree_name.get_definition(import_name_always=True, include_setitem=True)
    if node is None:
        node = tree_name.parent
        if node.type == 'global_stmt':
            c = context.create_context(tree_name)
            if c.is_module():
                # In case we are already part of the module, there is no point
                # in looking up the global statement anymore, because it's not
                # valid at that point anyway.
                return NO_VALUES
            # For global_stmt lookups, we only need the first possible scope,
            # which means the function itself.
            filter = next(c.get_filters())
            names = filter.get(tree_name.value)
            return ValueSet.from_sets(name.infer() for name in names)
        elif node.type not in ('import_from', 'import_name'):
            c = context.create_context(tree_name)
            return infer_atom(c, tree_name)

    typ = node.type
    if typ == 'for_stmt':
        # `# type:` comment hints take precedence, if present.
        types = annotation.find_type_from_comment_hint_for(context, node, tree_name)
        if types:
            return types
    if typ == 'with_stmt':
        types = annotation.find_type_from_comment_hint_with(context, node, tree_name)
        if types:
            return types

    if typ in ('for_stmt', 'comp_for', 'sync_comp_for'):
        try:
            # A predefined (branch-split) value may already exist for the name.
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            cn = ContextualizedNode(context, node.children[3])
            for_types = iterate_values(
                cn.infer(),
                contextualized_node=cn,
                is_async=node.parent.type == 'async_stmt',
            )
            n = TreeNameDefinition(context, tree_name)
            types = check_tuple_assignments(n, for_types)
    elif typ == 'expr_stmt':
        types = infer_expr_stmt(context, node, tree_name)
    elif typ == 'with_stmt':
        value_managers = context.infer_node(node.get_test_node_from_name(tree_name))
        if node.parent.type == 'async_stmt':
            # In the case of `async with` statements, we need to
            # first get the coroutine from the `__aenter__` method,
            # then "unwrap" via the `__await__` method
            enter_methods = value_managers.py__getattribute__('__aenter__')
            coro = enter_methods.execute_with_values()
            return coro.py__await__().py__stop_iteration_returns()
        enter_methods = value_managers.py__getattribute__('__enter__')
        return enter_methods.execute_with_values()
    elif typ in ('import_from', 'import_name'):
        types = imports.infer_import(context, tree_name)
    elif typ in ('funcdef', 'classdef'):
        types = _apply_decorators(context, node)
    elif typ == 'try_stmt':
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        exceptions = context.infer_node(tree_name.get_previous_sibling().get_previous_sibling())
        types = exceptions.execute_with_values()
    elif typ == 'param':
        types = NO_VALUES
    elif typ == 'del_stmt':
        types = NO_VALUES
    elif typ == 'namedexpr_test':
        types = infer_node(context, node)
    else:
        raise ValueError("Should not happen. type: %s" % typ)
    return types
780 | ||
781 | ||
# Cache on the tree node so repeated inference of the same funcdef/classdef
# doesn't create fresh function/class values every time.
@inference_state_method_cache()
def _apply_decorators(context, node):
    """
    Resolve what a decorated ``funcdef``/``classdef`` evaluates to.

    Decorators are applied innermost-first: each decorator is inferred and
    then executed with the previously accumulated value set as its single
    argument.  If any decorator cannot be resolved, the undecorated
    function/class value is returned as a fallback.
    """
    if node.type == 'classdef':
        decoratee_value = ClassValue(
            context.inference_state,
            parent_context=context,
            tree_node=node
        )
    else:
        decoratee_value = FunctionValue.from_context(context, node)
    undecorated = ValueSet([decoratee_value])
    values = undecorated

    # Huge libraries make decorator resolution too slow/noisy; skip it.
    if is_big_annoying_library(context):
        return values

    # ``reversed`` because the decorator closest to the def applies first.
    for dec in reversed(node.get_decorators()):
        debug.dbg('decorator: %s %s', dec, values, color="MAGENTA")
        with debug.increase_indent_cm():
            dec_values = context.infer_node(dec.children[1])
            trailer_nodes = dec.children[2:-1]
            if trailer_nodes:
                # The decorator has call/attribute trailers, e.g.
                # ``@mod.deco(arg)`` — wrap them in a trailer node and infer.
                trailer = tree.PythonNode('trailer', trailer_nodes)
                trailer.parent = dec
                dec_values = infer_trailer(context, dec_values, trailer)

            if not len(dec_values):
                code = dec.get_code(include_prefix=False)
                # For the short future, we don't want to hear about the runtime
                # decorator in typing that was intentionally omitted. This is not
                # "correct", but helps with debugging.
                if code != '@runtime\n':
                    debug.warning('decorator not found: %s on %s', dec, node)
                return undecorated

            values = dec_values.execute(arguments.ValuesArguments([values]))
            if not len(values):
                debug.warning('not possible to resolve wrappers found %s', node)
                return undecorated

        debug.dbg('decorator end %s', values, color="MAGENTA")
    if values != undecorated:
        # Remember the original value so goto/completion can still find it.
        return ValueSet([Decoratee(c, decoratee_value) for c in values])
    return values
832 | ||
833 | ||
def check_tuple_assignments(name, value_set):
    """
    Follow tuple-unpacking assignments (e.g. ``a, b = x``) down to the
    value(s) actually bound to *name*, iterating the right-hand side once
    per nesting level.
    """
    lazy_value = None
    for index, node in name.assignment_indexes():
        cn = ContextualizedNode(name.parent_context, node)
        iterated = value_set.iterate(cn)
        if isinstance(index, slice):
            # Star unpacking (``a, *b = ...``) is not supported here.
            return NO_VALUES
        position = 0
        while position <= index:
            try:
                lazy_value = next(iterated)
            except StopIteration:
                # We could do this with the default param in next. But this
                # would allow this loop to run for a very long time if the
                # index number is high. Therefore break if the loop is
                # finished.
                return NO_VALUES
            # ``max`` accounts for lazy values that span several slots.
            position += lazy_value.max
        value_set = lazy_value.infer()
    return value_set
859 | ||
860 | ||
class ContextualizedSubscriptListNode(ContextualizedNode):
    """A contextualized node that is inferred as a subscript/slice."""

    def infer(self):
        # Delegate to the slice-aware subscript inference helper.
        return _infer_subscript_list(self.context, self.node)
864 | ||
865 | ||
def _infer_subscript_list(context, index):
    """
    Infer the value(s) of a subscript node, with special handling for
    slice syntax (``a[:]``, ``a[1:3:2]``) and subscript lists (``a[x, y]``).
    """
    if index == ':':
        # A bare colon, e.g. ``array[:]`` — a full slice with no bounds.
        return ValueSet([iterable.Slice(context, None, None, None)])

    if index.type == 'subscript' and index.children[0] != '.':
        # Explicit slice syntax, e.g. ``array[:3]`` or ``array[1:3:2]``.
        parts = []
        for child in index.children:
            if child == ':':
                # A colon with nothing collected yet means start is omitted.
                if not parts:
                    parts.append(None)
            elif child.type == 'sliceop':
                if len(child.children) == 2:
                    parts.append(child.children[1])
            else:
                parts.append(child)
        # Pad out to the full (start, stop, step) triple.
        while len(parts) < 3:
            parts.append(None)

        return ValueSet([iterable.Slice(context, *parts)])

    if index.type == 'subscriptlist':
        # Multiple indices, e.g. ``a[x, y]`` — treat as a tuple literal.
        return ValueSet([iterable.SequenceLiteralValue(context.inference_state, context, index)])

    # Plain index expression — infer it directly.
    return context.infer_node(index)