"""
Functions inferring the syntax tree.
"""
import copy
import itertools

from parso.python import tree

from jedi import debug
from jedi import parser_utils
from jedi.inference.base_value import ValueSet, NO_VALUES, ContextualizedNode, \
    iterator_to_value_set, iterate_values
from jedi.inference.lazy_value import LazyTreeValue
from jedi.inference import compiled
from jedi.inference import recursion
from jedi.inference import analysis
from jedi.inference import imports
from jedi.inference import arguments
from jedi.inference.value import ClassValue, FunctionValue
from jedi.inference.value import iterable
from jedi.inference.value.dynamic_arrays import ListModification, DictModification
from jedi.inference.value import TreeInstance
from jedi.inference.helpers import is_string, is_literal, is_number, \
    get_names_of_node, is_big_annoying_library
from jedi.inference.compiled.access import COMPARISON_OPERATORS
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.gradual.stub_value import VersionInfo
from jedi.inference.gradual import annotation
from jedi.inference.names import TreeNameDefinition
from jedi.inference.context import CompForContext
from jedi.inference.value.decorator import Decoratee
from jedi.plugins import plugin_manager
# Maps a binary operator to the dunder method that implements it on the
# left-hand operand.
operator_to_magic_method = {
    '+': '__add__',
    '-': '__sub__',
    '*': '__mul__',
    '@': '__matmul__',
    '/': '__truediv__',
    '//': '__floordiv__',
    '%': '__mod__',
    '**': '__pow__',
    '<<': '__lshift__',
    '>>': '__rshift__',
    '&': '__and__',
    '|': '__or__',
    '^': '__xor__',
}

# The reflected ("__r*__") counterparts, tried on the right-hand operand
# when the left-hand magic method is missing.
reverse_operator_to_magic_method = {
    k: '__r' + v[2:] for k, v in operator_to_magic_method.items()
}
def _limit_value_infers(func):
    """
    This is for now the way how we limit type inference going wild. There are
    other ways to ensure recursion limits as well. This is mostly necessary
    because of instance (self) access that can be quite tricky to limit.

    I'm still not sure this is the way to go, but it looks okay for now and we
    can still go another way in the future. Tests are there. ~ dave
    """
    def wrapper(context, *args, **kwargs):
        n = context.tree_node
        inference_state = context.inference_state
        try:
            inference_state.inferred_element_counts[n] += 1
            maximum = 300
            if context.parent_context is None \
                    and context.get_value() is inference_state.builtins_module:
                # Builtins should have a more generous inference limit.
                # It is important that builtins can be executed, otherwise some
                # functions that depend on certain builtins features would be
                # broken, see e.g. GH #1432
                maximum *= 100

            if inference_state.inferred_element_counts[n] > maximum:
                debug.warning('In value %s there were too many inferences.', n)
                return NO_VALUES
        except KeyError:
            # First time we see this node: start counting.
            inference_state.inferred_element_counts[n] = 1
        return func(context, *args, **kwargs)
    return wrapper
def infer_node(context, element):
    """
    Infer the values of ``element``, taking surrounding ``if``/``for``
    statements into account to narrow names where possible.
    """
    if isinstance(context, CompForContext):
        return _infer_node(context, element)

    # Walk upwards to the closest if_stmt/for_stmt; stop at scope boundaries.
    if_stmt = element
    while if_stmt is not None:
        if_stmt = if_stmt.parent
        if if_stmt.type in ('if_stmt', 'for_stmt'):
            break
        if parser_utils.is_scope(if_stmt):
            if_stmt = None
            break
    predefined_if_name_dict = context.predefined_names.get(if_stmt)
    # TODO there's a lot of issues with this one. We actually should do
    # this in a different way. Caching should only be active in certain
    # cases and this all sucks.
    if predefined_if_name_dict is None and if_stmt \
            and if_stmt.type == 'if_stmt' and context.inference_state.is_analysis:
        if_stmt_test = if_stmt.children[1]
        name_dicts = [{}]
        # If we already did a check, we don't want to do it again -> If
        # value.predefined_names is filled, we stop.
        # We don't want to check the if stmt itself, it's just about
        # the content after the test.
        if element.start_pos > if_stmt_test.end_pos:
            # Now we need to check if the names in the if_stmt match the
            # names in the suite.
            if_names = get_names_of_node(if_stmt_test)
            element_names = get_names_of_node(element)
            str_element_names = [e.value for e in element_names]
            if any(i.value in str_element_names for i in if_names):
                for if_name in if_names:
                    definitions = context.inference_state.infer(context, if_name)
                    # Every name that has multiple different definitions
                    # causes the complexity to rise. The complexity should
                    # never fall below 1.
                    if len(definitions) > 1:
                        if len(name_dicts) * len(definitions) > 16:
                            debug.dbg('Too many options for if branch inference %s.', if_stmt)
                            # There's only a certain amount of branches
                            # Jedi can infer, otherwise it will take to
                            # long.
                            name_dicts = [{}]
                            break

                        original_name_dicts = list(name_dicts)
                        name_dicts = []
                        for definition in definitions:
                            new_name_dicts = list(original_name_dicts)
                            for i, name_dict in enumerate(new_name_dicts):
                                new_name_dicts[i] = name_dict.copy()
                                new_name_dicts[i][if_name.value] = ValueSet([definition])

                            name_dicts += new_name_dicts
                    else:
                        for name_dict in name_dicts:
                            name_dict[if_name.value] = definitions
        if len(name_dicts) > 1:
            result = NO_VALUES
            for name_dict in name_dicts:
                with context.predefine_names(if_stmt, name_dict):
                    result |= _infer_node(context, element)
            return result
        else:
            return _infer_node_if_inferred(context, element)
    else:
        if predefined_if_name_dict:
            return _infer_node(context, element)
        else:
            return _infer_node_if_inferred(context, element)
def _infer_node_if_inferred(context, element):
    """
    TODO This function is temporary: Merge with infer_node.
    """
    # If any ancestor has predefined names, bypass the cache: the result
    # depends on the predefined names, not only on the node.
    parent = element
    while parent is not None:
        parent = parent.parent
        predefined_if_name_dict = context.predefined_names.get(parent)
        if predefined_if_name_dict is not None:
            return _infer_node(context, element)
    return _infer_node_cached(context, element)
@inference_state_method_cache(default=NO_VALUES)
def _infer_node_cached(context, element):
    # Memoized variant of _infer_node; safe only when no predefined names
    # are active (see _infer_node_if_inferred).
    return _infer_node(context, element)
@debug.increase_indent
@_limit_value_infers
def _infer_node(context, element):
    """Dispatch on the parso node type and infer the resulting value set."""
    debug.dbg('infer_node %s@%s in %s', element, element.start_pos, context)
    inference_state = context.inference_state
    typ = element.type
    if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword', 'fstring'):
        return infer_atom(context, element)
    elif typ == 'lambdef':
        return ValueSet([FunctionValue.from_context(context, element)])
    elif typ == 'expr_stmt':
        return infer_expr_stmt(context, element)
    elif typ in ('power', 'atom_expr'):
        first_child = element.children[0]
        children = element.children[1:]
        had_await = False
        if first_child.type == 'keyword' and first_child.value == 'await':
            had_await = True
            first_child = children.pop(0)

        value_set = context.infer_node(first_child)
        for (i, trailer) in enumerate(children):
            if trailer == '**':  # has a power operation.
                right = context.infer_node(children[i + 1])
                value_set = _infer_comparison(
                    context,
                    value_set,
                    trailer,
                    right
                )
                break
            value_set = infer_trailer(context, value_set, trailer)

        if had_await:
            return value_set.py__await__().py__stop_iteration_returns()
        return value_set
    elif typ in ('testlist_star_expr', 'testlist',):
        # The implicit tuple in statements.
        return ValueSet([iterable.SequenceLiteralValue(inference_state, context, element)])
    elif typ in ('not_test', 'factor'):
        value_set = context.infer_node(element.children[-1])
        for operator in element.children[:-1]:
            value_set = infer_factor(value_set, operator)
        return value_set
    elif typ == 'test':
        # `x if foo else y` case.
        return (context.infer_node(element.children[0])
                | context.infer_node(element.children[-1]))
    elif typ == 'operator':
        # Must be an ellipsis, other operators are not inferred.
        if element.value != '...':
            origin = element.parent
            raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin))
        return ValueSet([compiled.builtin_from_name(inference_state, 'Ellipsis')])
    elif typ == 'dotted_name':
        value_set = infer_atom(context, element.children[0])
        for next_name in element.children[2::2]:
            value_set = value_set.py__getattribute__(next_name, name_context=context)
        return value_set
    elif typ == 'eval_input':
        return context.infer_node(element.children[0])
    elif typ == 'annassign':
        return annotation.infer_annotation(context, element.children[1]) \
            .execute_annotation()
    elif typ == 'yield_expr':
        if len(element.children) and element.children[1].type == 'yield_arg':
            # Implies that it's a yield from.
            element = element.children[1].children[1]
            generators = context.infer_node(element) \
                .py__getattribute__('__iter__').execute_with_values()
            return generators.py__stop_iteration_returns()

        # Generator.send() is not implemented.
        return NO_VALUES
    elif typ == 'namedexpr_test':
        return context.infer_node(element.children[2])
    else:
        return infer_or_test(context, element)
def infer_trailer(context, atom_values, trailer):
    """
    Apply a trailer (``[...]`` subscript, ``.name`` attribute access or
    ``(...)`` call) to the already inferred ``atom_values``.
    """
    trailer_op, node = trailer.children[:2]
    if node == ')':  # `arglist` is optional.
        node = None

    if trailer_op == '[':
        trailer_op, node, _ = trailer.children
        return atom_values.get_item(
            _infer_subscript_list(context, node),
            ContextualizedNode(context, trailer)
        )
    else:
        debug.dbg('infer_trailer: %s in %s', trailer, atom_values)
        if trailer_op == '.':
            return atom_values.py__getattribute__(
                name_context=context,
                name_or_str=node
            )
        else:
            assert trailer_op == '(', 'trailer_op is actually %s' % trailer_op
            args = arguments.TreeArguments(context.inference_state, context, node, trailer)
            return atom_values.execute(args)
def infer_atom(context, atom):
    """
    Basically to process ``atom`` nodes. The parser sometimes doesn't
    generate the node (because it has just one child). In that case an atom
    might be a name or a literal as well.
    """
    state = context.inference_state
    if atom.type == 'name':
        # This is the first global lookup.
        stmt = tree.search_ancestor(atom, 'expr_stmt', 'lambdef', 'if_stmt') or atom
        if stmt.type == 'if_stmt':
            if not any(n.start_pos <= atom.start_pos < n.end_pos
                       for n in stmt.get_test_nodes()):
                stmt = atom
        elif stmt.type == 'lambdef':
            stmt = atom
        position = stmt.start_pos
        if _is_annotation_name(atom):
            # Since Python 3.7 (with from __future__ import annotations),
            # annotations are essentially strings and can reference objects
            # that are defined further down in code. Therefore just set the
            # position to None, so the finder will not try to stop at a certain
            # position in the module.
            position = None
        return context.py__getattribute__(atom, position=position)
    elif atom.type == 'keyword':
        # For False/True/None
        if atom.value in ('False', 'True', 'None'):
            return ValueSet([compiled.builtin_from_name(state, atom.value)])
        elif atom.value == 'yield':
            # Contrary to yield from, yield can just appear alone to return a
            # value when used with `.send()`.
            return NO_VALUES
        assert False, 'Cannot infer the keyword %s' % atom

    elif isinstance(atom, tree.Literal):
        string = state.compiled_subprocess.safe_literal_eval(atom.value)
        return ValueSet([compiled.create_simple_object(state, string)])
    elif atom.type == 'strings':
        # Will be multiple string.
        value_set = infer_atom(context, atom.children[0])
        for string in atom.children[1:]:
            right = infer_atom(context, string)
            value_set = _infer_comparison(context, value_set, '+', right)
        return value_set
    elif atom.type == 'fstring':
        return compiled.get_string_value_set(state)
    else:
        c = atom.children
        # Parentheses without commas are not tuples.
        if c[0] == '(' and not len(c) == 2 \
                and not (c[1].type == 'testlist_comp'
                         and len(c[1].children) > 1):
            return context.infer_node(c[1])

        try:
            comp_for = c[1].children[1]
        except (IndexError, AttributeError):
            pass
        else:
            if comp_for == ':':
                # Dict comprehensions have a colon at the 3rd index.
                try:
                    comp_for = c[1].children[3]
                except IndexError:
                    pass

            if comp_for.type in ('comp_for', 'sync_comp_for'):
                return ValueSet([iterable.comprehension_from_atom(
                    state, context, atom
                )])

        # It's a dict/list/tuple literal.
        array_node = c[1]
        try:
            array_node_c = array_node.children
        except AttributeError:
            array_node_c = []
        if c[0] == '{' and (array_node == '}' or ':' in array_node_c
                            or '**' in array_node_c):
            new_value = iterable.DictLiteralValue(state, context, atom)
        else:
            new_value = iterable.SequenceLiteralValue(state, context, atom)
        return ValueSet([new_value])
def infer_expr_stmt(context, stmt, seek_name=None):
    """
    Infer an ``expr_stmt`` with recursion protection; checks PEP 484 style
    type comments before falling back to normal statement inference.
    """
    with recursion.execution_allowed(context.inference_state, stmt) as allowed:
        if allowed:
            if seek_name is not None:
                pep0484_values = \
                    annotation.find_type_from_comment_hint_assign(context, stmt, seek_name)
                if pep0484_values:
                    return pep0484_values

            return _infer_expr_stmt(context, stmt, seek_name)
    return NO_VALUES
@debug.increase_indent
@_limit_value_infers
def _infer_expr_stmt(context, stmt, seek_name=None):
    """
    The starting point of the completion. A statement always owns a call
    list, which are the calls, that a statement does. In case multiple
    names are defined in the statement, `seek_name` returns the result for
    this name.

    expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
                     ('=' (yield_expr|testlist_star_expr))*)
    annassign: ':' test ['=' test]
    augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
                '<<=' | '>>=' | '**=' | '//=')

    :param stmt: A `tree.ExprStmt`.
    """
    def check_setitem(stmt):
        # Detects `name[...] = value` and returns (is_setitem, subscript node).
        atom_expr = stmt.children[0]
        if atom_expr.type not in ('atom_expr', 'power'):
            return False, None
        name = atom_expr.children[0]
        if name.type != 'name' or len(atom_expr.children) != 2:
            return False, None
        trailer = atom_expr.children[-1]
        return trailer.children[0] == '[', trailer.children[1]

    debug.dbg('infer_expr_stmt %s (%s)', stmt, seek_name)
    rhs = stmt.get_rhs()

    value_set = context.infer_node(rhs)

    if seek_name:
        n = TreeNameDefinition(context, seek_name)
        value_set = check_tuple_assignments(n, value_set)

    first_operator = next(stmt.yield_operators(), None)
    is_setitem, subscriptlist = check_setitem(stmt)
    is_annassign = first_operator not in ('=', None) and first_operator.type == 'operator'
    if is_annassign or is_setitem:
        # `=` is always the last character in aug assignments -> -1
        name = stmt.get_defined_names(include_setitem=True)[0].value
        left_values = context.py__getattribute__(name, position=stmt.start_pos)

        if is_setitem:
            def to_mod(v):
                c = ContextualizedSubscriptListNode(context, subscriptlist)
                if v.array_type == 'dict':
                    return DictModification(v, value_set, c)
                elif v.array_type == 'list':
                    return ListModification(v, value_set, c)
                return v

            value_set = ValueSet(to_mod(v) for v in left_values)
        else:
            operator = copy.copy(first_operator)
            operator.value = operator.value[:-1]
            for_stmt = tree.search_ancestor(stmt, 'for_stmt')
            if for_stmt is not None and for_stmt.type == 'for_stmt' and value_set \
                    and parser_utils.for_stmt_defines_one_name(for_stmt):
                # Iterate through result and add the values, that's possible
                # only in for loops without clutter, because they are
                # predictable. Also only do it, if the variable is not a tuple.
                node = for_stmt.get_testlist()
                cn = ContextualizedNode(context, node)
                ordered = list(cn.infer().iterate(cn))

                for lazy_value in ordered:
                    dct = {for_stmt.children[1].value: lazy_value.infer()}
                    with context.predefine_names(for_stmt, dct):
                        t = context.infer_node(rhs)
                        left_values = _infer_comparison(context, left_values, operator, t)
                value_set = left_values
            else:
                value_set = _infer_comparison(context, left_values, operator, value_set)
    debug.dbg('infer_expr_stmt result %s', value_set)
    return value_set
def infer_or_test(context, or_test):
    """
    Infer boolean/comparison expressions; short-circuits ``and``/``or`` when
    the truthiness of the left side is certain.
    """
    iterator = iter(or_test.children)
    types = context.infer_node(next(iterator))
    for operator in iterator:
        right = next(iterator)
        if operator.type == 'comp_op':  # not in / is not
            operator = ' '.join(c.value for c in operator.children)

        # handle type inference of and/or here.
        if operator in ('and', 'or'):
            left_bools = set(left.py__bool__() for left in types)
            if left_bools == {True}:
                if operator == 'and':
                    types = context.infer_node(right)
            elif left_bools == {False}:
                if operator != 'and':
                    types = context.infer_node(right)
            # Otherwise continue, because of uncertainty.
        else:
            types = _infer_comparison(context, types, operator,
                                      context.infer_node(right))
    debug.dbg('infer_or_test types %s', types)
    return types
@iterator_to_value_set
def infer_factor(value_set, operator):
    """
    Calculates `+`, `-`, `~` and `not` prefixes.
    """
    for value in value_set:
        if operator == '-':
            if is_number(value):
                yield value.negate()
        elif operator == 'not':
            b = value.py__bool__()
            if b is None:  # Uncertainty.
                return
            yield compiled.create_simple_object(value.inference_state, not b)
        else:
            yield value
def _literals_to_types(inference_state, result):
    # Changes literals ('a', 1, 1.0, etc) to its type instances (str(),
    # int(), float(), etc).
    new_result = NO_VALUES
    for typ in result:
        if is_literal(typ):
            # Literals are only valid as long as the operations are
            # correct. Otherwise add a value-free instance.
            cls = compiled.builtin_from_name(inference_state, typ.name.string_name)
            new_result |= cls.execute_with_values()
        else:
            new_result |= ValueSet([typ])
    return new_result
def _infer_comparison(context, left_values, operator, right_values):
    """
    Infer a binary operation between two value sets; degrades to type-level
    results when the operand combination explodes.
    """
    state = context.inference_state
    if isinstance(operator, str):
        operator_str = operator
    else:
        operator_str = str(operator.value)
    if not left_values or not right_values:
        # illegal slices e.g. cause left/right_result to be None
        result = (left_values or NO_VALUES) | (right_values or NO_VALUES)
        return _literals_to_types(state, result)
    elif operator_str == "|" and all(
        value.is_class() or value.is_compiled()
        for value in itertools.chain(left_values, right_values)
    ):
        # ^^^ A naive hack for PEP 604
        return ValueSet.from_sets((left_values, right_values))
    else:
        # I don't think there's a reasonable chance that a string
        # operation is still correct, once we pass something like six
        # objects.
        if len(left_values) * len(right_values) > 6:
            return _literals_to_types(state, left_values | right_values)
        else:
            return ValueSet.from_sets(
                _infer_comparison_part(state, context, left, operator, right)
                for left in left_values
                for right in right_values
            )
def _is_annotation_name(name):
    """Return True if ``name`` sits inside an annotation of its ancestor."""
    ancestor = tree.search_ancestor(name, 'param', 'funcdef', 'expr_stmt')
    if ancestor is None:
        return False

    if ancestor.type in ('param', 'funcdef'):
        ann = ancestor.annotation
        if ann is not None:
            return ann.start_pos <= name.start_pos < ann.end_pos
    elif ancestor.type == 'expr_stmt':
        c = ancestor.children
        if len(c) > 1 and c[1].type == 'annassign':
            return c[1].start_pos <= name.start_pos < c[1].end_pos
    return False
564 return value
.array_type
== 'list'
567 def _is_tuple(value
):
568 return value
.array_type
== 'tuple'
def _bool_to_value(inference_state, bool_):
    # Wrap a Python bool into the matching builtin value (True/False).
    builtin_name = str(bool_)
    return compiled.builtin_from_name(inference_state, builtin_name)
def _get_tuple_ints(value):
    """
    Return the list of ints of a literal tuple like ``(3, 7)`` or ``None``
    if the value is not such a literal of pure number nodes.
    """
    if not isinstance(value, iterable.SequenceLiteralValue):
        return None
    numbers = []
    for lazy_value in value.py__iter__():
        if not isinstance(lazy_value, LazyTreeValue):
            return None
        node = lazy_value.data
        if node.type != 'number':
            return None
        try:
            numbers.append(int(node.value))
        except ValueError:
            return None
    return numbers
def _infer_comparison_part(inference_state, context, left, operator, right):
    """
    Infer one binary operation between two single values, falling back to
    magic-method execution and finally to the union of both operands.
    """
    l_is_num = is_number(left)
    r_is_num = is_number(right)
    if isinstance(operator, str):
        str_operator = operator
    else:
        str_operator = str(operator.value)

    if str_operator == '*':
        # for iterables, ignore * operations
        if isinstance(left, iterable.Sequence) or is_string(left):
            return ValueSet([left])
        elif isinstance(right, iterable.Sequence) or is_string(right):
            return ValueSet([right])
    elif str_operator == '+':
        if l_is_num and r_is_num or is_string(left) and is_string(right):
            return left.execute_operation(right, str_operator)
        elif _is_list(left) and _is_list(right) or _is_tuple(left) and _is_tuple(right):
            return ValueSet([iterable.MergedArray(inference_state, (left, right))])
    elif str_operator == '-':
        if l_is_num and r_is_num:
            return left.execute_operation(right, str_operator)
    elif str_operator == '%':
        # With strings and numbers the left type typically remains. Except for
        # `int() % float()`.
        return ValueSet([left])
    elif str_operator in COMPARISON_OPERATORS:
        if left.is_compiled() and right.is_compiled():
            # Possible, because the return is not an option. Just compare.
            result = left.execute_operation(right, str_operator)
            if result:
                return result
        else:
            if str_operator in ('is', '!=', '==', 'is not'):
                operation = COMPARISON_OPERATORS[str_operator]
                bool_ = operation(left, right)
                # Only if == returns True or != returns False, we can continue.
                # There's no guarantee that they are not equal. This can help
                # in some cases, but does not cover everything.
                if (str_operator in ('is', '==')) == bool_:
                    return ValueSet([_bool_to_value(inference_state, bool_)])

            if isinstance(left, VersionInfo):
                version_info = _get_tuple_ints(right)
                if version_info is not None:
                    bool_result = compiled.access.COMPARISON_OPERATORS[operator](
                        inference_state.environment.version_info,
                        tuple(version_info)
                    )
                    return ValueSet([_bool_to_value(inference_state, bool_result)])

        return ValueSet([
            _bool_to_value(inference_state, True),
            _bool_to_value(inference_state, False)
        ])
    elif str_operator in ('in', 'not in'):
        return NO_VALUES

    def check(obj):
        """Checks if a Jedi object is either a float or an int."""
        return isinstance(obj, TreeInstance) and \
            obj.name.string_name in ('int', 'float')

    # Static analysis, one is a number, the other one is not.
    if str_operator in ('+', '-') and l_is_num != r_is_num \
            and not (check(left) or check(right)):
        message = "TypeError: unsupported operand type(s) for +: %s and %s"
        analysis.add(context, 'type-error-operation', operator,
                     message % (left, right))

    if left.is_class() or right.is_class():
        return NO_VALUES

    method_name = operator_to_magic_method[str_operator]
    magic_methods = left.py__getattribute__(method_name)
    if magic_methods:
        result = magic_methods.execute_with_values(right)
        if result:
            return result

    if not magic_methods:
        reverse_method_name = reverse_operator_to_magic_method[str_operator]
        magic_methods = right.py__getattribute__(reverse_method_name)

        result = magic_methods.execute_with_values(left)
        if result:
            return result

    result = ValueSet([left, right])
    debug.dbg('Used operator %s resulting in %s', operator, result)
    return result
@plugin_manager.decorate()
def tree_name_to_values(inference_state, context, tree_name):
    """
    Infer all values a tree name can take, dispatching on the kind of
    statement that defines it (for/with/import/class/def/try/...).
    """
    value_set = NO_VALUES
    module_node = context.get_root_context().tree_node
    # First check for annotations, like: `foo: int = 3`
    if module_node is not None:
        names = module_node.get_used_names().get(tree_name.value, [])
        found_annotation = False
        for name in names:
            expr_stmt = name.parent

            if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign":
                correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
                if correct_scope:
                    found_annotation = True
                    value_set |= annotation.infer_annotation(
                        context, expr_stmt.children[1].children[1]
                    ).execute_annotation()
        if found_annotation:
            return value_set

    types = []
    node = tree_name.get_definition(import_name_always=True, include_setitem=True)
    if node is None:
        node = tree_name.parent
        if node.type == 'global_stmt':
            c = context.create_context(tree_name)
            if c.is_module():
                # In case we are already part of the module, there is no point
                # in looking up the global statement anymore, because it's not
                # valid at that point anyway.
                return NO_VALUES
            # For global_stmt lookups, we only need the first possible scope,
            # which means the function itself.
            filter = next(c.get_filters())
            names = filter.get(tree_name.value)
            return ValueSet.from_sets(name.infer() for name in names)
        elif node.type not in ('import_from', 'import_name'):
            c = context.create_context(tree_name)
            return infer_atom(c, tree_name)

    typ = node.type
    if typ == 'for_stmt':
        types = annotation.find_type_from_comment_hint_for(context, node, tree_name)
        if types:
            return types
    if typ == 'with_stmt':
        types = annotation.find_type_from_comment_hint_with(context, node, tree_name)
        if types:
            return types

    if typ in ('for_stmt', 'comp_for', 'sync_comp_for'):
        try:
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            cn = ContextualizedNode(context, node.children[3])
            for_types = iterate_values(
                cn.infer(),
                contextualized_node=cn,
                is_async=node.parent.type == 'async_stmt',
            )
            n = TreeNameDefinition(context, tree_name)
            types = check_tuple_assignments(n, for_types)
    elif typ == 'expr_stmt':
        types = infer_expr_stmt(context, node, tree_name)
    elif typ == 'with_stmt':
        value_managers = context.infer_node(node.get_test_node_from_name(tree_name))
        if node.parent.type == 'async_stmt':
            # In the case of `async with` statements, we need to
            # first get the coroutine from the `__aenter__` method,
            # then "unwrap" via the `__await__` method
            enter_methods = value_managers.py__getattribute__('__aenter__')
            coro = enter_methods.execute_with_values()
            return coro.py__await__().py__stop_iteration_returns()
        enter_methods = value_managers.py__getattribute__('__enter__')
        return enter_methods.execute_with_values()
    elif typ in ('import_from', 'import_name'):
        types = imports.infer_import(context, tree_name)
    elif typ in ('funcdef', 'classdef'):
        types = _apply_decorators(context, node)
    elif typ == 'try_stmt':
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        exceptions = context.infer_node(tree_name.get_previous_sibling().get_previous_sibling())
        types = exceptions.execute_with_values()
    elif typ == 'param':
        types = NO_VALUES
    elif typ == 'del_stmt':
        types = NO_VALUES
    elif typ == 'namedexpr_test':
        types = infer_node(context, node)
    else:
        raise ValueError("Should not happen. type: %s" % typ)
    return types
# We don't want to have functions/classes that are created by the same
# tree node share a cache entry, hence the method cache below.
@inference_state_method_cache()
def _apply_decorators(context, node):
    """
    Returns the function, that should to be executed in the end.
    This is also the places where the decorators are processed.
    """
    if node.type == 'classdef':
        decoratee_value = ClassValue(
            context.inference_state,
            parent_context=context,
            tree_node=node
        )
    else:
        decoratee_value = FunctionValue.from_context(context, node)
    initial = values = ValueSet([decoratee_value])

    if is_big_annoying_library(context):
        return values

    for dec in reversed(node.get_decorators()):
        debug.dbg('decorator: %s %s', dec, values, color="MAGENTA")
        with debug.increase_indent_cm():
            dec_values = context.infer_node(dec.children[1])
            trailer_nodes = dec.children[2:-1]
            if trailer_nodes:
                # Create a trailer and infer it.
                trailer = tree.PythonNode('trailer', trailer_nodes)
                trailer.parent = dec
                dec_values = infer_trailer(context, dec_values, trailer)

            if not len(dec_values):
                code = dec.get_code(include_prefix=False)
                # For the short future, we don't want to hear about the runtime
                # decorator in typing that was intentionally omitted. This is not
                # "correct", but helps with debugging.
                if code != '@runtime\n':
                    debug.warning('decorator not found: %s on %s', dec, node)
                return initial

            values = dec_values.execute(arguments.ValuesArguments([values]))
            if not len(values):
                debug.warning('not possible to resolve wrappers found %s', node)
                return initial

        debug.dbg('decorator end %s', values, color="MAGENTA")
    if values != initial:
        return ValueSet([Decoratee(c, decoratee_value) for c in values])
    return values
def check_tuple_assignments(name, value_set):
    """
    Checks if tuples are assigned.
    """
    lazy_value = None
    for index, node in name.assignment_indexes():
        cn = ContextualizedNode(name.parent_context, node)
        iterated = value_set.iterate(cn)
        if isinstance(index, slice):
            # For now star unpacking is not possible.
            return NO_VALUES
        for _ in range(index + 1):
            try:
                lazy_value = next(iterated)
            except StopIteration:
                # We could do this with the default param in next. But this
                # would allow this loop to run for a very long time if the
                # index number is high. Therefore break if the loop is
                # finished.
                return NO_VALUES
        value_set = lazy_value.infer()
    return value_set
class ContextualizedSubscriptListNode(ContextualizedNode):
    """A contextualized node whose inference resolves subscript lists/slices."""
    def infer(self):
        return _infer_subscript_list(self.context, self.node)
def _infer_subscript_list(context, index):
    """
    Handles slices in subscript nodes.
    """
    if index == ':':
        # Like array[:]
        return ValueSet([iterable.Slice(context, None, None, None)])

    elif index.type == 'subscript' and not index.children[0] == '.':
        # subscript basically implies a slice operation
        # e.g. array[:3]
        result = []
        for el in index.children:
            if el == ':':
                if not result:
                    result.append(None)
            elif el.type == 'sliceop':
                if len(el.children) == 2:
                    result.append(el.children[1])
            else:
                result.append(el)
        result += [None] * (3 - len(result))

        return ValueSet([iterable.Slice(context, *result)])
    elif index.type == 'subscriptlist':
        return ValueSet([iterable.SequenceLiteralValue(context.inference_state, context, index)])

    # No slices
    return context.infer_node(index)