1 """
2 - the popular ``_memoize_default`` works like a typical memoize and returns the
3 default otherwise.
4 - ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes.
5 """
6 from functools import wraps
7
8 from jedi import debug
9
10 _NO_DEFAULT = object()
11 _RECURSION_SENTINEL = object()
12
13
14 def _memoize_default(default=_NO_DEFAULT, inference_state_is_first_arg=False,
15 second_arg_is_inference_state=False):
16 """ This is a typical memoization decorator, BUT there is one difference:
17 To prevent recursion it sets defaults.
18
19 Preventing recursion is in this case the much bigger use than speed. I
20 don't think, that there is a big speed difference, but there are many cases
21 where recursion could happen (think about a = b; b = a).
22 """
    def func(function):
        def wrapper(obj, *args, **kwargs):
            # TODO These checks are kind of ugly and slow.
            if inference_state_is_first_arg:
                cache = obj.memoize_cache
            elif second_arg_is_inference_state:
                cache = args[0].memoize_cache  # needed for meta classes
            else:
                cache = obj.inference_state.memoize_cache

            try:
                memo = cache[function]
            except KeyError:
                cache[function] = memo = {}

            key = (obj, args, frozenset(kwargs.items()))
            if key in memo:
                return memo[key]
            else:
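                # Seed the cache with the default (when one was given) before
                # calling the wrapped function, so a recursive call that hits
                # the same key returns the default instead of recursing
                # further.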
                if default is not _NO_DEFAULT:
                    memo[key] = default
                rv = function(obj, *args, **kwargs)
                memo[key] = rv
                return rv
        return wrapper

    return func


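# A minimal usage sketch (illustrative only; ``HypotheticalValue`` and
# ``infer_things`` are made-up names, not part of this module).  The default
# code path expects the decorated method's owner to expose
# ``obj.inference_state.memoize_cache``, a plain dict:
#
#     class HypotheticalValue:
#         def __init__(self, inference_state):
#             self.inference_state = inference_state
#
#         @_memoize_default(default=())
#         def infer_things(self):
#             # A recursive call with the same arguments returns () (the
#             # seeded default) instead of recursing endlessly.
#             ...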
def inference_state_function_cache(default=_NO_DEFAULT):
    def decorator(func):
        return _memoize_default(default=default, inference_state_is_first_arg=True)(func)

    return decorator


def inference_state_method_cache(default=_NO_DEFAULT):
    def decorator(func):
        return _memoize_default(default=default)(func)

    return decorator


def inference_state_as_method_param_cache():
    def decorator(call):
        return _memoize_default(second_arg_is_inference_state=True)(call)

    return decorator


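# The three factories above only differ in where the ``memoize_cache`` dict is
# looked up: ``inference_state_function_cache`` treats the wrapped function's
# first argument as the inference state itself,
# ``inference_state_method_cache`` reads it from ``self.inference_state``, and
# ``inference_state_as_method_param_cache`` reads it from the second argument
# (the variant ``CachedMetaClass.__call__`` below relies on).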
class CachedMetaClass(type):
    """
    This is basically the same as the decorators above; it just caches class
    instantiations. Either you do it this way or with decorators, but with
    decorators you lose class access (isinstance, etc).
    """
    @inference_state_as_method_param_cache()
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)


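# A hedged usage sketch (``HypotheticalClassValue`` and ``node`` are made-up
# names): a class whose constructor takes the inference state as its first
# argument can opt into cached construction via this metaclass, so calling it
# again with the same arguments returns the previously created instance:
#
#     class HypotheticalClassValue(metaclass=CachedMetaClass):
#         def __init__(self, inference_state, node):
#             self.inference_state = inference_state
#             self.node = node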
def inference_state_method_generator_cache():
    """
    This is a special memoizer. It memoizes generators and also checks for
    recursion errors, returning no further iterator elements in that case.
    """
    def func(function):
        @wraps(function)
        def wrapper(obj, *args, **kwargs):
            cache = obj.inference_state.memoize_cache
            try:
                memo = cache[function]
            except KeyError:
                cache[function] = memo = {}

            key = (obj, args, frozenset(kwargs.items()))

            if key in memo:
                actual_generator, cached_lst = memo[key]
            else:
                actual_generator = function(obj, *args, **kwargs)
                cached_lst = []
                memo[key] = actual_generator, cached_lst

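            # Replay already-cached elements first.  A cached
            # _RECURSION_SENTINEL marks a slot that is still being computed:
            # reaching it means this generator re-entered itself, so the
            # iteration stops there.  The sentinel is appended *before* the
            # wrapped generator is advanced, so a recursive call made while
            # computing the next element sees it.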
            i = 0
            while True:
                try:
                    next_element = cached_lst[i]
                    if next_element is _RECURSION_SENTINEL:
                        debug.warning('Found a generator recursion for %s' % obj)
                        # This means we have hit a recursion.
                        return
                except IndexError:
                    cached_lst.append(_RECURSION_SENTINEL)
                    next_element = next(actual_generator, None)
                    if next_element is None:
                        cached_lst.pop()
                        return
                    cached_lst[-1] = next_element
                yield next_element
                i += 1
        return wrapper

    return func
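# A hedged usage sketch (``HypotheticalLazyValue`` and ``_compute_lazily`` are
# made-up names): the decorator fits generator methods on objects exposing
# ``self.inference_state.memoize_cache``.  All callers share one underlying
# generator per key, elements are cached as they are produced, and a recursive
# re-entry simply stops yielding instead of overflowing the stack.  Note that
# the wrapped generator must never yield None, because None is how exhaustion
# is detected:
#
#     class HypotheticalLazyValue:
#         def __init__(self, inference_state):
#             self.inference_state = inference_state
#
#         @inference_state_method_generator_cache()
#         def iterate(self):
#             yield from self._compute_lazily()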