Columns (name, dtype, observed min/max or class count):

    code             stringlengths   51 .. 31k
    url              stringlengths   31 .. 59
    complexity       int64           1 .. 153
    n_ast_nodes      int64           12 .. 5.6k
    commit_id        stringlengths   40 .. 40
    ast_errors       stringlengths   0 .. 1.46k
    n_words          int64           2 .. 2.17k
    file_name        stringlengths   5 .. 56
    ast_levels       int64           4 .. 32
    nloc             int64           1 .. 451
    repo             stringlengths   3 .. 28
    fun_name         stringlengths   2 .. 73
    n_whitespaces    int64           2 .. 13.8k
    path             stringlengths   7 .. 134
    vocab_size       int64           2 .. 671
    commit_message   stringlengths   51 .. 15.3k
    id               int64           20 .. 338k
    language         stringclasses   1 value (Python)
    n_identifiers    int64           1 .. 186
    n_ast_errors     int64           0 .. 10
    token_counts     int64           6 .. 3.32k
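The records below follow this schema, one labeled field per line, with each code field reflowed into its original multi-line form. As a minimal sketch of how a dataset with this schema could be loaded and inspected via the Hugging Face `datasets` library (the path "org/code-metrics" is a hypothetical placeholder, not the real dataset id):

    # Hedged sketch: assumes this dump comes from a Hugging Face dataset;
    # "org/code-metrics" is a placeholder id, not the actual dataset name.
    from datasets import load_dataset

    ds = load_dataset("org/code-metrics", split="train")
    print(ds.column_names)  # ['code', 'url', 'complexity', 'n_ast_nodes', ...]

    row = ds[0]
    print(row["repo"], row["path"], row["fun_name"])
    print(row["code"])      # function source stored as a single string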
id: 125,184 | repo: ray | language: Python
fun_name: _format | file_name: tabulate.py
path: python/ray/_private/thirdparty/tabulate/tabulate.py
url: https://github.com/ray-project/ray.git
commit_id: adf24bfa9723b0621183bb27f0c889b813c06e8a
commit_message: [State Observability] Use a table format by default (#26159) NOTE: tabulate is copied/pasted to the codebase for table formatting. This PR changes the default layout to be the table format for both summary and list APIs.
metrics: complexity=8, nloc=22, n_ast_nodes=251, ast_levels=15, n_ast_errors=0, n_words=65, vocab_size=47, n_whitespaces=224, n_identifiers=18, token_counts=132
code:

    def _format(val, valtype, floatfmt, missingval="", has_invisible=True):  # noqa
        if val is None:
            return missingval

        if valtype in [int, _text_type]:
            return "{0}".format(val)
        elif valtype is _binary_type:
            try:
                return _text_type(val, "ascii")
            except TypeError:
                return _text_type(val)
        elif valtype is float:
            is_a_colored_number = has_invisible and isinstance(
                val, (_text_type, _binary_type)
            )
            if is_a_colored_number:
                raw_val = _strip_invisible(val)
                formatted_val = format(float(raw_val), floatfmt)
                return val.replace(raw_val, formatted_val)
            else:
                return format(float(val), floatfmt)
        else:
            return "{0}".format(val)
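The AST-derived metrics attached to each record are not formally defined in this dump; as a hedged sketch, here is one plausible reading, computed with the stdlib ast module, assuming n_ast_nodes is a total node count, ast_levels the maximum parse-tree depth, and n_identifiers the number of distinct names:

    # Hedged sketch of plausible metric definitions; the dataset's exact
    # counting rules are an assumption here, not documented in this dump.
    import ast

    source = "def f(x):\n    return x + 1\n"
    tree = ast.parse(source)

    # n_ast_nodes: total number of nodes in the parse tree
    n_ast_nodes = sum(1 for _ in ast.walk(tree))

    # ast_levels: maximum depth of the parse tree
    def depth(node: ast.AST) -> int:
        return 1 + max((depth(c) for c in ast.iter_child_nodes(node)), default=0)

    ast_levels = depth(tree)

    # n_identifiers: number of distinct names referenced
    n_identifiers = len({n.id for n in ast.walk(tree) if isinstance(n, ast.Name)})

    print(n_ast_nodes, ast_levels, n_identifiers)

If these assumed definitions match the dataset's, running the same pass over a record's code field should reproduce its stored values.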
id: 42,070 | repo: seaborn | language: Python
fun_name: set_context | file_name: rcmod.py
path: seaborn/rcmod.py
url: https://github.com/mwaskom/seaborn.git
commit_id: 34662f4be5c364e7518f9c1118c9b362038ee5dd
commit_message: Convert docs to pydata-sphinx-theme and add new material (#2842) * Do basic conversion of site to pydata_sphinx_theme * Remove some pae structure customizations we no longer need * Add some custom CSS * Tweak a few more colors * Remove vestigial div closing tag * Reorganize release notes into hierarchical pages * Rebuild full docs and fix some resulting issues * Make release note doc refs absolute * Convert homepage to use sphinx-design instead of hand-crafted html * Remove original custom css * Simplify header and put archive switcher in footer * Streamline API docs for objects * Play around with templates to fix shrinking content (not perfect yet) * Improve use of horizontal space without sidebars * Various tweaks * Convert tutorial homepage source to native sphinx-design directives * Move intro page into tutorial * More tweaks * Tweak theme colors and footer * Remove reference to navbar version * Note that error bar tutorial demonstrates new features as of v0.12 * Update layout customization for new theme features * Various layout and CSS tweaks * Narrow support guidance to StackOverflow * Run all notebooks * Adapt to new dropdown navbar in pydata theme * Separate tutorial source and outputs * Separate dostring source and outputs * Add scale API template * Update API docs * Fix requirements * Add new objects * Point doc requirements at v0.10 RC for theme
metrics: complexity=1, nloc=3, n_ast_nodes=53, ast_levels=8, n_ast_errors=0, n_words=10, vocab_size=10, n_whitespaces=19, n_identifiers=9, token_counts=34
code:

    def set_context(context=None, font_scale=1, rc=None):
        context_object = plotting_context(context, font_scale, rc)
        mpl.rcParams.update(context_object)
id: 311,030 | repo: core | language: Python
fun_name: async_unload | file_name: common.py
path: homeassistant/components/synology_dsm/common.py
url: https://github.com/home-assistant/core.git
commit_id: 5d7d652237b2368320a68c772ce3d837e4c1d04b
commit_message: Replace Synology DSM services with buttons (#57352)
metrics: complexity=1, nloc=3, n_ast_nodes=35, ast_levels=10, n_ast_errors=0, n_words=7, vocab_size=7, n_whitespaces=21, n_identifiers=5, token_counts=19
code:

    async def async_unload(self) -> None:
        await self._syno_api_executer(self.dsm.logout)
id: 278,720 | repo: keras | language: Python
fun_name: clone_keras_tensors | file_name: functional_utils.py
path: keras/engine/functional_utils.py
url: https://github.com/keras-team/keras.git
commit_id: 3613c3defc39c236fb1592c4f7ba1a9cc887343a
commit_message: Remove pylint comments. PiperOrigin-RevId: 452353044
metrics: complexity=4, nloc=14, n_ast_nodes=160, ast_levels=16, n_ast_errors=0, n_words=46, vocab_size=35, n_whitespaces=191, n_identifiers=16, token_counts=98
code:

    def clone_keras_tensors(args, keras_tensor_mapping):
        result = []
        for obj in tf.nest.flatten(args):
            if node_module.is_keras_tensor(obj):
                if id(obj) in keras_tensor_mapping:
                    cpy = keras_tensor_mapping[id(obj)]
                else:
                    # Create copy of keras_tensor if we haven't done it before
                    cpy = _clone_keras_tensor(obj)
                    cpy._keras_history = obj._keras_history
                    keras_tensor_mapping[id(obj)] = cpy
                result.append(cpy)
            else:
                result.append(obj)
        return tf.nest.pack_sequence_as(args, result)
id: 22,168 | repo: pipenv | language: Python
fun_name: get_plain_headed_box | file_name: box.py
path: pipenv/patched/pip/_vendor/rich/box.py
url: https://github.com/pypa/pipenv.git
commit_id: cd5a9683be69c86c8f3adcd13385a9bc5db198ec
commit_message: Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
metrics: complexity=1, nloc=9, n_ast_nodes=31, ast_levels=7, n_ast_errors=0, n_words=7, vocab_size=7, n_whitespaces=21, n_identifiers=4, token_counts=17
code:

    def get_plain_headed_box(self) -> "Box":
        return PLAIN_HEADED_SUBSTITUTIONS.get(self, self)
id: 156,567 | repo: dask | language: Python
fun_name: apply_and_enforce | file_name: core.py
path: dask/array/core.py
url: https://github.com/dask/dask.git
commit_id: 2b90415b02d3ad1b08362889e0818590ca3133f4
commit_message: Add kwarg ``enforce_ndim`` to ``dask.array.map_blocks()`` (#8865)
metrics: complexity=2, nloc=11, n_ast_nodes=129, ast_levels=12, n_ast_errors=0, n_words=44, vocab_size=36, n_whitespaces=106, n_identifiers=10, token_counts=68
code:

    def apply_and_enforce(*args, **kwargs):
        func = kwargs.pop("_func")
        expected_ndim = kwargs.pop("expected_ndim")
        out = func(*args, **kwargs)
        if getattr(out, "ndim", 0) != expected_ndim:
            out_ndim = getattr(out, "ndim", 0)
            raise ValueError(
                f"Dimension mismatch: expected output of {func} "
                f"to have dims = {expected_ndim}. Got {out_ndim} instead."
            )
        return out
id: 178,165 | repo: label-studio | language: Python
fun_name: get_jobs_by_meta | file_name: redis.py
path: label_studio/core/redis.py
url: https://github.com/heartexlabs/label-studio.git
commit_id: 283628097a10e8abafc94c683bc8be2d79a5998f
commit_message: feat: DEV-2075: Add mixin to Project to support mechanism to cancel old jobs (#2547) * feat: DEV-2075: Add mixin to Project to support mechanism to cancel old jobs
metrics: complexity=6, nloc=6, n_ast_nodes=83, ast_levels=11, n_ast_errors=0, n_words=42, vocab_size=33, n_whitespaces=90, n_identifiers=10, token_counts=52
code:

    def get_jobs_by_meta(queue, func_name, meta):
        # get all jobs from Queue
        jobs = (job
                for job in queue.get_jobs()
                if job.func.__name__ == func_name
                )
        # return only with same meta data
        return [job for job in jobs if hasattr(job, 'meta') and job.meta == meta]
id: 269,601 | repo: keras | language: Python
fun_name: enable_tf_random_generator | file_name: backend.py
path: keras/backend.py
url: https://github.com/keras-team/keras.git
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf
commit_message: Reformatting the codebase with black. PiperOrigin-RevId: 450093126
metrics: complexity=1, nloc=3, n_ast_nodes=38, ast_levels=8, n_ast_errors=1, n_words=9, vocab_size=8, n_whitespaces=17, n_identifiers=4, token_counts=10
ast_errors: @keras_export("keras.backend.experimental.disable_tf_random_generator", v1=[])
code:

    def enable_tf_random_generator():
        global _USE_GENERATOR_FOR_RNG
        _USE_GENERATOR_FOR_RNG = True


    @keras_export("keras.backend.experimental.disable_tf_random_generator", v1=[])
id: 42,548 | repo: nltk | language: Python
fun_name: collocation_list | file_name: text.py
path: nltk/text.py
url: https://github.com/nltk/nltk.git
commit_id: 8a4cf5d94eb94b6427c5d1d7907ba07b119932c5
commit_message: Docstring tests (#3050) * fixed pytests * fixed more pytests * fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py * fixed pytests (mainly multiline or rounding issues) * fixed treebank pytests, removed test for return_string=True (deprecated) * fixed destructive.py pytests, removed test for return_string=True (deprecated) * fixed pytest (rounding issues) * fixed pytest (initialised missing object) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * added pytest +SKIP for deprecated module stanford * updated AUTHORS.md * changed docstring corrections by usage of ELLIPSIS and different roundings * fixed AUTHORS.md to be consistent * Fix framenet doctest formatting with pprint * Change docstring on MultiListBox.__init__ I believe the original typo was misinterpreted and changed to something that was not originally intended. Co-authored-by: Jan Lennartz <[email protected]> Co-authored-by: Tom Aarsen <[email protected]> Co-authored-by: Tom Aarsen <[email protected]>
metrics: complexity=5, nloc=18, n_ast_nodes=205, ast_levels=14, n_ast_errors=0, n_words=61, vocab_size=48, n_whitespaces=258, n_identifiers=27, token_counts=126
code:

    def collocation_list(self, num=20, window_size=2):
        if not (
            "_collocations" in self.__dict__
            and self._num == num
            and self._window_size == window_size
        ):
            self._num = num
            self._window_size = window_size

            # print("Building collocations list")
            from nltk.corpus import stopwords

            ignored_words = stopwords.words("english")
            finder = BigramCollocationFinder.from_words(self.tokens, window_size)
            finder.apply_freq_filter(2)
            finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
            bigram_measures = BigramAssocMeasures()
            self._collocations = list(
                finder.nbest(bigram_measures.likelihood_ratio, num)
            )
        return self._collocations
id: 294,024 | repo: core | language: Python
fun_name: library_section_payload | file_name: media_browser.py
path: homeassistant/components/plex/media_browser.py
url: https://github.com/home-assistant/core.git
commit_id: 653305b998dd033365576db303b32dd5df3a6c54
commit_message: Support multiple Plex servers in media browser (#68321)
metrics: complexity=2, nloc=15, n_ast_nodes=126, ast_levels=13, n_ast_errors=0, n_words=34, vocab_size=33, n_whitespaces=116, n_identifiers=21, token_counts=77
code:

    def library_section_payload(section):
        try:
            children_media_class = ITEM_TYPE_MEDIA_CLASS[section.TYPE]
        except KeyError as err:
            raise UnknownMediaType(f"Unknown type received: {section.TYPE}") from err
        server_id = section._server.machineIdentifier  # pylint: disable=protected-access
        return BrowseMedia(
            title=section.title,
            media_class=MEDIA_CLASS_DIRECTORY,
            media_content_id=generate_plex_uri(server_id, section.key),
            media_content_type="library",
            can_play=False,
            can_expand=True,
            children_media_class=children_media_class,
        )
id: 213,030 | repo: serverless-application-model | language: Python
fun_name: gen_skeleton | file_name: open_api.py
path: samtranslator/open_api/open_api.py
url: https://github.com/aws/serverless-application-model.git
commit_id: a5db070f446b7cfebdaa6ad2e3dcf78f6105a272
commit_message: fix: Py27hash fix (#2182) * Add third party py27hash code * Add Py27UniStr and unit tests * Add py27hash_fix utils and tests * Add to_py27_compatible_template and tests * Apply py27hash fix to wherever it is needed * Apply py27hash fix, all tests pass except api_with_any_method_in_swagger * apply py27hash fix in openapi + run black * remove py27 testing * remove other py27 references * black fixes * fixes/typos * remove py27 from tox.ini * refactoring * third party notice * black * Fix py27hash fix to deal with null events * Fix Py27UniStr repr for unicode literals * black reformat * Update _template_has_api_resource to check data type more defensively * Apply py27Dict in _get_authorizers * Apply Py27Dict to authorizers and gateway responses which will go into swagger * Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class * Rename _convert_to_py27_dict to _convert_to_py27_type * Apply Py27UniStr to path param name * Handle HttpApi resource under to_py27_compatible_template * Fix InvalidDocumentException to not sort different exceptions * black reformat * Remove unnecessary test files Co-authored-by: Wing Fung Lau <[email protected]>
metrics: complexity=1, nloc=8, n_ast_nodes=111, ast_levels=9, n_ast_errors=0, n_words=36, vocab_size=27, n_whitespaces=99, n_identifiers=4, token_counts=55
code:

    def gen_skeleton():
        # create as Py27Dict and insert key one by one to preserve input order
        skeleton = Py27Dict()
        skeleton["openapi"] = "3.0.1"
        skeleton["info"] = Py27Dict()
        skeleton["info"]["version"] = "1.0"
        skeleton["info"]["title"] = ref("AWS::StackName")
        skeleton["paths"] = Py27Dict()
        return skeleton
id: 260,625 | repo: scikit-learn | language: Python
fun_name: fit | file_name: _kernel_pca.py
path: sklearn/decomposition/_kernel_pca.py
url: https://github.com/scikit-learn/scikit-learn.git
commit_id: 3312bc2ea6aad559643a1d920e3380fa123f627c
commit_message: MAINT validate parameter in KernelPCA (#24020) Co-authored-by: Julien Jerphanion <[email protected]> Co-authored-by: jeremiedbb <[email protected]>
metrics: complexity=4, nloc=13, n_ast_nodes=175, ast_levels=12, n_ast_errors=0, n_words=57, vocab_size=48, n_whitespaces=171, n_identifiers=24, token_counts=106
code:

    def fit(self, X, y=None):
        self._validate_params()

        if self.fit_inverse_transform and self.kernel == "precomputed":
            raise ValueError("Cannot fit_inverse_transform with a precomputed kernel.")
        X = self._validate_data(X, accept_sparse="csr", copy=self.copy_X)
        self._centerer = KernelCenterer()
        K = self._get_kernel(X)
        self._fit_transform(K)

        if self.fit_inverse_transform:
            # no need to use the kernel to transform X, use shortcut expression
            X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)
            self._fit_inverse_transform(X_transformed, X)

        self.X_fit_ = X
        return self
id: 42,787 | repo: airflow | language: Python
fun_name: get_conn | file_name: kubernetes.py
path: airflow/providers/cncf/kubernetes/hooks/kubernetes.py
url: https://github.com/apache/airflow.git
commit_id: 60eb9e106f5915398eafd6aa339ec710c102dc09
commit_message: Use KubernetesHook to create api client in KubernetesPodOperator (#20578) Add support for k8s hook in KPO; use it always (even when no conn id); continue to consider the core k8s settings that KPO already takes into account but emit deprecation warning about them. KPO historically takes into account a few settings from core airflow cfg (e.g. verify ssl, tcp keepalive, context, config file, and in_cluster). So to use the hook to generate the client, somehow the hook has to take these settings into account. But we don't want the hook to consider these settings in general. So we read them in KPO and if necessary patch the hook and warn.
metrics: complexity=24, nloc=71, n_ast_nodes=759, ast_levels=13, n_ast_errors=0, n_words=267, vocab_size=146, n_whitespaces=1,032, n_identifiers=49, token_counts=460
code:

    def get_conn(self) -> Any:
        in_cluster = self._coalesce_param(
            self.in_cluster, self.conn_extras.get("extra__kubernetes__in_cluster") or None
        )
        cluster_context = self._coalesce_param(
            self.cluster_context, self.conn_extras.get("extra__kubernetes__cluster_context") or None
        )
        kubeconfig_path = self._coalesce_param(
            self.config_file, self.conn_extras.get("extra__kubernetes__kube_config_path") or None
        )
        kubeconfig = self.conn_extras.get("extra__kubernetes__kube_config") or None
        num_selected_configuration = len([o for o in [in_cluster, kubeconfig, kubeconfig_path] if o])
        if num_selected_configuration > 1:
            raise AirflowException(
                "Invalid connection configuration. Options kube_config_path, "
                "kube_config, in_cluster are mutually exclusive. "
                "You can only use one option at a time."
            )
        disable_verify_ssl = self._coalesce_param(
            self.disable_verify_ssl, _get_bool(self._get_field("disable_verify_ssl"))
        )
        disable_tcp_keepalive = self._coalesce_param(
            self.disable_tcp_keepalive, _get_bool(self._get_field("disable_tcp_keepalive"))
        )

        # BEGIN apply settings from core kubernetes configuration
        # this section should be removed in next major release
        deprecation_warnings: List[Tuple[str, Any]] = []
        if disable_verify_ssl is None and self._deprecated_core_disable_verify_ssl is True:
            deprecation_warnings.append(('verify_ssl', False))
            disable_verify_ssl = self._deprecated_core_disable_verify_ssl
        # by default, hook will try in_cluster first. so we only need to
        # apply core airflow config and alert when False and in_cluster not otherwise set.
        if in_cluster is None and self._deprecated_core_in_cluster is False:
            deprecation_warnings.append(('in_cluster', self._deprecated_core_in_cluster))
            in_cluster = self._deprecated_core_in_cluster
        if not cluster_context and self._deprecated_core_cluster_context:
            deprecation_warnings.append(('cluster_context', self._deprecated_core_cluster_context))
            cluster_context = self._deprecated_core_cluster_context
        if not kubeconfig_path and self._deprecated_core_config_file:
            deprecation_warnings.append(('config_file', self._deprecated_core_config_file))
            kubeconfig_path = self._deprecated_core_config_file
        if disable_tcp_keepalive is None and self._deprecated_core_disable_tcp_keepalive is True:
            deprecation_warnings.append(('enable_tcp_keepalive', False))
            disable_tcp_keepalive = True
        if deprecation_warnings:
            self._deprecation_warning_core_param(deprecation_warnings)
        # END apply settings from core kubernetes configuration

        if disable_verify_ssl is True:
            _disable_verify_ssl()
        if disable_tcp_keepalive is not True:
            _enable_tcp_keepalive()

        if in_cluster:
            self.log.debug("loading kube_config from: in_cluster configuration")
            config.load_incluster_config()
            return client.ApiClient()

        if kubeconfig_path is not None:
            self.log.debug("loading kube_config from: %s", kubeconfig_path)
            config.load_kube_config(
                config_file=kubeconfig_path,
                client_configuration=self.client_configuration,
                context=cluster_context,
            )
            return client.ApiClient()

        if kubeconfig is not None:
            with tempfile.NamedTemporaryFile() as temp_config:
                self.log.debug("loading kube_config from: connection kube_config")
                temp_config.write(kubeconfig.encode())
                temp_config.flush()
                config.load_kube_config(
                    config_file=temp_config.name,
                    client_configuration=self.client_configuration,
                    context=cluster_context,
                )
            return client.ApiClient()

        return self._get_default_client(cluster_context=cluster_context)
id: 149,758 | repo: freqtrade | language: Python
fun_name: load_data | file_name: data_handler.py
path: freqtrade/freqai/data_handler.py
url: https://github.com/freqtrade/freqtrade.git
commit_id: fc837c4daa27a18ff0e86128f4d52089b88fa5fb
commit_message: add freqao backend machinery, user interface, documentation
metrics: complexity=3, nloc=18, n_ast_nodes=272, ast_levels=15, n_ast_errors=0, n_words=37, vocab_size=29, n_whitespaces=180, n_identifiers=19, token_counts=155
code:

    def load_data(self) -> Any:
        model = load(self.model_path+self.model_filename+"_model.joblib")
        with open(self.model_path+self.model_filename+"_metadata.json", 'r') as fp:
            self.data = json.load(fp)
            if self.data.get('training_features_list'):
                self.training_features_list = [*self.data.get('training_features_list')]
        self.data_dictionary['train_features'] = pd.read_pickle(self.model_path+
                                                                self.model_filename+"_trained_df.pkl")
        self.model_path = self.data['model_path']
        self.model_filename = self.data['model_filename']
        if self.config['freqai']['feature_parameters']['principal_component_analysis']:
            self.pca = pk.load(open(self.model_path+self.model_filename+"_pca_object.pkl", "rb"))
        return model
id: 144,300 | repo: ray | language: Python
fun_name: _bind | file_name: actor.py
path: python/ray/actor.py
url: https://github.com/ray-project/ray.git
commit_id: c065e3f69ec248383d98b45a8d1c00832ccfdd57
commit_message: [Ray DAG] Implement experimental Ray DAG API for task/class (#22058)
metrics: complexity=1, nloc=3, n_ast_nodes=56, ast_levels=9, n_ast_errors=0, n_words=13, vocab_size=13, n_whitespaces=34, n_identifiers=11, token_counts=38
code:

    def _bind(self, *args, **kwargs):
        from ray.experimental.dag.class_node import ClassNode

        return ClassNode(self.__ray_metadata__.modified_class, args, kwargs, {})
id: 337,524 | repo: accelerate | language: Python
fun_name: find_device | file_name: operations.py
path: src/accelerate/utils/operations.py
url: https://github.com/huggingface/accelerate.git
commit_id: f56f4441b3d448f4a81d5131c03e7dd73eac3ba0
commit_message: Big model inference (#345) * Big model inference * Reorganize port cleanup * Last cleanup * Test fix * Quality * Update src/accelerate/big_modeling.py Co-authored-by: Patrick von Platen <[email protected]> * Fix bug in default mem * Check device map is complete * More tests * Make load function more general * Apply suggestions from code review Co-authored-by: Zachary Mueller <[email protected]> * Quality * Address more review comments * Check generation results for gpt2 * Add main wrapper around everything * Tests for final API * Clean infer_auto_device * Type annotations * Apply suggestions from code review Co-authored-by: Sourab Mangrulkar <[email protected]> Co-authored-by: Lysandre Debut <[email protected]> * Address review comments * Last review comment for now * Fix bug in clean_device_map * Add doc * Style * Fixes + dtype support * Fix test * Add option to offload CPU state_dict * Indent typo * Final tweaks Co-authored-by: Patrick von Platen <[email protected]> Co-authored-by: Zachary Mueller <[email protected]> Co-authored-by: Sourab Mangrulkar <[email protected]> Co-authored-by: Lysandre Debut <[email protected]>
metrics: complexity=8, nloc=13, n_ast_nodes=128, ast_levels=13, n_ast_errors=0, n_words=42, vocab_size=22, n_whitespaces=149, n_identifiers=11, token_counts=82
code:

    def find_device(data):
        if isinstance(data, Mapping):
            for obj in data.values():
                device = find_device(obj)
                if device is not None:
                    return device
        elif isinstance(data, (tuple, list)):
            for obj in data:
                device = find_device(obj)
                if device is not None:
                    return device
        elif isinstance(data, torch.Tensor):
            return data.device
id: 60,126 | repo: prefect | language: Python
fun_name: wait | file_name: primitives.py
path: src/prefect/_internal/concurrency/primitives.py
url: https://github.com/PrefectHQ/prefect.git
commit_id: a368874d1b145c1ec5201e5efd3c26ce7c1e8611
commit_message: Add thread-safe async primitives `Event` and `Future` (#7865) Co-authored-by: Serina Grill <[email protected]>
metrics: complexity=3, nloc=12, n_ast_nodes=78, ast_levels=10, n_ast_errors=0, n_words=19, vocab_size=17, n_whitespaces=80, n_identifiers=8, token_counts=44
code:

    async def wait(self) -> None:
        if self._is_set:
            return

        if not self._loop:
            self._loop = get_running_loop()
            self._event = asyncio.Event()

        await self._event.wait()
id: 78,295 | repo: wagtail | language: Python
fun_name: test_get_settings_no_request | file_name: test_templates.py
path: wagtail/contrib/settings/tests/generic/test_templates.py
url: https://github.com/wagtail/wagtail.git
commit_id: d967eccef28ce47f60d26be1c28f2d83a25f40b0
commit_message: Add generic settings to compliment site-specific settings (#8327)
metrics: complexity=1, nloc=8, n_ast_nodes=67, ast_levels=10, n_ast_errors=0, n_words=21, vocab_size=18, n_whitespaces=89, n_identifiers=10, token_counts=36
code:

    def test_get_settings_no_request(self):
        context = Context()
        template = Template(
            "{% load wagtailsettings_tags %}"
            "{% get_settings %}"
            "{{ settings.tests.testgenericsetting.title }}"
        )
        self.assertEqual(template.render(context), self.default_settings.title)
id: 208,719 | repo: ipython | language: Python
fun_name: get_tail | file_name: history.py
path: IPython/core/history.py
url: https://github.com/ipython/ipython.git
commit_id: dc5bcc1c50892a5128fcf128af28887226144927
commit_message: This fixed the mixing of multiple history seen in #13631 It forces get_tail to put the current session last in the returned results.
metrics: complexity=3, nloc=25, n_ast_nodes=198, ast_levels=12, n_ast_errors=0, n_words=73, vocab_size=44, n_whitespaces=344, n_identifiers=13, token_counts=128
code:

    def get_tail(self, n=10, raw=True, output=False, include_latest=False):
        self.writeout_cache()
        if not include_latest:
            n += 1
        # cursor/line/entry
        this_cur = list(
            self._run_sql(
                "WHERE session == ? ORDER BY line DESC LIMIT ? ",
                (self.session_number, n),
                raw=raw,
                output=output,
            )
        )
        other_cur = list(
            self._run_sql(
                "WHERE session != ? ORDER BY session DESC, line DESC LIMIT ?",
                (self.session_number, n),
                raw=raw,
                output=output,
            )
        )

        everything = this_cur + other_cur
        everything = everything[:n]

        if not include_latest:
            return list(everything)[:0:-1]
        return list(everything)[::-1]
id: 268,911 | repo: keras | language: Python
fun_name: opt_combinations_only | file_name: loss_scale_optimizer_test.py
path: keras/mixed_precision/loss_scale_optimizer_test.py
url: https://github.com/keras-team/keras.git
commit_id: b96518a22bfd92a29811e507dec0b34248a8a3f5
commit_message: - Consolidate disparate test-related files into a single testing_infra folder. - Cleanup TODO related to removing testing infra as a dependency of the Keras target. - Standardize import naming: there is now only "test_combinations" for test combinations, and "test_utils" for utilities. The TF utilities module "test_util" is now always imported as "tf_test_utils" to avoid confusion. PiperOrigin-RevId: 426773173
metrics: complexity=1, nloc=6, n_ast_nodes=70, ast_levels=10, n_ast_errors=1, n_words=16, vocab_size=12, n_whitespaces=29, n_identifiers=13, token_counts=37
ast_errors: @tf_test_utils.with_control_flow_v2
code:

    def opt_combinations_only():
        experimental_opt_combinations = test_combinations.combine(
            mode='eager', opt_cls=optimizer_experimental.Optimizer)
        orig_opt_combination = test_combinations.combine(
            opt_cls=optimizer_v2.OptimizerV2)
        return experimental_opt_combinations + orig_opt_combination


    @tf_test_utils.with_control_flow_v2
id: 148,285 | repo: ray | language: Python
fun_name: _normalize_entries | file_name: util.py
path: python/ray/_private/thirdparty/pathspec/util.py
url: https://github.com/ray-project/ray.git
commit_id: 0e6c042e29cbbe429d81c9c1af3c75c261f00980
commit_message: [Bugfix] fix invalid excluding of Black (#24042) - We should use `--force-exclude` when we pass code path explicitly https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html?highlight=--force-exclude#command-line-options - Recover the files in `python/ray/_private/thirdparty` which has been formatted in the PR https://github.com/ray-project/ray/pull/21975 by mistake.
metrics: complexity=2, nloc=5, n_ast_nodes=57, ast_levels=12, n_ast_errors=0, n_words=16, vocab_size=13, n_whitespaces=11, n_identifiers=7, token_counts=36
code:

    def _normalize_entries(entries, separators=None):
        norm_files = {}
        for entry in entries:
            norm_files[normalize_file(entry.path, separators=separators)] = entry
        return norm_files
id: 209,543 | repo: scapy | language: Python
fun_name: overlap_frag | file_name: inet.py
path: scapy/layers/inet.py
url: https://github.com/secdev/scapy.git
commit_id: 08b1f9d67c8e716fd44036a027bdc90dcb9fcfdf
commit_message: E275 - Missing whitespace after keyword (#3711) Co-authored-by: Alexander Aring <[email protected]> Co-authored-by: Anmol Sarma <[email protected]> Co-authored-by: antoine.torre <[email protected]> Co-authored-by: Antoine Vacher <[email protected]> Co-authored-by: Arnaud Ebalard <[email protected]> Co-authored-by: atlowl <[email protected]> Co-authored-by: Brian Bienvenu <[email protected]> Co-authored-by: Chris Packham <[email protected]> Co-authored-by: CQ <[email protected]> Co-authored-by: Daniel Collins <[email protected]> Co-authored-by: Federico Maggi <[email protected]> Co-authored-by: Florian Maury <[email protected]> Co-authored-by: _Frky <[email protected]> Co-authored-by: g-mahieux <[email protected]> Co-authored-by: gpotter2 <[email protected]> Co-authored-by: Guillaume Valadon <[email protected]> Co-authored-by: Hao Zheng <[email protected]> Co-authored-by: Haresh Khandelwal <[email protected]> Co-authored-by: Harri Hämäläinen <[email protected]> Co-authored-by: hecke <[email protected]> Co-authored-by: Jan Romann <[email protected]> Co-authored-by: Jan Sebechlebsky <[email protected]> Co-authored-by: jdiog0 <[email protected]> Co-authored-by: jockque <[email protected]> Co-authored-by: Julien Bedel <[email protected]> Co-authored-by: Keith Scott <[email protected]> Co-authored-by: Kfir Gollan <[email protected]> Co-authored-by: Lars Munch <[email protected]> Co-authored-by: ldp77 <[email protected]> Co-authored-by: Leonard Crestez <[email protected]> Co-authored-by: Marcel Patzlaff <[email protected]> Co-authored-by: Martijn Thé <[email protected]> Co-authored-by: Martine Lenders <[email protected]> Co-authored-by: Michael Farrell <[email protected]> Co-authored-by: Michał Mirosław <[email protected]> Co-authored-by: mkaliszan <[email protected]> Co-authored-by: mtury <[email protected]> Co-authored-by: Neale Ranns <[email protected]> Co-authored-by: Octavian Toader <[email protected]> Co-authored-by: Peter Eisenlohr <[email protected]> Co-authored-by: Phil <[email protected]> Co-authored-by: Pierre Lalet <[email protected]> Co-authored-by: Pierre Lorinquer <[email protected]> Co-authored-by: piersoh <[email protected]> Co-authored-by: plorinquer <[email protected]> Co-authored-by: pvinci <[email protected]> Co-authored-by: Rahul Jadhav <[email protected]> Co-authored-by: Robin Jarry <[email protected]> Co-authored-by: romain-perez <[email protected]> Co-authored-by: rperez <rperez@debian> Co-authored-by: Sabrina Dubroca <[email protected]> Co-authored-by: Sebastian Baar <[email protected]> Co-authored-by: sebastien mainand <[email protected]> Co-authored-by: smehner1 <[email protected]> Co-authored-by: speakinghedge <[email protected]> Co-authored-by: Steven Van Acker <[email protected]> Co-authored-by: Thomas Faivre <[email protected]> Co-authored-by: Tran Tien Dat <[email protected]> Co-authored-by: Wael Mahlous <[email protected]> Co-authored-by: waeva <[email protected]> Co-authored-by: Alexander Aring <[email protected]> Co-authored-by: Anmol Sarma <[email protected]> Co-authored-by: antoine.torre <[email protected]> Co-authored-by: Antoine Vacher <[email protected]> Co-authored-by: Arnaud Ebalard <[email protected]> Co-authored-by: atlowl <[email protected]> Co-authored-by: Brian Bienvenu <[email protected]> Co-authored-by: Chris Packham <[email protected]> Co-authored-by: CQ <[email protected]> Co-authored-by: Daniel Collins <[email protected]> Co-authored-by: Federico Maggi <[email protected]> Co-authored-by: Florian Maury <[email protected]> Co-authored-by: _Frky <[email protected]> Co-authored-by: g-mahieux <[email protected]> Co-authored-by: gpotter2 <[email protected]> Co-authored-by: Guillaume Valadon <[email protected]> Co-authored-by: Hao Zheng <[email protected]> Co-authored-by: Haresh Khandelwal <[email protected]> Co-authored-by: Harri Hämäläinen <[email protected]> Co-authored-by: hecke <[email protected]> Co-authored-by: Jan Romann <[email protected]> Co-authored-by: Jan Sebechlebsky <[email protected]> Co-authored-by: jdiog0 <[email protected]> Co-authored-by: jockque <[email protected]> Co-authored-by: Julien Bedel <[email protected]> Co-authored-by: Keith Scott <[email protected]> Co-authored-by: Kfir Gollan <[email protected]> Co-authored-by: Lars Munch <[email protected]> Co-authored-by: ldp77 <[email protected]> Co-authored-by: Leonard Crestez <[email protected]> Co-authored-by: Marcel Patzlaff <[email protected]> Co-authored-by: Martijn Thé <[email protected]> Co-authored-by: Martine Lenders <[email protected]> Co-authored-by: Michael Farrell <[email protected]> Co-authored-by: Michał Mirosław <[email protected]> Co-authored-by: mkaliszan <[email protected]> Co-authored-by: mtury <[email protected]> Co-authored-by: Neale Ranns <[email protected]> Co-authored-by: Octavian Toader <[email protected]> Co-authored-by: Peter Eisenlohr <[email protected]> Co-authored-by: Phil <[email protected]> Co-authored-by: Pierre Lalet <[email protected]> Co-authored-by: Pierre Lorinquer <[email protected]> Co-authored-by: piersoh <[email protected]> Co-authored-by: pvinci <[email protected]> Co-authored-by: Rahul Jadhav <[email protected]> Co-authored-by: Robin Jarry <[email protected]> Co-authored-by: romain-perez <[email protected]> Co-authored-by: rperez <rperez@debian> Co-authored-by: Sabrina Dubroca <[email protected]> Co-authored-by: Sebastian Baar <[email protected]> Co-authored-by: sebastien mainand <[email protected]> Co-authored-by: smehner1 <[email protected]> Co-authored-by: Steven Van Acker <[email protected]> Co-authored-by: Thomas Faivre <[email protected]> Co-authored-by: Tran Tien Dat <[email protected]> Co-authored-by: Wael Mahlous <[email protected]> Co-authored-by: waeva <[email protected]>
metrics: complexity=2, nloc=9, n_ast_nodes=117, ast_levels=10, n_ast_errors=0, n_words=30, vocab_size=26, n_whitespaces=61, n_identifiers=13, token_counts=76
code:

    def overlap_frag(p, overlap, fragsize=8, overlap_fragsize=None):
        if overlap_fragsize is None:
            overlap_fragsize = fragsize
        q = p.copy()
        del q[IP].payload
        q[IP].add_payload(overlap)

        qfrag = fragment(q, overlap_fragsize)
        qfrag[-1][IP].flags |= 1

        return qfrag + fragment(p, fragsize)
id: 144,233 | repo: ray | language: Python
fun_name: __len__ | file_name: distributed_learners.py
path: rllib/agents/alpha_star/distributed_learners.py
url: https://github.com/ray-project/ray.git
commit_id: 3f03ef8ba8016b095c611c4d2e118771e4a750ca
commit_message: [RLlib] AlphaStar: Parallelized, multi-agent/multi-GPU learning via league-based self-play. (#21356)
metrics: complexity=2, nloc=2, n_ast_nodes=34, ast_levels=9, n_ast_errors=0, n_words=8, vocab_size=8, n_whitespaces=22, n_identifiers=6, token_counts=20
code:

    def __len__(self):
        return sum(len(s) for s in self.shards)
id: 263,805 | repo: pyinstaller | language: Python
fun_name: update_exe_pe_checksum | file_name: winutils.py
path: PyInstaller/utils/win32/winutils.py
url: https://github.com/pyinstaller/pyinstaller.git
commit_id: 41483cb9e6d5086416c8fea6ad6781782c091c60
commit_message: winutils: optimize PE headers fixup Attempt to optimize PE headers fix-up from both time- and memory- intensity perspective. First, avoid specifying `fast_load=False` in `pefile.PE` constructor, because that triggers the bytes statistics collection https://github.com/erocarrera/pefile/blob/v2022.5.30/pefile.py#L2862-L2876 which takes a long time for large files. Instead, we can obtain full headers (required for build timestamp modification) by calling `pe.full_load()` ourselves. Second, use (an equivalent of) `MapFileAndCheckSumW` to compute the PE checksum. For large files, it is orders of magnitude faster than its pure-python `pefile.PE.generate_checksum` counterpart. The downside is that `MapFileAndCheckSumW` requires an on-disk file as opposed to a memory buffer, so we need to split the PE headers fixup into two separate steps, with each modifying the corresponding PE headers and (re)writing the whole file. Even so, this brings the fix-up process for a 700MB executable down to seconds instead of minutes. In addition, as noted on MSDN, `MapFileAndCheckSumW` internally calls its ASCII variant (`MapFileAndCheckSumA`), so it cannot handle file paths that contain characters that are not representable in the current code page. Therefore, we implement our own equivalent using `ctypes` and pure widechar-based win32 API functions.
metrics: complexity=2, nloc=11, n_ast_nodes=135, ast_levels=11, n_ast_errors=0, n_words=88, vocab_size=68, n_whitespaces=163, n_identifiers=17, token_counts=72
code:

    def update_exe_pe_checksum(exe_path):
        import pefile

        # Compute checksum using our equivalent of the MapFileAndCheckSumW - for large files, it is significantly faster
        # than pure-pyton pefile.PE.generate_checksum(). However, it requires the file to be on disk (i.e., cannot operate
        # on a memory buffer).
        try:
            checksum = compute_exe_pe_checksum(exe_path)
        except Exception as e:
            raise RuntimeError("Failed to compute PE checksum!") from e

        # Update the checksum
        with pefile.PE(exe_path, fast_load=True) as pe:
            pe.OPTIONAL_HEADER.CheckSum = checksum

            # Generate updated EXE data
            data = pe.write()

        # Rewrite the exe
        with open(exe_path, 'wb') as fp:
            fp.write(data)
id: 78,323 | repo: wagtail | language: Python
fun_name: test_get_page_url_when_for_settings_fetched_via_for_site | file_name: test_model.py
path: wagtail/contrib/settings/tests/site_specific/test_model.py
url: https://github.com/wagtail/wagtail.git
commit_id: d967eccef28ce47f60d26be1c28f2d83a25f40b0
commit_message: Add generic settings to compliment site-specific settings (#8327)
metrics: complexity=2, nloc=20, n_ast_nodes=201, ast_levels=16, n_ast_errors=0, n_words=102, vocab_size=74, n_whitespaces=506, n_identifiers=17, token_counts=115
code:

    def test_get_page_url_when_for_settings_fetched_via_for_site(self):
        self._create_importantpagessitesetting_object()

        settings = ImportantPagesSiteSetting.for_site(self.default_site)

        # Force site root paths query beforehand
        self.default_site.root_page._get_site_root_paths()

        for page_fk_field, expected_result in (
            ("sign_up_page", "http://localhost/"),
            ("general_terms_page", "http://localhost/"),
            ("privacy_policy_page", "http://other/"),
        ):
            with self.subTest(page_fk_field=page_fk_field):
                # only the first request for each URL will trigger queries.
                # 2 are triggered instead of 1 here, because tests use the
                # database cache backed, and the cache is queried each time
                # to fetch site root paths (because there's no 'request' to
                # store them on)
                with self.assertNumQueries(2):
                    self.assertEqual(
                        settings.get_page_url(page_fk_field), expected_result
                    )

                    # when called directly
                    self.assertEqual(
                        settings.get_page_url(page_fk_field), expected_result
                    )

                    # when called indirectly via shortcut
                    self.assertEqual(
                        getattr(settings.page_url, page_fk_field), expected_result
                    )
id: 269,136 | repo: keras | language: Python
fun_name: recursively_deserialize_keras_object | file_name: load.py
path: keras/saving/saved_model/load.py
url: https://github.com/keras-team/keras.git
commit_id: e61cbc52fd3b0170769c120e9b8dabc8c4205322
commit_message: Support Keras saving/loading for ShardedVariables with arbitrary partitions. PiperOrigin-RevId: 439837516
metrics: complexity=6, nloc=18, n_ast_nodes=142, ast_levels=15, n_ast_errors=0, n_words=58, vocab_size=48, n_whitespaces=140, n_identifiers=12, token_counts=89
code:

    def recursively_deserialize_keras_object(config, module_objects=None):
        if isinstance(config, dict):
            if 'class_name' in config:
                return generic_utils.deserialize_keras_object(
                    config, module_objects=module_objects)
            else:
                return {
                    key: recursively_deserialize_keras_object(config[key], module_objects)
                    for key in config
                }
        elif isinstance(config, (tuple, list)):
            return [
                recursively_deserialize_keras_object(x, module_objects) for x in config
            ]
        else:
            raise ValueError(
                f'Unable to decode Keras layer config. Config should be a dictionary, '
                f'tuple or list. Received: config={config}')
id: 309,801 | repo: core | language: Python
fun_name: get_latest_device_activity | file_name: activity.py
path: homeassistant/components/august/activity.py
url: https://github.com/home-assistant/core.git
commit_id: dadcc5ebcbcf951ff677568b281c5897d990c8ae
commit_message: spelling: components/august (#64232) Co-authored-by: Josh Soref <[email protected]>
metrics: complexity=6, nloc=15, n_ast_nodes=106, ast_levels=14, n_ast_errors=0, n_words=42, vocab_size=28, n_whitespaces=227, n_identifiers=9, token_counts=69
code:

    def get_latest_device_activity(self, device_id, activity_types):
        if device_id not in self._latest_activities:
            return None

        latest_device_activities = self._latest_activities[device_id]
        latest_activity = None

        for activity_type in activity_types:
            if activity_type in latest_device_activities:
                if (
                    latest_activity is not None
                    and latest_device_activities[activity_type].activity_start_time
                    <= latest_activity.activity_start_time
                ):
                    continue
                latest_activity = latest_device_activities[activity_type]

        return latest_activity
id: 159,623 | repo: rasa | language: Python
fun_name: project_root | file_name: prepare_nightly_release.py
path: scripts/prepare_nightly_release.py
url: https://github.com/RasaHQ/rasa.git
commit_id: 9f634d248769198881bbb78ccd8d333982462ef5
commit_message: [ATO-114]Add nightly workflows and creation scripts
metrics: complexity=1, nloc=3, n_ast_nodes=40, ast_levels=12, n_ast_errors=0, n_words=6, vocab_size=6, n_whitespaces=12, n_identifiers=7, token_counts=23
code:

    def project_root() -> Path:
        return Path(os.path.dirname(__file__)).parent.parent
id: 248,026 | repo: synapse | language: Python
fun_name: add_device_change | file_name: test_devices.py
path: tests/storage/test_devices.py
url: https://github.com/matrix-org/synapse.git
commit_id: aa2811026402394b4013033f075d8f509cdc1257
commit_message: Process device list updates asynchronously (#12365)
metrics: complexity=2, nloc=17, n_ast_nodes=121, ast_levels=14, n_ast_errors=0, n_words=28, vocab_size=24, n_whitespaces=279, n_identifiers=14, token_counts=79
code:

    def add_device_change(self, user_id, device_ids, host):
        for device_id in device_ids:
            stream_id = self.get_success(
                self.store.add_device_change_to_streams(
                    "user_id", [device_id], ["!some:room"]
                )
            )

            self.get_success(
                self.store.add_device_list_outbound_pokes(
                    user_id=user_id,
                    device_id=device_id,
                    room_id="!some:room",
                    stream_id=stream_id,
                    hosts=[host],
                    context={},
                )
            )
id: 267,050 | repo: ansible | language: Python
fun_name: self_check | file_name: coverage_util.py
path: test/lib/ansible_test/_internal/coverage_util.py
url: https://github.com/ansible/ansible.git
commit_id: b9606417598217106e394c12c776d8c5ede9cd98
commit_message: ansible-test - Support multiple coverage versions. ci_complete ci_coverage
metrics: complexity=4, nloc=7, n_ast_nodes=71, ast_levels=13, n_ast_errors=0, n_words=54, vocab_size=35, n_whitespaces=93, n_identifiers=7, token_counts=35
code:

    def self_check() -> None:
        # Verify all supported Python versions have a coverage version.
        for version in SUPPORTED_PYTHON_VERSIONS:
            get_coverage_version(version)

        # Verify all controller Python versions are mapped to the latest coverage version.
        for version in CONTROLLER_PYTHON_VERSIONS:
            if get_coverage_version(version) != CONTROLLER_COVERAGE_VERSION:
                raise InternalError(f'Controller Python version {version} is not mapped to the latest coverage version.')


    self_check()
id: 258,546 | repo: scikit-learn | language: Python
fun_name: predict | file_name: _regression.py
path: sklearn/neighbors/_regression.py
url: https://github.com/scikit-learn/scikit-learn.git
commit_id: fb082b223dc9f1dd327f48dc9b830ee382d6f661
commit_message: MAINT Do not compute distances for uniform weighting (#22280)
metrics: complexity=6, nloc=21, n_ast_nodes=310, ast_levels=15, n_ast_errors=0, n_words=99, vocab_size=65, n_whitespaces=320, n_identifiers=26, token_counts=199
code:

    def predict(self, X):
        if self.weights == "uniform":
            # In that case, we do not need the distances to perform
            # the weighting so we do not compute them.
            neigh_ind = self.kneighbors(X, return_distance=False)
            neigh_dist = None
        else:
            neigh_dist, neigh_ind = self.kneighbors(X)

        weights = _get_weights(neigh_dist, self.weights)

        _y = self._y
        if _y.ndim == 1:
            _y = _y.reshape((-1, 1))

        if weights is None:
            y_pred = np.mean(_y[neigh_ind], axis=1)
        else:
            y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
            denom = np.sum(weights, axis=1)

            for j in range(_y.shape[1]):
                num = np.sum(_y[neigh_ind, j] * weights, axis=1)
                y_pred[:, j] = num / denom

        if self._y.ndim == 1:
            y_pred = y_pred.ravel()

        return y_pred
id: 190,825 | repo: thumbor | language: Python
fun_name: getImageDescriptor | file_name: pil.py
path: thumbor/engines/extensions/pil.py
url: https://github.com/thumbor/thumbor.git
commit_id: 3c745ef193e9af9244cc406734e67815377472ed
commit_message: Reformat of files using black These files were not properly formatted.
metrics: complexity=2, nloc=10, n_ast_nodes=130, ast_levels=10, n_ast_errors=0, n_words=90, vocab_size=61, n_whitespaces=217, n_identifiers=7, token_counts=74
code:

    def getImageDescriptor(self, im, xy=None):
        # Defaule use full image and place at upper left
        if xy is None:
            xy = (0, 0)

        # Image separator,
        bb = b"\x2C"

        # Image position and size
        bb += int2long(xy[0])  # Left position
        bb += int2long(xy[1])  # Top position
        bb += int2long(im.size[0])  # image width
        bb += int2long(im.size[1])  # image height

        # packed field: local color table flag1, interlace0, sorted table0,
        # reserved00, lct size111=7=2^(7+1)=256.
        bb += b"\x87"

        # LZW minimum size code now comes later,
        # begining of [image data] blocks
        return bb
id: 272,348 | repo: keras | language: Python
fun_name: test_calculate_scores_one_dim_with_scale | file_name: attention_test.py
path: keras/layers/attention/attention_test.py
url: https://github.com/keras-team/keras.git
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf
commit_message: Reformatting the codebase with black. PiperOrigin-RevId: 450093126
metrics: complexity=1, nloc=9, n_ast_nodes=203, ast_levels=12, n_ast_errors=0, n_words=62, vocab_size=36, n_whitespaces=153, n_identifiers=22, token_counts=139
code:

    def test_calculate_scores_one_dim_with_scale(self):
        # Query tensor of shape [1, 1, 1]
        q = np.array([[[1.1]]], dtype=np.float32)
        # Key tensor of shape [1, 1, 1]
        k = np.array([[[1.6]]], dtype=np.float32)
        attention_layer = keras.layers.Attention(use_scale=True)
        attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
        attention_layer.scale = -2.0
        actual = attention_layer._calculate_scores(query=q, key=k)

        # Expected tensor of shape [1, 1, 1].
        # expected000 = -2*1.1*1.6 = -3.52
        expected = np.array([[[-3.52]]], dtype=np.float32)
        self.assertAllClose(expected, actual)
id: 281,114 | repo: OpenBBTerminal | language: Python
fun_name: prepare_all_coins_df | file_name: cryptocurrency_helpers.py
path: gamestonk_terminal/cryptocurrency/cryptocurrency_helpers.py
url: https://github.com/OpenBB-finance/OpenBBTerminal.git
commit_id: ea964109d654394cc0a5237e6ec5510ba6404097
commit_message: Crypto menu refactor (#1119) * enabled some crypto commands in dd to be called independent of source loaded * support for coin_map_df in all dd functions + load ta and plot chart refactor * updated tests and removed coingecko scrapping where possible * removed ref of command from hugo * updated pycoingecko version * refactoring load * refactored load to fetch prices; pred can run independent of source now * load by default usd on cp/cg and usdt on cb/bin * updated to rich for formatting and updated dependencies * fixed changes requested * update docs * revert discord requirements * removed absolute from calculate change for price * fixing pr issues * fix loading issue when similar coins exist, move coins to home, fill n/a * update docs for coins * adds load to ta and pred menu
metrics: complexity=1, nloc=48, n_ast_nodes=339, ast_levels=12, n_ast_errors=0, n_words=84, vocab_size=65, n_whitespaces=266, n_identifiers=22, token_counts=191
code:

    def prepare_all_coins_df() -> pd.DataFrame:
        gecko_coins_df = load_coins_list("coingecko_coins.json")

        paprika_coins_df = load_coins_list("coinpaprika_coins.json")
        paprika_coins_df = paprika_coins_df[paprika_coins_df["is_active"]]
        paprika_coins_df = paprika_coins_df[["rank", "id", "name", "symbol", "type"]]

        # TODO: Think about scheduled job, that once a day will update data

        binance_coins_df = load_binance_map().rename(columns={"symbol": "Binance"})
        coinbase_coins_df = load_coinbase_map().rename(columns={"symbol": "Coinbase"})
        gecko_paprika_coins_df = pd.merge(
            gecko_coins_df, paprika_coins_df, on="name", how="left"
        )
        df_merged = pd.merge(
            left=gecko_paprika_coins_df,
            right=binance_coins_df,
            left_on="id_x",
            right_on="id",
            how="left",
        )
        df_merged.rename(
            columns={
                "id_x": "CoinGecko",
                "symbol_x": "Symbol",
                "id_y": "CoinPaprika",
            },
            inplace=True,
        )

        df_merged = pd.merge(
            left=df_merged,
            right=coinbase_coins_df,
            left_on="CoinGecko",
            right_on="id",
            how="left",
        )

        return df_merged[["CoinGecko", "CoinPaprika", "Binance", "Coinbase", "Symbol"]]
id: 34,009 | repo: transformers | language: Python
fun_name: _set_gradient_checkpointing | file_name: modeling_nystromformer.py
path: src/transformers/models/nystromformer/modeling_nystromformer.py
url: https://github.com/huggingface/transformers.git
commit_id: 28e091430eea9e0d40839e56fd0d57aec262f5f9
commit_message: Add Nystromformer (#14659) * Initial commit * Config and modelling changes Added Nystromformer-specific attributes to config and removed all decoder functionality from modelling. * Modelling and test changes Added Nystrom approximation and removed decoder tests. * Code quality fixes * Modeling changes and conversion script Initial commits to conversion script, modeling changes. * Minor modeling changes and conversion script * Modeling changes * Correct modeling, add tests and documentation * Code refactor * Remove tokenizers * Code refactor * Update __init__.py Co-authored-by: NielsRogge <[email protected]> * Update src/transformers/__init__.py Co-authored-by: NielsRogge <[email protected]> * Update src/transformers/models/nystromformer/__init__.py Co-authored-by: NielsRogge <[email protected]> * Update docs/source/model_doc/nystromformer.mdx Co-authored-by: NielsRogge <[email protected]> * Update src/transformers/models/nystromformer/configuration_nystromformer.py Co-authored-by: NielsRogge <[email protected]> * Update src/transformers/models/nystromformer/configuration_nystromformer.py Co-authored-by: NielsRogge <[email protected]> * Update src/transformers/models/nystromformer/configuration_nystromformer.py Co-authored-by: NielsRogge <[email protected]> * Update src/transformers/models/nystromformer/configuration_nystromformer.py Co-authored-by: NielsRogge <[email protected]> * Update src/transformers/models/nystromformer/convert_nystromformer_original_pytorch_checkpoint_to_pytorch.py Co-authored-by: NielsRogge <[email protected]> * Update src/transformers/models/nystromformer/configuration_nystromformer.py Co-authored-by: NielsRogge <[email protected]> * Update modeling and test_modeling * Code refactor * .rst to .mdx * doc changes * Doc changes * Update modeling_nystromformer.py * Doc changes * Fix copies * Apply suggestions from code review Co-authored-by: NielsRogge <[email protected]> * Apply suggestions from code review Co-authored-by: NielsRogge <[email protected]> * Update configuration_nystromformer.py * Fix copies * Update tests/test_modeling_nystromformer.py Co-authored-by: NielsRogge <[email protected]> * Update test_modeling_nystromformer.py * Apply suggestions from code review Co-authored-by: Lysandre Debut <[email protected]> * Fix code style * Update modeling_nystromformer.py * Update modeling_nystromformer.py * Fix code style * Reformat modeling file * Update modeling_nystromformer.py * Modify NystromformerForMultipleChoice * Fix code quality * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Code style changes and torch.no_grad() * make style * Apply suggestions from code review Co-authored-by: NielsRogge <[email protected]> Co-authored-by: Lysandre Debut <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]>
metrics: complexity=2, nloc=3, n_ast_nodes=64, ast_levels=9, n_ast_errors=1, n_words=33, vocab_size=30, n_whitespaces=52, n_identifiers=10, token_counts=24
ast_errors: @add_start_docstrings( "The bare Nyströmformer Model transformer outputting raw hidden-states without any specific head on top.", NYSTROMFORMER_START_DOCSTRING, )
code:

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, NystromformerEncoder):
            module.gradient_checkpointing = value


    NYSTROMFORMER_START_DOCSTRING = r

    NYSTROMFORMER_INPUTS_DOCSTRING = r


    @add_start_docstrings(
        "The bare Nyströmformer Model transformer outputting raw hidden-states without any specific head on top.",
        NYSTROMFORMER_START_DOCSTRING,
    )
id: 119,984 | repo: jax | language: Python
fun_name: bcoo_dot_general_sampled | file_name: bcoo.py
path: jax/experimental/sparse/bcoo.py
url: https://github.com/google/jax.git
commit_id: 3184dd65a222354bffa2466d9a375162f5649132
commit_message: [sparse] Update docstrings for bcoo primitives. PiperOrigin-RevId: 438685829
metrics: complexity=1, nloc=8, n_ast_nodes=124, ast_levels=9, n_ast_errors=1, n_words=27, vocab_size=23, n_whitespaces=91, n_identifiers=16, token_counts=80
ast_errors: @bcoo_dot_general_sampled_p.def_impl
code:

    def bcoo_dot_general_sampled(A, B, indices, *, dimension_numbers):
      (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
      cdims = (api_util._ensure_index_tuple(lhs_contract),
               api_util._ensure_index_tuple(rhs_contract))
      bdims = (api_util._ensure_index_tuple(lhs_batch),
               api_util._ensure_index_tuple(rhs_batch))
      return bcoo_dot_general_sampled_p.bind(A, B, indices,
                                             dimension_numbers=(cdims, bdims))


    @bcoo_dot_general_sampled_p.def_impl
id: 80,736 | repo: awx | language: Python
fun_name: _get_instance_id | file_name: _inventory_source.py
path: awx/main/migrations/_inventory_source.py
url: https://github.com/ansible/awx.git
commit_id: a3a216f91f1158fd54c001c34cbdf2f68ccbc272
commit_message: Fix up new Django 3.0 deprecations Mostly text based: force/smart_text, ugettext_*
metrics: complexity=3, nloc=9, n_ast_nodes=95, ast_levels=11, n_ast_errors=0, n_words=28, vocab_size=21, n_whitespaces=83, n_identifiers=10, token_counts=56
code:

    def _get_instance_id(from_dict, new_id, default=''):
        instance_id = default
        for key in new_id.split('.'):
            if not hasattr(from_dict, 'get'):
                instance_id = default
                break
            instance_id = from_dict.get(key, default)
            from_dict = instance_id
        return smart_str(instance_id)
id: 100,396 | repo: faceswap | language: Python
fun_name: compile_sample | file_name: _base.py
path: plugins/train/trainer/_base.py
url: https://github.com/deepfakes/faceswap.git
commit_id: c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
commit_message: Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
metrics: complexity=6, nloc=13, n_ast_nodes=225, ast_levels=11, n_ast_errors=0, n_words=74, vocab_size=48, n_whitespaces=225, n_identifiers=20, token_counts=153
code:

    def compile_sample(self, batch_size, samples=None, images=None, masks=None):
        num_images = self._config.get("preview_images", 14)
        num_images = min(batch_size, num_images) if batch_size is not None else num_images
        retval = {}
        for side in ("a", "b"):
            logger.debug("Compiling samples: (side: '%s', samples: %s)", side, num_images)
            side_images = images[side] if images is not None else self._target[side]
            side_masks = masks[side] if masks is not None else self._masks[side]
            side_samples = samples[side] if samples is not None else self._samples[side]
            retval[side] = [side_samples[0:num_images],
                            side_images[0:num_images],
                            side_masks[0:num_images]]
        return retval
id: 314,211 | repo: core | language: Python
fun_name: temperature | file_name: __init__.py
path: homeassistant/components/weather/__init__.py
url: https://github.com/home-assistant/core.git
commit_id: 90e1fb6ce2faadb9a35fdbe1774fce7b4456364f
commit_message: Weather unit conversion (#73441) Co-authored-by: Erik <[email protected]>
metrics: complexity=1, nloc=6, n_ast_nodes=25, ast_levels=6, n_ast_errors=0, n_words=8, vocab_size=8, n_whitespaces=22, n_identifiers=4, token_counts=14
code:

    def temperature(self) -> float | None:
        return self._attr_temperature
id: 20,801 | repo: pipenv | language: Python
fun_name: get_time | file_name: progress.py
path: pipenv/patched/notpip/_vendor/rich/progress.py
url: https://github.com/pypa/pipenv.git
commit_id: f3166e673fe8d40277b804d35d77dcdb760fc3b3
commit_message: check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
metrics: complexity=1, nloc=3, n_ast_nodes=26, ast_levels=7, n_ast_errors=0, n_words=6, vocab_size=6, n_whitespaces=20, n_identifiers=4, token_counts=14
code:

    def get_time(self) -> float:
        return self._get_time()
id: 118,718 | repo: streamlit | language: Python
fun_name: test_add_unstyled_rows_to_styled_rows | file_name: legacy_dataframe_styling_test.py
path: lib/tests/streamlit/legacy_dataframe_styling_test.py
url: https://github.com/streamlit/streamlit.git
commit_id: 2c153aa179a27539f856e389870161d5a58da213
commit_message: Pandas 1.4 styler fix (#4316) Change the way we detect custom styling in a DataFrame, to account for changes in Pandas 1.4. Our DataFrame styling support is based on internal Pandas APIs, so they're always subject to change out from underneath us. In general, we'd prefer to only pass `display_value` data to the frontend when a DataFrame cell has been custom-formatted by the user, to save on bandwidth. However, Panda's Styler's internals are private, and it doesn't give us a consistent way of testing whether a cell has a custom `display_value` or not. Prior to Pandas 1.4, we could test whether a cell's `display_value` differed from its `value`, and only stick the `display_value` in the protobuf when that was the case. In 1.4, an unmodified Styler will contain `display_value` strings for all cells, regardless of whether any formatting has been applied to that cell, so we no longer have this ability (or at least I couldn't figure out a reasonable way to test for this). So instead, as of this PR, calling `st._legacy_dataframe(df.styler)` will *always* result in `display_value` strings being written to the dataframe protobuf (even though there isn't any custom formatting). This means that styled DataFrames may result in more data being sent to the frontend now than was the case before. In practice, I don't think this is a big deal - only the legacy DataFrame code has styling support; and often, if you're styling a DataFrame, you're customizing the formatting on most or all of its cells anyway. I also made a number of small type-safety changes as I was working with the dataframe code, and those are all in the PR as well. (I've left a PR comment under the actual logic changes.)
metrics: complexity=1, nloc=13, n_ast_nodes=173, ast_levels=12, n_ast_errors=0, n_words=35, vocab_size=28, n_whitespaces=142, n_identifiers=19, token_counts=106
code:

    def test_add_unstyled_rows_to_styled_rows(self, st_element, get_proto):
        df1 = pd.DataFrame([5, 6])
        df2 = pd.DataFrame([7, 8])

        css_values = [
            {css_s("color", "black")},
            {css_s("color", "black")},
            set(),
            set(),
        ]

        x = st_element(df1.style.applymap(lambda val: "color: black"))
        x._legacy_add_rows(df2)

        proto_df = get_proto(self._get_element())
        self._assert_column_css_styles(proto_df, 0, css_values)
id: 282,770 | repo: OpenBBTerminal | language: Python
fun_name: handle_error_code | file_name: helper_funcs.py
path: gamestonk_terminal/helper_funcs.py
url: https://github.com/OpenBB-finance/OpenBBTerminal.git
commit_id: 401e4c739a6f9d18944e0ab49c782e97b56fda94
commit_message: Output Missing API Key Message to Console (#1357) * Decorator to output error msg to console of missing API Key * Refactor FMP & alpha advantage * Refactor FRED & QUANDL * Refactor Polygon * Refactor FRED * Refactor FRED * Refactor Finnhub & coinmarketcap & Newsapi * Allow disabling of check api * Updating tests : disable check api for tests * Refactor Finnhub & SI & Binance * Fix linting * Fix test & add black formatting * Fix test failing * Fix test failing * Refactor CryptoPanic & Whales alert & Glassnode & Coinglass * Refactor ETHexplorer & Smartstake & Alpha Advanage & Coinbase * Add decorators to controllers * Fix test & Refactor Coinbase, RH, Reddit * Add contributing guideline * Update CONTRIBUTING.md * Update CONTRIBUTING.md * fix tests * add decorator to snews cmd Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: didierlopes.eth <[email protected]>
metrics: complexity=3, nloc=4, n_ast_nodes=53, ast_levels=11, n_ast_errors=0, n_words=13, vocab_size=13, n_whitespaces=37, n_identifiers=9, token_counts=32
code:

    def handle_error_code(requests_obj, error_code_map):
        for error_code, error_msg in error_code_map.items():
            if requests_obj.status_code == error_code:
                console.print(error_msg)
id: 21,882 | repo: pipenv | language: Python
fun_name: detect | file_name: __init__.py
path: pipenv/patched/pip/_vendor/chardet/__init__.py
url: https://github.com/pypa/pipenv.git
commit_id: cd5a9683be69c86c8f3adcd13385a9bc5db198ec
commit_message: Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
metrics: complexity=3, nloc=10, n_ast_nodes=99, ast_levels=15, n_ast_errors=0, n_words=31, vocab_size=27, n_whitespaces=97, n_identifiers=11, token_counts=53
code:

    def detect(byte_str):
        if not isinstance(byte_str, bytearray):
            if not isinstance(byte_str, bytes):
                raise TypeError(
                    f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
                )
            byte_str = bytearray(byte_str)
        detector = UniversalDetector()
        detector.feed(byte_str)
        return detector.close()
id: 212,923 | repo: PySimpleGUI | language: Python
fun_name: delete_file | file_name: PySimpleGUI.py
path: PySimpleGUI.py
url: https://github.com/PySimpleGUI/PySimpleGUI.git
commit_id: f776589349476a41b98aa1f467aff2f30e2a8fc2
commit_message: Added report_error setting for user_settings_delete_file. Global Settings window complete rework to use Tabs. Hoping nothing broke, but just remember things are in flux for a little bit while the ttk scrollbars are finishing up
metrics: complexity=7, nloc=9, n_ast_nodes=133, ast_levels=13, n_ast_errors=0, n_words=46, vocab_size=37, n_whitespaces=129, n_identifiers=13, token_counts=83
code:

    def delete_file(self, filename=None, path=None, report_error=False):
        if filename is not None or path is not None or (filename is None and path is None):
            self.set_location(filename=filename, path=path)
        try:
            os.remove(self.full_filename)
        except Exception as e:
            if report_error:
                _error_popup_with_traceback('UserSettings delete_file warning ***',
                                            'Exception trying to perform os.remove', e)
        self.dict = {}
id: 60,235 | repo: transferlearning | language: Python
fun_name: compose | file_name: coord_map.py
path: code/deep/BJMMD/caffe/python/caffe/coord_map.py
url: https://github.com/jindongwang/transferlearning.git
commit_id: cc4d0564756ca067516f71718a3d135996525909
commit_message: Balanced joint maximum mean discrepancy for deep transfer learning
metrics: complexity=4, nloc=10, n_ast_nodes=91, ast_levels=9, n_ast_errors=0, n_words=44, vocab_size=31, n_whitespaces=86, n_identifiers=11, token_counts=58
code:

    def compose(base_map, next_map):
        ax1, a1, b1 = base_map
        ax2, a2, b2 = next_map
        if ax1 is None:
            ax = ax2
        elif ax2 is None or ax1 == ax2:
            ax = ax1
        else:
            raise AxisMismatchException
        return ax, a1 * a2, a1 * b2 + b1
id: 225,809 | repo: llama_index | language: Python
fun_name: is_doc_id_none | file_name: schema.py
path: gpt_index/schema.py
url: https://github.com/jerryjliu/llama_index.git
commit_id: c22d865acb3899a181921d94b6e94e665a12b432
commit_message: Add index composability! (#86) Summary of changes - Bumped version to 0.1.0 - Abstracted out a BaseDocument class that both Document (from data loaders) and IndexStruct (our data struct classes) inherit from. - Add a DocumentStore that contains the id's of all BaseDocuments. Both Document objects and IndexStruct objects are registered in here, allowing us to recursively fetch and query sub-index structures within an index structure. - Add a reference document id to each Node class. This allows us to recursively query within another index struct after we traverse a node, if the reference document id of that node corresponds to another index struct in the DocumentStore. - Use Node as the central abstraction containing both "text" as well as a reference document_id: use for List, Tree, KeywordTable - Factored out a QueryRunner to recursively run queries. I grappled with some circular dependency issues but I believe the current approach works. - Add a bunch of unit tests Co-authored-by: Jerry Liu <[email protected]>
metrics: complexity=1, nloc=3, n_ast_nodes=29, ast_levels=7, n_ast_errors=1, n_words=9, vocab_size=9, n_whitespaces=22, n_identifiers=5, token_counts=14
ast_errors: @dataclass
code:

    def is_doc_id_none(self) -> bool:
        return self.doc_id is None


    @dataclass
id: 301,395 | repo: core | language: Python
fun_name: test_setup_not_ready | file_name: test_init.py
path: tests/components/ialarm_xr/test_init.py
url: https://github.com/home-assistant/core.git
commit_id: 42c80dda85f567192c182da2b4c603408a890381
commit_message: Create iAlarmXR integration (#67817) * Creating iAlarmXR integration * fixing after review code * fixing remaining review hints * fixing remaining review hints * updating underlying pyialarm library * Creating iAlarmXR integration * fixing after review code * fixing remaining review hints * fixing remaining review hints * updating underlying pyialarm library * fixing after iMicknl review * Improving exception handling * Updating pyialarmxr library * fixing after merge dev * fixing after iMicknl review * Update CODEOWNERS Co-authored-by: Ludovico de Nittis <[email protected]> * fixing iot_class * Update homeassistant/components/ialarmxr/config_flow.py Co-authored-by: J. Nick Koston <[email protected]> * fixing after bdraco review * Update homeassistant/components/ialarmxr/config_flow.py Co-authored-by: J. Nick Koston <[email protected]> * reverting catching exception in setup step * Update homeassistant/components/ialarmxr/__init__.py Co-authored-by: J. Nick Koston <[email protected]> * Update homeassistant/components/ialarmxr/__init__.py Co-authored-by: J. Nick Koston <[email protected]> * fixing after bdraco suggestions * Update homeassistant/components/ialarmxr/alarm_control_panel.py Co-authored-by: J. Nick Koston <[email protected]> * Update homeassistant/components/ialarmxr/alarm_control_panel.py Co-authored-by: Mick Vleeshouwer <[email protected]> * Update homeassistant/components/ialarmxr/config_flow.py Co-authored-by: J. Nick Koston <[email protected]> * Update homeassistant/components/ialarmxr/config_flow.py Co-authored-by: J. Nick Koston <[email protected]> * Update homeassistant/components/ialarmxr/__init__.py Co-authored-by: J. Nick Koston <[email protected]> * Update homeassistant/components/ialarmxr/__init__.py Co-authored-by: J. Nick Koston <[email protected]> * Update homeassistant/components/ialarmxr/utils.py Co-authored-by: J. Nick Koston <[email protected]> * regenerate translation and rename function to async_get_ialarmxr_mac * removing and collapsing unused error messages * fixing tests * improve code coverage in tests * improve code coverage in tests * improve code coverage in tests * fixing retry policy with new pyalarmxr library * snake case fix * renaming integration in ialarm_xr * renaming control panel name Co-authored-by: Ludovico de Nittis <[email protected]> Co-authored-by: J. Nick Koston <[email protected]> Co-authored-by: Mick Vleeshouwer <[email protected]>
metrics: complexity=1, nloc=6, n_ast_nodes=91, ast_levels=10, n_ast_errors=0, n_words=19, vocab_size=17, n_whitespaces=37, n_identifiers=17, token_counts=55
code:

    async def test_setup_not_ready(hass, ialarmxr_api, mock_config_entry):
        ialarmxr_api.return_value.get_mac = Mock(side_effect=ConnectionError)
        mock_config_entry.add_to_hass(hass)

        assert not await hass.config_entries.async_setup(mock_config_entry.entry_id)
        await hass.async_block_till_done()
        assert mock_config_entry.state is ConfigEntryState.SETUP_RETRY
def collect_qtqml_files(self):
    # No-op if requested Qt-based package is not available.
    if self.version is None:
        return [], []

    # Not all PyQt5/PySide2 installs have QML files. In this case, location['Qml2ImportsPath'] is empty.
    # Furthermore, even if location path is provided, the directory itself may not exist.
    #
    # https://github.com/pyinstaller/pyinstaller/pull/3229#issuecomment-359735031
    # https://github.com/pyinstaller/pyinstaller/issues/3864
    #
    # In Qt 6, Qml2ImportsPath was deprecated in favor of QmlImportsPath. The former is not available in PySide6
    # 6.4.0 anymore (but is in PyQt6 6.4.0). Use the new QmlImportsPath if available.
    if 'QmlImportsPath' in self.location:
        qml_src_dir = self.location['QmlImportsPath']
    else:
        qml_src_dir = self.location['Qml2ImportsPath']
    if not qml_src_dir or not os.path.isdir(qml_src_dir):
        logger.warning('%s: QML directory %r does not exist. QML files not packaged.', self, qml_src_dir)
        return [], []

    qml_dst_dir = os.path.join(self.qt_rel_dir, 'qml')
    datas = [(qml_src_dir, qml_dst_dir)]
    binaries = [
        # Produce ``/path/to/Qt/Qml/path_to_qml_binary/qml_binary, PyQt5/Qt/Qml/path_to_qml_binary``.
        (
            qml_plugin_file,
            os.path.join(qml_dst_dir, os.path.dirname(os.path.relpath(qml_plugin_file, qml_src_dir)))
        ) for qml_plugin_file in misc.dlls_in_subdirs(qml_src_dir)
    ]

    return binaries, datas
https://github.com/pyinstaller/pyinstaller.git
6
243
d789a7daa7712716c89259b987349917a89aece7
146
__init__.py
15
19
pyinstaller
collect_qtqml_files
397
PyInstaller/utils/hooks/qt/__init__.py
99
hookutils: reorganize the Qt hook utilities Reorganize the Qt module information to provide information necessary to deal with variations between different python Qt bindings (PySide2, PyQt5, PySide6, and PyQt6). Replace the existing table-like dictionary with list of entries, which is easier to format and document. From this list, we now generate two dictionaries; one that maps Qt module (shared library) names to the module info entries (the same role as the old dictionary), and one that maps python module names to the module info entries. The latter is necessary to accommodate python modules that do not have corresponding Qt shared libraries (header-only Qt modules, such as QtAxContainer; or statically-linked module, such as QSci), but we still need to provide information about plugins or translation files. The new information list is based on manual inspection of source code for Qt 5.15 and 6.3, and should provide comprehensive information about all plugin names and translation file basenames. In addition, most of the helper functions, which take a reference to the `QtLibraryInfo` class as their first argument, have been turned into methods of the `QtLibraryInfo` class. The corresponding hooks have also been adjusted.
264,031
Python
20
0
144
def test_patricks_move(self):
    self.assertEqual(self.pg.node.parent, self.pe.node)
    # perform moves under slave...
    self.move_page(self.pg, self.pc)
    self.reload_pages()
    # page is now under PC
    self.assertEqual(self.pg.node.parent, self.pc.node)
    self.assertEqual(self.pg.get_absolute_url(), self.pg.publisher_public.get_absolute_url())
    self.move_page(self.pe, self.pg)
    self.reload_pages()
    self.assertEqual(self.pe.node.parent, self.pg.node)
    self.ph = self.ph.reload()
    # check urls - they should stay be the same now after the move
    self.assertEqual(
        self.pg.publisher_public.get_absolute_url(),
        self.pg.get_absolute_url()
    )
    self.assertEqual(
        self.ph.publisher_public.get_absolute_url(),
        self.ph.get_absolute_url()
    )
    # check if urls are correct after move
    self.assertEqual(
        self.pg.publisher_public.get_absolute_url(),
        '%smaster/slave-home/pc/pg/' % self.get_pages_root()
    )
    self.assertEqual(
        self.ph.publisher_public.get_absolute_url(),
        '%smaster/slave-home/pc/pg/pe/ph/' % self.get_pages_root()
    )
https://github.com/django-cms/django-cms.git
1
356
c1290c9ff89cb00caa5469129fd527e9d82cd820
72
test_permmod.py
11
26
django-cms
test_patricks_move
314
cms/tests/test_permmod.py
50
ci: Added codespell (#7355) Co-authored-by: Christian Clauss <[email protected]> * ci: codespell config taken from #7292
82,418
Python
15
0
215
async def test_text_new_min_max_pattern(hass):
    text = MockTextEntity(native_min=-1, native_max=500, pattern=r"[a-z]")
    text.hass = hass
    assert text.capability_attributes == {
        ATTR_MIN: 0,
        ATTR_MAX: MAX_LENGTH_STATE_STATE,
        ATTR_MODE: TextMode.TEXT,
        ATTR_PATTERN: r"[a-z]",
    }
https://github.com/home-assistant/core.git
1
85
003e4224c89a6da381960dc5347750d1521d85c9
24
test_init.py
10
9
core
test_text_new_min_max_pattern
67
tests/components/text/test_init.py
23
Add `text` platform (#79454) Co-authored-by: Franck Nijhof <[email protected]> Co-authored-by: Franck Nijhof <[email protected]>
291,315
Python
15
0
55
def load_dataset(verbose=False, remove=()):
    data_train = fetch_20newsgroups(
        subset="train",
        categories=categories,
        shuffle=True,
        random_state=42,
        remove=remove,
    )
    data_test = fetch_20newsgroups(
        subset="test",
        categories=categories,
        shuffle=True,
        random_state=42,
        remove=remove,
    )
    # order of labels in `target_names` can be different from `categories`
    target_names = data_train.target_names

    # split target in a training set and a test set
    y_train, y_test = data_train.target, data_test.target

    # Extracting features from the training data using a sparse vectorizer
    t0 = time()
    vectorizer = TfidfVectorizer(
        sublinear_tf=True, max_df=0.5, min_df=5, stop_words="english"
    )
    X_train = vectorizer.fit_transform(data_train.data)
    duration_train = time() - t0

    # Extracting features from the test data using the same vectorizer
    t0 = time()
    X_test = vectorizer.transform(data_test.data)
    duration_test = time() - t0

    feature_names = vectorizer.get_feature_names_out()

    if verbose:
        # compute size of loaded data
        data_train_size_mb = size_mb(data_train.data)
        data_test_size_mb = size_mb(data_test.data)

        print(
            f"{len(data_train.data)} documents - "
            f"{data_train_size_mb:.2f}MB (training set)"
        )
        print(f"{len(data_test.data)} documents - {data_test_size_mb:.2f}MB (test set)")
        print(f"{len(target_names)} categories")
        print(
            f"vectorize training done in {duration_train:.3f}s "
            f"at {data_train_size_mb / duration_train:.3f}MB/s"
        )
        print(f"n_samples: {X_train.shape[0]}, n_features: {X_train.shape[1]}")
        print(
            f"vectorize testing done in {duration_test:.3f}s "
            f"at {data_test_size_mb / duration_test:.3f}MB/s"
        )
        print(f"n_samples: {X_test.shape[0]}, n_features: {X_test.shape[1]}")

    return X_train, X_test, y_train, y_test, feature_names, target_names


# %%
# Compare feature effects
# -----------------------
# We train a first classification model without attempting to strip the metadata
# of the dataset.

X_train, X_test, y_train, y_test, feature_names, target_names = load_dataset(
    verbose=True
)

# %%
# Our first model is an instance of the
# :class:`~sklearn.linear_model.RidgeClassifier` class. This is a linear
# classification model that uses the mean squared error on {-1, 1} encoded
# targets, one for each possible class. Contrary to
# :class:`~sklearn.linear_model.LogisticRegression`,
# :class:`~sklearn.linear_model.RidgeClassifier` does not
# provide probabilistic predictions (no `predict_proba` method),
# but it is often faster to train.

from sklearn.linear_model import RidgeClassifier

clf = RidgeClassifier(tol=1e-2, solver="sparse_cg")
clf.fit(X_train, y_train)
pred = clf.predict(X_test)

# %%
# We plot the confusion matrix of this classifier to find if there is a pattern
# in the classification errors.

import matplotlib.pyplot as plt

from sklearn.metrics import ConfusionMatrixDisplay

fig, ax = plt.subplots(figsize=(10, 5))
ConfusionMatrixDisplay.from_predictions(y_test, pred, ax=ax)
ax.xaxis.set_ticklabels(target_names)
ax.yaxis.set_ticklabels(target_names)
_ = ax.set_title(
    f"Confusion Matrix for {clf.__class__.__name__}\non the original documents"
)

# %%
# The confusion matrix highlights that documents of the `alt.atheism` class are
# often confused with documents with the class `talk.religion.misc` class and
# vice-versa which is expected since the topics are semantically related.
#
# We also observe that some documents of the `sci.space` class can be misclassified as
# `comp.graphics` while the converse is much rarer. A manual inspection of those
# badly classified documents would be required to get some insights on this
# asymmetry. It could be the case that the vocabulary of the space topic could
# be more specific than the vocabulary for computer graphics.
#
# We can gain a deeper understanding of how this classifier makes its decisions
# by looking at the words with the highest average feature effects:

import pandas as pd
import numpy as np
https://github.com/scikit-learn/scikit-learn.git
2
713
71028322e8964cf1f341a7b293abaefeb5275e12
475
plot_document_classification_20newsgroups.py
15
48
scikit-learn
load_dataset
735
examples/text/plot_document_classification_20newsgroups.py
266
DOC rework plot_document_classification_20newsgroups.py example (#22928) Co-authored-by: Jérémie du Boisberranger <[email protected]> Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
260,017
Python
67
0
224
def apply(self, func, *args, **kwargs):
    logger = get_logger()
    logger.debug(f"ENTER::Partition.apply::{self._identity}")
    data = self._data
    call_queue = self.call_queue + [[func, args, kwargs]]
    if len(call_queue) > 1:
        logger.debug(f"SUBMIT::_apply_list_of_funcs::{self._identity}")
        result, length, width, ip = _apply_list_of_funcs.remote(call_queue, data)
    else:
        # We handle `len(call_queue) == 1` in a different way because
        # this dramatically improves performance.
        result, length, width, ip = _apply_func.remote(data, func, *args, **kwargs)
        logger.debug(f"SUBMIT::_apply_func::{self._identity}")
    logger.debug(f"EXIT::Partition.apply::{self._identity}")
    return PandasOnUnidistDataframePartition(result, length, width, ip)
https://github.com/modin-project/modin.git
2
222
193505fdf0c984743397ba3df56262f30aee13a8
64
partition.py
13
13
modin
apply
193
modin/core/execution/unidist/implementations/pandas_on_unidist/partitioning/partition.py
51
FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059) Signed-off-by: Igoshev, Iaroslav <[email protected]>
155,176
Python
21
0
126
def test_roundtrip_nullable_dtypes(tmp_path, write_engine, read_engine):
    if read_engine == "fastparquet" or write_engine == "fastparquet":
        pytest.xfail("https://github.com/dask/fastparquet/issues/465")

    df = pd.DataFrame(
        {
            "a": pd.Series([1, 2, pd.NA, 3, 4], dtype="Int64"),
            "b": pd.Series([True, pd.NA, False, True, False], dtype="boolean"),
            "c": pd.Series([0.1, 0.2, 0.3, pd.NA, 0.4], dtype="Float64"),
            "d": pd.Series(["a", "b", "c", "d", pd.NA], dtype="string"),
        }
    )
    ddf = dd.from_pandas(df, npartitions=2)
    ddf.to_parquet(tmp_path, engine=write_engine)
    ddf2 = dd.read_parquet(tmp_path, engine=read_engine)
    assert_eq(df, ddf2)


@PYARROW_MARK
https://github.com/dask/dask.git
3
278
b1e468e8645baee30992fbfa84250d816ac1098a
@PYARROW_MARK
60
test_parquet.py
14
15
dask
test_roundtrip_nullable_dtypes
148
dask/dataframe/io/tests/test_parquet.py
55
Add support for `use_nullable_dtypes` to `dd.read_parquet` (#9617)
157,201
Python
22
1
182
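The commit above introduces a `use_nullable_dtypes` flag on `dd.read_parquet`. A minimal usage sketch, assuming a parquet dataset at a hypothetical local path `data/` written with the pyarrow engine; the flag name comes from the commit title, and the dtype behavior is inferred from the test record above:

import dask.dataframe as dd

# With the flag on, columns written as pandas extension dtypes (Int64, boolean,
# Float64, string) should round-trip instead of degrading to NumPy dtypes.
ddf = dd.read_parquet("data/", engine="pyarrow", use_nullable_dtypes=True)
print(ddf.dtypes)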
def async_update_group_state(self) -> None:
    self._attr_assumed_state = False

    states = [
        state
        for entity_id in self._entities
        if (state := self.hass.states.get(entity_id)) is not None
    ]
    self._attr_assumed_state |= not states_equal(states)

    # Set group as unavailable if all members are unavailable or missing
    self._attr_available = any(state.state != STATE_UNAVAILABLE for state in states)

    valid_state = any(
        state.state not in (STATE_UNKNOWN, STATE_UNAVAILABLE) for state in states
    )
    if not valid_state:
        # Set as unknown if all members are unknown or unavailable
        self._is_on = None
    else:
        # Set as ON if any member is ON
        self._is_on = any(state.state == STATE_ON for state in states)

    percentage_states = self._async_states_by_support_flag(
        FanEntityFeature.SET_SPEED
    )
    self._percentage = reduce_attribute(percentage_states, ATTR_PERCENTAGE)
    self._attr_assumed_state |= not attribute_equal(
        percentage_states, ATTR_PERCENTAGE
    )
    if (
        percentage_states
        and percentage_states[0].attributes.get(ATTR_PERCENTAGE_STEP)
        and attribute_equal(percentage_states, ATTR_PERCENTAGE_STEP)
    ):
        self._speed_count = (
            round(100 / percentage_states[0].attributes[ATTR_PERCENTAGE_STEP])
            or 100
        )
    else:
        self._speed_count = 100

    self._set_attr_most_frequent(
        "_oscillating", FanEntityFeature.OSCILLATE, ATTR_OSCILLATING
    )
    self._set_attr_most_frequent(
        "_direction", FanEntityFeature.DIRECTION, ATTR_DIRECTION
    )

    self._attr_supported_features = reduce(
        ior, [feature for feature in SUPPORTED_FLAGS if self._fans[feature]], 0
    )
    self._attr_assumed_state |= any(
        state.attributes.get(ATTR_ASSUMED_STATE) for state in states
    )
https://github.com/home-assistant/core.git
14
410
38a8e86ddeb65ee8c731b90a7063a3b3702dc1ef
167
fan.py
16
47
core
async_update_group_state
606
homeassistant/components/group/fan.py
90
Cleanup supported_features in group (#82242) * Cleanup supported_features in group * Remove defaults (already set to 0 in fan and media_player)
290,831
Python
41
0
265
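The supported-features reduction at the end of async_update_group_state is a generic bitmask-merge idiom worth isolating; a self-contained sketch (the flag values here are made up for illustration):

from functools import reduce
from operator import ior

SET_SPEED, OSCILLATE, DIRECTION = 0b001, 0b010, 0b100  # hypothetical flag bits
member_flags = [SET_SPEED, DIRECTION]

# OR all member flags together, starting from 0 so an empty list yields no features.
merged = reduce(ior, member_flags, 0)
assert merged == 0b101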
def predict(self, X):
    check_is_fitted(self)

    X = self._validate_data(X, accept_sparse="csr", reset=False)
    return self.classes_[
        pairwise_distances_argmin(X, self.centroids_, metric=self.metric)
    ]
https://github.com/scikit-learn/scikit-learn.git
1
75
e01035d3b2dc147cbbe9f6dbd7210a76119991e8
15
_nearest_centroid.py
10
6
scikit-learn
predict
61
sklearn/neighbors/_nearest_centroid.py
15
OPTIM use pairwise_distances_argmin in NearestCentroid.predict (#24645) Co-authored-by: Julien Jerphanion <[email protected]>
261,351
Python
11
0
48
def _suplabels(self, t, info, **kwargs):
    suplab = getattr(self, info['name'])

    x = kwargs.pop('x', None)
    y = kwargs.pop('y', None)
    if info['name'] in ['_supxlabel', '_suptitle']:
        autopos = y is None
    elif info['name'] == '_supylabel':
        autopos = x is None
    if x is None:
        x = info['x0']
    if y is None:
        y = info['y0']

    if 'horizontalalignment' not in kwargs and 'ha' not in kwargs:
        kwargs['horizontalalignment'] = info['ha']
    if 'verticalalignment' not in kwargs and 'va' not in kwargs:
        kwargs['verticalalignment'] = info['va']
    if 'rotation' not in kwargs:
        kwargs['rotation'] = info['rotation']

    if 'fontproperties' not in kwargs:
        if 'fontsize' not in kwargs and 'size' not in kwargs:
            kwargs['size'] = mpl.rcParams[info['size']]
        if 'fontweight' not in kwargs and 'weight' not in kwargs:
            kwargs['weight'] = mpl.rcParams[info['weight']]

    sup = self.text(x, y, t, **kwargs)
    if suplab is not None:
        suplab.set_text(t)
        suplab.set_position((x, y))
        suplab.update_from(sup)
        sup.remove()
    else:
        suplab = sup
    suplab._autopos = autopos
    setattr(self, info['name'], suplab)
    self.stale = True
    return suplab
https://github.com/matplotlib/matplotlib.git
16
487
eeac402ec56d7e69234e0cd7b15f59d53852e457
146
figure.py
13
35
matplotlib
_suplabels
463
lib/matplotlib/figure.py
69
Add rcparam for figure label size and weight (#22566) * Add rcparam for figure label size and weight
109,149
Python
22
0
283
def test_backfill_execute_subdag_with_removed_task(self):
    dag = self.dagbag.get_dag('example_subdag_operator')
    subdag = dag.get_task('section-1').subdag

    session = settings.Session()
    executor = MockExecutor()
    job = BackfillJob(
        dag=subdag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor, donot_pickle=True
    )
    dr = DagRun(
        dag_id=subdag.dag_id, execution_date=DEFAULT_DATE, run_id="test", run_type=DagRunType.BACKFILL_JOB
    )
    session.add(dr)

    removed_task_ti = TI(
        task=EmptyOperator(task_id='removed_task'), run_id=dr.run_id, state=State.REMOVED
    )
    removed_task_ti.dag_id = subdag.dag_id
    dr.task_instances.append(removed_task_ti)

    session.commit()

    with timeout(seconds=30):
        job.run()

    for task in subdag.tasks:
        instance = (
            session.query(TI)
            .filter(
                TI.dag_id == subdag.dag_id, TI.task_id == task.task_id, TI.execution_date == DEFAULT_DATE
            )
            .first()
        )

        assert instance is not None
        assert instance.state == State.SUCCESS

    removed_task_ti.refresh_from_db()
    assert removed_task_ti.state == State.REMOVED

    subdag.clear()
    dag.clear()
https://github.com/apache/airflow.git
2
372
49e336ae0302b386a2f47269a6d13988382d975f
84
test_backfill_job.py
15
34
airflow
test_backfill_execute_subdag_with_removed_task
398
tests/jobs/test_backfill_job.py
65
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
47,465
Python
49
0
232
def __call__(self, mask_out, bboxes, bbox_num, origin_shape):
    num_mask = mask_out.shape[0]
    origin_shape = paddle.cast(origin_shape, 'int32')
    # TODO: support bs > 1 and mask output dtype is bool
    pred_result = paddle.zeros(
        [num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32')

    im_h, im_w = origin_shape[0][0], origin_shape[0][1]
    pred_mask = self.paste_mask(mask_out[:, None, :, :], bboxes[:, 2:], im_h,
                                im_w)
    pred_mask = pred_mask >= self.binary_thresh
    pred_result = paddle.cast(pred_mask, 'int32')

    return pred_result


@register
https://github.com/PaddlePaddle/PaddleDetection.git
1
197
afb3b7a1c7842921b8eacae9d2ac4f2e660ea7e1
@register
59
post_process.py
11
11
PaddleDetection
__call__
174
ppdet/modeling/post_process.py
46
Remove conditional block in RCNN export onnx (#5371) * support rcnn onnx * clean code * update cascade rcnn * add todo for rpn proposals
210,267
Python
19
1
129
def context_stub(cls):
    context = {
        'job': {
            'allow_simultaneous': False,
            'artifacts': {},
            'controller_node': 'foo_controller',
            'created': datetime.datetime(2018, 11, 13, 6, 4, 0, 0, tzinfo=datetime.timezone.utc),
            'custom_virtualenv': 'my_venv',
            'description': 'Sample job description',
            'diff_mode': False,
            'elapsed': 0.403018,
            'execution_node': 'awx',
            'failed': False,
            'finished': False,
            'force_handlers': False,
            'forks': 0,
            'host_status_counts': {'skipped': 1, 'ok': 5, 'changed': 3, 'failures': 0, 'dark': 0, 'failed': False, 'processed': 0, 'rescued': 0},
            'id': 42,
            'job_explanation': 'Sample job explanation',
            'job_slice_count': 1,
            'job_slice_number': 0,
            'job_tags': '',
            'job_type': 'run',
            'launch_type': 'workflow',
            'limit': 'bar_limit',
            'modified': datetime.datetime(2018, 12, 13, 6, 4, 0, 0, tzinfo=datetime.timezone.utc),
            'name': 'Stub JobTemplate',
            'playbook': 'ping.yml',
            'scm_branch': '',
            'scm_revision': '',
            'skip_tags': '',
            'start_at_task': '',
            'started': '2019-07-29T17:38:14.137461Z',
            'status': 'running',
            'summary_fields': {
                'created_by': {'first_name': '', 'id': 1, 'last_name': '', 'username': 'admin'},
                'instance_group': {'id': 1, 'name': 'tower'},
                'inventory': {
                    'description': 'Sample inventory description',
                    'has_active_failures': False,
                    'has_inventory_sources': False,
                    'hosts_with_active_failures': 0,
                    'id': 17,
                    'inventory_sources_with_failures': 0,
                    'kind': '',
                    'name': 'Stub Inventory',
                    'organization_id': 121,
                    'total_groups': 0,
                    'total_hosts': 1,
                    'total_inventory_sources': 0,
                },
                'job_template': {'description': 'Sample job template description', 'id': 39, 'name': 'Stub JobTemplate'},
                'labels': {'count': 0, 'results': []},
                'project': {'description': 'Sample project description', 'id': 38, 'name': 'Stub project', 'scm_type': 'git', 'status': 'successful'},
                'schedule': {
                    'description': 'Sample schedule',
                    'id': 42,
                    'name': 'Stub schedule',
                    'next_run': datetime.datetime(2038, 1, 1, 0, 0, 0, 0, tzinfo=datetime.timezone.utc),
                },
                'unified_job_template': {
                    'description': 'Sample unified job template description',
                    'id': 39,
                    'name': 'Stub Job Template',
                    'unified_job_type': 'job',
                },
            },
            'timeout': 0,
            'type': 'job',
            'url': '/api/v2/jobs/13/',
            'use_fact_cache': False,
            'verbosity': 0,
        },
        'job_friendly_name': 'Job',
        'url': 'https://towerhost/#/jobs/playbook/1010',
        'approval_status': 'approved',
        'approval_node_name': 'Approve Me',
        'workflow_url': 'https://towerhost/#/jobs/workflow/1010',
        'job_metadata': ,
    }

    return context
https://github.com/ansible/awx.git
1
894
389c4a318035cdb02a972ba8200391765f522169
244
notifications.py
19
96
awx
context_stub
1,599
awx/main/models/notifications.py
146
Adding fields to job_metadata for workflows and approval nodes (#12255)
81,344
Python
7
0
480
def test_build_in_tf_function(self):
    m = metrics.MeanTensor(dtype=tf.float64)
https://github.com/keras-team/keras.git
1
32
84afc5193d38057e2e2badf9c889ea87d80d8fbf
5
base_metric_test.py
10
11
keras
test_build_in_tf_function
19
keras/metrics/base_metric_test.py
5
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
274,647
Python
8
0
117
def func_dump(func):
    if os.name == "nt":
        raw_code = marshal.dumps(func.__code__).replace(b"\\", b"/")
        code = codecs.encode(raw_code, "base64").decode("ascii")
    else:
        raw_code = marshal.dumps(func.__code__)
        code = codecs.encode(raw_code, "base64").decode("ascii")
    defaults = func.__defaults__
    if func.__closure__:
        closure = tuple(c.cell_contents for c in func.__closure__)
    else:
        closure = None
    return code, defaults, closure
https://github.com/keras-team/keras.git
4
185
84afc5193d38057e2e2badf9c889ea87d80d8fbf
42
generic_utils.py
14
13
keras
func_dump
105
keras/utils/generic_utils.py
28
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
276,840
Python
20
0
109
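A quick round-trip sketch for func_dump above, runnable in the same module; the printed values follow from the function body (a bare lambda has no defaults and no closure):

add_one = lambda x: x + 1

# `code` is an ascii base64 string, safe to embed in JSON-serialized configs.
code, defaults, closure = func_dump(add_one)
print(type(code), defaults, closure)  # <class 'str'> None None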
def get_evaluation_sets(self) -> List[dict]:
    return self.evaluation_set_client.get_evaluation_sets()
https://github.com/deepset-ai/haystack.git
1
33
a273c3a51dd432bd125e5b35df4be94260a2cdb7
6
deepsetcloud.py
8
8
haystack
get_evaluation_sets
20
haystack/document_stores/deepsetcloud.py
6
EvaluationSetClient for deepset cloud to fetch evaluation sets and la… (#2345) * EvaluationSetClient for deepset cloud to fetch evaluation sets and labels for one specific evaluation set * make DeepsetCloudDocumentStore able to fetch uploaded evaluation set names * fix missing renaming of get_evaluation_set_names in DeepsetCloudDocumentStore * update documentation for evaluation set functionality in deepset cloud document store * DeepsetCloudDocumentStore tests for evaluation set functionality * rename index to evaluation_set_name for DeepsetCloudDocumentStore evaluation set functionality * raise DeepsetCloudError when no labels were found for evaluation set * make use of .get_with_auto_paging in EvaluationSetClient * Return result of get_with_auto_paging() as it parses the response already * Make schema import source more specific * fetch all evaluation sets for a workspace in deepset Cloud * Rename evaluation_set_name to label_index * make use of generator functionality for fetching labels * Update Documentation & Code Style * Adjust function input for DeepsetCloudDocumentStore.get_all_labels, adjust tests for it, fix typos, make linter happy * Match error message with pytest.raises * Update Documentation & Code Style * DeepsetCloudDocumentStore.get_labels_count raises DeepsetCloudError when no evaluation set was found to count labels on * remove unneeded import in tests * DeepsetCloudDocumentStore tests, make reponse bodies a string through json.dumps * DeepsetcloudDocumentStore.get_label_count - move raise to return * stringify uuid before json.dump as uuid is not serilizable * DeepsetcloudDocumentStore - adjust response mocking in tests * DeepsetcloudDocumentStore - json dump response body in test * DeepsetCloudDocumentStore introduce label_index, EvaluationSetClient rename label_index to evaluation_set * Update Documentation & Code Style * DeepsetCloudDocumentStore rename evaluation_set to evaluation_set_response as there is a name clash with the input variable * DeepsetCloudDocumentStore - rename missed variable in test * DeepsetCloudDocumentStore - rename missed label_index to index in doc string, rename label_index to evaluation_set in EvaluationSetClient * Update Documentation & Code Style * DeepsetCloudDocumentStore - update docstrings for EvaluationSetClient * DeepsetCloudDocumentStore - fix typo in doc string Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
257,048
Python
5
0
19
def test_simple(self):
    code_owner_1 = self.create_codeowners(
        self.project_1, self.code_mapping_1, raw=self.data_1["raw"]
    )
    code_owner_2 = self.create_codeowners(
        self.project_2, self.code_mapping_2, raw=self.data_2["raw"]
    )
    response = self.get_success_response(self.organization.slug, status=status.HTTP_200_OK)
    for code_owner in [code_owner_1, code_owner_2]:
        assert code_owner.project.slug in response.data.keys()
        associations, errors = ProjectCodeOwners.validate_codeowners_associations(
            code_owner.raw, code_owner.project
        )
        assert "associations" in response.data[code_owner.project.slug].keys()
        assert response.data[code_owner.project.slug]["associations"] == associations
        assert "errors" in response.data[code_owner.project.slug].keys()
        assert response.data[code_owner.project.slug]["errors"] == errors
https://github.com/getsentry/sentry.git
2
274
5efa5eeb57ae6ddf740256e08ce3b9ff4ec98eaa
52
test_organization_codeowners_associations.py
13
17
sentry
test_simple
215
tests/sentry/api/endpoints/test_organization_codeowners_associations.py
36
feat(codeowners): Add endpoint to view code owner associations per organization (#31030) See API-2186 So the earlier version of this PR just had the endpoint return the entire serialized ProjectCodeOwners for an organization. While that works, the intention behind this feature is to read and use the associations, so sending the raw codeowners file, and timestamps are unnecessary and increase the latency with such large payloads, especially for larger orgs. @NisanthanNanthakumar suggested limiting what the endpoint returns to just what the feature will need on the frontend, and making the endpoint name a bit more specific. OrganizationCodeOwners -> OrganizationCodeOwnersAssocations. Along with this refactor, tests have been updated.
95,412
Python
26
0
175
def set(self, **kwargs) -> "Mobject":
    for attr, value in kwargs.items():
        setattr(self, attr, value)

    return self
https://github.com/ManimCommunity/manim.git
2
53
6d15ca5e745ecdd5d0673adbd55fc7a589abdae3
15
mobject.py
9
54
manim
set
47
manim/mobject/mobject.py
14
Clarify the docs for MObject.animate, MObject.set and Variable. (#2407) * Clarify the docs for MObject.animate, MObject.set and Variable. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Slight reword * Apply suggestions from code review Co-authored-by: Benjamin Hackl <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Benjamin Hackl <[email protected]>
189,402
Python
7
0
32
def test_driver_3():
    args_list = [
        'tests/tests.csv',
        '-is', ',',
        '-target', 'class',
        '-g', '1',
        '-p', '2',
        '-cv', '3',
        '-s', ' 45',
        '-config', 'TPOT light',
        '-v', '2'
    ]
    args = _get_arg_parser().parse_args(args_list)
    with captured_output() as (out, err):
        tpot_driver(args)
    ret_stdout = out.getvalue()

    assert "TPOT settings" in ret_stdout
    assert "Final Pareto front testing scores" not in ret_stdout
    try:
        ret_val = float(ret_stdout.split('\n')[-2].split(': ')[-1])
    except Exception:
        ret_val = -float('inf')
    assert ret_val > 0.0
https://github.com/EpistasisLab/tpot.git
2
231
388616b6247ca4ea8de4e2f340d6206aee523541
64
driver_tests.py
17
23
tpot
test_driver_3
265
tests/driver_tests.py
53
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
181,598
Python
15
0
125
def _find_all_or_none(qt_library_info, mandatory_dll_patterns, optional_dll_patterns=None):
    optional_dll_patterns = optional_dll_patterns or []

    # Resolve path to the the corresponding python package (actually, its parent directory). Used to preserve directory
    # structure when DLLs are collected from the python package (e.g., PyPI wheels).
    package_parent_path = pathlib.Path(qt_library_info.package_location).resolve().parent

    # In PyQt5/PyQt6, the DLLs we are looking for are located in location['BinariesPath'], whereas in PySide2/PySide6,
    # they are located in location['PrefixPath'].
    dll_path = qt_library_info.location['BinariesPath' if qt_library_info.is_pyqt else 'PrefixPath']
    dll_path = pathlib.Path(dll_path).resolve()

    # Helper for processing single DLL pattern
https://github.com/pyinstaller/pyinstaller.git
6
105
49abfa5498b1db83b8f1b2e859e461b1e8540c6f
81
qt.py
12
15
pyinstaller
_find_all_or_none
111
PyInstaller/utils/hooks/qt.py
58
hookutils: qt: ensure ANGLE DLLs are collected from Anaconda Qt5 Anaconda's Qt5 ships ANGLE DLLs (`libEGL.dll` and `libGLESv2.dll`) but does not seem to provide the `d3dcompiler_XY.dll`. Therefore, we need to adjust the extra Qt DLL collection to consider the latter an optional dependency whose absence does not preclude the collection of the ANGLE DLL group. Rework the `get_qt_binaries` hook utility function and its `_find_all_or_none` helper to peform collection based on a list of mandatory and a list of optional patterns, instead of a single list and number of expected matches (since up until now, all matches were always expected to be found).
263,901
Python
13
0
100
def test_asarray_with_order(is_array_api):
    if is_array_api:
        xp = pytest.importorskip("numpy.array_api")
    else:
        xp = numpy

    X = xp.asarray([1.2, 3.4, 5.1])
    X_new = _asarray_with_order(X, order="F")

    X_new_np = numpy.asarray(X_new)
    assert X_new_np.flags["F_CONTIGUOUS"]
https://github.com/scikit-learn/scikit-learn.git
2
104
2710a9e7eefd2088ce35fd2fb6651d5f97e5ef8b
25
test_array_api.py
11
9
scikit-learn
test_asarray_with_order
60
sklearn/utils/tests/test_array_api.py
20
ENH Adds Array API support to LinearDiscriminantAnalysis (#22554) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
261,040
Python
13
0
67
def set_params(self, **params):
    self.check_params(params)
    self.sk_params.update(params)
    return self
https://github.com/keras-team/keras.git
1
43
84afc5193d38057e2e2badf9c889ea87d80d8fbf
7
scikit_learn.py
8
4
keras
set_params
35
keras/wrappers/scikit_learn.py
7
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
277,191
Python
6
0
25
def _validate_argument_count(self) -> None:
    if isinstance(self.operator_class, str):
        return  # No need to validate deserialized operator.
    operator = self._create_unmapped_operator(
        mapped_kwargs={k: unittest.mock.MagicMock(name=k) for k in self.mapped_kwargs},
        partial_kwargs=self.partial_kwargs,
        real=False,
    )
    if operator.task_group:
        operator.task_group._remove(operator)
    dag = operator.get_dag()
    if dag:
        dag._remove_task(operator.task_id)
https://github.com/apache/airflow.git
5
143
0cd3b11f3a5c406fbbd4433d8e44d326086db634
36
mappedoperator.py
14
20
airflow
_validate_argument_count
152
airflow/models/mappedoperator.py
33
Straighten up MappedOperator hierarchy and typing (#21505)
44,722
Python
21
0
90
def turn_on(self, transition_time, pipeline, **kwargs):
    # The night effect does not need a turned on light
    if kwargs.get(ATTR_EFFECT) == EFFECT_NIGHT:
        if EFFECT_NIGHT in self._effect_list:
            pipeline.night_light()
            self._effect = EFFECT_NIGHT
        return

    pipeline.on()

    # Set up transition.
    args = {}
    if self.config[CONF_FADE] and not self.is_on and self._brightness:
        args["brightness"] = self.limitlessled_brightness()

    if ATTR_BRIGHTNESS in kwargs:
        self._brightness = kwargs[ATTR_BRIGHTNESS]
        args["brightness"] = self.limitlessled_brightness()

    if ATTR_HS_COLOR in kwargs and self._supported & SUPPORT_COLOR:
        self._color = kwargs[ATTR_HS_COLOR]
        # White is a special case.
        if self._color[1] < MIN_SATURATION:
            pipeline.white()
            self._color = WHITE
        else:
            args["color"] = self.limitlessled_color()

    if ATTR_COLOR_TEMP in kwargs:
        if self._supported & SUPPORT_COLOR:
            pipeline.white()
        self._color = WHITE
        if self._supported & SUPPORT_COLOR_TEMP:
            self._temperature = kwargs[ATTR_COLOR_TEMP]
        args["temperature"] = self.limitlessled_temperature()

    if args:
        pipeline.transition(transition_time, **args)

    # Flash.
    if ATTR_FLASH in kwargs and self._supported & LightEntityFeature.FLASH:
        duration = 0
        if kwargs[ATTR_FLASH] == FLASH_LONG:
            duration = 1
        pipeline.flash(duration=duration)

    # Add effects.
    if ATTR_EFFECT in kwargs and self._effect_list:
        if kwargs[ATTR_EFFECT] == EFFECT_COLORLOOP:
            self._effect = EFFECT_COLORLOOP
            pipeline.append(COLORLOOP)
        if kwargs[ATTR_EFFECT] == EFFECT_WHITE:
            pipeline.white()
            self._color = WHITE
https://github.com/home-assistant/core.git
21
477
6635fc4e3111f72bfa6095c97b3f522429fa1a8b
158
light.py
13
41
core
turn_on
656
homeassistant/components/limitlessled/light.py
88
Use LightEntityFeature enum in limitlessled (#71061)
299,547
Python
42
0
291
async def test_duplicate_removal(hass, mqtt_mock_entry_no_yaml_config, caplog):
    await mqtt_mock_entry_no_yaml_config()
    async_fire_mqtt_message(
        hass,
        "homeassistant/binary_sensor/bla/config",
        '{ "name": "Beer", "state_topic": "test-topic" }',
    )
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla/config", "")
    await hass.async_block_till_done()
    assert "Component has already been discovered: binary_sensor bla" in caplog.text
    caplog.clear()
    async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla/config", "")
    await hass.async_block_till_done()
    assert "Component has already been discovered: binary_sensor bla" not in caplog.text
https://github.com/home-assistant/core.git
1
137
52561ce0769ddcf1e8688c8909692b66495e524b
51
test_discovery.py
8
15
core
test_duplicate_removal
108
tests/components/mqtt/test_discovery.py
32
Update MQTT tests to use the config entry setup (#72373) * New testframework and tests for fan platform * Merge test_common_new to test_common * Add alarm_control_panel * Add binary_sensor * Add button * Add camera * Add climate * Add config_flow * Add cover * Add device_tracker_disovery * Add device_trigger * Add diagnostics * Add discovery * Add humidifier * Add init * Add lecacy_vacuum * Add light_json * Add light_template * Add light * Add lock * Add number * Add scene * Add select * Add sensor * Add siren * Add state_vacuum * Add subscription * Add switch * Add tag * Add trigger * Add missed tests * Add another missed test * Add device_tracker * Remove commented out code * Correct tests according comments * Improve mqtt_mock_entry and recover tests * Split fixtures with and without yaml setup * Update fixtures manual_mqtt * Update fixtures mqtt_json * Fix test tasmota * Update fixture mqtt_room * Revert fixture changes, improve test * re-add test
302,108
Python
8
0
75
def _extract_archive(file_path, path=".", archive_format="auto"):
    if archive_format is None:
        return False
    if archive_format == "auto":
        archive_format = ["tar", "zip"]
    if isinstance(archive_format, str):
        archive_format = [archive_format]

    file_path = io_utils.path_to_string(file_path)
    path = io_utils.path_to_string(path)

    for archive_type in archive_format:
        if archive_type == "tar":
            open_fn = tarfile.open
            is_match_fn = tarfile.is_tarfile
        if archive_type == "zip":
            open_fn = zipfile.ZipFile
            is_match_fn = zipfile.is_zipfile

        if is_match_fn(file_path):
            with open_fn(file_path) as archive:
                try:
                    archive.extractall(path)
                except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
                    if os.path.exists(path):
                        if os.path.isfile(path):
                            os.remove(path)
                        else:
                            shutil.rmtree(path)
                    raise
            return True
    return False


@keras_export("keras.utils.get_file")
https://github.com/keras-team/keras.git
11
297
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.utils.get_file")
79
data_utils.py
21
29
keras
_extract_archive
397
keras/utils/data_utils.py
53
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
276,769
Python
29
1
169
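A usage sketch for _extract_archive above, assuming a tarball exists at the hypothetical path archive.tar.gz; the return values mirror the branches in the function:

# True once a matching archive type (tar or zip) is detected and fully extracted;
# False when the format is unrecognized or archive_format is None.
ok = _extract_archive("archive.tar.gz", path="unpacked", archive_format="auto")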
def test_run_image_classification_no_trainer(self):
    tmp_dir = self.get_auto_remove_tmp_dir()
    testargs = f.split()

    if is_cuda_and_apex_available():
        testargs.append("--fp16")

    _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE)
    result = get_results(tmp_dir)
    # The base model scores a 25%
    self.assertGreaterEqual(result["eval_accuracy"], 0.625)
    self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
    self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
https://github.com/huggingface/transformers.git
2
195
acb709d55150501698b5b500ca49683b913d4b3d
33
test_accelerate_examples.py
12
23
transformers
test_run_image_classification_no_trainer
106
examples/pytorch/test_accelerate_examples.py
29
Change no trainer image_classification test (#17635) * Adjust test arguments and use a new example test
31,499
Python
23
0
112
def nest_paths(paths):
    nested = []

    for path in paths:
        parts = PurePath(path).parent.parts

        branch = nested
        for part in parts:
            part = dirname_to_title(part)
            branch = find_or_create_node(branch, part)

        branch.append(path)

    return nested
https://github.com/mkdocs/mkdocs.git
3
90
1c50987f9c17b228fdf22456aa369b83bd6b11b9
29
__init__.py
12
10
mkdocs
nest_paths
91
mkdocs/utils/__init__.py
19
Refactor URI handling to not have to deal with backslashes
224,518
Python
12
0
55
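An illustration of nest_paths above; the exact node shape depends on find_or_create_node, but per mkdocs nav conventions the result should look roughly like the comment below:

paths = ["index.md", "user-guide/install.md", "user-guide/config.md"]
# Roughly: ['index.md', {'User guide': ['user-guide/install.md', 'user-guide/config.md']}]
print(nest_paths(paths))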
def run_before_hook(self):
    return None
https://github.com/wagtail/wagtail.git
1
16
bc1a2ab1148b0f27cfd1435f8cb0e44c2721102d
4
mixins.py
6
2
wagtail
run_before_hook
18
wagtail/admin/views/generic/mixins.py
4
Extract mixins from Snippet views and use it in generic create/edit/delete views (#8361)
77,226
Python
2
0
8
def test_driver():
    batcmd = "python -m tpot.driver tests/tests.csv -is , -target class -g 1 -p 2 -os 4 -cv 5 -s 45 -v 1"
    ret_stdout = subprocess.check_output(batcmd, shell=True)
    try:
        ret_val = float(ret_stdout.decode('UTF-8').split('\n')[-2].split(': ')[-1])
    except Exception as e:
        ret_val = -float('inf')
    assert ret_val > 0.0
https://github.com/EpistasisLab/tpot.git
2
123
388616b6247ca4ea8de4e2f340d6206aee523541
44
driver_tests.py
19
8
tpot
test_driver
76
tests/driver_tests.py
39
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
181,586
Python
12
0
69
def close(self) -> None:
    self._save()
    self._handles.close()


XLS_SIGNATURES = (
    b"\x09\x00\x04\x00\x07\x00\x10\x00",  # BIFF2
    b"\x09\x02\x06\x00\x00\x00\x10\x00",  # BIFF3
    b"\x09\x04\x06\x00\x00\x00\x10\x00",  # BIFF4
    b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1",  # Compound File Binary
)
ZIP_SIGNATURE = b"PK\x03\x04"
PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,)))


@doc(storage_options=_shared_docs["storage_options"])
https://github.com/pandas-dev/pandas.git
1
147
047137ce2619cfe2027e3999dfb92eb614d9a485
@doc(storage_options=_shared_docs["storage_options"])
34
_base.py
10
4
pandas
close
66
pandas/io/excel/_base.py
28
DEP: Protect some ExcelWriter attributes (#45795) * DEP: Deprecate ExcelWriter attributes * DEP: Deprecate ExcelWriter attributes * Fixup for test * Move tests and restore check_extension y * Deprecate xlwt fm_date and fm_datetime; doc improvements
164,682
Python
13
1
20
async def test_max_concurrent_in_progress_functions(extra_req_num):
    max_req = 10
    a = A(max_num_call=max_req)

    # Run more than allowed concurrent async functions should trigger rate limiting
    res_arr = await asyncio.gather(
        *[a.fn1() if i % 2 == 0 else a.fn2() for i in range(max_req + extra_req_num)]
    )
    fail_cnt = 0
    for ok in res_arr:
        fail_cnt += 0 if ok else 1

    expected_fail_cnt = max(0, extra_req_num)
    assert fail_cnt == expected_fail_cnt, (
        f"{expected_fail_cnt} out of {max_req + extra_req_num} "
        f"concurrent runs should fail with max={max_req} but {fail_cnt}."
    )

    assert a.num_call_ == 0, "All requests should be done"


@pytest.mark.asyncio
@pytest.mark.parametrize(
    "failures",
    [
        [True, True, True, True, True],
        [False, False, False, False, False],
        [False, True, False, True, False],
        [False, False, False, True, True],
        [True, True, False, False, False],
    ],
)
https://github.com/ray-project/ray.git
5
270
365ffe21e592589880e3116302705b5e08a5b81f
@pytest.mark.asyncio @pytest.mark.parametrize( "failures", [ [True, True, True, True, True], [False, False, False, False, False], [False, True, False, True, False], [False, False, False, True, True], [True, True, False, False, False], ], )
120
test_state_head.py
15
15
ray
test_max_concurrent_in_progress_functions
225
dashboard/tests/test_state_head.py
78
[Core | State Observability] Implement API Server (Dashboard) HTTP Requests Throttling (#26257) This is to limit the max number of HTTP requests the dashboard (API server) will accept before rejecting more requests. This will make sure the observability requests do not overload the downstream systems (raylet/gcs) when delegating too many concurrent state observability requests to the cluster.
124,708
Python
21
1
96
def astar_path(G, source, target, heuristic=None, weight="weight"):
    r
    if source not in G or target not in G:
        msg = f"Either source {source} or target {target} is not in G"
        raise nx.NodeNotFound(msg)

    if heuristic is None:
        # The default heuristic is h=0 - same as Dijkstra's algorithm
https://github.com/networkx/networkx.git
13
80
b28d30bd552a784d60692fd2d2016f8bcd1cfa17
45
astar.py
10
41
networkx
astar_path
75
networkx/algorithms/shortest_paths/astar.py
34
Updated astar docstring (#5797) The docstring now reflects on heuristic admissibility and heuristic value caching
176,904
Python
9
0
273
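As the commit message notes, astar_path is only guaranteed optimal when the heuristic is admissible (it never overestimates the remaining cost), and with the default h=0 it degenerates to Dijkstra. A minimal sketch with an admissible Euclidean heuristic on a unit grid:

import math
import networkx as nx

G = nx.grid_2d_graph(4, 4)  # nodes are (row, col) tuples; implicit edge weight 1

def euclidean(u, v):
    # Straight-line distance never exceeds the hop count on a unit grid,
    # so this heuristic is admissible.
    return math.dist(u, v)

print(nx.astar_path(G, (0, 0), (3, 3), heuristic=euclidean))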
def test_note_generic_issue(self, mock_func, occurrence):
    event = self.store_event(
        data={"message": "Hellboy's world", "level": "error"}, project_id=self.project.id
    )
    event = event.for_group(event.groups[0])

    notification = NoteActivityNotification(
        Activity(
            project=self.project,
            group=event.group,
            user=self.user,
            type=ActivityType.NOTE,
            data={"text": "text", "mentions": []},
        )
    )
    with self.tasks():
        notification.send()

    attachment, text = get_attachment()
    assert text == f"New comment by {self.name}"
    assert attachment["title"] == TEST_ISSUE_OCCURRENCE.issue_title
    assert attachment["text"] == notification.activity.data["text"]
    assert (
        attachment["footer"]
        == f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=note_activity-slack-user|Notification Settings>"
    )
https://github.com/getsentry/sentry.git
1
269
3255fa4ebb9fbc1df6bb063c0eb77a0298ca8f72
62
test_note.py
14
24
sentry
test_note_generic_issue
294
tests/sentry/integrations/slack/notifications/test_note.py
48
feat(integrations): Support generic issue type alerts (#42110) Add support for issue alerting integrations that use the message builder (Slack and MSTeams) for generic issue types. Preview text for Slack alert: <img width="350" alt="Screen Shot 2022-12-08 at 4 07 16 PM" src="https://user-images.githubusercontent.com/29959063/206593405-7a206d88-a31a-4e85-8c15-1f7534733ca7.png"> Slack generic issue alert shows the `occurrence.issue_title` and the "important" evidence value <img width="395" alt="Screen Shot 2022-12-08 at 4 11 20 PM" src="https://user-images.githubusercontent.com/29959063/206593408-6942d74d-4238-4df9-bfee-601ce2bc1098.png"> MSTeams generic issue alert shows the `occurrence.issue_title` and the "important" evidence value <img width="654" alt="Screen Shot 2022-12-08 at 4 13 45 PM" src="https://user-images.githubusercontent.com/29959063/206593410-2773746a-16b3-4652-ba2c-a7d5fdc76992.png"> Fixes #42047
89,927
Python
30
0
151
async def async_add_devices(address, multiple):
https://github.com/home-assistant/core.git
2
16
a9ca774e7ed1d8fe502a53d5b765c1d9b393a524
4
device.py
6
3
core
async_add_devices
7
homeassistant/components/insteon/api/device.py
4
Insteon Device Control Panel (#70834) Co-authored-by: Paulus Schoutsen <[email protected]>
299,399
Python
3
0
26
def is_subclassed(layer):
    return (
        layer.__module__.find("keras.engine") == -1
        and layer.__module__.find("keras.layers") == -1
    )
https://github.com/keras-team/keras.git
2
58
84afc5193d38057e2e2badf9c889ea87d80d8fbf
12
base_layer_utils.py
11
5
keras
is_subclassed
35
keras/engine/base_layer_utils.py
10
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
270,861
Python
4
0
32
def _async_device_changed(self, *args, **kwargs) -> None:
    # Don't update disabled entities
    if self.enabled:
        _LOGGER.debug("Event %s (%s)", self.name, CONST_ALARM_CONTROL_PANEL_NAME)
        self.async_write_ha_state()
    else:
        _LOGGER.debug(
            (
                "Device Changed Event for %s (Alarm Control Panel) not fired."
                " Entity is disabled"
            ),
            self.name,
        )
https://github.com/home-assistant/core.git
2
90
cb13418babd21a1e9584978b0c523f1b1e4e1cb0
39
alarm_control_panel.py
13
13
core
_async_device_changed
194
homeassistant/components/homematicip_cloud/alarm_control_panel.py
37
String formatting and max line length - Part 2 (#84393)
297,866
Python
10
0
52
def test_stylesheet_many_classes_dont_overrule_id():
    css = "#id {color: red;} .a.b.c.d {color: blue;}"
    stylesheet = _make_stylesheet(css)
    node = DOMNode(classes="a b c d", id="id")
    stylesheet.apply(node)

    assert node.styles.color == Color(255, 0, 0)
https://github.com/Textualize/textual.git
1
82
4dd0d9fae43583638f34257f97d5749ca4f2c00c
27
test_stylesheet.py
10
6
textual
test_stylesheet_many_classes_dont_overrule_id
45
tests/css/test_stylesheet.py
24
Add various additional tests around CSS specificity
183,841
Python
12
0
47
async def test_check_requesterror(hass, aioclient_mock):
https://github.com/home-assistant/core.git
1
16
b41d0be9522fabda0ac8affd2add6876a66205ea
4
test_config_flow.py
6
18
core
test_check_requesterror
7
tests/components/homewizard/test_config_flow.py
4
Improve HomeWizard request issue reporting (#82366) * Trigger reauth flow when HomeWizard API was disabled * Add tests for reauth flow * Fix typo in test * Add parallel updates constant * Improve error message when device in unreachable during config * Set quality scale * Remove quality scale * Throw error instead of abort when setup fails * Adjust test for new setup behaviour * Trigger reauth flow when API is disabled and continue retrying * Reload entry and raise AuthFailed during init * Abort running config flow * Listen for coordinator updates to trigger reload * Use build-in backoff system * Fix failing test * Test reauth flow is active after disable-api init * Test reauth flow removal
297,532
Python
3
0
112
def to_grams(weight, unit):
    try:
        if weight < 0:
            raise ValueError("Weight must be a positive number")
    except TypeError:
        raise TypeError(f"Invalid value '{weight}' for weight (must be a number)")

    valid_units = WeightUnitChoices.values()
    if unit not in valid_units:
        raise ValueError(f"Unknown unit {unit}. Must be one of the following: {', '.join(valid_units)}")

    if unit == WeightUnitChoices.UNIT_KILOGRAM:
        return weight * 1000
    if unit == WeightUnitChoices.UNIT_GRAM:
        return weight
    if unit == WeightUnitChoices.UNIT_POUND:
        return weight * Decimal(453.592)
    if unit == WeightUnitChoices.UNIT_OUNCE:
        return weight * Decimal(28.3495)

    raise ValueError(f"Unknown unit {unit}. Must be 'kg', 'g', 'lb', 'oz'.")
https://github.com/netbox-community/netbox.git
8
194
204c10c053fddc26ad23ec15a3c60eee38bfc081
87
utils.py
14
18
netbox
to_grams
177
netbox/utilities/utils.py
53
9654 device weight (#10448) * 9654 add weight fields to devices * 9654 changes from code review * 9654 change _abs_weight to grams * Resolve migrations conflict * 9654 code-review changes * 9654 total weight on devices * Misc cleanup Co-authored-by: Jeremy Stretch <[email protected]>
265,772
Python
14
0
106
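A usage sketch for to_grams above, runnable alongside the netbox WeightUnitChoices class it references:

from decimal import Decimal

# 2 lb -> grams; the pound branch above multiplies by Decimal(453.592).
print(to_grams(Decimal("2"), WeightUnitChoices.UNIT_POUND))  # ~907.184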
def _setitem(self, axis, key, value, how="inner"):
https://github.com/modin-project/modin.git
4
26
eddfda4b521366c628596dcb5c21775c7f50eec1
6
query_compiler.py
6
27
modin
_setitem
13
modin/core/storage_formats/pandas/query_compiler.py
6
PERF-#4325: Improve perf of multi-column assignment in `__setitem__` when no new column names are assigning (#4455) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Myachev <[email protected]>
153,944
Python
6
0
168
def test_update_organization_config(self):
    with self.tasks():
        self.assert_setup_flow()

    org = self.organization
    project_id = self.project.id
    enabled_dsn = ProjectKey.get_default(project=Project.objects.get(id=project_id)).get_dsn(
        public=True
    )
    sentry_auth_token = SentryAppInstallationToken.objects.get_token(org.id, "vercel")

    env_var_map = {
        "SENTRY_ORG": {"type": "encrypted", "value": org.slug},
        "SENTRY_PROJECT": {"type": "encrypted", "value": self.project.slug},
        "SENTRY_DSN": {"type": "encrypted", "value": enabled_dsn},
        "SENTRY_AUTH_TOKEN": {"type": "encrypted", "value": sentry_auth_token},
        "VERCEL_GIT_COMMIT_SHA": {"type": "system", "value": "VERCEL_GIT_COMMIT_SHA"},
    }

    # mock get_project API call
    responses.add(
        responses.GET,
        f"{VercelClient.base_url}{VercelClient.GET_PROJECT_URL % self.project_id}",
        json={"link": {"type": "github"}, "framework": "nextjs"},
    )

    # mock create the env vars
    for env_var, details in env_var_map.items():
        responses.add(
            responses.POST,
            f"{VercelClient.base_url}{VercelClient.CREATE_ENV_VAR_URL % self.project_id}",
            json={
                "key": env_var,
                "value": details["value"],
                "target": ["production"],
                "type": details["type"],
            },
        )

    integration = Integration.objects.get(provider=self.provider.key)
    installation = integration.get_installation(org.id)
    org_integration = OrganizationIntegration.objects.get(
        organization_id=org.id, integration_id=integration.id
    )
    assert org_integration.config == {}
    data = {"project_mappings": [[project_id, self.project_id]]}

    installation.update_organization_config(data)
    org_integration = OrganizationIntegration.objects.get(
        organization_id=org.id, integration_id=integration.id
    )
    assert org_integration.config == {"project_mappings": [[project_id, self.project_id]]}

    # assert the env vars were created correctly
    req_params = json.loads(responses.calls[5].request.body)
    assert req_params["key"] == "SENTRY_ORG"
    assert req_params["value"] == org.slug
    assert req_params["target"] == ["production"]
    assert req_params["type"] == "encrypted"

    req_params = json.loads(responses.calls[6].request.body)
    assert req_params["key"] == "SENTRY_PROJECT"
    assert req_params["value"] == self.project.slug
    assert req_params["target"] == ["production"]
    assert req_params["type"] == "encrypted"

    req_params = json.loads(responses.calls[7].request.body)
    assert req_params["key"] == "NEXT_PUBLIC_SENTRY_DSN"
    assert req_params["value"] == enabled_dsn
    assert req_params["target"] == ["production"]
    assert req_params["type"] == "encrypted"

    req_params = json.loads(responses.calls[8].request.body)
    assert req_params["key"] == "SENTRY_AUTH_TOKEN"
    assert req_params["target"] == ["production"]
    assert req_params["type"] == "encrypted"

    req_params = json.loads(responses.calls[9].request.body)
    assert req_params["key"] == "VERCEL_GIT_COMMIT_SHA"
    assert req_params["value"] == "VERCEL_GIT_COMMIT_SHA"
    assert req_params["target"] == ["production"]
    assert req_params["type"] == "system"
https://github.com/getsentry/sentry.git
2
1,025
8201e74ec3d81e89354905c946e62436f0247602
225
test_integration.py
14
68
sentry
test_update_organization_config
858
tests/sentry/integrations/vercel/test_integration.py
107
ref(integrations): Update Vercel endpoints (#36150) This PR updates the endpoints we reach to in the Vercel integration. It seems to work just fine without changes as the payloads returned from vercel haven't updated, but we'll need to specify API Scopes so they don't receive 403s. This also refactored the pagination code to loop 100 at a time, indefinitely I had previously tried to consolidate the project webhooks in this PR, but I'll be doing that separately.
92,181
Python
52
0
566
async def async_step_manual_connection(self, user_input=None):
    errors = {}
    if user_input is not None:
        # We might be able to discover the device via directed UDP
        # in case its on another subnet
        if device := await async_discover_device(
            self.hass, user_input[CONF_ADDRESS]
        ):
            await self.async_set_unique_id(
                dr.format_mac(device.mac_address), raise_on_progress=False
            )
            self._abort_if_unique_id_configured()
            # Ignore the port from discovery since its always going to be
            # 2601 if secure is turned on even though they may want insecure
            user_input[CONF_ADDRESS] = device.ip_address
        errors, result = await self._async_create_or_error(user_input, False)
        if not errors:
            return result

    return self.async_show_form(
        step_id="manual_connection",
        data_schema=vol.Schema(
            {
                **BASE_SCHEMA,
                vol.Required(CONF_ADDRESS): str,
                vol.Optional(CONF_PREFIX, default=""): str,
                vol.Required(
                    CONF_PROTOCOL, default=DEFAULT_SECURE_PROTOCOL
                ): vol.In(ALL_PROTOCOLS),
            }
        ),
        errors=errors,
    )
https://github.com/home-assistant/core.git
4
242
26c5dca45d9b3dee002dfe1549780747e5007e06
104
config_flow.py
16
28
core
async_step_manual_connection
536
homeassistant/components/elkm1/config_flow.py
80
Ensure elkm1 can be manually configured when discovered instance is not used (#67712)
293,197
Python
32
0
153
def on_page_read_source(self, **kwargs):
    return f'{self.config["foo"]} source'
https://github.com/mkdocs/mkdocs.git
1
35
e7f07cc82ab2be920ab426ba07456d8b2592714d
6
plugin_tests.py
9
2
mkdocs
on_page_read_source
20
mkdocs/tests/plugin_tests.py
6
Remove spaces at the ends of docstrings, normalize quotes
224,049
Python
4
0
12
def trustworthiness(X, X_embedded, *, n_neighbors=5, metric="euclidean"):
    r
    n_samples = X.shape[0]
    if n_neighbors >= n_samples / 2:
        raise ValueError(
            f"n_neighbors ({n_neighbors}) should be less than n_samples / 2"
            f" ({n_samples / 2})"
        )
    dist_X = pairwise_distances(X, metric=metric)
    if metric == "precomputed":
        dist_X = dist_X.copy()
    # we set the diagonal to np.inf to exclude the points themselves from
    # their own neighborhood
    np.fill_diagonal(dist_X, np.inf)
    ind_X = np.argsort(dist_X, axis=1)
    # `ind_X[i]` is the index of sorted distances between i and other samples
    ind_X_embedded = (
        NearestNeighbors(n_neighbors=n_neighbors)
        .fit(X_embedded)
        .kneighbors(return_distance=False)
    )

    # We build an inverted index of neighbors in the input space: For sample i,
    # we define `inverted_index[i]` as the inverted index of sorted distances:
    # inverted_index[i][ind_X[i]] = np.arange(1, n_sample + 1)
    inverted_index = np.zeros((n_samples, n_samples), dtype=int)
    ordered_indices = np.arange(n_samples + 1)
    inverted_index[ordered_indices[:-1, np.newaxis], ind_X] = ordered_indices[1:]
    ranks = (
        inverted_index[ordered_indices[:-1, np.newaxis], ind_X_embedded] - n_neighbors
    )
    t = np.sum(ranks[ranks > 0])
    t = 1.0 - t * (
        2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))
    )
    return t
https://github.com/scikit-learn/scikit-learn.git
3
352
ade90145c9c660a1a7baf2315185995899b0f356
173
_t_sne.py
16
84
scikit-learn
trustworthiness
322
sklearn/manifold/_t_sne.py
115
FIX Raise error when n_neighbors >= n_samples / 2 in manifold.trustworthiness (#23033) Co-authored-by: Shao Yang Hong <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
259,640
Python
32
0
228
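Written out, the quantity assembled in the last lines of trustworthiness above is the standard trustworthiness score:

T(k) = 1 - \frac{2}{n k (2n - 3k - 1)} \sum_{i=1}^{n} \sum_{j \in \mathcal{N}_{i}^{k}} \max\bigl(0,\; r(i, j) - k\bigr)

where \mathcal{N}_{i}^{k} are the k nearest neighbors of sample i in the embedded space and r(i, j) is the rank of j among the input-space neighbors of i; only neighbors that fall outside the true top-k (the positive ranks in the code) are penalized.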
def start(self):
    if self.actors and len(self.actors) > 0:
        raise RuntimeError(
            "The actors have already been started. "
            "Please call `shutdown` first if you want to "
            "restart them."
        )

    logger.debug(f"Starting {self.num_actors} actors.")
    self.add_actors(self.num_actors)
    logger.debug(f"{len(self.actors)} actors have successfully started.")
https://github.com/ray-project/ray.git
3
108
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
38
actor_group.py
12
10
ray
start
140
python/ray/util/actor_group.py
34
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
132,895
Python
9
0
49
def _detect_checkpoint_function(train_func, abort=False, partial=False):
    func_sig = inspect.signature(train_func)
    validated = True
    try:
        # check if signature is func(config, checkpoint_dir=None)
        if partial:
            func_sig.bind_partial({}, checkpoint_dir="tmp/path")
        else:
            func_sig.bind({}, checkpoint_dir="tmp/path")
    except Exception as e:
        logger.debug(str(e))
        validated = False
    if abort and not validated:
        func_args = inspect.getfullargspec(train_func).args
        raise ValueError(
            "Provided training function must have 2 args "
            "in the signature, and the latter arg must "
            "contain `checkpoint_dir`. For example: "
            "`func(config, checkpoint_dir=None)`. Got {}".format(func_args)
        )
    return validated
https://github.com/ray-project/ray.git
5
179
eb69c1ca286a2eec594f02ddaf546657a8127afd
72
util.py
14
20
ray
_detect_checkpoint_function
215
python/ray/tune/utils/util.py
59
[air] Add annotation for Tune module. (#27060) Co-authored-by: Kai Fricke <[email protected]>
126,264
Python
21
0
102
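A sketch of a trainable that passes the validation in _detect_checkpoint_function above; the required signature is spelled out in the function's own error message:

def train_func(config, checkpoint_dir=None):
    pass  # minimal Tune-style training function

assert _detect_checkpoint_function(train_func)  # binds cleanly
assert not _detect_checkpoint_function(lambda config: None)  # missing checkpoint_dir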
def should_log(self):
    if self.log_on_each_node:
        return self.local_process_index == 0
    else:
        return self.process_index == 0
https://github.com/PaddlePaddle/PaddleNLP.git
2
43
44a290e94d1becd1f09fddc3d873f9e19c9d6919
13
trainer_args.py
10
5
PaddleNLP
should_log
56
paddlenlp/trainer/trainer_args.py
10
[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761) * add some datasets for finetune. * support fine tune for all tastks. * add trainer prototype. * init verison for paddlenlp trainer. * refine trainer. * update for some details. * support multi-cards training evaluation. * support load from ckpt. * support for export inference model. * first version of trainer. * seq cls support clue. * trainer support for token classification and question answersing tasks. * fix as reviews. Co-authored-by: Zeyu Chen <[email protected]>
323,123
Python
5
0
25
async def test_all_optional_config(hass):
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "number": {
                        "state": "{{ 4 }}",
                        "set_value": {"service": "script.set_value"},
                        "min": "{{ 3 }}",
                        "max": "{{ 5 }}",
                        "step": "{{ 1 }}",
                    }
                }
            },
        )

    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()

    _verify(hass, 4, 1, 3, 5)
https://github.com/home-assistant/core.git
1
169
b70e97e949ca73fe57849625c0b0c51f0b8796f7
50
test_number.py
19
21
core
test_all_optional_config
309
tests/components/template/test_number.py
37
Remove unused calls fixture from template tests (#71735)
300,405
Python
8
0
90
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds) -> Axes: plot_backend = _get_plot_backend("matplotlib") return plot_backend.radviz( frame=frame, class_column=class_column, ax=ax, color=color, colormap=colormap, **kwds, )
https://github.com/pandas-dev/pandas.git
1
88
4bb1fd50a63badd38b5d96d9c4323dae7bc36d8d
21
_misc.py
9
79
pandas
radviz
75
pandas/plotting/_misc.py
21
TYP: Missing return annotations in util/tseries/plotting (#47510) * TYP: Missing return annotations in util/tseries/plotting * the more tricky parts
167,393
Python
10
0
60
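radviz here is a thin dispatch into the matplotlib plotting backend. A small usage sketch against the public pandas API (the toy DataFrame and column names are illustrative):

import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import radviz

df = pd.DataFrame(
    {
        "sepal_length": [5.1, 4.9, 6.3, 6.5],
        "sepal_width": [3.5, 3.0, 2.9, 3.0],
        "species": ["setosa", "setosa", "virginica", "virginica"],
    }
)
ax = radviz(df, class_column="species")  # returns a matplotlib Axes
plt.show()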
def download_file(url, path): # type: (str, str) -> None with open(to_bytes(path), 'wb') as saved_file: download = urlopen(url) shutil.copyfileobj(download, saved_file)
https://github.com/ansible/ansible.git
1
63
68fb3bf90efa3a722ba5ab7d66b1b22adc73198c
19
requirements.py
12
4
ansible
download_file
40
test/lib/ansible_test/_util/target/setup/requirements.py
19
ansible-test - Fix consistency of managed venvs. (#77028)
266,663
Python
10
0
35
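download_file streams the response body with shutil.copyfileobj instead of reading it whole. A self-contained stdlib equivalent (the URL and path in the usage line are placeholders):

import shutil
from urllib.request import urlopen

def download_file(url: str, path: str) -> None:
    # copyfileobj copies in fixed-size chunks, so large downloads
    # never have to fit in memory at once.
    with urlopen(url) as response, open(path, "wb") as saved_file:
        shutil.copyfileobj(response, saved_file)

# download_file("https://example.com/archive.tar.gz", "/tmp/archive.tar.gz")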
def extract_bucket_name(config): return config["artifact_destination"]["output_uri_prefix"].rpartition("gs://")[-1]
https://github.com/apache/airflow.git
1
44
ca4b8d1744cd1de9b6af97dacb0e03de0f014006
4
vertex_ai.py
11
2
airflow
extract_bucket_name
18
airflow/providers/google/cloud/links/vertex_ai.py
4
Create Endpoint and Model Service, Batch Prediction and Hyperparameter Tuning Jobs operators for Vertex AI service (#22088)
46,474
Python
3
0
23
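The rpartition("gs://")[-1] idiom keeps everything after the last gs:// prefix, so the result includes any object prefix after the bucket name, not just the bucket itself. A worked example with an assumed config shape:

config = {"artifact_destination": {"output_uri_prefix": "gs://my-bucket/models/run-1"}}
bucket_and_prefix = config["artifact_destination"]["output_uri_prefix"].rpartition("gs://")[-1]
print(bucket_and_prefix)  # -> 'my-bucket/models/run-1'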
def export_kubernetes(args): Flow.load_config(args.flowpath).to_kubernetes_yaml( output_base_path=args.outpath, k8s_namespace=args.k8s_namespace )
https://github.com/jina-ai/jina.git
1
48
16b16b07a66cd5a8fc7cca1d3f1c378a9c63d38c
6
exporter.py
10
4
jina
export_kubernetes
22
jina/exporter.py
6
refactor: rename cli to jina_cli (#4890) * chore: fix readme * chore: fix readme * chore: fix dockerignore * fix: #4845 * style: fix overload and cli autocomplete * fix: cicd export cli Co-authored-by: Jina Dev Bot <[email protected]>
12,472
Python
9
0
29
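A usage sketch mirroring the call above, assuming Jina's Flow.load_config / to_kubernetes_yaml API as used in this row (the file name and namespace are placeholders):

from jina import Flow

# Load a Flow definition and emit Kubernetes YAML, as export_kubernetes(args) does.
Flow.load_config("flow.yml").to_kubernetes_yaml(
    output_base_path="./k8s-export",
    k8s_namespace="demo",
)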
def expand_frame(self, frame, source_context=None, source=None): if frame.get("lineno") is None: return False if source_context is None: source = source or self.get_sourceview(frame["abs_path"]) if source is None: logger.debug("No source found for %s", frame["abs_path"]) return False (pre_context, context_line, post_context) = source_context or get_raw_source_context( source=source, lineno=frame["lineno"] ) if pre_context is not None and len(pre_context) > 0: frame["pre_context"] = [trim_line(x) for x in pre_context] if context_line is not None: frame["context_line"] = trim_line(context_line, frame.get("colno") or 0) if post_context is not None and len(post_context) > 0: frame["post_context"] = [trim_line(x) for x in post_context] return True
https://github.com/getsentry/sentry.git
14
272
ae9c0d8a33d509d9719a5a03e06c9797741877e9
87
processor.py
14
18
sentry
expand_frame
257
src/sentry/lang/javascript/processor.py
50
ref(processor): Use symbolic-sourcemapcache for JavaScript Sourcemap processing (#38551) This PR attempts to replace the currently used `rust-sourcemap` crate and it's symbolic python bindings, with `symbolic-sourcemapcache` crate. It makes the whole processing pipeline easier to maintain, as it pushes some work directly to Symbolic, as well as we get better function names due to better scope resolution and in some cases better file URLs. Other than that, we don't use `SourceView` anymore, as it seemed like an unnecessary layer of abstraction for something that is used only for `context_lines` extraction. We cache `utf-8` decoded sources directly now, as this way we can encode them only once for `SmCache` instance initialization, and use the source directly otherwise for context lines extraction. Some tests had to updated to express current behavior. The notable thing is `useless_fn_names = ["<anonymous>", "__webpack_require__", "__webpack_modules__"]`, which is mostly for `production` mode of webpack, that by default trims all the function names, and we decided to fallback to the minified names in those cases instead (this was already the old behavior). It should be possible to extract something better, but we'd need to parse all `sourceContents` from sourcemap to do that, as the only thing we can get better function name for the case mentioned above, is if we look at the right-hand side of default node export, in form of `module.exports = function foo () {}`. This should give us `foo`, yet the only thing we can extract is `module.exports`, as minified form of this expression in webpack production mode is `module.exports = function () {}`.
86,267
Python
17
0
169
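expand_frame surrounds a stack frame's line with trimmed pre/post context pulled from the raw source. A simplified re-creation of the raw-context step (this get_raw_source_context is a hypothetical stand-in, not Sentry's implementation):

def get_raw_source_context(source_lines, lineno, context=5):
    """Return (pre_context, context_line, post_context) around a 1-based lineno."""
    idx = lineno - 1
    pre = source_lines[max(0, idx - context):idx]
    line = source_lines[idx] if 0 <= idx < len(source_lines) else None
    post = source_lines[idx + 1:idx + 1 + context]
    return pre, line, post

src = ["function a() {", "  throw new Error('boom');", "}"]
print(get_raw_source_context(src, lineno=2, context=1))
# -> (['function a() {'], "  throw new Error('boom');", ['}'])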
