column          dtype           min      max
complexity      int64           1        139
fun_name        stringlengths   1        80
code            stringlengths   101      62.2k
commit_id       stringlengths   40       40
ast_errors      stringlengths   0        3.11k
ast_levels      int64           6        36
file_name       stringlengths   5        79
n_ast_nodes     int64           17       19.2k
commit_message  stringlengths   3        15.3k
d_id            int64           12       121k
n_ast_errors    int64           0        9
n_whitespaces   int64           4        10.8k
token_counts    int64           5        3.06k
vocab_size      int64           4        1.11k
id              int64           20       338k
n_words         int64           4        4.82k
repo            stringlengths   3        22
n_identifiers   int64           2        176
path            stringlengths   7        134
language        stringclasses   1 value
nloc            int64           1        413
documentation   dict
url             stringlengths   31       59
4
__seqToRE
def __seqToRE(self, to_convert, directive):
    to_convert = sorted(to_convert, key=len, reverse=True)
    for value in to_convert:
        if value != '':
            break
    else:
        return ''
    regex = '|'.join(re_escape(stuff) for stuff in to_convert)
    regex = '(?P<%s>%s' % (directive, regex)
    return '%s)' % regex
8198943edd73a363c266633e1aa5b2a9e9c9f526
10
_strptime.py
113
add python 3.10.4 for windows
55,908
0
124
67
29
219,940
38
XX-Net
13
python3.10.4/Lib/_strptime.py
Python
10
{ "docstring": "Convert a list to a regex string for matching a directive.\n\n Want possible matching values to be from longest to shortest. This\n prevents the possibility of a match occurring for a value that also\n a substring of a larger value that should have matched (e.g., 'abc'\n matching when 'abcdef' should have been the match).\n\n ", "language": "en", "n_whitespaces": 90, "n_words": 54, "vocab_size": 37 }
https://github.com/XX-net/XX-Net.git
1
node_id
def node_id(self):
    node_id = self.worker.current_node_id
    assert not node_id.is_nil()
    return node_id
8ffe435173aee0f313f786e7301d05f608b6d5be
8
runtime_context.py
39
[core/docs] Update worker docstring (#28495) Co-authored-by: Philipp Moritz <[email protected]>
28,506
0
38
22
9
127,694
10
ray
5
python/ray/runtime_context.py
Python
4
{ "docstring": "Get current node ID for this worker or driver.\n\n Node ID is the id of a node that your driver, task, or actor runs.\n\n Returns:\n A node id for this worker or driver.\n ", "language": "en", "n_whitespaces": 65, "n_words": 33, "vocab_size": 23 }
https://github.com/ray-project/ray.git
1
test_loading_configuration_from_packages
async def test_loading_configuration_from_packages(hass):
    await config_util.async_process_ha_core_config(
        hass,
        {
            "latitude": 39,
            "longitude": -1,
            "elevation": 500,
            "name": "Huis",
            CONF_UNIT_SYSTEM: CONF_UNIT_SYSTEM_METRIC,
            "time_zone": "Europe/Madrid",
            "external_url": "https://www.example.com",
            "internal_url": "http://example.local",
            "packages": {
                "package_1": {"wake_on_lan": None},
                "package_2": {
                    "light": {"platform": "hue"},
                    "media_extractor": None,
                    "sun": None,
                },
            },
        },
    )

    # Empty packages not allowed
    with pytest.raises(MultipleInvalid):
        await config_util.async_process_ha_core_config(
            hass,
            {
                "latitude": 39,
                "longitude": -1,
                "elevation": 500,
                "name": "Huis",
                CONF_UNIT_SYSTEM: CONF_UNIT_SYSTEM_METRIC,
                "time_zone": "Europe/Madrid",
                "packages": {"empty_package": None},
            },
        )


@patch("homeassistant.helpers.check_config.async_check_ha_config_file")
8dc3ff72c6ff6f9591151759465ac5d8a20e2b2f
@patch("homeassistant.helpers.check_config.async_check_ha_config_file")
17
test_config.py
264
Cleanup config deprecation warning (#80251)
88,164
1
469
136
45
289,015
70
core
10
tests/test_config.py
Python
35
{ "docstring": "Test loading packages config onto hass object config.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
1
test_torchscript_bart_agent
def test_torchscript_bart_agent(self):
    from parlai.scripts.torchscript import TorchScript

    test_phrase = "Don't have a cow, man!"  # From test_bart.py
    with testing_utils.tempdir() as tmpdir:
        scripted_model_file = os.path.join(tmpdir, 'scripted_model.pt')

        # Export the BART model
        export_opt = TorchScript.setup_args().parse_kwargs(
            model='bart', scripted_model_file=scripted_model_file, no_cuda=True
        )
        TorchScript(export_opt).run()

        # Test the scripted BART model
        scripted_opt = ParlaiParser(True, True).parse_kwargs(
            model='parlai.torchscript.agents:TorchScriptAgent',
            model_file=scripted_model_file,
        )
        bart = create_agent(scripted_opt)
        bart.observe({'text': test_phrase, 'episode_done': True})
        act = bart.act()
        self.assertEqual(act['text'], test_phrase)
73a395fd457d5f56463f4f72e5b65919db3f1f96
12
test_torchscript.py
217
Add TorchScriptable transformer classifier and subword BPE tokenizer (#4566) * Add torchscript classes for Transformer classifier and subword BPE tokenizer * Add unit tests * Remove debugging code * Resolve comments * Rerun autoformat.sh * Update parlai/torchscript/tokenizer.py Co-authored-by: Eric Smith <[email protected]> Co-authored-by: Eric Smith <[email protected]>
47,209
0
268
126
51
195,124
62
ParlAI
28
tests/nightly/gpu/test_torchscript.py
Python
17
{ "docstring": "\n Test exporting a BART model to TorchScript and then testing it on sample data.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 14 }
https://github.com/facebookresearch/ParlAI.git
1
set_context
def set_context(context=None, font_scale=1, rc=None):
    context_object = plotting_context(context, font_scale, rc)
    mpl.rcParams.update(context_object)
34662f4be5c364e7518f9c1118c9b362038ee5dd
8
rcmod.py
53
Convert docs to pydata-sphinx-theme and add new material (#2842) * Do basic conversion of site to pydata_sphinx_theme * Remove some pae structure customizations we no longer need * Add some custom CSS * Tweak a few more colors * Remove vestigial div closing tag * Reorganize release notes into hierarchical pages * Rebuild full docs and fix some resulting issues * Make release note doc refs absolute * Convert homepage to use sphinx-design instead of hand-crafted html * Remove original custom css * Simplify header and put archive switcher in footer * Streamline API docs for objects * Play around with templates to fix shrinking content (not perfect yet) * Improve use of horizontal space without sidebars * Various tweaks * Convert tutorial homepage source to native sphinx-design directives * Move intro page into tutorial * More tweaks * Tweak theme colors and footer * Remove reference to navbar version * Note that error bar tutorial demonstrates new features as of v0.12 * Update layout customization for new theme features * Various layout and CSS tweaks * Narrow support guidance to StackOverflow * Run all notebooks * Adapt to new dropdown navbar in pydata theme * Separate tutorial source and outputs * Separate dostring source and outputs * Add scale API template * Update API docs * Fix requirements * Add new objects * Point doc requirements at v0.10 RC for theme
7,474
0
19
34
10
42,070
10
seaborn
9
seaborn/rcmod.py
Python
3
{ "docstring": "\n Set the parameters that control the scaling of plot elements.\n\n This affects things like the size of the labels, lines, and other elements\n of the plot, but not the overall style. This is accomplished using the\n matplotlib rcParams system.\n\n The base context is \"notebook\", and the other contexts are \"paper\", \"talk\",\n and \"poster\", which are version of the notebook parameters scaled by different\n values. Font elements can also be scaled independently of (but relative to)\n the other values.\n\n See :func:`plotting_context` to get the parameter values.\n\n Parameters\n ----------\n context : dict, or one of {paper, notebook, talk, poster}\n A dictionary of parameters or the name of a preconfigured set.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n context dictionaries. This only updates parameters that are\n considered part of the context definition.\n\n Examples\n --------\n\n .. include:: ../docstrings/set_context.rst\n\n ", "language": "en", "n_whitespaces": 254, "n_words": 157, "vocab_size": 101 }
https://github.com/mwaskom/seaborn.git
5
_process_sections
def _process_sections(cls, tree, sections, category, is_open):
    seen = set()
    for section in sections:
        if section[-1] == "global":  # Global categories get escalated to parent
            continue
        sect = section[0]
        section_id = f"{category}|{sect}"
        if sect not in seen:
            seen.add(sect)
            text = sect.replace("_", " ").title()
            tree.insert(category, "end", section_id, text=text, open=is_open, tags="section")
        if len(section) == 2:
            opt = section[-1]
            opt_id = f"{section_id}|{opt}"
            opt_text = opt.replace("_", " ").title()
            tree.insert(section_id, "end", opt_id, text=opt_text, open=is_open, tags="option")
8ab085fae0193bb507fd5ad582668d19d56bea3d
15
popup_configure.py
256
bugfix: gui - settings popup. Always reload config
19,985
0
266
147
53
100,521
69
faceswap
22
lib/gui/popup_configure.py
Python
16
{ "docstring": " Process the sections of a category's configuration.\n\n Creates a category's sections, then the sub options for that category\n\n Parameters\n ----------\n tree: :class:`tkinter.ttk.TreeView`\n The tree view to insert sections into\n sections: list\n The sections to insert into the Tree View\n category: str\n The category node that these sections sit in\n is_open: bool\n ``True`` if the node should be created in \"open\" mode. ``False`` if it should be\n closed.\n ", "language": "en", "n_whitespaces": 179, "n_words": 67, "vocab_size": 47 }
https://github.com/deepfakes/faceswap.git
1
corners_to_center_format
def corners_to_center_format(x):
    x_transposed = x.T
    x0, y0, x1, y1 = x_transposed[0], x_transposed[1], x_transposed[2], x_transposed[3]
    b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]
    return np.stack(b, axis=-1)


# Copied from transformers.models.detr.feature_extraction_detr.masks_to_boxes
1ac698744c4dbdf1495d303246d08ffacdf4f5b8
10
feature_extraction_yolos.py
119
Add YOLOS (#16848) * First draft * Add YolosForObjectDetection * Make forward pass work * Add mid position embeddings * Add interpolation of position encodings * Add expected values * Add YOLOS to tests * Add integration test * Support tiny model as well * Support all models in conversion script * Remove mid_pe_size attribute * Make more tests pass * Add model to README and fix config * Add copied from statements * Rename base_model_prefix to vit * Add missing YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP * Apply suggestions from code review * Apply more suggestions from code review * Convert remaining checkpoints * Improve docstrings * Add YolosFeatureExtractor * Add feature extractor to docs * Add corresponding tests * Fix style * Fix docs * Apply suggestion from code review * Fix bad rebase * Fix some more bad rebase * Fix missing character * Improve docs and variable names Co-authored-by: Niels Rogge <[email protected]>
6,837
0
53
81
33
37,626
39
transformers
12
src/transformers/models/yolos/feature_extraction_yolos.py
Python
5
{ "docstring": "\n Converts a NumPy array of bounding boxes of shape (number of bounding boxes, 4) of corners format (x_0, y_0, x_1,\n y_1) to center format (center_x, center_y, width, height).\n ", "language": "en", "n_whitespaces": 38, "n_words": 28, "vocab_size": 23 }
https://github.com/huggingface/transformers.git
8
get_attributes_and_values
def get_attributes_and_values(item_code):
    item_cache = ItemVariantsCacheManager(item_code)
    item_variants_data = item_cache.get_item_variants_data()

    attributes = get_item_attributes(item_code)
    attribute_list = [a.attribute for a in attributes]

    valid_options = {}
    for item_code, attribute, attribute_value in item_variants_data:
        if attribute in attribute_list:
            valid_options.setdefault(attribute, set()).add(attribute_value)

    item_attribute_values = frappe.db.get_all(
        "Item Attribute Value", ["parent", "attribute_value", "idx"], order_by="parent asc, idx asc"
    )
    ordered_attribute_value_map = frappe._dict()
    for iv in item_attribute_values:
        ordered_attribute_value_map.setdefault(iv.parent, []).append(iv.attribute_value)

    # build attribute values in idx order
    for attr in attributes:
        valid_attribute_values = valid_options.get(attr.attribute, [])
        ordered_values = ordered_attribute_value_map.get(attr.attribute, [])
        attr["values"] = [v for v in ordered_values if v in valid_attribute_values]

    return attributes


@frappe.whitelist(allow_guest=True)
494bd9ef78313436f0424b918f200dab8fc7c20b
@frappe.whitelist(allow_guest=True)
15
utils.py
290
style: format code with black
14,028
1
67
170
62
65,836
89
erpnext
33
erpnext/e_commerce/variant_selector/utils.py
Python
20
{ "docstring": "Build a list of attributes and their possible values.\n\tThis will ignore the values upon selection of which there cannot exist one item.\n\t", "language": "en", "n_whitespaces": 21, "n_words": 23, "vocab_size": 22 }
https://github.com/frappe/erpnext.git
3
test_run
def test_run(self) -> Tuple[Dict, Dict]:
    print(" | > Synthesizing test sentences.")
    test_audios = {}
    test_figures = {}
    test_sentences = self.config.test_sentences
    for idx, s_info in enumerate(test_sentences):
        try:
            aux_inputs = self.get_aux_input_from_test_sentences(s_info)
            wav, alignment, _, _ = synthesis(
                self,
                aux_inputs["text"],
                self.config,
                "cuda" in str(next(self.parameters()).device),
                ap,
                speaker_id=aux_inputs["speaker_id"],
                d_vector=aux_inputs["d_vector"],
                style_wav=aux_inputs["style_wav"],
                language_id=aux_inputs["language_id"],
                language_name=aux_inputs["language_name"],
                enable_eos_bos_chars=self.config.enable_eos_bos_chars,
                use_griffin_lim=True,
                do_trim_silence=False,
            ).values()
            test_audios["{}-audio".format(idx)] = wav
            test_figures["{}-alignment".format(idx)] = plot_alignment(alignment.T, output_fig=False)
        except:  # pylint: disable=bare-except
            print(" !! Error creating Test Sentence -", idx)
    return test_figures, test_audios
ea965a5683c56a39570b4cc91e86cd2bb9799308
22
vits.py
304
Update VITS for the new API
77,125
0
482
190
63
262,100
73
TTS
36
TTS/tts/models/vits.py
Python
35
{ "docstring": "Generic test run for `tts` models used by `Trainer`.\n\n You can override this for a different behaviour.\n\n Returns:\n Tuple[Dict, Dict]: Test figures and audios to be projected to Tensorboard.\n ", "language": "en", "n_whitespaces": 61, "n_words": 29, "vocab_size": 27 }
https://github.com/coqui-ai/TTS.git
8
read_image_batch
def read_image_batch(filenames, with_metadata=False):
    logger.trace("Requested batch: '%s'", filenames)
    batch = [None for _ in range(len(filenames))]
    if with_metadata:
        meta = [None for _ in range(len(filenames))]
    with futures.ThreadPoolExecutor() as executor:
        images = {executor.submit(read_image, filename, raise_error=True, with_metadata=with_metadata): idx
                  for idx, filename in enumerate(filenames)}
        for future in futures.as_completed(images):
            ret_idx = images[future]
            if with_metadata:
                batch[ret_idx], meta[ret_idx] = future.result()
            else:
                batch[ret_idx] = future.result()
    batch = np.array(batch)
    retval = (batch, meta) if with_metadata else batch
    logger.trace("Returning images: (filenames: %s, batch shape: %s, with_metadata: %s)",
                 filenames, batch.shape, with_metadata)
    return retval
2beceffad9b15c1fd78f06b9b272563321c5a41e
15
image.py
269
Data Augmentation update (#1263) - lib.detected_face - Subclass Masks for Landmark based masks - Add training mask propery + methods to DetectedFace - lib.training_training - subclass TrainingDataGenerator for training and preview data - Split cache into own module - Reduce thread count to 1 to prevent image corruption + data re-use - Process on largest model input/output size rather than stored image size - Size and crop masks during caching stage - Implement ring buffer for data flow - Fix preview reload bug - augmentation - typing - switch color aug order - better initialization - Fix warp + landmark warp to correctly apply at different image scales - Slightly improved warp caching - Don't store whether image is_preview. Handle all data as training images implicitly - plugins.trainer: Typing and fixes to work with trainingdata refactor
20,699
0
258
172
55
101,280
81
faceswap
28
lib/image.py
Python
20
{ "docstring": " Load a batch of images from the given file locations.\n\n Leverages multi-threading to load multiple images from disk at the same time leading to vastly\n reduced image read times.\n\n Parameters\n ----------\n filenames: list\n A list of ``str`` full paths to the images to be loaded.\n with_metadata: bool, optional\n Only returns a value if the images loaded are extracted Faceswap faces. If ``True`` then\n returns the Faceswap metadata stored with in a Face images .png exif header.\n Default: ``False``\n\n Returns\n -------\n numpy.ndarray\n The batch of images in `BGR` channel order returned in the order of :attr:`filenames`\n\n Notes\n -----\n As the images are compiled into a batch, they must be all of the same dimensions.\n\n Example\n -------\n >>> image_filenames = [\"/path/to/image_1.png\", \"/path/to/image_2.png\", \"/path/to/image_3.png\"]\n >>> images = read_image_batch(image_filenames)\n ", "language": "en", "n_whitespaces": 212, "n_words": 125, "vocab_size": 87 }
https://github.com/deepfakes/faceswap.git
3
get_instance
def get_instance(self, value):
    if value is None:
        return None
    elif isinstance(value, self.model):
        return value
    else:
        # assume instance ID
        return self.model.objects.get(pk=value)
39f7886a6f8ee98db7e73ce33d94c06139f35bd8
12
chooser.py
69
Split out common logic from get_value_data
16,671
0
83
42
18
77,545
21
wagtail
8
wagtail/admin/widgets/chooser.py
Python
7
{ "docstring": "\n Given a value passed to this widget for rendering (which may be None, an id, or a model\n instance), return a model instance or None\n ", "language": "en", "n_whitespaces": 47, "n_words": 25, "vocab_size": 21 }
https://github.com/wagtail/wagtail.git
1
test_change_view_save_as_new
def test_change_view_save_as_new(self):
    change_dict_save_as_new = {
        "_saveasnew": "Save as new",
        "title": "Ikke fordømt",
        "content": "<p>edited article</p>",
        "date_0": "2008-03-18",
        "date_1": "10:54:39",
        "section": self.s1.pk,
    }
    article_change_url = reverse(
        "admin:admin_views_article_change", args=(self.a1.pk,)
    )

    # Add user can perform "Save as new".
    article_count = Article.objects.count()
    self.client.force_login(self.adduser)
    post = self.client.post(article_change_url, change_dict_save_as_new)
    self.assertRedirects(post, self.index_url)
    self.assertEqual(Article.objects.count(), article_count + 1)
    self.client.logout()

    # Change user cannot perform "Save as new" (no 'add' permission).
    article_count = Article.objects.count()
    self.client.force_login(self.changeuser)
    post = self.client.post(article_change_url, change_dict_save_as_new)
    self.assertEqual(post.status_code, 403)
    self.assertEqual(Article.objects.count(), article_count)

    # User with both add and change permissions should be redirected to the
    # change page for the newly created object.
    article_count = Article.objects.count()
    self.client.force_login(self.superuser)
    post = self.client.post(article_change_url, change_dict_save_as_new)
    self.assertEqual(Article.objects.count(), article_count + 1)
    new_article = Article.objects.latest("id")
    self.assertRedirects(
        post, reverse("admin:admin_views_article_change", args=(new_article.pk,))
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
tests.py
419
Refs #33476 -- Reformatted code with Black.
52,024
0
392
251
79
207,623
115
django
26
tests/admin_views/tests.py
Python
31
{ "docstring": "\n 'Save as new' should raise PermissionDenied for users without the 'add'\n permission.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
https://github.com/django/django.git
3
estimated_run_time
def estimated_run_time(self):
    if self.state and self.state_type == schemas.states.StateType.RUNNING:
        return self.total_run_time + (pendulum.now() - self.state.timestamp)
    else:
        return self.total_run_time
29f4c75e3a605e29987e856204a2441b31f4a6df
12
orm_models.py
77
Repair imports as needed
10,820
0
60
47
15
53,477
17
prefect
12
src/prefect/orion/database/orm_models.py
Python
5
{ "docstring": "Total run time is incremented in the database whenever a RUNNING\n state is exited. To give up-to-date estimates, we estimate incremental\n run time for any runs currently in a RUNNING state.", "language": "en", "n_whitespaces": 44, "n_words": 31, "vocab_size": 25 }
https://github.com/PrefectHQ/prefect.git
6
from_rgs
def from_rgs(self, rgs, elements):
    if len(rgs) != len(elements):
        raise ValueError('mismatch in rgs and element lengths')
    max_elem = max(rgs) + 1
    partition = [[] for i in range(max_elem)]
    j = 0
    for i in rgs:
        partition[i].append(elements[j])
        j += 1
    if not all(p for p in partition):
        raise ValueError('some blocks of the partition were empty.')
    return Partition(*partition)
498015021131af4dbb07eb110e5badaba8250c7b
10
partitions.py
150
Updated import locations
47,604
0
155
92
42
196,104
55
sympy
16
sympy/combinatorics/partitions.py
Python
12
{ "docstring": "\n Creates a set partition from a restricted growth string.\n\n Explanation\n ===========\n\n The indices given in rgs are assumed to be the index\n of the element as given in elements *as provided* (the\n elements are not sorted by this routine). Block numbering\n starts from 0. If any block was not referenced in ``rgs``\n an error will be raised.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Partition\n >>> Partition.from_rgs([0, 1, 2, 0, 1], list('abcde'))\n Partition({c}, {a, d}, {b, e})\n >>> Partition.from_rgs([0, 1, 2, 0, 1], list('cbead'))\n Partition({e}, {a, c}, {b, d})\n >>> a = Partition([1, 4], [2], [3, 5])\n >>> Partition.from_rgs(a.RGS, a.members)\n Partition({2}, {1, 4}, {3, 5})\n ", "language": "en", "n_whitespaces": 238, "n_words": 104, "vocab_size": 81 }
https://github.com/sympy/sympy.git
2
get_chunk
def get_chunk(self, size=None) -> pd.DataFrame:
    if size is None:
        size = self._chunksize
    return self.read(nrows=size)
e48c9c3973286e257f6da1966c91806d86b917e0
9
sas_xport.py
54
TYP: more return annotations for io/* (#47524) * TYP: more return annotations for io/* * import future
40,022
0
46
33
13
167,451
14
pandas
8
pandas/io/sas/sas_xport.py
Python
16
{ "docstring": "\n Reads lines from Xport file and returns as dataframe\n\n Parameters\n ----------\n size : int, defaults to None\n Number of lines to read. If None, reads whole file.\n\n Returns\n -------\n DataFrame\n ", "language": "en", "n_whitespaces": 99, "n_words": 30, "vocab_size": 28 }
https://github.com/pandas-dev/pandas.git
1
iterate_graycode
def iterate_graycode(self, k):
    unranked_code = GrayCode.unrank(self.superset_size,
                                    (self.rank_gray + k) % self.cardinality)
    return Subset.subset_from_bitlist(self.superset,
                                      unranked_code)
498015021131af4dbb07eb110e5badaba8250c7b
12
subsets.py
64
Updated import locations
47,707
0
114
41
14
196,207
14
sympy
12
sympy/combinatorics/subsets.py
Python
5
{ "docstring": "\n Helper function used for prev_gray and next_gray.\n It performs ``k`` step overs to get the respective Gray codes.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Subset\n >>> a = Subset([1, 2, 3], [1, 2, 3, 4])\n >>> a.iterate_graycode(3).subset\n [1, 4]\n >>> a.iterate_graycode(-2).subset\n [1, 2, 4]\n\n See Also\n ========\n\n next_gray, prev_gray\n ", "language": "en", "n_whitespaces": 148, "n_words": 49, "vocab_size": 39 }
https://github.com/sympy/sympy.git
6
__getattr__
def __getattr__(self, name):
    if self._wrapped is empty:
        self._setup(name)
    val = getattr(self._wrapped, name)

    # Special case some settings which require further modification.
    # This is done here for performance reasons so the modified value is cached.
    if name in {"MEDIA_URL", "STATIC_URL"} and val is not None:
        val = self._add_script_prefix(val)
    elif name == "SECRET_KEY" and not val:
        raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")

    self.__dict__[name] = val
    return val
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
__init__.py
130
Refs #33476 -- Reformatted code with Black.
50,295
0
164
76
53
203,310
68
django
11
django/conf/__init__.py
Python
10
{ "docstring": "Return the value of a setting and cache it in self.__dict__.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/django/django.git
1
test_escape_sequence_resulting_in_multiple_keypresses
def test_escape_sequence_resulting_in_multiple_keypresses(parser):
    events = list(parser.feed("\x1b[2;4~"))
    assert len(events) == 2
    assert events[0].key == "escape"
    assert events[1].key == "shift+insert"
bfb962bacf274373e5706090cd854b6aa0857270
11
test_xterm_parser.py
75
Backtracking unknown escape sequences, various tests for XTermParser
44,334
0
32
42
13
183,781
17
textual
7
tests/test_xterm_parser.py
Python
5
{ "docstring": "Some sequences are interpreted as more than 1 keypress", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/Textualize/textual.git
3
all_data
def all_data(request, data, data_missing):
    if request.param == "data":
        return data
    elif request.param == "data_missing":
        return data_missing


@pytest.fixture
d40c37192b3d38a3c9a0d248dc612ac9dd56e506
@pytest.fixture
8
test_arrow.py
56
ENH/TST: Add BaseInterfaceTests tests for ArrowExtensionArray (#47377)
39,962
1
39
28
14
167,274
17
pandas
7
pandas/tests/extension/test_arrow.py
Python
5
{ "docstring": "Parametrized fixture returning 'data' or 'data_missing' integer arrays.\n\n Used to test dtype conversion with and without missing values.\n ", "language": "en", "n_whitespaces": 24, "n_words": 18, "vocab_size": 18 }
https://github.com/pandas-dev/pandas.git
4
encode_maxrespcode
def encode_maxrespcode(self):
    value = self.mrcode
    if value < 128:
        code = value
    elif value > 31743:
        code = 255
    else:
        exp = 0
        value >>= 3
        while value > 31:
            exp += 1
            value >>= 1
        exp <<= 4
        code = 0x80 | exp | (value & 0x0F)
    self.mrcode = code
08b1f9d67c8e716fd44036a027bdc90dcb9fcfdf
12
igmpv3.py
109
E275 - Missing whitespace after keyword (#3711) Co-authored-by: Alexander Aring <[email protected]> Co-authored-by: Anmol Sarma <[email protected]> Co-authored-by: antoine.torre <[email protected]> Co-authored-by: Antoine Vacher <[email protected]> Co-authored-by: Arnaud Ebalard <[email protected]> Co-authored-by: atlowl <[email protected]> Co-authored-by: Brian Bienvenu <[email protected]> Co-authored-by: Chris Packham <[email protected]> Co-authored-by: CQ <[email protected]> Co-authored-by: Daniel Collins <[email protected]> Co-authored-by: Federico Maggi <[email protected]> Co-authored-by: Florian Maury <[email protected]> Co-authored-by: _Frky <[email protected]> Co-authored-by: g-mahieux <[email protected]> Co-authored-by: gpotter2 <[email protected]> Co-authored-by: Guillaume Valadon <[email protected]> Co-authored-by: Hao Zheng <[email protected]> Co-authored-by: Haresh Khandelwal <[email protected]> Co-authored-by: Harri Hämäläinen <[email protected]> Co-authored-by: hecke <[email protected]> Co-authored-by: Jan Romann <[email protected]> Co-authored-by: Jan Sebechlebsky <[email protected]> Co-authored-by: jdiog0 <[email protected]> Co-authored-by: jockque <[email protected]> Co-authored-by: Julien Bedel <[email protected]> Co-authored-by: Keith Scott <[email protected]> Co-authored-by: Kfir Gollan <[email protected]> Co-authored-by: Lars Munch <[email protected]> Co-authored-by: ldp77 <[email protected]> Co-authored-by: Leonard Crestez <[email protected]> Co-authored-by: Marcel Patzlaff <[email protected]> Co-authored-by: Martijn Thé <[email protected]> Co-authored-by: Martine Lenders <[email protected]> Co-authored-by: Michael Farrell <[email protected]> Co-authored-by: Michał Mirosław <[email protected]> Co-authored-by: mkaliszan <[email protected]> Co-authored-by: mtury <[email protected]> Co-authored-by: Neale Ranns <[email protected]> Co-authored-by: Octavian Toader <[email protected]> Co-authored-by: Peter Eisenlohr <[email protected]> Co-authored-by: Phil <[email protected]> Co-authored-by: Pierre Lalet <[email protected]> Co-authored-by: Pierre Lorinquer <[email protected]> Co-authored-by: piersoh <[email protected]> Co-authored-by: plorinquer <[email protected]> Co-authored-by: pvinci <[email protected]> Co-authored-by: Rahul Jadhav <[email protected]> Co-authored-by: Robin Jarry <[email protected]> Co-authored-by: romain-perez <[email protected]> Co-authored-by: rperez <rperez@debian> Co-authored-by: Sabrina Dubroca <[email protected]> Co-authored-by: Sebastian Baar <[email protected]> Co-authored-by: sebastien mainand <[email protected]> Co-authored-by: smehner1 <[email protected]> Co-authored-by: speakinghedge <[email protected]> Co-authored-by: Steven Van Acker <[email protected]> Co-authored-by: Thomas Faivre <[email protected]> Co-authored-by: Tran Tien Dat <[email protected]> Co-authored-by: Wael Mahlous <[email protected]> Co-authored-by: waeva <[email protected]> Co-authored-by: Alexander Aring <[email protected]> Co-authored-by: Anmol Sarma <[email protected]> Co-authored-by: antoine.torre <[email protected]> Co-authored-by: Antoine Vacher <[email protected]> Co-authored-by: Arnaud Ebalard <[email protected]> Co-authored-by: atlowl <[email protected]> Co-authored-by: Brian Bienvenu <[email protected]> Co-authored-by: Chris Packham <[email protected]> Co-authored-by: CQ <[email protected]> Co-authored-by: Daniel Collins <[email protected]> Co-authored-by: Federico Maggi <[email protected]> Co-authored-by: Florian Maury <[email protected]> Co-authored-by: _Frky 
<[email protected]> Co-authored-by: g-mahieux <[email protected]> Co-authored-by: gpotter2 <[email protected]> Co-authored-by: Guillaume Valadon <[email protected]> Co-authored-by: Hao Zheng <[email protected]> Co-authored-by: Haresh Khandelwal <[email protected]> Co-authored-by: Harri Hämäläinen <[email protected]> Co-authored-by: hecke <[email protected]> Co-authored-by: Jan Romann <[email protected]> Co-authored-by: Jan Sebechlebsky <[email protected]> Co-authored-by: jdiog0 <[email protected]> Co-authored-by: jockque <[email protected]> Co-authored-by: Julien Bedel <[email protected]> Co-authored-by: Keith Scott <[email protected]> Co-authored-by: Kfir Gollan <[email protected]> Co-authored-by: Lars Munch <[email protected]> Co-authored-by: ldp77 <[email protected]> Co-authored-by: Leonard Crestez <[email protected]> Co-authored-by: Marcel Patzlaff <[email protected]> Co-authored-by: Martijn Thé <[email protected]> Co-authored-by: Martine Lenders <[email protected]> Co-authored-by: Michael Farrell <[email protected]> Co-authored-by: Michał Mirosław <[email protected]> Co-authored-by: mkaliszan <[email protected]> Co-authored-by: mtury <[email protected]> Co-authored-by: Neale Ranns <[email protected]> Co-authored-by: Octavian Toader <[email protected]> Co-authored-by: Peter Eisenlohr <[email protected]> Co-authored-by: Phil <[email protected]> Co-authored-by: Pierre Lalet <[email protected]> Co-authored-by: Pierre Lorinquer <[email protected]> Co-authored-by: piersoh <[email protected]> Co-authored-by: pvinci <[email protected]> Co-authored-by: Rahul Jadhav <[email protected]> Co-authored-by: Robin Jarry <[email protected]> Co-authored-by: romain-perez <[email protected]> Co-authored-by: rperez <rperez@debian> Co-authored-by: Sabrina Dubroca <[email protected]> Co-authored-by: Sebastian Baar <[email protected]> Co-authored-by: sebastien mainand <[email protected]> Co-authored-by: smehner1 <[email protected]> Co-authored-by: Steven Van Acker <[email protected]> Co-authored-by: Thomas Faivre <[email protected]> Co-authored-by: Tran Tien Dat <[email protected]> Co-authored-by: Wael Mahlous <[email protected]> Co-authored-by: waeva <[email protected]>
52,724
0
200
65
29
209,532
51
scapy
6
scapy/contrib/igmpv3.py
Python
15
{ "docstring": "Encode and replace the mrcode value to its IGMPv3 encoded time value if needed, # noqa: E501\n as specified in rfc3376#section-4.1.1.\n\n If value < 128, return the value specified. If >= 128, encode as a floating # noqa: E501\n point value. Value can be 0 - 31744.\n ", "language": "en", "n_whitespaces": 77, "n_words": 47, "vocab_size": 37 }
https://github.com/secdev/scapy.git
2
show_tables
def show_tables():
    # [START howto_elasticsearch_query]
    es = ElasticsearchHook(elasticsearch_conn_id=CONN_ID)

    # Handle ES conn with context manager
    with es.get_conn() as es_conn:
        tables = es_conn.execute('SHOW TABLES')
        for table, *_ in tables:
            print(f"table: {table}")
    return True
    # [END howto_elasticsearch_query]


with models.DAG(
    DAG_ID,
    schedule_interval="@once",
    start_date=datetime(2021, 1, 1),
    catchup=False,
    tags=["example", "elasticsearch"],
) as dag:
    execute_query = show_tables()

    (
        # TEST BODY
        execute_query
    )

from tests.system.utils import get_test_run  # noqa: E402

# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
a801ea3927b8bf3ca154fea3774ebf2d90e74e50
13
example_elasticsearch_query.py
187
es new system tests (#22811)
9,105
0
158
45
63
47,416
78
airflow
27
tests/system/providers/elasticsearch/example_elasticsearch_query.py
Python
7
{ "docstring": "\n show_tables queries elasticsearch to list available tables\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
https://github.com/apache/airflow.git
1
test_category_match_in_app
def test_category_match_in_app(self):
    from sentry.grouping.enhancer import Enhancements

    enhancement = Enhancements.from_config_string(
        ,
    )

    event = make_event(
        platform="native",
        exception={
            "values": [
                {
                    "type": "Hello",
                    "stacktrace": {
                        "frames": [
                            {
                                "function": "foo",
                                "in_app": True,
                            },
                            {"function": "bar"},
                        ]
                    },
                }
            ]
        },
    )

    manager = EventManager(event)
    manager.normalize()
    manager.get_data()["grouping_config"] = {
        "enhancements": enhancement.dumps(),
        "id": "mobile:2021-02-12",
    }
    event1 = manager.save(self.project.id)
    assert event1.data["exception"]["values"][0]["stacktrace"]["frames"][0]["in_app"] is False

    event = make_event(
        platform="native",
        exception={
            "values": [
                {
                    "type": "Hello",
                    "stacktrace": {
                        "frames": [
                            {
                                "function": "foo2",
                                "in_app": True,
                            },
                            {"function": "bar"},
                        ]
                    },
                }
            ]
        },
    )

    manager = EventManager(event)
    manager.normalize()
    manager.get_data()["grouping_config"] = {
        "enhancements": enhancement.dumps(),
        "id": "mobile:2021-02-12",
    }
    event2 = manager.save(self.project.id)
    assert event2.data["exception"]["values"][0]["stacktrace"]["frames"][0]["in_app"] is False

    assert event1.group_id == event2.group_id
39cfdcb446e74732c67ce07d7dd8d8d5ace471b1
20
test_event_manager.py
492
test(event_manager): Fix incorrect invocations of manager.save (#36615)
19,067
0
1,085
277
51
94,332
109
sentry
24
tests/sentry/event_manager/test_event_manager.py
Python
64
{ "docstring": "\n Regression test to ensure that grouping in-app enhancements work in\n principle.\n \n function:foo category=bar\n function:foo2 category=bar\n category:bar -app\n ", "language": "en", "n_whitespaces": 84, "n_words": 17, "vocab_size": 16 }
https://github.com/getsentry/sentry.git
1
test_curr_only_accuracy
def test_curr_only_accuracy(self):
    _, test = testing_utils.eval_model(
        opt={
            'batchsize': 4,
            'fp16': True,
            'num_examples': 16,
            'model_file': 'zoo:style_gen/curr_only_classifier/model',
            'model': 'projects.style_gen.classifier:ClassifierAgent',
            'classes_from_file': 'image_chat_personalities_file',
            'task': 'style_gen:CurrUttOnlyStyle',
            'wrapper_task': 'style_gen:LabeledBlendedSkillTalk',
        },
        skip_valid=True,
    )
    self.assertAlmostEqual(test['accuracy'], 0.4375, delta=0.0)
82df52b4431f3573ca2c93dd4bb3098992968acc
12
test_style_gen.py
129
[Style-Controlled Generation] Open-source a second style classifier (#4380) * Add model to model list * Curr only classifier download page * Add test case * Update version * Update with some results * Wording
47,085
0
210
75
29
194,793
29
ParlAI
10
tests/nightly/gpu/test_style_gen.py
Python
15
{ "docstring": "\n Test the accuracy of the classifier trained on current utterances only.\n\n The accuracy is low here because the task was labeled using a different\n classifier, zoo:style_gen/prev_curr_classifier/model.\n ", "language": "en", "n_whitespaces": 55, "n_words": 26, "vocab_size": 23 }
https://github.com/facebookresearch/ParlAI.git
3
highlighted_child
def highlighted_child(self) -> ListItem | None:
    if self.index is None:
        return None
    elif 0 <= self.index < len(self.children):
        return self.children[self.index]
c57f6b90259c8193e3aa9c3453a02b4043873381
10
_list_view.py
67
Fix click handler
45,287
0
63
42
17
186,003
20
textual
6
src/textual/widgets/_list_view.py
Python
10
{ "docstring": "Get the currently highlighted ListItem\n\n Returns:\n ListItem | None: The currently highlighted ListItem, or None if nothing is highlighted.\n ", "language": "en", "n_whitespaces": 44, "n_words": 19, "vocab_size": 16 }
https://github.com/Textualize/textual.git
8
_expand_hint
def _expand_hint(expr, hint, deep=True, **hints):
    hit = False
    # XXX: Hack to support non-Basic args
    #              |
    #              V
    if deep and getattr(expr, 'args', ()) and not expr.is_Atom:
        sargs = []
        for arg in expr.args:
            arg, arghit = Expr._expand_hint(arg, hint, **hints)
            hit |= arghit
            sargs.append(arg)

        if hit:
            expr = expr.func(*sargs)
            expr = expr._evaluate(expr)

    if hasattr(expr, hint):
        newexpr = getattr(expr, hint)(**hints)
        if newexpr != expr:
            return (newexpr, True)

    return (expr, hit)
32e337616722ceae757c23ad1fe2f3465c629a47
13
expr.py
203
Fix MatAdd.expand( ) not expanding completely
48,749
0
297
128
52
197,943
70
sympy
18
sympy/core/expr.py
Python
16
{ "docstring": "\n Helper for ``expand()``. Recursively calls ``expr._eval_expand_hint()``.\n\n Returns ``(expr, hit)``, where expr is the (possibly) expanded\n ``expr`` and ``hit`` is ``True`` if ``expr`` was truly expanded and\n ``False`` otherwise.\n ", "language": "en", "n_whitespaces": 65, "n_words": 28, "vocab_size": 24 }
https://github.com/sympy/sympy.git
3
get_extra_loggers
def get_extra_loggers(self) -> List[str]:
    return (
        [name.strip() for name in self.extra_loggers.split(",")]
        if self.extra_loggers
        else []
    )
a452d8b8917000774302411a7aeb949f7e326814
12
settings.py
64
Strip logger name to prevent accidental spaces
10,735
0
70
39
16
53,193
16
prefect
8
src/prefect/utilities/settings.py
Python
9
{ "docstring": "\n Parse the `extra_loggers` CSV and trim whitespace from logger names\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/PrefectHQ/prefect.git
3
forward
def forward(self, input_ids=None, attention_mask=None, **kwargs):
    if input_ids is None:
        raise ValueError("Input_ids cannot be None.")
    inputs_embeds = self.d_model**0.5 * self.embed_tokens(input_ids)
    inputs_embed_pos = self.encoder_embed_positions(input_ids.shape)
    hidden_states = inputs_embeds + inputs_embed_pos
    hidden_states = self.encoder_layernorm_embedding(hidden_states)
    encoder_input = self.encoder_dropout(hidden_states)

    if attention_mask is None:
        attention_mask = paddle.cast(
            input_ids == self.pad_token_id,
            dtype=paddle.get_default_dtype()).unsqueeze([1, 2]) * -1e4
    attention_mask.stop_gradient = True
    encoder_output = self.encoder(encoder_input, src_mask=attention_mask)
    return encoder_output
b0c35d5e1ff02a634fa26392b60d3885c2c78677
16
modeling.py
203
Fix the attention mask for fp16 (#1585)
118,056
0
189
130
39
322,099
56
PaddleNLP
26
paddlenlp/transformers/mbart/modeling.py
Python
15
{ "docstring": "\n The MBartEncoder forward method, overrides the `__call__()` special method.\n\n Args:\n input_ids (Tensor, optional):\n See :class:`MBartModel`.\n attention_mask (Tensor, optional):\n See :class:`MBartModel`.\n\n Returns:\n Tensor: Returns tensor `encoder_output`, which is the output at the last layer of the model.\n Its data type should be float32 and has a shape of [batch_size, sequence_length, hidden_size].\n\n ", "language": "en", "n_whitespaces": 153, "n_words": 50, "vocab_size": 42 }
https://github.com/PaddlePaddle/PaddleNLP.git
1
sparsemax_bisect
def sparsemax_bisect(X, dim=-1, n_iter=50, ensure_sum_one=True):
    return SparsemaxBisectFunction.apply(X, dim, n_iter, ensure_sum_one)
20a8a6fdb516e543d4598c852063ba0fb407f3ba
7
root_finding.py
47
Removes dependency on entmax from PyPI, adds entmax source to utils (#1778) * Removes dependency on entmax from PyPi, add entmax source code into utils instead. * Removes build status and image from README * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix python formatting in docs for pre-commit. * Removes __main__ from test_losses.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update entmax imports. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Daniel Treiman <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
957
0
16
32
10
6,299
10
ludwig
7
ludwig/utils/entmax/root_finding.py
Python
2
{ "docstring": "sparsemax: normalizing sparse transform (a la softmax), via bisection.\n\n Solves the projection:\n\n min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.\n\n Parameters\n ----------\n X : torch.Tensor\n The input tensor.\n\n dim : int\n The dimension along which to apply sparsemax.\n\n n_iter : int\n Number of bisection iterations. For float32, 24 iterations should\n suffice for machine precision.\n\n ensure_sum_one : bool,\n Whether to divide the result by its sum. If false, the result might\n sum to close but not exactly 1, which might cause downstream problems.\n\n Note: This function does not yet support normalizing along anything except\n the last dimension. Please use transposing and views to achieve more\n general behavior.\n\n Returns\n -------\n P : torch tensor, same shape as X\n The projection result, such that P.sum(dim=dim) == 1 elementwise.\n ", "language": "en", "n_whitespaces": 231, "n_words": 128, "vocab_size": 107 }
https://github.com/ludwig-ai/ludwig.git
1
test_missing_fields_old
def test_missing_fields_old(self) -> None:
    expected_topic = "Unknown Policy (Unknown ID)"
    expected_message = .strip()

    self.check_webhook(
        "incident_default_fields_old",
        expected_topic,
        expected_message,
        content_type="application/json",
    )
bfd9fc86223c2446e8b38d2cdd5876caed50bfda
9
tests.py
57
integration: Fix integration with newrelic. Newrelic updated the payload that's sent via the webhook incoming call causing a bug in the newrelic webhook endpoint. This fixes the bug by updating the endpoint to respect the new format of the payload as well as the old format. This should be updated once the old format is EOLed. Fixes #22338.
17,836
0
90
32
18
84,423
19
zulip
7
zerver/webhooks/newrelic/tests.py
Python
14
{ "docstring": "\n[Incident](https://alerts.newrelic.com) **opened** for condition: **Unknown condition** at <time:2020-11-11 22:32:11.151000+00:00>\n``` quote\nNo details.\n```\n", "language": "en", "n_whitespaces": 10, "n_words": 14, "vocab_size": 13 }
https://github.com/zulip/zulip.git
1
test_process_form_extras
def test_process_form_extras():
    # Testing parameters set in both `Extra` and custom fields.
    mock_form = mock.Mock()
    mock_form.data = {
        "conn_type": "test",
        "conn_id": "extras_test",
        "extra": '{"param1": "param1_val"}',
        "extra__test__custom_field": "custom_field_val",
    }

    cmv = ConnectionModelView()
    cmv.extra_fields = ["extra__test__custom_field"]  # Custom field
    cmv.process_form(form=mock_form, is_created=True)

    assert json.loads(mock_form.extra.data) == {
        "extra__test__custom_field": "custom_field_val",
        "param1": "param1_val",
    }

    # Testing parameters set in `Extra` field only.
    mock_form = mock.Mock()
    mock_form.data = {
        "conn_type": "test2",
        "conn_id": "extras_test2",
        "extra": '{"param2": "param2_val"}',
    }

    cmv = ConnectionModelView()
    cmv.process_form(form=mock_form, is_created=True)

    assert json.loads(mock_form.extra.data) == {"param2": "param2_val"}

    # Testing parameters set in custom fields only.
    mock_form = mock.Mock()
    mock_form.data = {
        "conn_type": "test3",
        "conn_id": "extras_test3",
        "extra__test3__custom_field": "custom_field_val3",
    }

    cmv = ConnectionModelView()
    cmv.extra_fields = ["extra__test3__custom_field"]  # Custom field
    cmv.process_form(form=mock_form, is_created=True)

    assert json.loads(mock_form.extra.data) == {"extra__test3__custom_field": "custom_field_val3"}

    # Testing parameters set in both extra and custom fields (cunnection updates).
    mock_form = mock.Mock()
    mock_form.data = {
        "conn_type": "test4",
        "conn_id": "extras_test4",
        "extra": '{"extra__test4__custom_field": "custom_field_val3"}',
        "extra__test4__custom_field": "custom_field_val4",
    }

    cmv = ConnectionModelView()
    cmv.extra_fields = ["extra__test4__custom_field"]  # Custom field
    cmv.process_form(form=mock_form, is_created=True)

    assert json.loads(mock_form.extra.data) == {"extra__test4__custom_field": "custom_field_val4"}
44df1420582b358594c8d7344865811cff02956c
10
test_views_connection.py
510
fix: Update custom connection field processing (#20883) * fix: Update custom connection field processing Fixes issue where custom connectionfields are not updated because `extra` field is in form and has previous values, overriding custom field values. Adds portion of connection form tests to test functionality.
8,180
0
375
275
67
44,155
161
airflow
14
tests/www/views/test_views_connection.py
Python
45
{ "docstring": "\n Test the handling of connection parameters set with the classic `Extra` field as well as custom fields.\n ", "language": "en", "n_whitespaces": 24, "n_words": 17, "vocab_size": 15 }
https://github.com/apache/airflow.git
15
data
def data(self, data=None, offset=None, size=None, shape=None, as_memoryview=False):
    offset = offset or (0, 0)
    size = size or (self.width - offset[0], self.height - offset[1])
    shape = shape or size

    if any(x <= 0 for x in size):
        raise ValueError("Offset too big for this raster.")

    if size[0] > self.width or size[1] > self.height:
        raise ValueError("Size is larger than raster.")

    # Create ctypes type array generator
    ctypes_array = GDAL_TO_CTYPES[self.datatype()] * (shape[0] * shape[1])

    if data is None:
        # Set read mode
        access_flag = 0
        # Prepare empty ctypes array
        data_array = ctypes_array()
    else:
        # Set write mode
        access_flag = 1
        # Instantiate ctypes array holding the input data
        if isinstance(data, (bytes, memoryview)) or (
            numpy and isinstance(data, numpy.ndarray)
        ):
            data_array = ctypes_array.from_buffer_copy(data)
        else:
            data_array = ctypes_array(*data)

    # Access band
    capi.band_io(
        self._ptr,
        access_flag,
        offset[0],
        offset[1],
        size[0],
        size[1],
        byref(data_array),
        shape[0],
        shape[1],
        self.datatype(),
        0,
        0,
    )

    # Return data as numpy array if possible, otherwise as list
    if data is None:
        if as_memoryview:
            return memoryview(data_array)
        elif numpy:
            # reshape() needs a reshape parameter with the height first.
            return numpy.frombuffer(
                data_array, dtype=numpy.dtype(data_array)
            ).reshape(tuple(reversed(size)))
        else:
            return list(data_array)
    else:
        self._flush()
9c19aff7c7561e3a82978a272ecdaad40dda5c00
17
band.py
460
Refs #33476 -- Reformatted code with Black.
50,604
0
740
299
118
203,999
181
django
33
django/contrib/gis/gdal/raster/band.py
Python
45
{ "docstring": "\n Read or writes pixel values for this band. Blocks of data can\n be accessed by specifying the width, height and offset of the\n desired block. The same specification can be used to update\n parts of a raster by providing an array of values.\n\n Allowed input data types are bytes, memoryview, list, tuple, and array.\n ", "language": "en", "n_whitespaces": 97, "n_words": 54, "vocab_size": 45 }
https://github.com/django/django.git
1
get_all_holiday_exchange_short_names
def get_all_holiday_exchange_short_names() -> pd.DataFrame:
    exchange_short_names = mcal.calendar_registry.get_calendar_names()
    df = pd.DataFrame(exchange_short_names, columns=["short_name"])
    return df


@log_start_end(log=logger)
7e4a657333c8b7bb1ebdcb7a4c8f06e8dc0d66f6
@log_start_end(log=logger)
11
pandas_market_cal_model.py
71
Addition of exchange holiday functionality under stocks/th (#3486) * Addition of exchange holiday calendars using PandasMarketCalendar * website update for holidays functionality * Disable pylint too many attributes * Changes to not show index for dataframe and include metavar * Setting of default value for holidays * Merge + black linter * test fix Co-authored-by: james <[email protected]> Co-authored-by: Jeroen Bouma <[email protected]>
86,066
1
25
34
12
286,857
14
OpenBBTerminal
12
openbb_terminal/stocks/tradinghours/pandas_market_cal_model.py
Python
11
{ "docstring": "Get all holiday exchanges short names.\n\n Returns\n -------\n pd.DataFrame\n All available exchanges with holiday data (short names)\n ", "language": "en", "n_whitespaces": 36, "n_words": 17, "vocab_size": 15 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
_tupleize_axis_indexer
def _tupleize_axis_indexer(self, key) -> tuple:
    new_key = [slice(None)] * self.ndim
    # error: Invalid index type "Optional[Any]" for "List[slice]"; expected
    # type "SupportsIndex"
    new_key[self.axis] = key  # type:ignore[index]
    return tuple(new_key)
ab42f85f192ab054a18f94825ced1bb4c1ab7d3f
10
indexing.py
60
REF: implement LocationIndexer._tupleize_axis_indexer (#45378)
39,447
0
72
35
25
163,504
29
pandas
8
pandas/core/indexing.py
Python
7
{ "docstring": "\n If we have an axis, adapt the given key to be axis-independent.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
https://github.com/pandas-dev/pandas.git
5
_broadcasting_select_mhlo
def _broadcasting_select_mhlo(which, x, y):
    which_type, x_type, y_type = (
        ir.RankedTensorType(v.type) for v in (which, x, y))
    out_shape = list(lax_internal.broadcast_shapes(
        tuple(which_type.shape), tuple(x_type.shape), tuple(y_type.shape)))
    bcast_dims = lambda shape: mlir.dense_int_elements(
        range(len(out_shape) - len(shape), len(out_shape)))
    if which_type.shape != out_shape:
        which = mhlo.BroadcastInDimOp(
            ir.RankedTensorType.get(out_shape, which_type.element_type), which,
            bcast_dims(which_type.shape))
    if x_type.shape != out_shape:
        x = mhlo.BroadcastInDimOp(
            ir.RankedTensorType.get(out_shape, x_type.element_type), x,
            bcast_dims(x_type.shape))
    if y_type.shape != out_shape:
        y = mhlo.BroadcastInDimOp(
            ir.RankedTensorType.get(out_shape, y_type.element_type), y,
            bcast_dims(y_type.shape))
    return mhlo.SelectOp(which, x, y).result
bc658e74567ffa941b31f4e89463dc713d2ecbf4
14
linalg.py
316
[MHLO] Add direct MHLO lowerings for most linear algebra kernels. PiperOrigin-RevId: 439927594
26,747
0
140
211
50
120,043
68
jax
28
jax/_src/lax/linalg.py
Python
20
{ "docstring": "Wrapper around XLA `Select` that broadcasts its arguments.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/google/jax.git
2
list_to_tuple
def list_to_tuple(maybe_list):
    if isinstance(maybe_list, list):
        return tuple(maybe_list)
    return maybe_list
84afc5193d38057e2e2badf9c889ea87d80d8fbf
9
training_utils.py
36
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,856
0
25
21
8
271,833
9
keras
5
keras/engine/training_utils.py
Python
4
{ "docstring": "Datasets will stack the list of tensor, so switch them to tuples.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/keras-team/keras.git
2
collect_extra_binaries
def collect_extra_binaries(self):
    binaries = []

    # Applicable only to Windows.
    if not compat.is_win:
        return []

    # OpenGL: EGL/GLES via ANGLE, software OpenGL renderer.
    binaries += self._collect_all_or_none(['libEGL.dll', 'libGLESv2.dll'], ['d3dcompiler_??.dll'])
    binaries += self._collect_all_or_none(['opengl32sw.dll'])

    # Include ICU files, if they exist.
    # See the "Deployment approach" section in ``PyInstaller/utils/hooks/qt.py``.
    binaries += self._collect_all_or_none(['icudt??.dll', 'icuin??.dll', 'icuuc??.dll'])

    return binaries

# Collect additional shared libraries required for SSL support in QtNetwork, if they are available.
# Applicable only to Windows. See issue #3520, #4048.
d789a7daa7712716c89259b987349917a89aece7
10
__init__.py
117
hookutils: reorganize the Qt hook utilities Reorganize the Qt module information to provide information necessary to deal with variations between different python Qt bindings (PySide2, PyQt5, PySide6, and PyQt6). Replace the existing table-like dictionary with list of entries, which is easier to format and document. From this list, we now generate two dictionaries; one that maps Qt module (shared library) names to the module info entries (the same role as the old dictionary), and one that maps python module names to the module info entries. The latter is necessary to accommodate python modules that do not have corresponding Qt shared libraries (header-only Qt modules, such as QtAxContainer; or statically-linked module, such as QSci), but we still need to provide information about plugins or translation files. The new information list is based on manual inspection of source code for Qt 5.15 and 6.3, and should provide comprehensive information about all plugin names and translation file basenames. In addition, most of the helper functions, which take a reference to the `QtLibraryInfo` class as their first argument, have been turned into methods of the `QtLibraryInfo` class. The corresponding hooks have also been adjusted.
77,569
0
171
61
55
264,030
77
pyinstaller
6
PyInstaller/utils/hooks/qt/__init__.py
Python
8
{ "docstring": "\n Collect extra binaries/DLLs required by Qt. These include ANGLE DLLs, OpenGL software renderer DLL, and ICU\n DLLs. Applicable only on Windows (on other OSes, empty list is returned).\n ", "language": "en", "n_whitespaces": 50, "n_words": 28, "vocab_size": 28 }
https://github.com/pyinstaller/pyinstaller.git
4
__make_attention_mask
def __make_attention_mask(self) -> None:
    # Make masks for shift case
    if self.shift_size > 0:
        height, width = self.input_resolution  # type: int, int
        mask: torch.Tensor = torch.zeros(height, width, device=self.window_attention.tau.device)
        height_slices: Tuple = (slice(0, -self.window_size),
                                slice(-self.window_size, -self.shift_size),
                                slice(-self.shift_size, None))
        width_slices: Tuple = (slice(0, -self.window_size),
                               slice(-self.window_size, -self.shift_size),
                               slice(-self.shift_size, None))
        counter: int = 0
        for height_slice in height_slices:
            for width_slice in width_slices:
                mask[height_slice, width_slice] = counter
                counter += 1
        mask_windows: torch.Tensor = unfold(mask[None, None], self.window_size)
        mask_windows: torch.Tensor = mask_windows.reshape(-1, self.window_size * self.window_size)
        attention_mask: Optional[torch.Tensor] = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attention_mask: Optional[torch.Tensor] = attention_mask.masked_fill(attention_mask != 0, float(-100.0))
        attention_mask: Optional[torch.Tensor] = attention_mask.masked_fill(attention_mask == 0, float(0.0))
    else:
        attention_mask: Optional[torch.Tensor] = None
    # Save mask
    self.register_buffer("attention_mask", attention_mask)
90dc74c450a0ec671af0e7f73d6b4a7b5396a7af
14
swin_transformer_v2.py
420
Add code from https://github.com/ChristophReich1996/Swin-Transformer-V2 and change docstring style to match timm
119,894
0
474
279
69
331,749
108
pytorch-image-models
31
timm/models/swin_transformer_v2.py
Python
24
{ "docstring": " Method generates the attention mask used in shift case. ", "language": "en", "n_whitespaces": 10, "n_words": 9, "vocab_size": 9 }
https://github.com/huggingface/pytorch-image-models.git
1
test_valid_zero_ops_doesnt_require_backend_dispatch_key
def test_valid_zero_ops_doesnt_require_backend_dispatch_key(self) -> None:
    yaml_str = 
    # External codegen on a yaml file with no operators is effectively a no-op,
    # so there's no reason to parse the backend
    self.assert_success_from_gen_backend_stubs(yaml_str)
bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d
7
test_gen_backend_stubs.py
32
Revert "Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels" (#69950) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/69950 This reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa. Test Plan: Imported from OSS Reviewed By: albanD Differential Revision: D33113545 Pulled By: bdhirsh fbshipit-source-id: d6590294662588d36c09662dea65919ad4e1e288
21,486
0
58
16
27
102,171
30
pytorch
4
tools/test/test_gen_backend_stubs.py
Python
6
{ "docstring": "\\\nbackend: BAD_XLA\ncpp_namespace: torch_xla\nsupported:", "language": "en", "n_whitespaces": 2, "n_words": 6, "vocab_size": 6 }
https://github.com/pytorch/pytorch.git
1
test_worker_disable_ll_hls
async def test_worker_disable_ll_hls(hass):
    stream_settings = StreamSettings(
        ll_hls=True,
        min_segment_duration=TARGET_SEGMENT_DURATION_NON_LL_HLS - SEGMENT_DURATION_ADJUSTER,
        part_target_duration=TARGET_SEGMENT_DURATION_NON_LL_HLS,
        hls_advance_part_limit=3,
        hls_part_timeout=TARGET_SEGMENT_DURATION_NON_LL_HLS,
        orientation=1,
    )
    py_av = MockPyAv()
    py_av.container.format.name = "hls"
    await async_decode_stream(
        hass,
        PacketSequence(TEST_SEQUENCE_LENGTH),
        py_av=py_av,
        stream_settings=stream_settings,
    )
    assert stream_settings.ll_hls is False
852b0caf5be4bba0dcaaf5f6a38221d1590c4ed9
10
test_worker.py
117
Add orientation transforms to stream (#77439)
104,561
0
133
76
29
305,777
32
core
20
tests/components/stream/test_worker.py
Python
19
{ "docstring": "Test that the worker disables ll-hls for hls inputs.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
1
update_company_monthly_sales
def update_company_monthly_sales(company):
    import json

    from frappe.utils.goal import get_monthly_results

    filter_str = "company = {0} and status != 'Draft' and docstatus=1".format(
        frappe.db.escape(company)
    )
    month_to_value_dict = get_monthly_results(
        "Sales Invoice", "base_grand_total", "posting_date", filter_str, "sum"
    )
    frappe.db.set_value("Company", company, "sales_monthly_history", json.dumps(month_to_value_dict))
494bd9ef78313436f0424b918f200dab8fc7c20b
11
company.py
111
style: format code with black
14,538
0
25
64
30
67,473
35
erpnext
14
erpnext/setup/doctype/company/company.py
Python
10
{ "docstring": "Cache past year monthly sales of every company based on sales invoices", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
https://github.com/frappe/erpnext.git
4
newer_pairwise
def newer_pairwise (sources, targets):
    if len(sources) != len(targets):
        raise ValueError("'sources' and 'targets' must be same length")

    # build a pair of lists (sources, targets) where source is newer
    n_sources = []
    n_targets = []
    for i in range(len(sources)):
        if newer(sources[i], targets[i]):
            n_sources.append(sources[i])
            n_targets.append(targets[i])
    return (n_sources, n_targets)

# newer_pairwise ()
8198943edd73a363c266633e1aa5b2a9e9c9f526
12
dep_util.py
132
add python 3.10.4 for windows
56,774
0
106
81
43
222,842
49
XX-Net
11
python3.10.4/Lib/distutils/dep_util.py
Python
10
{ "docstring": "Walk two filename lists in parallel, testing if each source is newer\n than its corresponding target. Return a pair of lists (sources,\n targets) where source is newer than target, according to the semantics\n of 'newer()'.\n ", "language": "en", "n_whitespaces": 48, "n_words": 35, "vocab_size": 29 }
https://github.com/XX-net/XX-Net.git
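A minimal usage sketch for the newer_pairwise helper recorded above, assuming the standard distutils.dep_util import path; the scratch file names and timestamps are made up for illustration.

import os
import time
from distutils.dep_util import newer_pairwise  # standard-library location of the helper

for fname in ("a.c", "b.c", "a.o", "b.o"):     # scratch files for the demo
    with open(fname, "w") as fh:
        fh.write("stub\n")

now = time.time()
os.utime("a.c", (now, now))                    # a.c newer than a.o -> pair kept
os.utime("a.o", (now - 100, now - 100))
os.utime("b.c", (now - 100, now - 100))
os.utime("b.o", (now, now))                    # b.o newer than b.c -> pair dropped

print(newer_pairwise(["a.c", "b.c"], ["a.o", "b.o"]))  # (['a.c'], ['a.o'])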
1
mean
def mean(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
    from dask.array.reductions import mean

    return mean(
        self,
        axis=axis,
        dtype=dtype,
        keepdims=keepdims,
        split_every=split_every,
        out=out,
    )
2820bae493a49cb1d0a6e376985c5473b8f04fa8
8
core.py
83
Don't include docs in ``Array`` methods, just refer to module docs (#9244) Co-authored-by: James Bourbeau <[email protected]>
36,740
0
114
60
20
156,730
20
dask
10
dask/array/core.py
Python
10
{ "docstring": "Returns the average of the array elements along given axis.\n\n Refer to :func:`dask.array.mean` for full documentation.\n\n See Also\n --------\n dask.array.mean : equivalent function\n ", "language": "en", "n_whitespaces": 58, "n_words": 23, "vocab_size": 22 }
https://github.com/dask/dask.git
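A short usage sketch of the mean method recorded above, via ordinary dask.array calls; the array shape and chunking are arbitrary.

import dask.array as da

x = da.ones((1000, 500), chunks=(250, 250))   # arbitrary chunked array
column_means = x.mean(axis=0)                 # lazy reduction, per the delegation above
print(column_means.compute()[:3])             # -> [1. 1. 1.]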
1
test_filter_on_load
async def test_filter_on_load(hass, hass_storage): hass_storage[er.STORAGE_KEY] = { "version": er.STORAGE_VERSION_MAJOR, "minor_version": 1, "data": { "entities": [ { "entity_id": "test.named", "platform": "super_platform", "unique_id": "with-name", "name": "registry override", }, # This entity's name should be None { "entity_id": "test.no_name", "platform": "super_platform", "unique_id": "without-name", }, { "entity_id": "test.disabled_user", "platform": "super_platform", "unique_id": "disabled-user", "disabled_by": "user", # We store the string representation }, { "entity_id": "test.disabled_hass", "platform": "super_platform", "unique_id": "disabled-hass", "disabled_by": "hass", # We store the string representation }, # This entry should have the entity_category reset to None { "entity_id": "test.system_entity", "platform": "super_platform", "unique_id": "system-entity", "entity_category": "system", }, ] }, } await er.async_load(hass) registry = er.async_get(hass) assert len(registry.entities) == 5 assert set(registry.entities.keys()) == { "test.disabled_hass", "test.disabled_user", "test.named", "test.no_name", "test.system_entity", } entry_with_name = registry.async_get_or_create( "test", "super_platform", "with-name" ) entry_without_name = registry.async_get_or_create( "test", "super_platform", "without-name" ) assert entry_with_name.name == "registry override" assert entry_without_name.name is None assert not entry_with_name.disabled entry_disabled_hass = registry.async_get_or_create( "test", "super_platform", "disabled-hass" ) entry_disabled_user = registry.async_get_or_create( "test", "super_platform", "disabled-user" ) assert entry_disabled_hass.disabled assert entry_disabled_hass.disabled_by is er.RegistryEntryDisabler.HASS assert entry_disabled_user.disabled assert entry_disabled_user.disabled_by is er.RegistryEntryDisabler.USER entry_system_category = registry.async_get_or_create( "test", "system_entity", "system-entity" ) assert entry_system_category.entity_category is None
4e32bf2ac910e38695a1b9bf9b610c717db97998
14
test_entity_registry.py
535
Drop old migration code from entity registry (#78278)
106,309
0
919
305
92
307,538
178
core
26
tests/helpers/test_entity_registry.py
Python
77
{ "docstring": "Test we transform some data when loading from storage.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
1
read
def read(self) -> TOMLDocument:
    with open(self._path, encoding="utf-8", newline="") as f:
        return loads(f.read())
8faa74cdc9da20cfdcc69f5ec29b91112c95b4c9
12
toml_file.py
64
Update tomlkit==0.9.2 Used: python -m invoke vendoring.update --package=tomlkit
4,068
0
37
35
12
21,835
12
pipenv
9
pipenv/vendor/tomlkit/toml_file.py
Python
4
{ "docstring": "Read the file content as a :class:`tomlkit.toml_document.TOMLDocument`.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/pypa/pipenv.git
2
load_pascal_annotation
def load_pascal_annotation(index, pascal_root): classes = ('__background__', # always index 0 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') class_to_ind = dict(zip(classes, xrange(21))) filename = osp.join(pascal_root, 'Annotations', index + '.xml') # print 'Loading: {}'.format(filename)
cc4d0564756ca067516f71718a3d135996525909
12
pascal_multilabel_datalayers.py
153
Balanced joint maximum mean discrepancy for deep transfer learning
12,009
0
161
317
41
60,201
45
transferlearning
11
code/deep/BJMMD/caffe/examples/pycaffe/layers/pascal_multilabel_datalayers.py
Python
33
{ "docstring": "\n This code is borrowed from Ross Girshick's FAST-RCNN code\n (https://github.com/rbgirshick/fast-rcnn).\n It parses the PASCAL .xml metadata files.\n See publication for further details: (http://arxiv.org/abs/1504.08083).\n\n Thanks Ross!\n\n ", "language": "en", "n_whitespaces": 44, "n_words": 25, "vocab_size": 24 }
https://github.com/jindongwang/transferlearning.git
2
test_target_exists_force
def test_target_exists_force(file, source): new_name = source.parent / "new_name.txt" new_name.write_text("existing file") try: file.rename(name=str(new_name), source=str(source), force=True) assert new_name.exists() assert not source.exists() assert new_name.read_text() == "Source content" finally: new_name.unlink()
a35b29b2651bf33c5d5b45e64bc7765ffde4aff4
12
test_rename.py
127
Add some funtional tests Add functional tests for the following: - file.readlink - file.replace - file.symlink Remove unit tests for file.replace as they are duplicated in the added functional test
54,193
0
76
73
24
215,826
26
salt
13
tests/pytests/functional/states/file/test_rename.py
Python
10
{ "docstring": "\n Test file.rename when there is an existing file with the new name and\n force=True\n ", "language": "en", "n_whitespaces": 24, "n_words": 14, "vocab_size": 14 }
https://github.com/saltstack/salt.git
3
cast_column
def cast_column(column, backend):
    values = backend.df_engine.compute(column.drop_duplicates())
    if strings_utils.values_are_pandas_numbers(values):
        # If numbers, convert to float so it can be converted to bool
        column = column.astype(float).astype(bool)
    elif strings_utils.values_are_pandas_bools(values):
        # If booleans, manually assign boolean values
        column = backend.df_engine.map_objects(
            column, lambda x: x.lower() in strings_utils.PANDAS_TRUE_STRS
        ).astype(bool)
    else:
        # If neither numbers or booleans, they are strings (objects)
        column = column.astype(object)
    return column
48731ae5ca98d4a1e92374af30f060408f14d91a
17
binary_feature.py
152
Fix postprocessing on binary feature columns with number dtype (#2189) * Fixes binary postprocessing for number dtype * ensure calibration work retained * cleanup * removed script postprocessing caveat * added ray/dask test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * adds mock.patch behavior * allow type inference for visualization * add pytest mark distributed * removed mock.patch call * add try/finally clause like test_ray.py * increasing epochs to 2 as workaround * increased number of samples and add back mock.patch Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1,116
0
193
91
46
7,086
59
ludwig
18
ludwig/features/binary_feature.py
Python
11
{ "docstring": "Cast column of dtype object to bool.\n\n Unchecked casting to boolean when given a column of dtype object converts all non-empty cells to True. We check\n the values of the column directly and manually determine the best dtype to use.\n ", "language": "en", "n_whitespaces": 61, "n_words": 40, "vocab_size": 28 }
https://github.com/ludwig-ai/ludwig.git
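A loose, pandas-only sketch of the branching idea in cast_column above; the truthy/falsy spellings and the helper name are assumptions, not Ludwig's actual strings_utils or backend engine.

import pandas as pd

PANDAS_TRUE_STRS = {"true", "t", "yes", "y"}    # assumed truthy spellings
PANDAS_FALSE_STRS = {"false", "f", "no", "n"}   # assumed falsy spellings

def cast_object_column_to_bool(column: pd.Series) -> pd.Series:
    # Inspect the distinct values, then pick a branch, as in the record above.
    values = column.drop_duplicates().astype(str)
    if values.str.fullmatch(r"-?\d+(\.\d+)?").all():
        # Numeric strings: go through float so "0"/"1" become False/True.
        return column.astype(float).astype(bool)
    if values.str.lower().isin(PANDAS_TRUE_STRS | PANDAS_FALSE_STRS).all():
        # Boolean-looking strings: map the truthy spellings to True.
        return column.map(lambda x: str(x).lower() in PANDAS_TRUE_STRS).astype(bool)
    # Anything else stays as plain objects.
    return column.astype(object)

print(cast_object_column_to_bool(pd.Series(["1", "0", "1"])).tolist())       # [True, False, True]
print(cast_object_column_to_bool(pd.Series(["yes", "no", "yes"])).tolist())  # [True, False, True]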
2
enter
def enter(self, *path):
    return Meta(self._meta, path=self._path + [str(p) for p in path])
522d6f27c28dc5fd4d996ed605865c42fbda0da8
12
meta.py
52
ref: replace legacy compat.map with list comprehensions (#36372)
18,968
0
26
33
12
93,031
12
sentry
8
src/sentry/utils/meta.py
Python
2
{ "docstring": "\n Enters into sub meta data at the specified path. This always returns a\n new ``Meta`` object, regardless whether the path already exists.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 21 }
https://github.com/getsentry/sentry.git
1
_get_support_mask
def _get_support_mask(self):
    n_features = len(self.feature_names)
    mask = np.zeros(n_features, dtype=bool)
    mask[np.asarray(self.feat_list_idx)] = True
    return mask
388616b6247ca4ea8de4e2f340d6206aee523541
10
feature_set_selector.py
67
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,623
0
49
41
11
181,854
14
tpot
12
tpot/builtins/feature_set_selector.py
Python
5
{ "docstring": "\n Get the boolean mask indicating which features are selected\n Returns\n -------\n support : boolean array of shape [# input features]\n An element is True iff its corresponding feature is selected for\n retention.\n ", "language": "en", "n_whitespaces": 90, "n_words": 32, "vocab_size": 29 }
https://github.com/EpistasisLab/tpot.git
1
test_do_not_render_html
def test_do_not_render_html(self): value = { "first_row_is_table_header": False, "first_col_is_header": False, "data": [ ["<p><strong>Test</strong></p>", None, None], [None, None, None], [None, None, None], ], } expected = block = TableBlock() result = block.render(value) self.assertHTMLEqual(result, expected)
d10f15e55806c6944827d801cd9c2d53f5da4186
11
tests.py
113
Reformat with black
16,056
0
171
71
23
73,589
32
wagtail
9
wagtail/contrib/table_block/tests.py
Python
22
{ "docstring": "\n Ensure that raw html doesn't render\n by default.\n \n <table>\n <tbody>\n <tr><td>&lt;p&gt;&lt;strong&gt;Test&lt;/strong&gt;&lt;/p&gt;</td><td></td><td></td></tr>\n <tr><td></td><td></td><td></td></tr>\n <tr><td></td><td></td><td></td></tr>\n </tbody>\n </table>\n ", "language": "en", "n_whitespaces": 154, "n_words": 15, "vocab_size": 14 }
https://github.com/wagtail/wagtail.git
4
path_up
def path_up(url, count):
    urlutils.ensure_valid(url)
    url = url.adjusted(QUrl.UrlFormattingOption.RemoveFragment |
                       QUrl.UrlFormattingOption.RemoveQuery)
    path = url.path(QUrl.ComponentFormattingOption.FullyEncoded)
    if not path or path == '/':
        raise Error("Can't go up!")
    for _i in range(0, min(count, path.count('/'))):
        path = posixpath.join(path, posixpath.pardir)
    path = posixpath.normpath(path)
    url.setPath(path, QUrl.ParsingMode.StrictMode)
    return url
0877fb0d78635692e481c8bde224fac5ad0dd430
12
navigate.py
180
Run scripts/dev/rewrite_enums.py
117,525
0
81
111
32
321,095
40
qutebrowser
24
qutebrowser/browser/navigate.py
Python
11
{ "docstring": "Helper method for :navigate when `where' is up.\n\n Args:\n url: The current url.\n count: The number of levels to go up in the url.\n ", "language": "en", "n_whitespaces": 44, "n_words": 24, "vocab_size": 22 }
https://github.com/qutebrowser/qutebrowser.git
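The "go up N path levels" idea from path_up, sketched as a hypothetical standalone helper using only posixpath (no Qt types); the function name is illustrative.

import posixpath

def url_path_up(path, count):
    # Go up `count` levels in a URL path, clamped at the root.
    if not path or path == "/":
        raise ValueError("Can't go up!")
    for _ in range(min(count, path.count("/"))):
        path = posixpath.join(path, posixpath.pardir)
    return posixpath.normpath(path)

print(url_path_up("/docs/guide/page.html", 1))  # /docs/guide
print(url_path_up("/docs/guide/page.html", 5))  # /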
3
_smooth_amount_callback
def _smooth_amount_callback(self, *args) -> None:
    try:
        smooth_amount = self.vars["smoothgraph"].get()
    except tk.TclError:  # Don't update when there is no value in the variable
        return
    logger.debug("Updating graph smooth_amount: (new_value: %s, args: %s)",
                 smooth_amount, args)
    for graph in self.subnotebook.children.values():
        graph.calcs.set_smooth_amount(smooth_amount)
dab823a3eb7a5257cb1e0818ee10ed234d3de97f
12
display_command.py
105
Typing - lib.gui.display_command
21,281
0
136
62
35
101,899
37
faceswap
16
lib/gui/display_command.py
Python
10
{ "docstring": " Update each graph's smooth amount on variable change ", "language": "en", "n_whitespaces": 9, "n_words": 8, "vocab_size": 8 }
https://github.com/deepfakes/faceswap.git
2
set_pattern
def set_pattern(self, val):
    if len(val) > 5000:  # avoid crash on huge search terms (#5973)
        log.completion.warning(f"Trimming {len(val)}-char pattern to 5000")
        val = val[:5000]
    self._pattern = val
    val = re.sub(r' +', r' ', val)  # See #1919
    val = re.escape(val)
    val = val.replace(r'\ ', '.*')
    rx = QRegularExpression(val, QRegularExpression.PatternOption.CaseInsensitiveOption)
    qtutils.ensure_valid(rx)
    self.setFilterRegularExpression(rx)
    self.invalidate()
    sortcol = 0
    self.sort(sortcol)
0877fb0d78635692e481c8bde224fac5ad0dd430
13
listcategory.py
188
Run scripts/dev/rewrite_enums.py
117,601
0
163
108
43
321,240
55
qutebrowser
22
qutebrowser/completion/models/listcategory.py
Python
14
{ "docstring": "Setter for pattern.\n\n Args:\n val: The value to set.\n ", "language": "en", "n_whitespaces": 34, "n_words": 9, "vocab_size": 9 }
https://github.com/qutebrowser/qutebrowser.git
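A standalone sketch of just the pattern-to-regex step used in set_pattern above; the function name and defaults are illustrative, and no Qt classes are involved.

import re

def pattern_to_regex(val, max_len=5000):
    # Trim, collapse runs of spaces, escape, then let each space match anything.
    val = val[:max_len]
    val = re.sub(r" +", r" ", val)
    val = re.escape(val)
    val = val.replace(r"\ ", ".*")
    return re.compile(val, re.IGNORECASE)

rx = pattern_to_regex("open file")
print(bool(rx.search("Open a downloaded File")))  # True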
1
test_generic_relations_with_related_query_name
def test_generic_relations_with_related_query_name(self): bookmark = Bookmark.objects.create(name="djangoproject") tag = FunkyTag.objects.create(content_object=bookmark, name="django") tag_url = reverse("admin:admin_views_funkytag_change", args=(tag.id,)) should_contain = '<li>Funky tag: <a href="%s">django' % tag_url response = self.client.get( reverse("admin:admin_views_bookmark_delete", args=(bookmark.pk,)) ) self.assertContains(response, should_contain)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
tests.py
141
Refs #33476 -- Reformatted code with Black.
52,066
0
96
84
24
207,716
29
django
20
tests/admin_views/tests.py
Python
9
{ "docstring": "\n If a deleted object has GenericForeignKey with\n GenericRelation(related_query_name='...') pointing to it, those objects\n should be listed for deletion.\n ", "language": "en", "n_whitespaces": 47, "n_words": 18, "vocab_size": 18 }
https://github.com/django/django.git
3
parsed_paths
def parsed_paths(self) -> List[str]:
    res_paths: List[str] = []
    paths = self.parser.existing_paths
    for directory in paths:
        for filename in paths[directory]:
            res_paths.append(os.path.join(directory, filename))
    return res_paths
7d9e9a49005de7961e84d2a7c608db57dbab3046
14
augeasparser.py
89
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <[email protected]>
45,535
0
84
57
20
186,624
23
certbot
14
certbot-apache/certbot_apache/_internal/augeasparser.py
Python
16
{ "docstring": "\n Returns a list of file paths that have currently been parsed into the parser\n tree. The returned list may include paths with wildcard characters, for\n example: ['/etc/apache2/conf.d/*.load']\n\n This is typically called on the root node of the ParserNode tree.\n\n :returns: list of file paths of files that have been parsed\n ", "language": "en", "n_whitespaces": 93, "n_words": 50, "vocab_size": 35 }
https://github.com/certbot/certbot.git
2
ids
def ids(self) -> Tuple[str, ...]:
    return tuple(
        sorted(set([cast(str, p.get("id")) for p in self._patterns]) - set([None]))
    )
a322d6d5f2f85c2da6cded4fcd6143d41b5a9e96
18
span_ruler.py
79
Add SpanRuler component (#9880) * Add SpanRuler component Add a `SpanRuler` component similar to `EntityRuler` that saves a list of matched spans to `Doc.spans[spans_key]`. The matches from the token and phrase matchers are deduplicated and sorted before assignment but are not otherwise filtered. * Update spacy/pipeline/span_ruler.py Co-authored-by: Sofie Van Landeghem <[email protected]> * Fix cast * Add self.key property * Use number of patterns as length * Remove patterns kwarg from init * Update spacy/tests/pipeline/test_span_ruler.py Co-authored-by: Sofie Van Landeghem <[email protected]> * Add options for spans filter and setting to ents * Add `spans_filter` option as a registered function' * Make `spans_key` optional and if `None`, set to `doc.ents` instead of `doc.spans[spans_key]`. * Update and generalize tests * Add test for setting doc.ents, fix key property type * Fix typing * Allow independent doc.spans and doc.ents * If `spans_key` is set, set `doc.spans` with `spans_filter`. * If `annotate_ents` is set, set `doc.ents` with `ents_fitler`. * Use `util.filter_spans` by default as `ents_filter`. * Use a custom warning if the filter does not work for `doc.ents`. * Enable use of SpanC.id in Span * Support id in SpanRuler as Span.id * Update types * `id` can only be provided as string (already by `PatternType` definition) * Update all uses of Span.id/ent_id in Doc * Rename Span id kwarg to span_id * Update types and docs * Add ents filter to mimic EntityRuler overwrite_ents * Refactor `ents_filter` to take `entities, spans` args for more filtering options * Give registered filters more descriptive names * Allow registered `filter_spans` filter (`spacy.first_longest_spans_filter.v1`) to take any number of `Iterable[Span]` objects as args so it can be used for spans filter or ents filter * Implement future entity ruler as span ruler Implement a compatible `entity_ruler` as `future_entity_ruler` using `SpanRuler` as the underlying component: * Add `sort_key` and `sort_reverse` to allow the sorting behavior to be customized. (Necessary for the same sorting/filtering as in `EntityRuler`.) * Implement `overwrite_overlapping_ents_filter` and `preserve_existing_ents_filter` to support `EntityRuler.overwrite_ents` settings. * Add `remove_by_id` to support `EntityRuler.remove` functionality. * Refactor `entity_ruler` tests to parametrize all tests to test both `entity_ruler` and `future_entity_ruler` * Implement `SpanRuler.token_patterns` and `SpanRuler.phrase_patterns` properties. Additional changes: * Move all config settings to top-level attributes to avoid duplicating settings in the config vs. `span_ruler/cfg`. (Also avoids a lot of casting.) * Format * Fix filter make method name * Refactor to use same error for removing by label or ID * Also provide existing spans to spans filter * Support ids property * Remove token_patterns and phrase_patterns * Update docstrings * Add span ruler docs * Fix types * Apply suggestions from code review Co-authored-by: Sofie Van Landeghem <[email protected]> * Move sorting into filters * Check for all tokens in seen tokens in entity ruler filters * Remove registered sort key * Set Token.ent_id in a backwards-compatible way in Doc.set_ents * Remove sort options from API docs * Update docstrings * Rename entity ruler filters * Fix and parameterize scoring * Add id to Span API docs * Fix typo in API docs * Include explicit labeled=True for scorer Co-authored-by: Sofie Van Landeghem <[email protected]>
24,383
0
48
49
16
111,340
16
spaCy
11
spacy/pipeline/span_ruler.py
Python
10
{ "docstring": "All IDs present in the match patterns.\n\n RETURNS (set): The string IDs.\n\n DOCS: https://spacy.io/api/spanruler#ids\n ", "language": "en", "n_whitespaces": 35, "n_words": 14, "vocab_size": 14 }
https://github.com/explosion/spaCy.git
6
__call__
def __call__(self, checkpoint): if not self.runner: return if checkpoint.storage == Checkpoint.PERSISTENT and checkpoint.value: checkpoint_path = checkpoint.value logger.debug( "Trial %s: Deleting checkpoint %s", self.trial_id, checkpoint_path ) # TODO(ujvl): Batch remote deletes. # We first delete the remote checkpoint. If it is on the same # node as the driver, it will also remove the local copy. ray.get(self.runner.delete_checkpoint.remote(checkpoint_path)) # Delete local copy, if any exists. if os.path.exists(checkpoint_path): try: checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path) shutil.rmtree(checkpoint_dir) except FileNotFoundError: logger.debug("Local checkpoint dir not found during deletion.")
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
15
trial.py
162
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,818
0
312
95
63
132,826
79
ray
25
python/ray/tune/trial.py
Python
15
{ "docstring": "Requests checkpoint deletion asynchronously.\n\n Args:\n checkpoint (Checkpoint): Checkpoint to delete.\n ", "language": "en", "n_whitespaces": 35, "n_words": 10, "vocab_size": 9 }
https://github.com/ray-project/ray.git
1
test_unread_counts
def test_unread_counts(self) -> None: # Check that our own messages don't increase the unread count. self.helper.send(self.room_id, "hello", tok=self.tok) self._check_unread_count(0) # Join the new user and check that this doesn't increase the unread count. self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2) self._check_unread_count(0) # Check that the new user sending a message increases our unread count. res = self.helper.send(self.room_id, "hello", tok=self.tok2) self._check_unread_count(1) # Send a read receipt to tell the server we've read the latest event. body = json.dumps({"m.read": res["event_id"]}).encode("utf8") channel = self.make_request( "POST", "/rooms/%s/read_markers" % self.room_id, body, access_token=self.tok, ) self.assertEqual(channel.code, 200, channel.json_body) # Check that the unread counter is back to 0. self._check_unread_count(0) # Check that hidden read receipts don't break unread counts res = self.helper.send(self.room_id, "hello", tok=self.tok2) self._check_unread_count(1) # Send a read receipt to tell the server we've read the latest event. body = json.dumps({ReadReceiptEventFields.MSC2285_HIDDEN: True}).encode("utf8") channel = self.make_request( "POST", "/rooms/%s/receipt/m.read/%s" % (self.room_id, res["event_id"]), body, access_token=self.tok, ) self.assertEqual(channel.code, 200, channel.json_body) # Check that the unread counter is back to 0. self._check_unread_count(0) # Check that room name changes increase the unread counter. self.helper.send_state( self.room_id, "m.room.name", {"name": "my super room"}, tok=self.tok2, ) self._check_unread_count(1) # Check that room topic changes increase the unread counter. self.helper.send_state( self.room_id, "m.room.topic", {"topic": "welcome!!!"}, tok=self.tok2, ) self._check_unread_count(2) # Check that encrypted messages increase the unread counter. self.helper.send_event(self.room_id, EventTypes.Encrypted, {}, tok=self.tok2) self._check_unread_count(3) # Check that custom events with a body increase the unread counter. self.helper.send_event( self.room_id, "org.matrix.custom_type", {"body": "hello"}, tok=self.tok2, ) self._check_unread_count(4) # Check that edits don't increase the unread counter. self.helper.send_event( room_id=self.room_id, type=EventTypes.Message, content={ "body": "hello", "msgtype": "m.text", "m.relates_to": {"rel_type": RelationTypes.REPLACE}, }, tok=self.tok2, ) self._check_unread_count(4) # Check that notices don't increase the unread counter. self.helper.send_event( room_id=self.room_id, type=EventTypes.Message, content={"body": "hello", "msgtype": "m.notice"}, tok=self.tok2, ) self._check_unread_count(4) # Check that tombstone events changes increase the unread counter. self.helper.send_state( self.room_id, EventTypes.Tombstone, {"replacement_room": "!someroom:test"}, tok=self.tok2, ) self._check_unread_count(5)
64c73c6ac88a740ee480a0ad1f9afc8596bccfa4
14
test_sync.py
815
Add type hints to `tests/rest/client` (#12066)
71,305
0
1,076
498
121
246,626
290
synapse
35
tests/rest/client/test_sync.py
Python
76
{ "docstring": "Tests that /sync returns the right value for the unread count (MSC2654).", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
https://github.com/matrix-org/synapse.git
3
get_tail
def get_tail(self, n=10, raw=True, output=False, include_latest=False): self.writeout_cache() if not include_latest: n += 1 # cursor/line/entry this_cur = list( self._run_sql( "WHERE session == ? ORDER BY line DESC LIMIT ? ", (self.session_number, n), raw=raw, output=output, ) ) other_cur = list( self._run_sql( "WHERE session != ? ORDER BY session DESC, line DESC LIMIT ?", (self.session_number, n), raw=raw, output=output, ) ) everything = this_cur + other_cur everything = everything[:n] if not include_latest: return list(everything)[:0:-1] return list(everything)[::-1]
dc5bcc1c50892a5128fcf128af28887226144927
12
history.py
198
This fixed the mixing of multiple history seen in #13631 It forces get_tail to put the current session last in the returned results.
52,478
0
344
128
44
208,719
73
ipython
13
IPython/core/history.py
Python
25
{ "docstring": "Get the last n lines from the history database.\n\n Most recent entry last.\n\n Completion will be reordered so that that the last ones are when\n possible from current session.\n\n Parameters\n ----------\n n : int\n The number of lines to get\n raw, output : bool\n See :meth:`get_range`\n include_latest : bool\n If False (default), n+1 lines are fetched, and the latest one\n is discarded. This is intended to be used where the function\n is called by a user command, which it should not return.\n\n Returns\n -------\n Tuples as :meth:`get_range`\n ", "language": "en", "n_whitespaces": 226, "n_words": 87, "vocab_size": 68 }
https://github.com/ipython/ipython.git
4
_discover_all_airflow_builtin_providers_from_local_sources
def _discover_all_airflow_builtin_providers_from_local_sources(self) -> None: try: import airflow.providers except ImportError: log.info("You have no providers installed.") return try: for path in airflow.providers.__path__: # type: ignore[attr-defined] self._add_provider_info_from_local_source_files_on_path(path) except Exception as e: log.warning("Error when loading 'provider.yaml' files from airflow sources: %s", e)
2fdc23333909096d427171002582e2906f8bbc0a
11
providers_manager.py
96
Fix remaining mypy issues in "core" Airflow (#20795) Co-authored-by: Josh Fell <[email protected]> Co-authored-by: Tzu-ping Chung <[email protected]> Co-authored-by: Jarek Potiuk <[email protected]>
8,082
0
144
54
36
43,879
38
airflow
13
airflow/providers_manager.py
Python
19
{ "docstring": "\n Finds all built-in airflow providers if airflow is run from the local sources.\n It finds `provider.yaml` files for all such providers and registers the providers using those.\n\n This 'provider.yaml' scanning takes precedence over scanning packages installed\n in case you have both sources and packages installed, the providers will be loaded from\n the \"airflow\" sources rather than from the packages.\n ", "language": "en", "n_whitespaces": 102, "n_words": 59, "vocab_size": 44 }
https://github.com/apache/airflow.git
3
fit
def fit(self, X, y=None): self._validate_params() X = self._validate_data(X, ensure_min_samples=2, estimator="MinCovDet") random_state = check_random_state(self.random_state) n_samples, n_features = X.shape # check that the empirical covariance is full rank if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features: warnings.warn( "The covariance matrix associated to your dataset is not full rank" ) # compute and store raw estimates raw_location, raw_covariance, raw_support, raw_dist = fast_mcd( X, support_fraction=self.support_fraction, cov_computation_method=self._nonrobust_covariance, random_state=random_state, ) if self.assume_centered: raw_location = np.zeros(n_features) raw_covariance = self._nonrobust_covariance( X[raw_support], assume_centered=True ) # get precision matrix in an optimized way precision = linalg.pinvh(raw_covariance) raw_dist = np.sum(np.dot(X, precision) * X, 1) self.raw_location_ = raw_location self.raw_covariance_ = raw_covariance self.raw_support_ = raw_support self.location_ = raw_location self.support_ = raw_support self.dist_ = raw_dist # obtain consistency at normal models self.correct_covariance(X) # re-weight estimator self.reweight_covariance(X) return self
47dd54f727f09a47e84fb4cb53d33f9b0f239edb
15
_robust_covariance.py
331
MAINT validate parameter in `EmpiricalCovariance`, `MinCovDet`, and `EllipticEnvelope` (#23842) Co-authored-by: Jérémie du Boisberranger <[email protected]>
76,285
0
439
211
90
260,489
123
scikit-learn
41
sklearn/covariance/_robust_covariance.py
Python
31
{ "docstring": "Fit a Minimum Covariance Determinant with the FastMCD algorithm.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n ", "language": "en", "n_whitespaces": 154, "n_words": 54, "vocab_size": 44 }
https://github.com/scikit-learn/scikit-learn.git
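A brief usage sketch of the estimator whose fit method is recorded above, via scikit-learn's public MinCovDet API; the data is random, so the printed numbers are illustrative only.

import numpy as np
from sklearn.covariance import MinCovDet

rng = np.random.RandomState(0)
X = rng.multivariate_normal([0.0, 0.0], [[1.0, 0.3], [0.3, 1.0]], size=500)
X[:25] += 10.0  # inject a handful of gross outliers

mcd = MinCovDet(random_state=0).fit(X)
print(mcd.location_)    # robust location, still near [0, 0] despite the outliers
print(mcd.covariance_)  # robust covariance estimate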
4
from_pandas
def from_pandas(cls, df, return_dims=False): num_splits = GpuCount.get() put_func = cls._partition_class.put # For now, we default to row partitioning pandas_dfs = split_result_of_axis_func_pandas(0, num_splits, df) keys = [ put_func(cls._get_gpu_managers()[i], pandas_dfs[i]) for i in range(num_splits) ] keys = RayWrapper.materialize(keys) parts = cls._create_partitions(keys, cls._get_gpu_managers()).reshape( (num_splits, 1) ) if not return_dims: return parts else: row_lengths = [len(df.index) for df in pandas_dfs] col_widths = [ len(df.columns) ] # single value since we only have row partitions return parts, row_lengths, col_widths
1dc16415333bf2428ee2b1f4d31ff94e66b9a0a6
13
partition_manager.py
206
REFACTOR-#5009: use RayWrapper.materialize instead of ray.get (#5010) Signed-off-by: Myachev <[email protected]>
36,049
0
262
132
56
154,526
74
modin
26
modin/core/execution/ray/implementations/cudf_on_ray/partitioning/partition_manager.py
Python
20
{ "docstring": "\n Create partitions from ``pandas.DataFrame/pandas.Series``.\n\n Parameters\n ----------\n df : pandas.DataFrame/pandas.Series\n A ``pandas.DataFrame`` to add.\n return_dims : boolean, default: False\n Is return dimensions or not.\n\n Returns\n -------\n list or tuple\n List of partitions in case `return_dims` == False,\n tuple (partitions, row lengths, col widths) in other case.\n ", "language": "en", "n_whitespaces": 153, "n_words": 45, "vocab_size": 40 }
https://github.com/modin-project/modin.git
3
_crossover
def _crossover(candidate): sample_index1 = np.random.choice(len(candidate)) sample_index2 = np.random.choice(len(candidate)) sample_1 = candidate[sample_index1] sample_2 = candidate[sample_index2] cross_index = int(len(sample_1) * np.random.uniform(low=0.3, high=0.7)) logger.info( LOGGING_PREFIX + "Perform crossover between %sth and %sth at index=%s", sample_index1, sample_index2, cross_index, ) next_gen = [] for i in range(len(sample_1)): sample = sample_2[i] if i > cross_index else sample_1[i] next_gen.append(sample) return next_gen
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
12
genetic_searcher.py
190
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,658
0
197
125
43
132,059
54
ray
23
python/ray/tune/automl/genetic_searcher.py
Python
17
{ "docstring": "Perform crossover action to candidates.\n\n For example, new gene = 60% sample_1 + 40% sample_2.\n\n Args:\n candidate: List of candidate genes (encodings).\n\n Examples:\n >>> # Genes that represent 3 parameters\n >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])\n >>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]])\n >>> new_gene = _crossover([gene1, gene2])\n >>> # new_gene could be the first [n=1] parameters of\n >>> # gene1 + the rest of gene2\n >>> # in which case:\n >>> # new_gene[0] = gene1[0]\n >>> # new_gene[1] = gene2[1]\n >>> # new_gene[2] = gene1[1]\n\n Returns:\n New gene (encoding)\n ", "language": "en", "n_whitespaces": 270, "n_words": 97, "vocab_size": 62 }
https://github.com/ray-project/ray.git
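A tiny standalone sketch of the single-point crossover idea described in that docstring, in plain Python lists rather than Tune's encoded genes; the function name is made up.

import random

def single_point_crossover(gene1, gene2, rng=random):
    # Take the head of gene1 and the tail of gene2, split at a random index.
    cross_index = rng.randrange(1, len(gene1))
    return gene1[:cross_index] + gene2[cross_index:]

child = single_point_crossover([0, 0, 1, 1], [1, 1, 0, 0])
print(child)  # one of [0, 1, 0, 0], [0, 0, 0, 0] or [0, 0, 1, 0], depending on the split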
6
get_latest_device_activity
def get_latest_device_activity(self, device_id, activity_types):
    if device_id not in self._latest_activities:
        return None

    latest_device_activities = self._latest_activities[device_id]
    latest_activity = None

    for activity_type in activity_types:
        if activity_type in latest_device_activities:
            if (
                latest_activity is not None
                and latest_device_activities[activity_type].activity_start_time
                <= latest_activity.activity_start_time
            ):
                continue
            latest_activity = latest_device_activities[activity_type]

    return latest_activity
dadcc5ebcbcf951ff677568b281c5897d990c8ae
14
activity.py
106
spelling: components/august (#64232) Co-authored-by: Josh Soref <[email protected]>
108,496
0
227
69
28
309,801
42
core
9
homeassistant/components/august/activity.py
Python
15
{ "docstring": "Return latest activity that is one of the activity_types.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
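A rough equivalent of the "keep the newest activity across types" selection above, written with max(); the attribute name mirrors the record, but the helper itself is hypothetical.

def latest_activity(latest_by_type, activity_types):
    # Pick the activity with the newest activity_start_time among the requested types.
    candidates = [latest_by_type[t] for t in activity_types if t in latest_by_type]
    if not candidates:
        return None
    return max(candidates, key=lambda a: a.activity_start_time)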
1
upnp_factory_mock
def upnp_factory_mock() -> Iterable[Mock]: with patch( "homeassistant.components.dlna_dms.dms.UpnpFactory", autospec=True, spec_set=True, ) as upnp_factory: upnp_device = create_autospec(UpnpDevice, instance=True) upnp_device.name = MOCK_DEVICE_NAME upnp_device.udn = MOCK_DEVICE_UDN upnp_device.device_url = MOCK_DEVICE_LOCATION upnp_device.device_type = MOCK_DEVICE_TYPE upnp_device.available = True upnp_device.parent_device = None upnp_device.root_device = upnp_device upnp_device.all_devices = [upnp_device] upnp_device.services = { "urn:schemas-upnp-org:service:ContentDirectory:1": create_autospec( UpnpService, instance=True, service_type="urn:schemas-upnp-org:service:ContentDirectory:1", service_id="urn:upnp-org:serviceId:ContentDirectory", ), "urn:schemas-upnp-org:service:ConnectionManager:1": create_autospec( UpnpService, instance=True, service_type="urn:schemas-upnp-org:service:ConnectionManager:1", service_id="urn:upnp-org:serviceId:ConnectionManager", ), } seal(upnp_device) upnp_factory_instance = upnp_factory.return_value upnp_factory_instance.async_create_device.return_value = upnp_device yield upnp_factory_instance @pytest.fixture
b19bf9b147f4321e89d1f7f01e68337f2102f460
@pytest.fixture
14
conftest.py
241
Add dlna_dms integration to support DLNA Digital Media Servers (#66437)
91,545
1
366
143
50
292,459
68
core
33
tests/components/dlna_dms/conftest.py
Python
34
{ "docstring": "Mock the UpnpFactory class to construct DMS-style UPnP devices.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
3
_setup
def _setup(self, name=None): settings_module = os.environ.get(ENVIRONMENT_VARIABLE) if not settings_module: desc = ("setting %s" % name) if name else "settings" raise ImproperlyConfigured( "Requested %s, but settings are not configured. " "You must either define the environment variable %s " "or call settings.configure() before accessing settings." % (desc, ENVIRONMENT_VARIABLE) ) self._wrapped = Settings(settings_module)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
__init__.py
99
Refs #33476 -- Reformatted code with Black.
50,300
0
172
56
45
203,320
51
django
12
django/conf/__init__.py
Python
11
{ "docstring": "\n Load the settings module pointed to by the environment variable. This\n is used the first time settings are needed, if the user hasn't\n configured settings manually.\n ", "language": "en", "n_whitespaces": 55, "n_words": 26, "vocab_size": 21 }
https://github.com/django/django.git
1
transform_vector
def transform_vector(self, vector):
    return Vector(
        (vector.x + self.offset[0]) * self.scale[0],
        (vector.y + self.offset[1]) * self.scale[1],
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
12
image_operations.py
78
Reformat with black
16,345
0
59
52
14
75,054
16
wagtail
8
wagtail/images/image_operations.py
Python
5
{ "docstring": "\n Transforms the given vector into the coordinate space of the final image.\n\n Use this to find out where a point on the source image would end up in the\n final image after cropping/resizing has been performed.\n\n Returns a new vector.\n ", "language": "en", "n_whitespaces": 76, "n_words": 40, "vocab_size": 33 }
https://github.com/wagtail/wagtail.git
1
test_kbinsdiscretizer_effect_sample_weight
def test_kbinsdiscretizer_effect_sample_weight(): X = np.array([[-2], [-1], [1], [3], [500], [1000]]) # add a large number of bins such that each sample with a non-null weight # will be used as bin edge est = KBinsDiscretizer(n_bins=10, encode="ordinal", strategy="quantile") est.fit(X, sample_weight=[1, 1, 1, 1, 0, 0]) assert_allclose(est.bin_edges_[0], [-2, -1, 1, 3]) assert_allclose(est.transform(X), [[0.0], [1.0], [2.0], [2.0], [2.0], [2.0]])
1f3c1be77a5b15d6bc1a5bfd9eb64315928679b1
11
test_discretization.py
194
ENH add support for sample_weight in KBinsDiscretizer with strategy="quantile" (#24935) Co-authored-by: seladus <[email protected]> Co-authored-by: Seladus <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
76,979
0
80
145
48
261,762
56
scikit-learn
14
sklearn/preprocessing/tests/test_discretization.py
Python
6
{ "docstring": "Check the impact of `sample_weight` one computed quantiles.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/scikit-learn/scikit-learn.git
3
_predict_recursive
def _predict_recursive(self, X, x_squared_norms, sample_weight, cluster_node): if cluster_node.left is None: # This cluster has no subcluster. Labels are just the label of the cluster. return np.full(X.shape[0], cluster_node.label, dtype=np.int32) # Determine if data points belong to the left or right subcluster centers = np.vstack((cluster_node.left.center, cluster_node.right.center)) if hasattr(self, "_X_mean"): centers += self._X_mean cluster_labels = _labels_inertia_threadpool_limit( X, sample_weight, x_squared_norms, centers, self._n_threads, return_inertia=False, ) mask = cluster_labels == 0 # Compute the labels for each subset of the data points. labels = np.full(X.shape[0], -1, dtype=np.int32) labels[mask] = self._predict_recursive( X[mask], x_squared_norms[mask], sample_weight[mask], cluster_node.left ) labels[~mask] = self._predict_recursive( X[~mask], x_squared_norms[~mask], sample_weight[~mask], cluster_node.right ) return labels
0822851f5cb17827939a7d7b4f8c84f43184ae89
11
_bisect_k_means.py
272
FEA Bisecting K-Means (#20031) Co-authored-by: Gael Varoquaux <[email protected]> Co-authored-by: Tom Dupré la Tour <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
75,897
0
325
186
70
259,752
99
scikit-learn
25
sklearn/cluster/_bisect_k_means.py
Python
23
{ "docstring": "Predict recursively by going down the hierarchical tree.\n\n Parameters\n ----------\n X : {ndarray, csr_matrix} of shape (n_samples, n_features)\n The data points, currently assigned to `cluster_node`, to predict between\n the subclusters of this node.\n\n x_squared_norms : ndarray of shape (n_samples,)\n Squared euclidean norm of each data point.\n\n sample_weight : ndarray of shape (n_samples,)\n The weights for each observation in X.\n\n cluster_node : _BisectingTree node object\n The cluster node of the hierarchical tree.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,)\n Index of the cluster each sample belongs to.\n ", "language": "en", "n_whitespaces": 223, "n_words": 87, "vocab_size": 56 }
https://github.com/scikit-learn/scikit-learn.git
1
test_get
def test_get(self): # Generate signature signature = generate_signature(self.image.id, "fill-800x600") # Get the image response = self.client.get( reverse( "wagtailimages_serve", args=(signature, self.image.id, "fill-800x600") ) ) # Check response self.assertEqual(response.status_code, 200) self.assertTrue(response.streaming) self.assertEqual(response["Content-Type"], "image/png")
d10f15e55806c6944827d801cd9c2d53f5da4186
14
tests.py
127
Reformat with black
16,397
0
138
74
24
75,348
31
wagtail
15
wagtail/images/tests/tests.py
Python
10
{ "docstring": "\n Test a valid GET request to the view\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/wagtail/wagtail.git
2
set_temp_hold
def set_temp_hold(self, temp):
    if self.hvac_mode in (HVACMode.HEAT, HVACMode.COOL):
        heat_temp = temp
        cool_temp = temp
    else:
        delta = self.thermostat["settings"]["heatCoolMinDelta"] / 10.0
        heat_temp = temp - delta
        cool_temp = temp + delta
    self.set_auto_temp_hold(heat_temp, cool_temp)
7b1d5fb10af9cf71fae27f9e1020e18bd1fc2510
13
climate.py
102
Use climate enums in ecobee (#70632)
97,614
0
115
64
21
298,672
32
core
12
homeassistant/components/ecobee/climate.py
Python
9
{ "docstring": "Set temperature hold in modes other than auto.\n\n Ecobee API: It is good practice to set the heat and cool hold\n temperatures to be the same, if the thermostat is in either heat, cool,\n auxHeatOnly, or off mode. If the thermostat is in auto mode, an\n additional rule is required. The cool hold temperature must be greater\n than the heat hold temperature by at least the amount in the\n heatCoolMinDelta property.\n https://www.ecobee.com/home/developer/api/examples/ex5.shtml\n ", "language": "en", "n_whitespaces": 128, "n_words": 72, "vocab_size": 49 }
https://github.com/home-assistant/core.git
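The heat/cool spread arithmetic from set_temp_hold, as a small standalone sketch; the mode strings and the tenths-of-a-degree unit follow the record's code, everything else is illustrative.

def heat_cool_hold(temp, hvac_mode, heat_cool_min_delta_tenths):
    # In heat or cool mode both setpoints collapse onto the target temperature.
    if hvac_mode in ("heat", "cool"):
        return temp, temp
    # Otherwise spread the setpoints by the API's minimum delta (stored in tenths).
    delta = heat_cool_min_delta_tenths / 10.0
    return temp - delta, temp + delta

print(heat_cool_hold(21.0, "auto", 25))  # (18.5, 23.5): 2.5 degrees either side of the target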
1
test_archiving_interrupted
def test_archiving_interrupted(self) -> None: expired_msg_ids = self._make_expired_zulip_messages(7) expired_usermsg_ids = self._get_usermessage_ids(expired_msg_ids) # Insert an exception near the end of the archiving process of a chunk: with mock.patch( "zerver.lib.retention.delete_messages", side_effect=Exception("delete_messages error") ): with self.assertRaisesRegex(Exception, r"^delete_messages error$"): # Specify large chunk_size to ensure things happen in a single batch archive_messages(chunk_size=1000) # Archiving code has been executed, but because we got an exception, things should have been rolled back: self._verify_archive_data([], []) self.assertEqual( set(Message.objects.filter(id__in=expired_msg_ids).values_list("id", flat=True)), set(expired_msg_ids), ) self.assertEqual( set( UserMessage.objects.filter(id__in=expired_usermsg_ids).values_list( "id", flat=True ) ), set(expired_usermsg_ids), )
033d2615f6614b06c8268fe60c6ee2a37892c204
16
test_retention.py
223
ruff: Fix B017 `assertRaises(Exception):` should be considered evil. Signed-off-by: Anders Kaseorg <[email protected]>
17,912
0
377
132
68
85,040
81
zulip
23
zerver/tests/test_retention.py
Python
23
{ "docstring": "Check that queries get rolled back to a consistent state\n if archiving gets interrupted in the middle of processing a chunk.", "language": "en", "n_whitespaces": 27, "n_words": 21, "vocab_size": 20 }
https://github.com/zulip/zulip.git
1
test_unknown_sequence_followed_by_known_sequence
def test_unknown_sequence_followed_by_known_sequence(parser): unknown_sequence = "\x1b[?" known_sequence = "\x1b[8~" # key = 'end' sequence = unknown_sequence + known_sequence events = parser.feed(sequence) assert next(events).key == "escape" assert next(events).key == "[" assert next(events).key == "?" assert next(events).key == "end" with pytest.raises(StopIteration): next(events)
bfb962bacf274373e5706090cd854b6aa0857270
9
test_xterm_parser.py
137
Backtracking unknown escape sequences, various tests for XTermParser
44,340
0
77
73
24
183,789
39
textual
12
tests/test_xterm_parser.py
Python
11
{ "docstring": " When we feed the parser an unknown sequence followed by a known\n sequence. The characters in the unknown sequence are delivered as keys,\n and the known escape sequence that follows is delivered as expected.\n ", "language": "en", "n_whitespaces": 44, "n_words": 34, "vocab_size": 26 }
https://github.com/Textualize/textual.git
3
write_to_directory
def write_to_directory(self, metric_info_dir, pretty_print=False): with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), "w", encoding="utf-8") as f: json.dump(asdict(self), f, indent=4 if pretty_print else None) if self.license: with open(os.path.join(metric_info_dir, config.LICENSE_FILENAME), "w", encoding="utf-8") as f: f.write(self.license)
98f2f1390d558d54faa84ae9d2c503d624c7a9ef
15
info.py
154
Pretty print dataset info files (#4116) * Pass indent to `json.dumps` * Minor fix * Pretty print GH dataset infos * Style * Revert "Pretty print GH dataset infos" This reverts commit cf0523d4c9c176271cad44cd0d9dd595d233e817. * Add pretty print option
21,931
0
86
94
21
104,689
28
datasets
19
src/datasets/info.py
Python
6
{ "docstring": "Write `MetricInfo` as JSON to `metric_info_dir`.\n Also save the license separately in LICENCE.\n If `pretty_print` is True, the JSON will be pretty-printed with the indent level of 4.\n ", "language": "en", "n_whitespaces": 49, "n_words": 28, "vocab_size": 25 }
https://github.com/huggingface/datasets.git
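A generic sketch of the same "dump a dataclass as JSON, pretty-printed on request" pattern using only the standard library; the Info class and file names are made up and are not the datasets API.

import json
import os
from dataclasses import asdict, dataclass

@dataclass
class Info:
    name: str
    license: str = ""

def write_info(info, out_dir, pretty_print=False):
    # indent=4 only when pretty-printing is requested, otherwise compact JSON.
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, "info.json"), "w", encoding="utf-8") as f:
        json.dump(asdict(info), f, indent=4 if pretty_print else None)
    # Mirror the separate LICENSE file written when a license is present.
    if info.license:
        with open(os.path.join(out_dir, "LICENSE"), "w", encoding="utf-8") as f:
            f.write(info.license)

write_info(Info(name="demo", license="MIT"), "out", pretty_print=True)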
4
axvline
def axvline(self, x=0, ymin=0, ymax=1, **kwargs): self._check_no_units([ymin, ymax], ['ymin', 'ymax']) if "transform" in kwargs: raise ValueError("'transform' is not allowed as a keyword " "argument; axvline generates its own transform.") xmin, xmax = self.get_xbound() # Strip away the units for comparison with non-unitized bounds. xx, = self._process_unit_info([("x", x)], kwargs) scalex = (xx < xmin) or (xx > xmax) trans = self.get_xaxis_transform(which='grid') l = mlines.Line2D([x, x], [ymin, ymax], transform=trans, **kwargs) self.add_line(l) if scalex: self._request_autoscale_view("x") return l
383de519505964ed879c40b23ef36e90c17ebe0d
11
_axes.py
227
[Doc] fix more spelling and grammar
24,055
0
208
139
66
110,316
74
matplotlib
23
lib/matplotlib/axes/_axes.py
Python
14
{ "docstring": "\n Add a vertical line across the Axes.\n\n Parameters\n ----------\n x : float, default: 0\n x position in data coordinates of the vertical line.\n\n ymin : float, default: 0\n Should be between 0 and 1, 0 being the bottom of the plot, 1 the\n top of the plot.\n\n ymax : float, default: 1\n Should be between 0 and 1, 0 being the bottom of the plot, 1 the\n top of the plot.\n\n Returns\n -------\n `~matplotlib.lines.Line2D`\n\n Other Parameters\n ----------------\n **kwargs\n Valid keyword arguments are `.Line2D` properties, except for\n 'transform':\n\n %(Line2D:kwdoc)s\n\n See Also\n --------\n vlines : Add vertical lines in data coordinates.\n axvspan : Add a vertical span (rectangle) across the axis.\n axline : Add a line with an arbitrary slope.\n\n Examples\n --------\n * draw a thick red vline at *x* = 0 that spans the yrange::\n\n >>> axvline(linewidth=4, color='r')\n\n * draw a default vline at *x* = 1 that spans the yrange::\n\n >>> axvline(x=1)\n\n * draw a default vline at *x* = .5 that spans the middle half of\n the yrange::\n\n >>> axvline(x=.5, ymin=0.25, ymax=0.75)\n ", "language": "en", "n_whitespaces": 465, "n_words": 173, "vocab_size": 87 }
https://github.com/matplotlib/matplotlib.git
1
update
def update(self, props):
    return self._update_props(
        props, "{cls.__name__!r} object has no property {prop_name!r}")
d69be2554cf6d1ac711bf433b1d6f176e3290d4f
8
artist.py
30
Clarify error message for bad keyword arguments. `plot([], [], foo=42)` previously emitted ``` 'Line2D' object has no property 'foo' ``` which refers to the Matplotlib-specific concept of "properties". It now instead emits ``` Line2D.set() got an unexpected keyword argument 'foo' ``` which is modeled after the standard error message for unknown keyword arguments. (To maximize backcompat, the implementation goes through a new _internal_update, which does *not* error when the same prop is passed under different aliases. This could be changed later, but is not the goal of this PR.)
22,841
0
37
17
12
107,629
12
matplotlib
4
lib/matplotlib/artist.py
Python
3
{ "docstring": "\n Update this artist's properties from the dict *props*.\n\n Parameters\n ----------\n props : dict\n ", "language": "en", "n_whitespaces": 49, "n_words": 13, "vocab_size": 12 }
https://github.com/matplotlib/matplotlib.git
12
remove
def remove(name=None, pkgs=None, **kwargs): targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug("Removing these fileset(s)/rpm package(s) %s: %s", name, targets) errors = [] # Get a list of the currently installed pkgs. old = list_pkgs() # Remove the fileset or rpm package(s) for target in targets: cmd = "" out = {} try: named, versionpkg, rpmpkg = _check_pkg(target) except CommandExecutionError as exc: if exc.info: errors.append(exc.info["errors"]) continue if rpmpkg: # assume use dnf or yum cmdflags = "-y remove" libpathenv = {"LIBPATH": "/opt/freeware/lib:/usr/lib"} if pathlib.Path("/opt/freeware/bin/dnf").is_file(): cmdexe = "/opt/freeware/bin/dnf" cmd = "{} {} {}".format(cmdexe, cmdflags, target) out = __salt__["cmd.run_all"]( cmd, python_shell=False, env=libpathenv, ignore_retcode=True, ) elif pathlib.Path("/opt/freeware/bin/yum").is_file(): cmdexe = "/opt/freeware/bin/yum" cmd = "{} {} {}".format(cmdexe, cmdflags, target) out = __salt__["cmd.run_all"]( cmd, python_shell=False, env=libpathenv, ignore_retcode=True, ) elif pathlib.Path("/usr/bin/yum").is_file(): cmdexe = "/usr/bin/yum" cmd = "{} {} {}".format(cmdexe, cmdflags, target) out = __salt__["cmd.run_all"]( cmd, python_shell=False, env=libpathenv, ignore_retcode=True, ) else: cmdexe = "/usr/bin/rpm" cmdflags = "-e" cmd = "{} {} {}".format(cmdexe, cmdflags, target) out = __salt__["cmd.run_all"](cmd, python_shell=False) else: cmd = ["/usr/sbin/installp", "-u", named] out = __salt__["cmd.run_all"](cmd, python_shell=False) log.debug("result of removal command %s, returned result %s", cmd, out) # Get a list of the packages after the uninstall __context__.pop("pkg.list_pkgs", None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( "Problems encountered removing filesets(s)/package(s)", info={"changes": ret, "errors": errors}, ) return ret
fae5bc757ad0f1716483ce7ae180b451545c2058
16
aixpkg.py
648
Updated to remove use of f-strings, and review comments
53,843
0
979
385
123
215,126
220
salt
42
salt/modules/aixpkg.py
Python
65
{ "docstring": "\n Remove specified fileset(s)/rpm package(s).\n\n name\n The name of the fileset or rpm package to be deleted.\n\n .. versionchanged:: 3005\n\n preference to install rpm packages are to use in the following order:\n /opt/freeware/bin/dnf\n /opt/freeware/bin/yum\n /usr/bin/yum\n /usr/bin/rpm\n\n pkgs\n A list of filesets and/or rpm packages to delete.\n Must be passed as a python list. The ``name`` parameter will be\n ignored if this option is passed.\n\n\n Returns a list containing the removed packages.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.remove <fileset/rpm package name>\n salt '*' pkg.remove tcsh\n salt '*' pkg.remove xlC.rte\n salt '*' pkg.remove Firefox.base.adt\n salt '*' pkg.remove pkgs='[\"foo\", \"bar\"]'\n ", "language": "en", "n_whitespaces": 237, "n_words": 98, "vocab_size": 69 }
https://github.com/saltstack/salt.git
2
codes
def codes(self):
    if not self.known:
        msg = (
            "`df.column.cat.codes` with unknown categories is not "
            "supported.  Please use `column.cat.as_known()` or "
            "`df.categorize()` beforehand to ensure known categories"
        )
        raise AttributeNotImplementedError(msg)
    return self._property_map("codes")
ecbab9d7cb52a2d96cb1b8dc397a87811e6f8059
11
categorical.py
61
Add ``AttributeNotImplementedError`` for properties so IPython glob search works (#9231)
36,714
0
132
31
30
156,683
32
dask
6
dask/dataframe/categorical.py
Python
9
{ "docstring": "The codes of this categorical.\n\n If categories are unknown, an error is raised", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
https://github.com/dask/dask.git
3
get_local_ffmpeg
def get_local_ffmpeg() -> Optional[Path]:
    ffmpeg_path = Path(get_spotdl_path()) / (
        "ffmpeg" + (".exe" if platform.system() == "Windows" else "")
    )

    if ffmpeg_path.is_file():
        return ffmpeg_path

    return None
e96e8ed7ec7e7cdffceaeb2c42e0396d4e3a8906
14
ffmpeg.py
86
fixed local path creation on *nix machines
5,524
0
42
47
20
30,374
23
spotify-downloader
8
spotdl/utils/ffmpeg.py
Python
11
{ "docstring": "\n Get local ffmpeg binary path.\n\n ### Returns\n - Path to ffmpeg binary or None if not found.\n ", "language": "en", "n_whitespaces": 30, "n_words": 17, "vocab_size": 15 }
https://github.com/spotDL/spotify-downloader.git
6
get_date_list
def get_date_list(self, queryset, date_type=None, ordering="ASC"): date_field = self.get_date_field() allow_empty = self.get_allow_empty() if date_type is None: date_type = self.get_date_list_period() if self.uses_datetime_field: date_list = queryset.datetimes(date_field, date_type, ordering) else: date_list = queryset.dates(date_field, date_type, ordering) if date_list is not None and not date_list and not allow_empty: raise Http404( _("No %(verbose_name_plural)s available") % { "verbose_name_plural": queryset.model._meta.verbose_name_plural, } ) return date_list
9c19aff7c7561e3a82978a272ecdaad40dda5c00
15
dates.py
175
Refs #33476 -- Reformatted code with Black.
51,761
0
230
108
38
206,860
55
django
19
django/views/generic/dates.py
Python
17
{ "docstring": "\n Get a date list by calling `queryset.dates/datetimes()`, checking\n along the way for empty lists that aren't allowed.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 17 }
https://github.com/django/django.git
1
test_fetch_lfw_people_internal_cropping
def test_fetch_lfw_people_internal_cropping(): # If cropping was not done properly and we don't resize the images, the images would # have their original size (250x250) and the image would not fit in the NumPy array # pre-allocated based on `slice_` parameter. slice_ = (slice(70, 195), slice(78, 172)) lfw = fetch_lfw_people( data_home=SCIKIT_LEARN_DATA, min_faces_per_person=3, download_if_missing=False, resize=None, slice_=slice_, ) assert lfw.images[0].shape == ( slice_[0].stop - slice_[0].start, slice_[1].stop - slice_[1].start, )
3714c900fc6ddcc87ad4486a41e0d1a4324d7209
10
test_lfw.py
129
FIX crop properly the image in fetch_lfw_people (#24951) Co-authored-by: Jérémie du Boisberranger <[email protected]>
76,869
0
142
87
55
261,573
66
scikit-learn
14
sklearn/datasets/tests/test_lfw.py
Python
13
{ "docstring": "Check that we properly crop the images.\n\n Non-regression test for:\n https://github.com/scikit-learn/scikit-learn/issues/24942\n ", "language": "en", "n_whitespaces": 20, "n_words": 11, "vocab_size": 11 }
https://github.com/scikit-learn/scikit-learn.git
10
prepopulated_fields_js
def prepopulated_fields_js(context): prepopulated_fields = [] if "adminform" in context: prepopulated_fields.extend(context["adminform"].prepopulated_fields) if "inline_admin_formsets" in context: for inline_admin_formset in context["inline_admin_formsets"]: for inline_admin_form in inline_admin_formset: if inline_admin_form.original is None: prepopulated_fields.extend(inline_admin_form.prepopulated_fields) prepopulated_fields_json = [] for field in prepopulated_fields: prepopulated_fields_json.append( { "id": "#%s" % field["field"].auto_id, "name": field["field"].name, "dependency_ids": [ "#%s" % dependency.auto_id for dependency in field["dependencies"] ], "dependency_list": [ dependency.name for dependency in field["dependencies"] ], "maxLength": field["field"].field.max_length or 50, "allowUnicode": getattr(field["field"].field, "allow_unicode", False), } ) context.update( { "prepopulated_fields": prepopulated_fields, "prepopulated_fields_json": json.dumps(prepopulated_fields_json), } ) return context @register.tag(name="prepopulated_fields_js")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
@register.tag(name="prepopulated_fields_js")
16
admin_modify.py
322
Refs #33476 -- Reformatted code with Black.
50,414
1
396
178
57
203,504
81
django
20
django/contrib/admin/templatetags/admin_modify.py
Python
32
{ "docstring": "\n Create a list of prepopulated_fields that should render JavaScript for\n the prepopulated fields for both the admin form and inlines.\n ", "language": "en", "n_whitespaces": 30, "n_words": 20, "vocab_size": 18 }
https://github.com/django/django.git
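This template tag reads the admin's `prepopulated_fields` option; a minimal sketch of that configuration, assuming a hypothetical `Article` model with `title` and `slug` fields:

# Hypothetical ModelAdmin sketch — the option the template tag above consumes.
from django.contrib import admin
from myapp.models import Article  # assumed model for illustration


@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
    # The admin JavaScript auto-fills `slug` from `title` while the user types.
    prepopulated_fields = {"slug": ("title",)}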
4
_split_multiext
def _split_multiext(name, min=3, max=4, count=2):
    extension = ''
    for i, sfx in enumerate(reversed(_suffixes(name))):
        if i >= count:
            break

        if min <= len(sfx) <= max:
            extension = '%s%s' % (sfx, extension)
            name = name.rstrip(sfx)
        else:
            # Stop on the first invalid extension
            break

    return name, extension
8ebca4a6a3a9836ce515a4dc60044c52465ef170
12
urls.py
126
fetch_file - properly split files with multi-part file extensions (#75257)
79,494
0
133
77
37
268,366
45
ansible
13
lib/ansible/module_utils/urls.py
Python
11
{ "docstring": "Split a multi-part extension from a file name.\n\n Returns '([name minus extension], extension)'.\n\n Define the valid extension length (including the '.') with 'min' and 'max',\n 'count' sets the number of extensions, counting from the end, to evaluate.\n Evaluation stops on the first file extension that is outside the min and max range.\n\n If no valid extensions are found, the original ``name`` is returned\n and ``extension`` is empty.\n\n :arg name: File name or path.\n :kwarg min: Minimum length of a valid file extension.\n :kwarg max: Maximum length of a valid file extension.\n :kwarg count: Number of suffixes from the end to evaluate.\n\n ", "language": "en", "n_whitespaces": 134, "n_words": 101, "vocab_size": 67 }
https://github.com/ansible/ansible.git
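A self-contained sketch of the same idea using `pathlib` suffixes, since the private `_suffixes` helper is not shown in this record (the slicing step replaces `rstrip`, so this is an illustration of the technique rather than the exact module code):

# Standalone sketch: split a multi-part extension, pathlib standing in for _suffixes().
from pathlib import PurePath


def split_multiext(name, min_len=3, max_len=4, count=2):
    extension = ''
    for i, sfx in enumerate(reversed(PurePath(name).suffixes)):
        if i >= count:
            break
        if min_len <= len(sfx) <= max_len:
            extension = '%s%s' % (sfx, extension)
            name = name[: -len(sfx)]  # drop the suffix just consumed
        else:
            break
    return name, extension


print(split_multiext("backup.tar.gz"))  # ('backup', '.tar.gz')
print(split_multiext("notes.txt"))      # ('notes', '.txt')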
1
test_upload_file_publish
def test_upload_file_publish(self):
    file_upload = ContentFile(b"A new file", name="published-file.txt")
    post_data = {
        "title": "New file",
        "slug": "new-file",
        "file_field": file_upload,
        "action-publish": "Publish",
    }
    response = self.client.post(
        reverse("wagtailadmin_pages:edit", args=[self.file_page.id]), post_data
    )

    # Should be redirected to explorer
    self.assertRedirects(
        response, reverse("wagtailadmin_explore", args=[self.root_page.id])
    )

    # Check the new file exists
    file_page = FilePage.objects.get()

    self.assertEqual(file_page.file_field.name, file_upload.name)
    self.assertTrue(os.path.exists(file_page.file_field.path))
    self.assertEqual(file_page.file_field.read(), b"A new file")
d10f15e55806c6944827d801cd9c2d53f5da4186
14
test_edit_page.py
234
Reformat with black
15,695
0
219
138
46
71,596
55
wagtail
25
wagtail/admin/tests/pages/test_edit_page.py
Python
18
{ "docstring": "\n Check that file uploads work when directly publishing\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/wagtail/wagtail.git
5
__eq__
def __eq__(self, other):
    if isinstance(other, str):
        try:
            other = GEOSGeometry.from_ewkt(other)
        except (ValueError, GEOSException):
            return False
    return (
        isinstance(other, GEOSGeometry)
        and self.srid == other.srid
        and self.equals_exact(other)
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
geometry.py
93
Refs #33476 -- Reformatted code with Black.
50,623
0
139
59
23
204,045
26
django
11
django/contrib/gis/geos/geometry.py
Python
11
{ "docstring": "\n Equivalence testing, a Geometry may be compared with another Geometry\n or an EWKT representation.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
https://github.com/django/django.git
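A usage sketch of the comparison behavior, assuming the GEOS library that `django.contrib.gis` wraps is installed (SRID and coordinates are illustrative):

# Hedged sketch: comparing a geometry against an EWKT string.
from django.contrib.gis.geos import GEOSGeometry

pt = GEOSGeometry("SRID=4326;POINT(5 23)")
print(pt == "SRID=4326;POINT(5 23)")   # True: string is parsed, then compared exactly
print(pt == "SRID=3857;POINT(5 23)")   # False: SRIDs differ
print(pt == "not a geometry")          # False: invalid EWKT is caught and rejected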
2
id_for_label
def id_for_label(self):
    widget = self.field.widget
    id_ = widget.attrs.get("id") or self.auto_id
    return widget.id_for_label(id_)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
boundfield.py
59
Refs #33476 -- Reformatted code with Black.
51,284
0
40
34
11
205,923
12
django
8
django/forms/boundfield.py
Python
4
{ "docstring": "\n Wrapper around the field widget's `id_for_label` method.\n Useful, for example, for focusing on this field regardless of whether\n it has a single widget or a MultiWidget.\n ", "language": "en", "n_whitespaces": 55, "n_words": 26, "vocab_size": 23 }
https://github.com/django/django.git
6
decode_predictions
def decode_predictions(preds, top=5):
    global CLASS_INDEX

    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError(
            "`decode_predictions` expects "
            "a batch of predictions "
            "(i.e. a 2D array of shape (samples, 1000)). "
            "Found array with shape: " + str(preds.shape)
        )
    if CLASS_INDEX is None:
        fpath = data_utils.get_file(
            "imagenet_class_index.json",
            CLASS_INDEX_PATH,
            cache_subdir="models",
            file_hash="c2c37ea517e94d9795004a39431a14cb",
        )
        with open(fpath) as f:
            CLASS_INDEX = json.load(f)
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        result.sort(key=lambda x: x[2], reverse=True)
        results.append(result)
    return results
84afc5193d38057e2e2badf9c889ea87d80d8fbf
15
imagenet_utils.py
271
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,052
0
268
164
66
269,382
85
keras
30
keras/applications/imagenet_utils.py
Python
25
{ "docstring": "Decodes the prediction of an ImageNet model.\n\n Args:\n preds: Numpy array encoding a batch of predictions.\n top: Integer, how many top-guesses to return. Defaults to 5.\n\n Returns:\n A list of lists of top class prediction tuples\n `(class_name, class_description, score)`.\n One list of tuples per sample in batch input.\n\n Raises:\n ValueError: In case of invalid shape of the `pred` array\n (must be 2D).\n ", "language": "en", "n_whitespaces": 111, "n_words": 62, "vocab_size": 49 }
https://github.com/keras-team/keras.git
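The core ranking step — argsort for the top-k indices followed by a score sort — can be shown standalone; a sketch with a dummy class index (the real function maps indices to ImageNet labels downloaded on demand):

# Standalone sketch of the top-k ranking step only, with made-up class labels.
import numpy as np

rng = np.random.default_rng(0)
preds = rng.random((1, 1000))  # one fake prediction row
class_index = {str(i): (f"n{i:08d}", f"class_{i}") for i in range(1000)}  # dummy index

top = 5
for pred in preds:
    top_indices = pred.argsort()[-top:][::-1]      # indices of the 5 largest scores
    result = [class_index[str(i)] + (pred[i],) for i in top_indices]
    result.sort(key=lambda x: x[2], reverse=True)  # highest score first
    print(result[0])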
1
handle_expect_100
def handle_expect_100(self):
    self.send_response_only(HTTPStatus.CONTINUE)
    self.end_headers()
    return True
8198943edd73a363c266633e1aa5b2a9e9c9f526
8
server.py
38
add python 3.10.4 for windows
54,971
0
34
21
6
217,864
6
XX-Net
6
python3.10.4/Lib/http/server.py
Python
4
{ "docstring": "Decide what to do with an \"Expect: 100-continue\" header.\n\n If the client is expecting a 100 Continue response, we must\n respond with either a 100 Continue or a final response before\n waiting for the request body. The default is to always respond\n with a 100 Continue. You can behave differently (for example,\n reject unauthorized requests) by overriding this method.\n\n This method should either return True (possibly after sending\n a 100 Continue response) or send an error response and return\n False.\n\n ", "language": "en", "n_whitespaces": 143, "n_words": 80, "vocab_size": 60 }
https://github.com/XX-net/XX-Net.git
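A minimal sketch of overriding this hook to reject unauthorized "Expect: 100-continue" requests, as the docstring suggests; the handler class and header name are illustrative:

# Sketch: reject 100-continue requests that lack an (assumed) auth header.
from http import HTTPStatus
from http.server import BaseHTTPRequestHandler, HTTPServer


class PickyHandler(BaseHTTPRequestHandler):
    def handle_expect_100(self):
        if self.headers.get("X-Auth") != "let-me-in":  # assumed header and value
            self.send_error(HTTPStatus.EXPECTATION_FAILED)
            return False  # do not read the request body
        return super().handle_expect_100()


# Would be served with, e.g.: HTTPServer(("", 8000), PickyHandler).serve_forever()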
4
paginator_number
def paginator_number(cl, i):
    if i == cl.paginator.ELLIPSIS:
        return format_html("{} ", cl.paginator.ELLIPSIS)
    elif i == cl.page_num:
        return format_html('<span class="this-page">{}</span> ', i)
    else:
        return format_html(
            '<a href="{}"{}>{}</a> ',
            cl.get_query_string({PAGE_VAR: i}),
            mark_safe(' class="end"' if i == cl.paginator.num_pages else ""),
            i,
        )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
16
admin_list.py
128
Refs #33476 -- Reformatted code with Black.
50,406
0
122
78
30
203,487
38
django
11
django/contrib/admin/templatetags/admin_list.py
Python
12
{ "docstring": "\n Generate an individual page index link in a paginated list.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
https://github.com/django/django.git
4
test_thumbnail_repeated_thumbnail
def test_thumbnail_repeated_thumbnail(self) -> None:
    self._test_thumbnail(
        "scale", self.test_image.expected_scaled, self.test_image.expected_found
    )

    if not self.test_image.expected_found:
        return

    # Fetching again should work, without re-requesting the image from the
    # remote.
    params = "?width=32&height=32&method=scale"
    channel = make_request(
        self.reactor,
        FakeSite(self.thumbnail_resource, self.reactor),
        "GET",
        self.media_id + params,
        shorthand=False,
        await_result=False,
    )
    self.pump()

    self.assertEqual(channel.code, 200)
    if self.test_image.expected_scaled:
        self.assertEqual(
            channel.result["body"],
            self.test_image.expected_scaled,
            channel.result["body"],
        )

    # Deleting the thumbnail on disk then re-requesting it should work as
    # Synapse should regenerate missing thumbnails.
    origin, media_id = self.media_id.split("/")
    info = self.get_success(self.store.get_cached_remote_media(origin, media_id))
    file_id = info["filesystem_id"]

    thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir(
        origin, file_id
    )
    shutil.rmtree(thumbnail_dir, ignore_errors=True)

    channel = make_request(
        self.reactor,
        FakeSite(self.thumbnail_resource, self.reactor),
        "GET",
        self.media_id + params,
        shorthand=False,
        await_result=False,
    )
    self.pump()

    self.assertEqual(channel.code, 200)
    if self.test_image.expected_scaled:
        self.assertEqual(
            channel.result["body"],
            self.test_image.expected_scaled,
            channel.result["body"],
        )
32c828d0f760492711a98b11376e229d795fd1b3
11
test_media_storage.py
414
Add type hints to `tests/rest`. (#12208) Co-authored-by: Patrick Cloke <[email protected]>
71,705
0
586
263
68
247,511
112
synapse
33
tests/rest/media/v1/test_media_storage.py
Python
49
{ "docstring": "Test that fetching the same thumbnail works, and deleting the on disk\n thumbnail regenerates it.\n ", "language": "en", "n_whitespaces": 29, "n_words": 15, "vocab_size": 13 }
https://github.com/matrix-org/synapse.git
4
_check_readonly_fields
def _check_readonly_fields(self, obj):
    if obj.readonly_fields == ():
        return []
    elif not isinstance(obj.readonly_fields, (list, tuple)):
        return must_be(
            "a list or tuple", option="readonly_fields", obj=obj, id="admin.E034"
        )
    else:
        return list(
            chain.from_iterable(
                self._check_readonly_fields_item(
                    obj, field_name, "readonly_fields[%d]" % index
                )
                for index, field_name in enumerate(obj.readonly_fields)
            )
        )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
16
checks.py
137
Refs #33476 -- Reformatted code with Black.
50,315
0
250
85
37
203,341
42
django
16
django/contrib/admin/checks.py
Python
16
{ "docstring": "Check that readonly_fields refers to proper attribute or field.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/django/django.git
5
semilogy
def semilogy(self, *args, **kwargs):
    d = {k: v for k, v in kwargs.items()
         if k in ['base', 'subs', 'nonpositive',
                  'basey', 'subsy', 'nonposy']}
    self.set_yscale('log', **d)
    return self.plot(
        *args, **{k: v for k, v in kwargs.items() if k not in d})
383de519505964ed879c40b23ef36e90c17ebe0d
12
_axes.py
139
[Doc] fix more spelling and grammar
24,062
0
111
84
27
110,323
39
matplotlib
10
lib/matplotlib/axes/_axes.py
Python
7
{ "docstring": "\n Make a plot with log scaling on the y-axis.\n\n Call signatures::\n\n semilogy([x], y, [fmt], data=None, **kwargs)\n semilogy([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)\n\n This is just a thin wrapper around `.plot` which additionally changes\n the y-axis to log scaling. All the concepts and parameters of plot can\n be used here as well.\n\n The additional parameters *base*, *subs*, and *nonpositive* control the\n y-axis properties. They are just forwarded to `.Axes.set_yscale`.\n\n Parameters\n ----------\n base : float, default: 10\n Base of the y logarithm.\n\n subs : array-like, optional\n The location of the minor yticks. If *None*, reasonable locations\n are automatically chosen depending on the number of decades in the\n plot. See `.Axes.set_yscale` for details.\n\n nonpositive : {'mask', 'clip'}, default: 'mask'\n Non-positive values in y can be masked as invalid, or clipped to a\n very small positive number.\n\n **kwargs\n All parameters supported by `.plot`.\n\n Returns\n -------\n list of `.Line2D`\n Objects representing the plotted data.\n ", "language": "en", "n_whitespaces": 381, "n_words": 151, "vocab_size": 112 }
https://github.com/matplotlib/matplotlib.git
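A short usage sketch of the wrapper (assumes matplotlib and NumPy are installed):

# Usage sketch: log-scaled y-axis via the thin wrapper around plot().
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 5, 100)
fig, ax = plt.subplots()
ax.semilogy(x, np.exp(x), base=10)  # exponential growth plots as a straight line
ax.set(xlabel="x", ylabel="exp(x)")
plt.show()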
1
test_upgrade_org_config_no_source_code_provider
def test_upgrade_org_config_no_source_code_provider(self):
    with self.tasks():
        self.assert_setup_flow()

    project_id = self.project.id
    org = self.organization
    data = {
        "project_mappings": [[project_id, "Qme9NXBpguaRxcXssZ1NWHVaM98MAL6PHDXUs1jPrgiM8H"]]
    }
    integration = Integration.objects.get(provider=self.provider.key)
    installation = integration.get_installation(org.id)

    responses.add(
        responses.GET,
        "https://api.vercel.com/v1/projects/%s"
        % "Qme9NXBpguaRxcXssZ1NWHVaM98MAL6PHDXUs1jPrgiM8H",
        json={},
    )
    with pytest.raises(ValidationError):
        installation.update_organization_config(data)
284e980df0018f8baee659999268bdd4c7d08255
11
test_integration.py
174
ref: replace self.assertRaises with pytest.raises (#35685) * add flake8 plugin to detect assertRaises * ref: replace self.assertRaises with pytest.raises * non-sed fixes
18,763
0
188
101
29
91,382
34
sentry
26
tests/sentry/integrations/vercel/test_integration.py
Python
18
{ "docstring": "Test that the function doesn't progress if the Vercel project hasn't been connected to a Git repository", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 16 }
https://github.com/getsentry/sentry.git
5
_get_coeff_exp
def _get_coeff_exp(expr, x):
    from sympy.simplify import powsimp
    (c, m) = expand_power_base(powsimp(expr)).as_coeff_mul(x)
    if not m:
        return c, S.Zero
    [m] = m
    if m.is_Pow:
        if m.base != x:
            raise _CoeffExpValueError('expr not of form a*x**b')
        return c, m.exp
    elif m == x:
        return c, S.One
    else:
        raise _CoeffExpValueError('expr not of form a*x**b: %s' % expr)
f757f3daae6e11ea0cfb7dadc133274d8d74315f
12
meijerint.py
148
Reordered imports 2
48,173
0
122
90
37
196,786
52
sympy
17
sympy/integrals/meijerint.py
Python
14
{ "docstring": "\n When expr is known to be of the form c*x**b, with c and/or b possibly 1,\n return c, b.\n\n Examples\n ========\n\n >>> from sympy.abc import x, a, b\n >>> from sympy.integrals.meijerint import _get_coeff_exp\n >>> _get_coeff_exp(a*x**b, x)\n (a, b)\n >>> _get_coeff_exp(x, x)\n (1, 1)\n >>> _get_coeff_exp(2*x, x)\n (2, 1)\n >>> _get_coeff_exp(x**3, x)\n (1, 3)\n ", "language": "en", "n_whitespaces": 99, "n_words": 53, "vocab_size": 40 }
https://github.com/sympy/sympy.git
2
process
def process(self) -> None:
    logger.debug("Starting Conversion")
    # queue_manager.debug_monitor(5)
    try:
        self._convert_images()
        self._disk_io.save_thread.join()
        queue_manager.terminate_queues()

        finalize(self._images.count,
                 self._predictor.faces_count,
                 self._predictor.verify_output)
        logger.debug("Completed Conversion")
    except MemoryError as err:
        msg = ("Faceswap ran out of RAM running convert. Conversion is very system RAM "
               "heavy, so this can happen in certain circumstances when you have a lot of "
               "cpus but not enough RAM to support them all."
               "\nYou should lower the number of processes in use by either setting the "
               "'singleprocess' flag (-sp) or lowering the number of parallel jobs (-j).")
        raise FaceswapError(msg) from err
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
12
convert.py
150
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
20,804
0
313
82
77
101,389
89
faceswap
20
scripts/convert.py
Python
26
{ "docstring": " The entry point for triggering the Conversion Process.\n\n Should only be called from :class:`lib.cli.launcher.ScriptExecutor`\n\n Raises\n ------\n FaceswapError\n Error raised if the process runs out of memory\n ", "language": "en", "n_whitespaces": 74, "n_words": 26, "vocab_size": 25 }
https://github.com/deepfakes/faceswap.git
5
_oas
def _oas(X, *, assume_centered=False):
    # for only one feature, the result is the same whatever the shrinkage
    if len(X.shape) == 2 and X.shape[1] == 1:
        if not assume_centered:
            X = X - X.mean()
        return np.atleast_2d((X**2).mean()), 0.0

    n_samples, n_features = X.shape

    emp_cov = empirical_covariance(X, assume_centered=assume_centered)
    mu = np.trace(emp_cov) / n_features

    # formula from Chen et al.'s **implementation**
    alpha = np.mean(emp_cov**2)
    num = alpha + mu**2
    den = (n_samples + 1.0) * (alpha - (mu**2) / n_features)

    shrinkage = 1.0 if den == 0 else min(num / den, 1.0)
    shrunk_cov = (1.0 - shrinkage) * emp_cov
    shrunk_cov.flat[:: n_features + 1] += shrinkage * mu

    return shrunk_cov, shrinkage


###############################################################################
# Public API
# ShrunkCovariance estimator
a4d470838fe0cc31ba838594e4c8b0ff15be91f8
14
_shrunk_covariance.py
266
MAINT Parameters validation for `covariance.oas` (#24904) Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: jeremiedbb <[email protected]>
77,005
0
176
176
75
261,801
112
scikit-learn
21
sklearn/covariance/_shrunk_covariance.py
Python
15
{ "docstring": "Estimate covariance with the Oracle Approximating Shrinkage algorithm.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/scikit-learn/scikit-learn.git
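`_oas` is the private helper behind scikit-learn's public OAS estimator; a usage sketch of that public API with random data:

# Usage sketch of the public estimator that wraps this helper.
import numpy as np
from sklearn.covariance import OAS

rng = np.random.RandomState(0)
X = rng.normal(size=(50, 5))  # 50 samples, 5 features

est = OAS().fit(X)
print(est.covariance_.shape)  # (5, 5) shrunk covariance estimate
print(est.shrinkage_)         # shrinkage coefficient in [0, 1]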
5
ledoit_wolf
def ledoit_wolf(X, *, assume_centered=False, block_size=1000):
    X = check_array(X)
    # for only one feature, the result is the same whatever the shrinkage
    if len(X.shape) == 2 and X.shape[1] == 1:
        if not assume_centered:
            X = X - X.mean()
        return np.atleast_2d((X**2).mean()), 0.0
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))
        warnings.warn(
            "Only one sample available. You may want to reshape your data array"
        )
        n_features = X.size
    else:
        _, n_features = X.shape

    # get Ledoit-Wolf shrinkage
    shrinkage = ledoit_wolf_shrinkage(
        X, assume_centered=assume_centered, block_size=block_size
    )
    emp_cov = empirical_covariance(X, assume_centered=assume_centered)
    mu = np.sum(np.trace(emp_cov)) / n_features
    shrunk_cov = (1.0 - shrinkage) * emp_cov
    shrunk_cov.flat[:: n_features + 1] += shrinkage * mu

    return shrunk_cov, shrinkage
1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe
14
_shrunk_covariance.py
284
MNT Update black to stable version (#22474)
75,444
0
229
182
76
258,820
109
scikit-learn
26
sklearn/covariance/_shrunk_covariance.py
Python
22
{ "docstring": "Estimates the shrunk Ledoit-Wolf covariance matrix.\n\n Read more in the :ref:`User Guide <shrunk_covariance>`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data from which to compute the covariance estimate\n\n assume_centered : bool, default=False\n If True, data will not be centered before computation.\n Useful to work with data whose mean is significantly equal to\n zero but is not exactly zero.\n If False, data will be centered before computation.\n\n block_size : int, default=1000\n Size of blocks into which the covariance matrix will be split.\n This is purely a memory optimization and does not affect results.\n\n Returns\n -------\n shrunk_cov : ndarray of shape (n_features, n_features)\n Shrunk covariance.\n\n shrinkage : float\n Coefficient in the convex combination used for the computation\n of the shrunk estimate.\n\n Notes\n -----\n The regularized (shrunk) covariance is:\n\n (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)\n\n where mu = trace(cov) / n_features\n ", "language": "en", "n_whitespaces": 263, "n_words": 145, "vocab_size": 104 }
https://github.com/scikit-learn/scikit-learn.git
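A usage sketch of the function as exposed in `sklearn.covariance`, with random data:

# Usage sketch: Ledoit-Wolf shrinkage on a random sample.
import numpy as np
from sklearn.covariance import ledoit_wolf

rng = np.random.RandomState(42)
X = rng.normal(size=(100, 10))

shrunk_cov, shrinkage = ledoit_wolf(X)
print(shrunk_cov.shape)          # (10, 10)
print(0.0 <= shrinkage <= 1.0)   # True: convex-combination coefficient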
1
create_pax_global_header
def create_pax_global_header(cls, pax_headers):
    return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8")
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
8
tarfile.py
32
Vendor in pip 22.1.2
3,872
0
21
19
7
21,486
7
pipenv
5
pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py
Python
2
{ "docstring": "Return the object as a pax global header block sequence.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
https://github.com/pypa/pipenv.git
7
adapt
def adapt(self, data, batch_size=None, steps=None):
    _disallow_inside_tf_function("adapt")
    if not version_utils.should_use_v2():
        raise RuntimeError("`adapt` is only supported in tensorflow v2.")
    if not self._is_compiled:
        self.compile()  # Compile with defaults.
    if self.built:
        self.reset_state()
    data_handler = data_adapter.DataHandler(
        data,
        batch_size=batch_size,
        steps_per_epoch=steps,
        epochs=1,
        steps_per_execution=self._steps_per_execution,
        distribute=False,
    )
    self._adapt_function = self.make_adapt_function()
    for _, iterator in data_handler.enumerate_epochs():
        with data_handler.catch_stop_iteration():
            for _ in data_handler.steps():
                self._adapt_function(iterator)
                if data_handler.should_sync:
                    context.async_wait()
    self.finalize_state()
    self._is_adapted = True
3613c3defc39c236fb1592c4f7ba1a9cc887343a
15
base_preprocessing_layer.py
241
Remove pylint comments. PiperOrigin-RevId: 452353044
82,685
0
324
147
49
278,703
60
keras
32
keras/engine/base_preprocessing_layer.py
Python
25
{ "docstring": "Fits the state of the preprocessing layer to the data being passed.\n\n After calling `adapt` on a layer, a preprocessing layer's state will not\n update during training. In order to make preprocessing layers efficient\n in any distribution context, they are kept constant with respect to any\n compiled `tf.Graph`s that call the layer. This does not affect the layer\n use when adapting each layer only once, but if you adapt a layer\n multiple times you will need to take care to re-compile any compiled\n functions as follows:\n\n * If you are adding a preprocessing layer to a `keras.Model`, you need\n to call `model.compile` after each subsequent call to `adapt`.\n * If you are calling a preprocessing layer inside\n `tf.data.Dataset.map`, you should call `map` again on the input\n `tf.data.Dataset` after each `adapt`.\n * If you are using a `tf.function` directly which calls a preprocessing\n layer, you need to call `tf.function` again on your callable after\n each subsequent call to `adapt`.\n\n `tf.keras.Model` example with multiple adapts:\n\n >>> layer = tf.keras.layers.Normalization(\n ... axis=None)\n >>> layer.adapt([0, 2])\n >>> model = tf.keras.Sequential(layer)\n >>> model.predict([0, 1, 2])\n array([-1., 0., 1.], dtype=float32)\n >>> layer.adapt([-1, 1])\n >>> model.compile() # This is needed to re-compile model.predict!\n >>> model.predict([0, 1, 2])\n array([0., 1., 2.], dtype=float32)\n\n `tf.data.Dataset` example with multiple adapts:\n\n >>> layer = tf.keras.layers.Normalization(\n ... axis=None)\n >>> layer.adapt([0, 2])\n >>> input_ds = tf.data.Dataset.range(3)\n >>> normalized_ds = input_ds.map(layer)\n >>> list(normalized_ds.as_numpy_iterator())\n [array([-1.], dtype=float32),\n array([0.], dtype=float32),\n array([1.], dtype=float32)]\n >>> layer.adapt([-1, 1])\n >>> normalized_ds = input_ds.map(layer) # Re-map over the input dataset.\n >>> list(normalized_ds.as_numpy_iterator())\n [array([0.], dtype=float32),\n array([1.], dtype=float32),\n array([2.], dtype=float32)]\n\n `adapt()` is meant only as a single machine utility to compute layer\n state. To analyze a dataset that cannot fit on a single machine, see\n [Tensorflow Transform](\n https://www.tensorflow.org/tfx/transform/get_started)\n for a multi-machine, map-reduce solution.\n\n Arguments:\n data: The data to train on. It can be passed either as a tf.data\n Dataset, or as a numpy array.\n batch_size: Integer or `None`.\n Number of samples per state update. If unspecified,\n `batch_size` will default to 32. Do not specify the\n `batch_size` if your data is in the form of datasets,\n generators, or `keras.utils.Sequence` instances (since they\n generate batches).\n steps: Integer or `None`.\n Total number of steps (batches of samples)\n When training with input tensors such as\n TensorFlow data tensors, the default `None` is equal to\n the number of samples in your dataset divided by\n the batch size, or 1 if that cannot be determined. If x is a\n `tf.data` dataset, and 'steps' is None, the epoch will run until\n the input dataset is exhausted. When passing an infinitely\n repeating dataset, you must specify the `steps` argument. This\n argument is not supported with array inputs.\n ", "language": "en", "n_whitespaces": 1065, "n_words": 434, "vocab_size": 227 }
https://github.com/keras-team/keras.git
2
test_loadtxt_float_conversion
def test_loadtxt_float_conversion():
    strings = [
        '0.9999999999999999',
        '9876543210.123456',
        '5.43215432154321e+300',
        '0.901',
        '0.333',
    ]
    txt = TextIO('\n'.join(strings))
    res = np.loadtxt(txt)
    expected = np.array([float(s) for s in strings])
    assert_equal(res, expected)
66a61b03658f3c9f312505dcf7eab07e4cf91ac6
11
test_io.py
108
Port over tests from npreadtext test suite - Add test for parsing scientific notation. - Add multiple-char comment test. - Port over tests for structured dtypes. - Add tests for exceptions on skiprows/max_rows. - port over ndmin tests. - Make structured data reusable, add unpack tests. - Port over delimiter tests. - Port over maxrows test w/ various dtypes. - Port over test of exception msg on parse failure. - Port over test for converters w/neg indices. - Port over usecols tests - Port over unicode tests. - Port over more converter tests. - Port over test for large rows. - Port over test for string-len discovery. - Port over float conversion accuracy test. - Port over bool test. - Add test for implicit float->int conversion. - Port over complex parsing tests. - Port over tests for reading from generator. - Port over object cleanup test. - Port over bytes incompat test. - Port over converters tests. Co-authored-by: Warren Weckesser <[email protected]> Co-authored-by: Sebastian Berg <[email protected]>
38,418
0
82
61
23
159,757
26
numpy
13
numpy/lib/tests/test_io.py
Python
12
{ "docstring": "\n Some tests that the conversion to float64 works as accurately as the Python\n built-in `float` function. In a naive version of the float parser, these\n strings resulted in values that were off by an ULP or two.\n ", "language": "en", "n_whitespaces": 50, "n_words": 37, "vocab_size": 33 }
https://github.com/numpy/numpy.git
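The property under test can be checked interactively; a quick sketch using only NumPy and the standard library:

# Quick check of the same property: np.loadtxt should agree with Python's float().
import numpy as np
from io import StringIO

s = "0.9999999999999999"
parsed = np.loadtxt(StringIO(s))
assert parsed == float(s)
print(repr(float(parsed)))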