Dataset columns (name, dtype, observed range):

    complexity       int64    1 to 139
    fun_name         string   1 to 80 chars
    code             string   101 to 62.2k chars
    commit_id        string   40 chars
    ast_errors       string   0 to 3.11k chars
    ast_levels       int64    6 to 36
    file_name        string   5 to 79 chars
    n_ast_nodes      int64    17 to 19.2k
    commit_message   string   3 to 15.3k chars
    d_id             int64    12 to 121k
    n_ast_errors     int64    0 to 9
    n_whitespaces    int64    4 to 10.8k
    token_counts     int64    5 to 3.06k
    vocab_size       int64    4 to 1.11k
    id               int64    20 to 338k
    n_words          int64    4 to 4.82k
    repo             string   3 to 22 chars
    n_identifiers    int64    2 to 176
    path             string   7 to 134 chars
    language         string   1 class (Python)
    nloc             int64    1 to 413
    documentation    dict
    url              string   31 to 59 chars

Each record below lists its field values one per line, in the column order above; empty string fields (such as a blank ast_errors) are skipped.
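The per-function counts in this schema (n_words, vocab_size, token_counts, nloc) read as plain lexical statistics over the code string, but the dataset's own extraction pipeline is not documented here. The sketch below is therefore only an assumption of how such columns could be approximated for one snippet using Python's standard tokenize module; the helper name rough_metrics and the exact token filtering are illustrative, not the dataset's actual definitions.

    # A minimal sketch, not the dataset's pipeline: assumes the counts are
    # simple lexical statistics over the code string. Function name and
    # filtering rules are hypothetical.
    import io
    import tokenize

    def rough_metrics(code: str) -> dict:
        """Approximate a few of the per-function columns for one code snippet."""
        # nloc: non-blank source lines (one plausible reading of the column)
        nloc = sum(1 for line in code.splitlines() if line.strip())

        # token_counts / vocab_size: lexical tokens and distinct token strings,
        # ignoring pure layout tokens emitted by the tokenizer
        layout = {tokenize.NEWLINE, tokenize.NL, tokenize.INDENT,
                  tokenize.DEDENT, tokenize.ENDMARKER}
        tokens = []
        try:
            for tok in tokenize.generate_tokens(io.StringIO(code).readline):
                if tok.type not in layout:
                    tokens.append(tok.string)
        except (tokenize.TokenError, IndentationError):
            pass  # malformed or truncated snippets may not tokenize cleanly

        return {
            "nloc": nloc,
            "token_counts": len(tokens),
            "vocab_size": len(set(tokens)),
            "n_words": len(code.split()),  # whitespace-separated words, assumed
        }

    # Example: feed it the code field of one row
    print(rough_metrics("def add(a, b):\n    return a + b\n"))

Catching tokenizer errors matters because, as rendered in this dump, each code field sits on a single line with its original newlines lost, and such text rarely tokenizes cleanly.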
9
get_batch_no
def get_batch_no(doctype, txt, searchfield, start, page_len, filters): doctype = "Batch" cond = "" if filters.get("posting_date"): cond = "and (batch.expiry_date is null or batch.expiry_date >= %(posting_date)s)" batch_nos = None args = { "item_code": filters.get("item_code"), "warehouse": filters.get("warehouse"), "posting_date": filters.get("posting_date"), "txt": "%{0}%".format(txt), "start": start, "page_len": page_len, } having_clause = "having sum(sle.actual_qty) > 0" if filters.get("is_return"): having_clause = "" meta = frappe.get_meta(doctype, cached=True) searchfields = meta.get_search_fields() search_columns = "" search_cond = "" if searchfields: search_columns = ", " + ", ".join(searchfields) search_cond = " or " + " or ".join([field + " like %(txt)s" for field in searchfields]) if args.get("warehouse"): searchfields = ["batch." + field for field in searchfields] if searchfields: search_columns = ", " + ", ".join(searchfields) search_cond = " or " + " or ".join([field + " like %(txt)s" for field in searchfields]) batch_nos = frappe.db.sql( .format( search_columns=search_columns, cond=cond, match_conditions=get_match_cond(doctype), having_clause=having_clause, search_cond=search_cond, ), args, ) return batch_nos else: return frappe.db.sql( .format( cond, search_columns=search_columns, search_cond=search_cond, match_conditions=get_match_cond(doctype), ), args, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
9baa2229761c5415f29646a1a5bed4a3f4981e05
@frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
16
queries.py
501
fix: specify allowed doctype in queries (#31761)
14,981
1
106
280
80
69,113
158
erpnext
29
erpnext/controllers/queries.py
Python
83
{ "docstring": "select sle.batch_no, round(sum(sle.actual_qty),2), sle.stock_uom,\n\t\t\t\tconcat('MFG-',batch.manufacturing_date), concat('EXP-',batch.expiry_date)\n\t\t\t\t{search_columns}\n\t\t\tfrom `tabStock Ledger Entry` sle\n\t\t\t\tINNER JOIN `tabBatch` batch on sle.batch_no = batch.name\n\t\t\twhere\n\t\t\t\tbatch.disabled = 0\n\t\t\t\tand sle.is_cancelled = 0\n\t\t\t\tand sle.item_code = %(item_code)s\n\t\t\t\tand sle.warehouse = %(warehouse)s\n\t\t\t\tand (sle.batch_no like %(txt)s\n\t\t\t\tor batch.expiry_date like %(txt)s\n\t\t\t\tor batch.manufacturing_date like %(txt)s\n\t\t\t\t{search_cond})\n\t\t\t\tand batch.docstatus < 2\n\t\t\t\t{cond}\n\t\t\t\t{match_conditions}\n\t\t\tgroup by batch_no {having_clause}\n\t\t\torder by batch.expiry_date, sle.batch_no desc\n\t\t\tlimit %(page_len)s offset %(start)sselect name, concat('MFG-', manufacturing_date), concat('EXP-',expiry_date)\n\t\t\t{search_columns}\n\t\t\tfrom `tabBatch` batch\n\t\t\twhere batch.disabled = 0\n\t\t\tand item = %(item_code)s\n\t\t\tand (name like %(txt)s\n\t\t\tor expiry_date like %(txt)s\n\t\t\tor manufacturing_date like %(txt)s\n\t\t\t{search_cond})\n\t\t\tand docstatus < 2\n\t\t\t{0}\n\t\t\t{match_conditions}\n\n\t\t\torder by expiry_date, name desc\n\t\t\tlimit %(page_len)s offset %(start)s", "language": "en", "n_whitespaces": 79, "n_words": 112, "vocab_size": 65 }
https://github.com/frappe/erpnext.git
9
allow_serial_nos_with_different_item
def allow_serial_nos_with_different_item(sle_serial_no, sle): allow_serial_nos = False if sle.voucher_type == "Stock Entry" and cint(sle.actual_qty) > 0: stock_entry = frappe.get_cached_doc("Stock Entry", sle.voucher_no) if stock_entry.purpose in ("Repack", "Manufacture"): for d in stock_entry.get("items"): if d.serial_no and (d.s_warehouse if not sle.is_cancelled else d.t_warehouse): serial_nos = get_serial_nos(d.serial_no) if sle_serial_no in serial_nos: allow_serial_nos = True return allow_serial_nos
494bd9ef78313436f0424b918f200dab8fc7c20b
17
serial_no.py
163
style: format code with black
14,610
0
39
98
38
67,735
50
erpnext
20
erpnext/stock/doctype/serial_no/serial_no.py
Python
11
{ "docstring": "\n\tAllows same serial nos for raw materials and finished goods\n\tin Manufacture / Repack type Stock Entry\n\t", "language": "en", "n_whitespaces": 15, "n_words": 17, "vocab_size": 17 }
https://github.com/frappe/erpnext.git
5
find_latest_deps_file
def find_latest_deps_file(build_data_working, ansible_version): # imports here so that they don't cause unnecessary deps for all of the plugins from packaging.version import Version data_dir = os.path.join(build_data_working, ansible_version) deps_files = glob.glob(os.path.join(data_dir, '*.deps')) if not deps_files: raise Exception('No deps files exist for version {0}'.format(ansible_version)) # Find the latest version of the deps file for this major version latest = None latest_ver = Version('0') for filename in deps_files: deps_data = parse_deps_file(filename) new_version = Version(deps_data['_ansible_version']) if new_version > latest_ver: latest_ver = new_version latest = filename if latest is None: raise NoSuchFile('Could not find an ansible deps file in {0}'.format(data_dir)) return latest # # Subcommand core #
9545f2e0deae73d142047f02b499940b07b700aa
12
docs_build.py
197
stable docs build: do not use YAML parsing/serialization for .deps files (#79233) * Do not use YAML parsing/serialization for .deps files. * Prevent crash.
79,570
0
191
114
64
268,629
101
ansible
21
hacking/build_library/build_ansible/command_plugins/docs_build.py
Python
17
{ "docstring": "Find the most recent ansible deps file for the given ansible major version.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 11 }
https://github.com/ansible/ansible.git
6
move_tone_curve
def move_tone_curve(img, low_y, high_y): input_dtype = img.dtype if low_y < 0 or low_y > 1: raise ValueError("low_shift must be in range [0, 1]") if high_y < 0 or high_y > 1: raise ValueError("high_shift must be in range [0, 1]") if input_dtype != np.uint8: raise ValueError("Unsupported image type {}".format(input_dtype)) t = np.linspace(0.0, 1.0, 256) # Defines responze of a four-point bezier curve
43201769f044e2f1bcd594a6e5c251903cf14c49
12
functional.py
120
Upgrade black to 22.3.0 (#1181) * Upgrade black to 22.3.0 * Remove sphinx docs build
57,473
0
103
126
42
225,577
61
albumentations
12
albumentations/augmentations/functional.py
Python
15
{ "docstring": "Rescales the relationship between bright and dark areas of the image by manipulating its tone curve.\n\n Args:\n img (numpy.ndarray): RGB or grayscale image.\n low_y (float): y-position of a Bezier control point used\n to adjust the tone curve, must be in range [0, 1]\n high_y (float): y-position of a Bezier control point used\n to adjust image tone curve, must be in range [0, 1]\n ", "language": "en", "n_whitespaces": 112, "n_words": 63, "vocab_size": 40 }
https://github.com/albumentations-team/albumentations.git
6
apply_str
def apply_str(self) -> DataFrame | Series: # Caller is responsible for checking isinstance(self.f, str) f = cast(str, self.f) obj = self.obj # Support for `frame.transform('method')` # Some methods (shift, etc.) require the axis argument, others # don't, so inspect and insert if necessary. func = getattr(obj, f, None) if callable(func): sig = inspect.getfullargspec(func) if self.axis != 0 and ( "axis" not in sig.args or f in ("corrwith", "mad", "skew") ): raise ValueError(f"Operation {f} does not support axis=1") elif "axis" in sig.args: self.kwargs["axis"] = self.axis return self._try_aggregate_string_function(obj, f, *self.args, **self.kwargs)
d1485e76b7c48316c0d54f1a4df26c4467792670
13
apply.py
199
CLN: Refactor groupby._make_wrapper (#48400) * CLN: Refactor groupby._make_wrapper * Remove type: ignore * Revert behavior change for corrwith; fix skew and mad * Add docstring
40,394
0
248
117
71
169,112
89
pandas
19
pandas/core/apply.py
Python
20
{ "docstring": "\n Compute apply in case of a string.\n\n Returns\n -------\n result: Series or DataFrame\n ", "language": "en", "n_whitespaces": 49, "n_words": 13, "vocab_size": 13 }
https://github.com/pandas-dev/pandas.git
2
completed_annotations
def completed_annotations(self): if self.project.skip_queue == self.project.SkipQueue.IGNORE_SKIPPED: return self.annotations.filter(Q(ground_truth=False)) else: return self.annotations.filter(Q_finished_annotations)
074af782e6f351c711f18d8ad6a05aa4f632339c
12
models.py
76
feat: DEV-469: Skip queue (#1693) * DEV-469 Skip queue project setting * DEV-469 review fixes * Merge migrations (DEV-469) * Update requirements-test.txt * Update requirements-test.txt * Update test_exception.py * Revert "Update test_exception.py" This reverts commit b9c686c9bacaf298bafe3a207352cc5260fef737. * Revert "Update requirements-test.txt" This reverts commit 3704d29978761089bcd008506f9e1c30a162bb3a. * Revert "Update requirements-test.txt" This reverts commit 50273847ae2872b31bccc376d04a3afff0efcf21. * Recalc is_labeled after skip_queue change (DEV-469) * Fix migrations (DEV-469) Co-authored-by: Max Tkachenko <[email protected]> Co-authored-by: niklub <[email protected]> Co-authored-by: nik <[email protected]>
42,458
0
54
46
10
177,600
11
label-studio
11
label_studio/tasks/models.py
Python
5
{ "docstring": "Annotations that we take into account when set completed status to the task", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
https://github.com/heartexlabs/label-studio.git
2
test_get_result_correctness
async def test_get_result_correctness(graph1): (_, _, _, dag) = graph1 handle = serve.run(DAGDriver.bind(dag)) visualizer = GraphVisualizer() visualizer.visualize_with_gradio(handle, _launch=False) await visualizer._send_request(random.randint(0, 100), 1, 2) values = await asyncio.gather( *[ (visualizer._get_result(node.get_stable_uuid())) for node in visualizer.node_to_block ] ) assert {1, 2} <= set(values) @pytest.mark.asyncio
4c970cc88247f7cfa7351297b8b5050f2372742e
@pytest.mark.asyncio
16
test_gradio_visualization.py
171
[serve] Visualize Deployment Graph with Gradio (#27897)
28,422
1
102
103
35
127,348
40
ray
26
python/ray/serve/tests/test_gradio_visualization.py
Python
13
{ "docstring": "Tests correctness: that after running _send_request(), _get_result() in\n GraphVisualizer correctly returns object refs to the submitted tasks.\n ", "language": "en", "n_whitespaces": 23, "n_words": 17, "vocab_size": 17 }
https://github.com/ray-project/ray.git
2
test_get_backfill_points_in_room
def test_get_backfill_points_in_room(self): setup_info = self._setup_room_for_backfill_tests() room_id = setup_info.room_id backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertListEqual( backfill_event_ids, ["b6", "b5", "b4", "2", "b3", "b2", "b1"] )
ac1a31740b6d0dfda4d57a25762aaddfde981caf
11
test_event_federation.py
115
Only try to backfill event if we haven't tried before recently (#13635) Only try to backfill event if we haven't tried before recently (exponential backoff). No need to keep trying the same backfill point that fails over and over. Fix https://github.com/matrix-org/synapse/issues/13622 Fix https://github.com/matrix-org/synapse/issues/8451 Follow-up to https://github.com/matrix-org/synapse/pull/13589 Part of https://github.com/matrix-org/synapse/issues/13356
72,983
0
108
67
26
249,543
30
synapse
12
tests/storage/test_event_federation.py
Python
10
{ "docstring": "\n Test to make sure we get some backfill points\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
https://github.com/matrix-org/synapse.git
10
_mixup_transform
def _mixup_transform(self, results): assert 'mix_results' in results assert len( results['mix_results']) == 1, 'MixUp only support 2 images now !' if results['mix_results'][0]['gt_bboxes'].shape[0] == 0: # empty bbox return results retrieve_results = results['mix_results'][0] retrieve_img = retrieve_results['img'] jit_factor = random.uniform(*self.ratio_range) is_filp = random.uniform(0, 1) > self.flip_ratio if len(retrieve_img.shape) == 3: out_img = np.ones( (self.dynamic_scale[0], self.dynamic_scale[1], 3), dtype=retrieve_img.dtype) * self.pad_val else: out_img = np.ones( self.dynamic_scale, dtype=retrieve_img.dtype) * self.pad_val # 1. keep_ratio resize scale_ratio = min(self.dynamic_scale[0] / retrieve_img.shape[0], self.dynamic_scale[1] / retrieve_img.shape[1]) retrieve_img = mmcv.imresize( retrieve_img, (int(retrieve_img.shape[1] * scale_ratio), int(retrieve_img.shape[0] * scale_ratio))) # 2. paste out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img # 3. scale jit scale_ratio *= jit_factor out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor), int(out_img.shape[0] * jit_factor))) # 4. flip if is_filp: out_img = out_img[:, ::-1, :] # 5. random crop ori_img = results['img'] origin_h, origin_w = out_img.shape[:2] target_h, target_w = ori_img.shape[:2] padded_img = np.zeros( (max(origin_h, target_h), max(origin_w, target_w), 3)).astype(np.uint8) padded_img[:origin_h, :origin_w] = out_img x_offset, y_offset = 0, 0 if padded_img.shape[0] > target_h: y_offset = random.randint(0, padded_img.shape[0] - target_h) if padded_img.shape[1] > target_w: x_offset = random.randint(0, padded_img.shape[1] - target_w) padded_cropped_img = padded_img[y_offset:y_offset + target_h, x_offset:x_offset + target_w] # 6. adjust bbox retrieve_gt_bboxes = retrieve_results['gt_bboxes'] retrieve_gt_bboxes[:, 0::2] = retrieve_gt_bboxes[:, 0::2] * scale_ratio retrieve_gt_bboxes[:, 1::2] = retrieve_gt_bboxes[:, 1::2] * scale_ratio if self.bbox_clip_border: retrieve_gt_bboxes[:, 0::2] = np.clip(retrieve_gt_bboxes[:, 0::2], 0, origin_w) retrieve_gt_bboxes[:, 1::2] = np.clip(retrieve_gt_bboxes[:, 1::2], 0, origin_h) if is_filp: retrieve_gt_bboxes[:, 0::2] = ( origin_w - retrieve_gt_bboxes[:, 0::2][:, ::-1]) # 7. filter cp_retrieve_gt_bboxes = retrieve_gt_bboxes.copy() cp_retrieve_gt_bboxes[:, 0::2] = \ cp_retrieve_gt_bboxes[:, 0::2] - x_offset cp_retrieve_gt_bboxes[:, 1::2] = \ cp_retrieve_gt_bboxes[:, 1::2] - y_offset if self.bbox_clip_border: cp_retrieve_gt_bboxes[:, 0::2] = np.clip( cp_retrieve_gt_bboxes[:, 0::2], 0, target_w) cp_retrieve_gt_bboxes[:, 1::2] = np.clip( cp_retrieve_gt_bboxes[:, 1::2], 0, target_h) # 8. mix up ori_img = ori_img.astype(np.float32) mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32) retrieve_gt_labels = retrieve_results['gt_labels'] if not self.skip_filter: keep_list = self._filter_box_candidates(retrieve_gt_bboxes.T, cp_retrieve_gt_bboxes.T) retrieve_gt_labels = retrieve_gt_labels[keep_list] cp_retrieve_gt_bboxes = cp_retrieve_gt_bboxes[keep_list] mixup_gt_bboxes = np.concatenate( (results['gt_bboxes'], cp_retrieve_gt_bboxes), axis=0) mixup_gt_labels = np.concatenate( (results['gt_labels'], retrieve_gt_labels), axis=0) # remove outside bbox inside_inds = find_inside_bboxes(mixup_gt_bboxes, target_h, target_w) mixup_gt_bboxes = mixup_gt_bboxes[inside_inds] mixup_gt_labels = mixup_gt_labels[inside_inds] results['img'] = mixup_img.astype(np.uint8) results['img_shape'] = mixup_img.shape results['gt_bboxes'] = mixup_gt_bboxes results['gt_labels'] = mixup_gt_labels return results
c9e1906f00505621b0247ab364feb0ca862fa98c
14
transforms.py
1,292
[Fix] Fix MixUp transform filter boxes failing case. Added test case (#7080)
70,162
0
1,392
835
179
243,959
340
mmdetection
56
mmdet/datasets/pipelines/transforms.py
Python
83
{ "docstring": "MixUp transform function.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n ", "language": "en", "n_whitespaces": 56, "n_words": 13, "vocab_size": 12 }
https://github.com/open-mmlab/mmdetection.git
8
authenticate
def authenticate(self, _=None): # TODO: remove unused var acceptance_wait_time = self.opts["acceptance_wait_time"] acceptance_wait_time_max = self.opts["acceptance_wait_time_max"] if not acceptance_wait_time_max: acceptance_wait_time_max = acceptance_wait_time with salt.channel.client.ReqChannel.factory( self.opts, crypt="clear" ) as channel: while True: creds = self.sign_in(channel=channel) if creds == "retry": if self.opts.get("caller"): # We have a list of masters, so we should break # and try the next one in the list. if self.opts.get("local_masters", None): error = SaltClientError( "Minion failed to authenticate" " with the master, has the " "minion key been accepted?" ) break else: print( "Minion failed to authenticate with the master, " "has the minion key been accepted?" ) sys.exit(2) if acceptance_wait_time: log.info( "Waiting %s seconds before retry.", acceptance_wait_time ) time.sleep(acceptance_wait_time) if acceptance_wait_time < acceptance_wait_time_max: acceptance_wait_time += acceptance_wait_time log.debug( "Authentication wait time is %s", acceptance_wait_time ) continue break self._creds = creds self._crypticle = Crypticle(self.opts, creds["aes"])
70972c8016ff5d6fbdd7f83776077b0936f60dea
20
crypt.py
308
Use salt.channel.client instead of salt.transport.client
54,016
0
954
172
89
215,547
134
salt
28
salt/crypt.py
Python
39
{ "docstring": "\n Authenticate with the master, this method breaks the functional\n paradigm, it will update the master information from a fresh sign\n in, signing in can occur as often as needed to keep up with the\n revolving master AES key.\n\n :rtype: Crypticle\n :returns: A crypticle used for encryption operations\n ", "language": "en", "n_whitespaces": 97, "n_words": 47, "vocab_size": 41 }
https://github.com/saltstack/salt.git
1
test_not_collapsed_with_legacy
def test_not_collapsed_with_legacy(self): # Sidebar should not be collapsed because the feature flag is not enabled self.client.cookies['wagtail_sidebar_collapsed'] = '1' response = self.client.get(reverse('wagtailadmin_home')) self.assertNotContains(response, 'sidebar-collapsed')
18c4d7c81356dbd5c4503db2ea24b21492512317
11
test_menu.py
70
Update Wagtail test cases to match slim sidebar capabilities and implementation details
15,545
0
58
37
21
70,730
23
wagtail
8
wagtail/admin/tests/test_menu.py
Python
4
{ "docstring": "Sidebar should only remember its collapsed state with the slim implementation.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/wagtail/wagtail.git
1
default
def default(method): method._is_default = True # pylint: disable=protected-access return method
84afc5193d38057e2e2badf9c889ea87d80d8fbf
7
generic_utils.py
25
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,746
0
20
13
10
276,832
10
keras
3
keras/utils/generic_utils.py
Python
3
{ "docstring": "Decorates a method to detect overrides in subclasses.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/keras-team/keras.git
3
tokenize_wrapper
def tokenize_wrapper(input): skip = {token.NEWLINE, token.INDENT, token.DEDENT} tokens = tokenize.generate_tokens(io.StringIO(input).readline) for quintuple in tokens: type, value, start, end, line_text = quintuple if type not in skip: yield quintuple
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
patcomp.py
92
add python 3.10.4 for windows
55,459
0
65
60
23
218,759
28
XX-Net
19
python3.10.4/Lib/lib2to3/patcomp.py
Python
7
{ "docstring": "Tokenizes a string suppressing significant whitespace.", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
https://github.com/XX-net/XX-Net.git
1
test_get_serials
def test_get_serials(): expected = [ "180720d39cd2db3244ba037417241e90", "1768ac4e5b72bf1d0df0df118b34b959", ] mock = MagicMock( return_value=( "CertInfo\r\n" "================ Certificate 0 ================\r\n" "Serial Number: 180720d39cd2db3244ba037417241e90\r\n" "OtherStuff\r\n" "\r\n" "================ Certificate 1 ================\r\n" "Serial Number: 1768ac4e5b72bf1d0df0df118b34b959\r\n" "OtherStuff" ) ) with patch.dict(certutil.__salt__, {"cmd.run": mock}): out = certutil.get_stored_cert_serials("TrustedPublisher") mock.assert_called_once_with('certutil.exe -store "TrustedPublisher"') assert expected == out
a8d2d1e1397cdc79b2c5f1ad7f6e3b729dcf8857
12
test_win_certutil.py
142
Add tests, fix state module
54,255
0
201
64
36
215,928
46
salt
12
tests/pytests/unit/modules/test_win_certutil.py
Python
21
{ "docstring": "\n Test getting all the serial numbers from a store\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
https://github.com/saltstack/salt.git
3
build_partition
def build_partition(cls, partition_ids, column_widths): return np.array( [ [ cls.frame_partition_cls( part_id[0], length=part_id[2], width=col_width, ) for part_id, col_width in zip(part_ids, column_widths) ] for part_ids in partition_ids ] )
8864bc197974da6d8cda2de2f35ca31d561be1cc
13
parquet_dispatcher.py
81
PERF-#4305: Parallelize `read_parquet` over row groups (#4700) Co-authored-by: mvashishtha <[email protected]>
35,794
0
240
56
21
154,121
26
modin
13
modin/core/io/column_stores/parquet_dispatcher.py
Python
14
{ "docstring": "\n Build array with partitions of `cls.frame_partition_cls` class.\n\n Parameters\n ----------\n partition_ids : list\n Array with references to the partitions data.\n column_widths : list\n Number of columns in each partition.\n\n Returns\n -------\n np.ndarray\n array with shape equals to the shape of `partition_ids` and\n filed with partition objects.\n\n Notes\n -----\n The second level of partitions_ids contains a list of object references\n for each read call:\n partition_ids[i][j] -> [ObjectRef(df), ObjectRef(df.index), ObjectRef(len(df))].\n ", "language": "en", "n_whitespaces": 210, "n_words": 67, "vocab_size": 50 }
https://github.com/modin-project/modin.git
5
__new__
def __new__(cls, stylename, **kwargs): # The "class" should have the _style_list attribute, which is a mapping # of style names to style classes. _list = stylename.replace(" ", "").split(",") _name = _list[0].lower() try: _cls = cls._style_list[_name] except KeyError as err: raise ValueError(f"Unknown style: {stylename}") from err try: _args_pair = [cs.split("=") for cs in _list[1:]] _args = {k: float(v) for k, v in _args_pair} except ValueError as err: raise ValueError(f"Incorrect style argument: {stylename}") from err return _cls(**{**_args, **kwargs})
075ff0952896f44d7d0b0b3318f0978ae53f84d7
12
patches.py
208
Small style fixes.
23,015
0
201
120
59
108,011
76
matplotlib
20
lib/matplotlib/patches.py
Python
13
{ "docstring": "Return the instance of the subclass with the given style name.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 9 }
https://github.com/matplotlib/matplotlib.git
1
get_roi_rel_points_train
def get_roi_rel_points_train(self, mask_pred, labels, cfg): point_coords = get_uncertain_point_coords_with_randomness( mask_pred, labels, cfg.num_points, cfg.oversample_ratio, cfg.importance_sample_ratio) return point_coords
c576e5d570bf64a99e2c6817ed7b5c0084a44a55
9
mask_point_head.py
49
[Enhance] Take point sample related functions out of mask_point_head (#7353) add point sample replace function in mask_point_head
70,231
0
58
34
12
244,083
15
mmdetection
10
mmdet/models/roi_heads/mask_heads/mask_point_head.py
Python
5
{ "docstring": "Get ``num_points`` most uncertain points with random points during\n train.\n\n Sample points in [0, 1] x [0, 1] coordinate space based on their\n uncertainty. The uncertainties are calculated for each point using\n '_get_uncertainty()' function that takes point's logit prediction as\n input.\n\n Args:\n mask_pred (Tensor): A tensor of shape (num_rois, num_classes,\n mask_height, mask_width) for class-specific or class-agnostic\n prediction.\n labels (list): The ground truth class for each instance.\n cfg (dict): Training config of point head.\n\n Returns:\n point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)\n that contains the coordinates sampled points.\n ", "language": "en", "n_whitespaces": 234, "n_words": 89, "vocab_size": 72 }
https://github.com/open-mmlab/mmdetection.git
2
tempdir_registry
def tempdir_registry() -> Iterator[TempDirectoryTypeRegistry]: global _tempdir_registry old_tempdir_registry = _tempdir_registry _tempdir_registry = TempDirectoryTypeRegistry() try: yield _tempdir_registry finally: _tempdir_registry = old_tempdir_registry
f3166e673fe8d40277b804d35d77dcdb760fc3b3
9
temp_dir.py
53
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,170
0
51
29
12
20,001
19
pipenv
5
pipenv/patched/notpip/_internal/utils/temp_dir.py
Python
11
{ "docstring": "Provides a scoped global tempdir registry that can be used to dictate\n whether directories should be deleted.\n ", "language": "en", "n_whitespaces": 23, "n_words": 17, "vocab_size": 16 }
https://github.com/pypa/pipenv.git
1
lexicographers_mutual_information
def lexicographers_mutual_information(cooccurrence): with np.errstate(invalid="ignore", divide="ignore"): result = cooccurrence * mutual_information(cooccurrence) return np.array(result)
1d7341e93d1f03387699fb3c6ae0b6c0e464296f
11
python_utils.py
64
Add new item similarity metrics for SAR (#1754) * Add mutual information similarity in SAR * Add lexicographers mutual information similarity for SAR * Add cosine similarity for SAR * Add inclusion index for SAR * Typos * Change SARSingleNode to SAR * Convert item similarity matrix to np.array * Update * Update SAR tests * Remove unused imports * Add explanations for new similarity metrics
7,235
0
28
35
12
39,443
12
recommenders
9
recommenders/utils/python_utils.py
Python
4
{ "docstring": "Helper method to calculate the Lexicographers Mutual Information of\n a matrix of co-occurrences.\n\n Due to the bias of mutual information for low frequency items,\n lexicographers mutual information corrects the formula by\n multiplying it by the co-occurrence frequency.\n\n Args:\n cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items.\n\n Returns:\n numpy.ndarray: The matrix of lexicographers mutual information between any two items.\n\n ", "language": "en", "n_whitespaces": 94, "n_words": 59, "vocab_size": 40 }
https://github.com/microsoft/recommenders.git
3
set_title
def set_title(self, title, prop=None): self._legend_title_box._text.set_text(title) if title: self._legend_title_box._text.set_visible(True) self._legend_title_box.set_visible(True) else: self._legend_title_box._text.set_visible(False) self._legend_title_box.set_visible(False) if prop is not None: self._legend_title_box._text.set_fontproperties(prop) self.stale = True
b9cdf3e2a68bd3d30ad7233b223f88d37044dab3
12
legend.py
136
ENH: add the ability to control label alignment in legends Co-authored-by: Tim Hoffmann <[email protected]>
23,426
0
118
84
20
109,064
21
matplotlib
10
lib/matplotlib/legend.py
Python
11
{ "docstring": "\n Set legend title and title style.\n\n Parameters\n ----------\n title : str\n The legend title.\n\n prop : `.font_manager.FontProperties` or `str` or `pathlib.Path`\n The font properties of the legend title.\n If a `str`, it is interpreted as a fontconfig pattern parsed by\n `.FontProperties`. If a `pathlib.Path`, it is interpreted as the\n absolute path to a font file.\n\n ", "language": "en", "n_whitespaces": 154, "n_words": 55, "vocab_size": 37 }
https://github.com/matplotlib/matplotlib.git
1
get_free_open_trades
def get_free_open_trades(self) -> int: open_trades = Trade.get_open_trade_count() return max(0, self.config['max_open_trades'] - open_trades)
87a3115073562e9abc7efd792569020435a01f6e
10
freqtradebot.py
50
Add get_open_trade_count() to simplify getting open trade count.
34,774
0
33
29
12
150,503
12
freqtrade
8
freqtrade/freqtradebot.py
Python
7
{ "docstring": "\n Return the number of free open trades slots or 0 if\n max number of open trades reached\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 13 }
https://github.com/freqtrade/freqtrade.git
4
native_unit_of_measurement
def native_unit_of_measurement(self) -> str | None: raw_units = self.raw_unit_of_measurement # Check if this is a known index pair UOM if isinstance(raw_units, dict) or raw_units in (UOM_ON_OFF, UOM_INDEX): return None if raw_units in ( UnitOfTemperature.FAHRENHEIT, UnitOfTemperature.CELSIUS, UOM_DOUBLE_TEMP, ): return self.hass.config.units.temperature_unit return raw_units
a6ddac9004b7f73633f2019f3b5267e1486756c1
11
sensor.py
94
Use UnitOfTemperature in integrations (i-m) (#84307)
96,664
0
145
61
33
297,698
41
core
17
homeassistant/components/isy994/sensor.py
Python
12
{ "docstring": "Get the Home Assistant unit of measurement for the device.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
https://github.com/home-assistant/core.git
2
find_target_completion
def find_target_completion(target_func, prefix, short): # type: (t.Callable[[], t.Iterable[CompletionTarget]], str, bool) -> t.List[str] try: targets = target_func() matches = list(walk_completion_targets(targets, prefix, short)) return matches except Exception as ex: # pylint: disable=locally-disabled, broad-except return ['%s' % ex]
86779cc90376ea70bafa7044b12ce5132409fd63
12
target.py
74
ansible-test - Code cleanup. This helps prepare for a future pylint upgrade.
78,873
0
74
43
30
267,383
35
ansible
10
test/lib/ansible_test/_internal/target.py
Python
7
{ "docstring": "Return a list of targets from the given target function which match the given prefix.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 13 }
https://github.com/ansible/ansible.git
6
test_pipeline_simple
def test_pipeline_simple(self): arr = np.random.randint(0, 1000, (1000, 1000)) df = pd.DataFrame(arr)
3d4404e9d9a9b2a3327f8aee664a8e71ac1f18b8
9
test_pipeline.py
51
FEAT-#4412: Add Batch Pipeline API to Modin (#4452) Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Mahesh Vashishtha <[email protected]> Signed-off-by: Rehan Durrani <[email protected]>
35,608
0
32
257
10
153,784
11
modin
9
modin/experimental/batch/test/test_pipeline.py
Python
35
{ "docstring": "Create a simple pipeline and ensure that it runs end to end correctly.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
https://github.com/modin-project/modin.git
2
__mul__
def __mul__(self, factor): factor = sympify(factor) coords = [simplify(x*factor) for x in self.args] return Point(coords, evaluate=False)
498015021131af4dbb07eb110e5badaba8250c7b
10
point.py
62
Updated import locations
47,775
0
44
39
15
196,275
16
sympy
10
sympy/geometry/point.py
Python
4
{ "docstring": "Multiply point's coordinates by a factor.\n\n Notes\n =====\n\n >>> from sympy import Point\n\n When multiplying a Point by a floating point number,\n the coordinates of the Point will be changed to Floats:\n\n >>> Point(1, 2)*0.1\n Point2D(0.1, 0.2)\n\n If this is not desired, the `scale` method can be used or\n else only multiply or divide by integers:\n\n >>> Point(1, 2).scale(1.1, 1.1)\n Point2D(11/10, 11/5)\n >>> Point(1, 2)*11/10\n Point2D(11/10, 11/5)\n\n See Also\n ========\n\n sympy.geometry.point.Point.scale\n ", "language": "en", "n_whitespaces": 190, "n_words": 71, "vocab_size": 53 }
https://github.com/sympy/sympy.git
2
value
def value(self): if hasattr(self, '_m_value'): return self._m_value self._m_value = (self.b & 127) return getattr(self, '_m_value', None)
002f919dda5f01d067c2e786426c68751551d15c
9
vlq_base128_le.py
64
update kaitai definitions
73,952
0
75
38
14
252,418
16
mitmproxy
6
mitmproxy/contrib/kaitaistruct/vlq_base128_le.py
Python
5
{ "docstring": "The 7-bit (base128) numeric value chunk of this group.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/mitmproxy/mitmproxy.git
4
notify
def notify(self, notification, raise_exception=False): event = notification.event try: return self.notify_users( event.group, event, triggering_rules=[r.label for r in notification.rules] ) except ( ApiError, HTTPError, InvalidIdentity, PluginError, SSLError, UrllibHTTPError, ) as err: self.logger.info( "notification-plugin.notify-failed", extra={ "error": str(err), "plugin": self.slug, "project_id": event.group.project_id, "organization_id": event.group.project.organization_id, }, ) if raise_exception: raise err return False
542484c0cd71625e62e086f3f7c5aaf85360f724
16
notify.py
175
fix(plugins): Silence error (#32042)
19,326
0
366
114
45
96,559
48
sentry
26
src/sentry/plugins/bases/notify.py
Python
26
{ "docstring": "\n This calls the notify_users method of the plugin.\n Normally this method eats the error and logs it but if we\n set raise_exception=True like we do for the test plugin button,\n the exception is raised\n ", "language": "en", "n_whitespaces": 70, "n_words": 34, "vocab_size": 28 }
https://github.com/getsentry/sentry.git
1
create_single_host
def create_single_host(name, variables): # type: (str, t.Dict[str, t.Union[str, int]]) -> Inventory return Inventory(host_groups=dict(all={name: variables}))
a06fa496d3f837cca3c437ab6e9858525633d147
13
host_profiles.py
41
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
78,585
0
29
24
14
266,782
14
ansible
7
test/lib/ansible_test/_internal/host_profiles.py
Python
2
{ "docstring": "Return an inventory instance created from the given hostname and variables.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/ansible/ansible.git
1
test_syncer_hanging_sync_with_timeout
def test_syncer_hanging_sync_with_timeout(temp_data_dirs): tmp_source, tmp_target = temp_data_dirs
ed5b9e5439811becd062ef7734b1afceca2c7d07
7
test_syncer.py
21
[Tune] Add timeout for experiment checkpoint syncing to cloud (#30855) #28155 introduced a sync timeout for trainable checkpoint syncing to the cloud, in the case that the sync operation (default is with pyarrow) hangs. This PR adds a similar timeout for experiment checkpoint cloud syncing. Signed-off-by: Justin Yu <[email protected]>
31,110
0
12
86
6
137,248
6
ray
4
python/ray/tune/tests/test_syncer.py
Python
16
{ "docstring": "Check that syncing times out when the sync process is hanging.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/ray-project/ray.git
1
draw
def draw(self, renderer): self._set_lims() # this sets the scale in case the parent has set its scale. self._set_scale() super().draw(renderer)
8387676bc049d7b3e071846730c632e6ced137ed
9
_secondary_axes.py
48
Clean up code in SecondaryAxis
23,725
0
54
26
18
109,729
19
matplotlib
6
lib/matplotlib/axes/_secondary_axes.py
Python
4
{ "docstring": "\n Draw the secondary axes.\n\n Consults the parent axes for its limits and converts them\n using the converter specified by\n `~.axes._secondary_axes.set_functions` (or *functions*\n parameter when axes initialized.)\n ", "language": "en", "n_whitespaces": 69, "n_words": 26, "vocab_size": 23 }
https://github.com/matplotlib/matplotlib.git
3
decode
def decode(self, bboxes, pred_bboxes, max_shape=None): bboxes = get_box_tensor(bboxes) assert len(pred_bboxes) == 2 cls_preds, offset_preds = pred_bboxes assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size( 0) == bboxes.size(0) bboxes, loc_confidence = bucket2bbox(bboxes, cls_preds, offset_preds, self.num_buckets, self.scale_factor, max_shape, self.clip_border) if self.use_box_type: bboxes = HorizontalBoxes(bboxes, clone=False) return bboxes, loc_confidence
d915740fa8228cf57741b27d9e5d66e358456b8e
11
bucketing_bbox_coder.py
158
[Refactor] Refactor anchor head and base head with boxlist (#8625) * Refactor anchor head * Update * Update * Update * Add a series of boxes tools * Fix box type to support n x box_dim boxes * revert box type changes * Add docstring * refactor retina_head * Update * Update * Fix comments * modify docstring of coder and ioucalculator * Replace with_boxlist with use_box_type
70,859
0
254
105
32
245,713
44
mmdetection
18
mmdet/models/task_modules/coders/bucketing_bbox_coder.py
Python
13
{ "docstring": "Apply transformation `pred_bboxes` to `boxes`.\n Args:\n boxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes.\n pred_bboxes (torch.Tensor): Predictions for bucketing estimation\n and fine regression\n max_shape (tuple[int], optional): Maximum shape of boxes.\n Defaults to None.\n\n Returns:\n Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.\n ", "language": "en", "n_whitespaces": 131, "n_words": 36, "vocab_size": 33 }
https://github.com/open-mmlab/mmdetection.git
1
test_include_block_tag_with_plain_value
def test_include_block_tag_with_plain_value(self): result = render_to_string( "tests/blocks/include_block_test.html", { "test_block": 42, }, ) self.assertIn("<body>42</body>", result)
d10f15e55806c6944827d801cd9c2d53f5da4186
11
test_blocks.py
51
Reformat with black
16,220
0
89
28
13
74,116
13
wagtail
5
wagtail/core/tests/test_blocks.py
Python
8
{ "docstring": "\n The include_block tag should be able to render a value without a render_as_block method\n by just rendering it as a string\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 19 }
https://github.com/wagtail/wagtail.git
40
meta_from_array
def meta_from_array(x, ndim=None, dtype=None): # If using x._meta, x must be a Dask Array, some libraries (e.g. zarr) # implement a _meta attribute that are incompatible with Dask Array._meta if hasattr(x, "_meta") and isinstance(x, Array): x = x._meta if dtype is None and x is None: raise ValueError("You must specify the meta or dtype of the array") if np.isscalar(x): x = np.array(x) if x is None: x = np.ndarray elif dtype is None and hasattr(x, "dtype"): dtype = x.dtype if isinstance(x, type): x = x(shape=(0,) * (ndim or 0), dtype=dtype) if isinstance(x, list) or isinstance(x, tuple): ndims = [ 0 if isinstance(a, numbers.Number) else a.ndim if hasattr(a, "ndim") else len(a) for a in x ] a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)] return a if isinstance(x, list) else tuple(x) if ( not hasattr(x, "shape") or not hasattr(x, "dtype") or not isinstance(x.shape, tuple) ): return x if ndim is None: ndim = x.ndim try: meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))] if meta.ndim != ndim: if ndim > x.ndim: meta = meta[(Ellipsis,) + tuple(None for _ in range(ndim - meta.ndim))] meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))] elif ndim == 0: meta = meta.sum() else: meta = meta.reshape((0,) * ndim) if meta is np.ma.masked: meta = np.ma.array(np.empty((0,) * ndim, dtype=dtype or x.dtype), mask=True) except Exception: meta = np.empty((0,) * ndim, dtype=dtype or x.dtype) if np.isscalar(meta): meta = np.array(meta) if dtype and meta.dtype != dtype: try: meta = meta.astype(dtype) except ValueError as e: if ( any( s in str(e) for s in [ "invalid literal", "could not convert string to float", ] ) and meta.dtype.kind in "SU" ): meta = np.array([]).astype(dtype) else: raise e return meta
7471eb3d1e9ccf085b70b219413aa891c8c2c167
21
utils.py
816
masked scalars input to da.from_array (#8895)
36,621
0
887
524
149
156,257
287
dask
42
dask/array/utils.py
Python
66
{ "docstring": "Normalize an array to appropriate meta object\n\n Parameters\n ----------\n x: array-like, callable\n Either an object that looks sufficiently like a Numpy array,\n or a callable that accepts shape and dtype keywords\n ndim: int\n Number of dimensions of the array\n dtype: Numpy dtype\n A valid input for ``np.dtype``\n\n Returns\n -------\n array-like with zero elements of the correct dtype\n ", "language": "en", "n_whitespaces": 112, "n_words": 57, "vocab_size": 45 }
https://github.com/dask/dask.git
20
async_update
async def async_update(self, *, tries=0): attrs = {} closer_time = None try: yandex_reply = await self.requester.get_stop_info(self._stop_id) except CaptchaError as ex: _LOGGER.error( "%s. You may need to disable the integration for some time", ex, ) return try: data = yandex_reply["data"] except KeyError as key_error: _LOGGER.warning( "Exception KeyError was captured, missing key is %s. Yandex returned: %s", key_error, yandex_reply, ) if tries > 0: return await self.requester.set_new_session() await self.async_update(tries=tries + 1) return stop_name = data["name"] transport_list = data["transports"] for transport in transport_list: for thread in transport["threads"]: if "Events" not in thread["BriefSchedule"]: continue if thread.get("noBoarding") is True: continue for event in thread["BriefSchedule"]["Events"]: # Railway route depends on the essential stops and # can vary over time. # City transport has the fixed name for the route if "railway" in transport["Types"]: route = " - ".join( [x["name"] for x in thread["EssentialStops"]] ) else: route = transport["name"] if self._routes and route not in self._routes: # skip unnecessary route info continue if "Estimated" not in event and "Scheduled" not in event: continue departure = event.get("Estimated") or event["Scheduled"] posix_time_next = int(departure["value"]) if closer_time is None or closer_time > posix_time_next: closer_time = posix_time_next if route not in attrs: attrs[route] = [] attrs[route].append(departure["text"]) attrs[STOP_NAME] = stop_name if closer_time is None: self._state = None else: self._state = dt_util.utc_from_timestamp(closer_time).replace(microsecond=0) self._attrs = attrs
62690759d483b03932939d28bbf87ce3b293c486
20
sensor.py
545
Move attribution to standalone attribute [t-z] (#80521)
88,455
0
1,075
316
125
289,313
211
core
39
homeassistant/components/yandex_transport/sensor.py
Python
56
{ "docstring": "Get the latest data from maps.yandex.ru and update the states.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
https://github.com/home-assistant/core.git
1
_linear_predictor
def _linear_predictor(self, X): check_is_fitted(self) X = self._validate_data( X, accept_sparse=["csr", "csc", "coo"], dtype=[np.float64, np.float32], ensure_2d=True, allow_nd=False, reset=False, ) return X @ self.coef_ + self.intercept_
75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc
11
glm.py
100
ENH migrate GLMs / TweedieRegressor to linear loss (#22548) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
75,775
0
124
65
22
259,442
23
scikit-learn
15
sklearn/linear_model/_glm/glm.py
Python
11
{ "docstring": "Compute the linear_predictor = `X @ coef_ + intercept_`.\n\n Note that we often use the term raw_prediction instead of linear predictor.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Samples.\n\n Returns\n -------\n y_pred : array of shape (n_samples,)\n Returns predicted values of linear predictor.\n ", "language": "en", "n_whitespaces": 125, "n_words": 47, "vocab_size": 38 }
https://github.com/scikit-learn/scikit-learn.git
2
async_added_to_hass
async def async_added_to_hass(self): self.async_on_remove( self.device.events.async_add_listener(self.async_write_ha_state) ) if (last_state := await self.async_get_last_state()) is not None: self._attr_is_on = last_state.state == STATE_ON
29a2df3dfcf3b5d1fb6cf20b413e024eb0ebf597
11
binary_sensor.py
79
Restore ONVIF sensors (#70393) Co-authored-by: Paulus Schoutsen <[email protected]>
98,171
0
69
47
19
299,235
19
core
12
homeassistant/components/onvif/binary_sensor.py
Python
6
{ "docstring": "Connect to dispatcher listening for entity data notifications.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
3
extensions
def extensions(self) -> list[tuple[int, bytes]]: ret = [] if self._client_hello.extensions: for extension in self._client_hello.extensions.extensions: body = getattr(extension, "_raw_body", extension.body) ret.append((extension.type, body)) return ret
e83ec8390ad6be6a86cfcfc57bce14cb8861bf32
13
tls.py
100
`pyupgrade --py39-plus **/*.py`
73,616
0
92
64
21
251,164
23
mitmproxy
13
mitmproxy/tls.py
Python
8
{ "docstring": "The raw list of extensions in the form of `(extension_type, raw_bytes)` tuples.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
https://github.com/mitmproxy/mitmproxy.git
1
build_runtime_context
def build_runtime_context(self) -> "RuntimeContext": # Defer the import of RuntimeContext until needed to avoid cycles from ray.runtime_context import RuntimeContext return RuntimeContext(self)
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
7
runtime_context.py
36
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,871
0
49
19
19
132,932
21
ray
5
python/ray/util/client/runtime_context.py
Python
4
{ "docstring": "Creates a RuntimeContext backed by the properites of this API", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/ray-project/ray.git
1
test_make_tarball_latin1
def test_make_tarball_latin1(self): self.test_make_tarball('årchiv') # note this isn't a real word
8198943edd73a363c266633e1aa5b2a9e9c9f526
8
test_archive_util.py
26
add python 3.10.4 for windows
56,856
0
24
12
10
223,045
10
XX-Net
3
python3.10.4/Lib/distutils/tests/test_archive_util.py
Python
2
{ "docstring": "\n Mirror test_make_tarball, except filename contains latin characters.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
https://github.com/XX-net/XX-Net.git
2
ensure_schema_for_first_block
def ensure_schema_for_first_block(self) -> Optional[Union["pyarrow.Schema", type]]: get_schema = cached_remote_fn(_get_schema) try: block = next(self.iter_blocks()) except (StopIteration, ValueError): # Dataset is empty (no blocks) or was manually cleared. return None schema = ray.get(get_schema.remote(block)) # Set the schema. self._metadata[0].schema = schema return schema
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
12
block_list.py
113
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,328
0
128
68
32
130,602
39
ray
18
python/ray/data/impl/block_list.py
Python
13
{ "docstring": "Ensure that the schema is set for the first block.\n\n Returns None if the block list is empty.\n ", "language": "en", "n_whitespaces": 32, "n_words": 18, "vocab_size": 15 }
https://github.com/ray-project/ray.git
7
parse257
def parse257(resp): if resp[:3] != '257': raise error_reply(resp) if resp[3:5] != ' "': return '' # Not compliant to RFC 959, but UNIX ftpd does this dirname = '' i = 5 n = len(resp) while i < n: c = resp[i] i = i+1 if c == '"': if i >= n or resp[i] != '"': break i = i+1 dirname = dirname + c return dirname
8198943edd73a363c266633e1aa5b2a9e9c9f526
12
ftplib.py
158
add python 3.10.4 for windows
54,790
0
171
90
42
217,445
68
XX-Net
8
python3.10.4/Lib/ftplib.py
Python
17
{ "docstring": "Parse the '257' response for a MKD or PWD request.\n This is a response to a MKD or PWD request: a directory name.\n Returns the directoryname in the 257 reply.", "language": "en", "n_whitespaces": 35, "n_words": 30, "vocab_size": 21 }
https://github.com/XX-net/XX-Net.git
4
to_bag
def to_bag(df, index=False, format="tuple"): from ...bag.core import Bag if not isinstance(df, (DataFrame, Series)): raise TypeError("df must be either DataFrame or Series") name = "to_bag-" + tokenize(df, index, format) if format == "frame": # Use existing graph and name of df, but # drop meta to produce a Bag collection dsk = df.dask name = df._name else: dsk = { (name, i): (_df_to_bag, block, index, format) for (i, block) in enumerate(df.__dask_keys__()) } dsk.update(df.__dask_optimize__(df.__dask_graph__(), df.__dask_keys__())) return Bag(dsk, name, df.npartitions)
8a6f6a7b95762df4e44bc4d82ce33a7c388a0676
15
io.py
215
Move Bag.map_partitions to Blockwise (#8646) 1. Adds `format="frame"` option to `dataframe.io.to_bag` (effectively returning a zero-copy view of the same dask graph, that no-longer tracks meta/divisions) 2. Revises `Bag.map_partitions` to use `blockwise` (and to support the `token=` option) 3. Modifies the ACA code path to use a `Bag.map_partitions` for any blockwise operations where partitions may loose "dataframe-like" properties. This represents an alternative to using `map_partitions` incorrectly in ACA. It is also an alternative to the low-level `blockwise` API.
36,501
0
176
134
66
155,996
77
dask
25
dask/dataframe/io/io.py
Python
15
{ "docstring": "Create Dask Bag from a Dask DataFrame\n\n Parameters\n ----------\n index : bool, optional\n If True, the elements are tuples of ``(index, value)``, otherwise\n they're just the ``value``. Default is False.\n format : {\"tuple\", \"dict\", \"frame\"}, optional\n Whether to return a bag of tuples, dictionaries, or\n dataframe-like objects. Default is \"tuple\". If \"frame\",\n the original partitions of ``df`` will not be transformed\n in any way.\n\n\n Examples\n --------\n >>> bag = df.to_bag() # doctest: +SKIP\n ", "language": "en", "n_whitespaces": 141, "n_words": 73, "vocab_size": 61 }
https://github.com/dask/dask.git
3
_get_no_faces
def _get_no_faces(self): self.output_message = "Frames with no faces" for frame in tqdm(self._items, desc=self.output_message): logger.trace(frame) # type:ignore frame_name = frame["frame_fullname"] if not self._alignments.frame_has_faces(frame_name): logger.debug("Returning: '%s'", frame_name) yield frame_name
892d8626ed4e7f834ac5607af59f14f5476d5997
12
jobs.py
104
Bugfix: Alignments tool - don't error on from-faces job
21,067
0
112
60
25
101,662
27
faceswap
13
tools/alignments/jobs.py
Python
8
{ "docstring": " yield each frame that has no face match in alignments file ", "language": "en", "n_whitespaces": 12, "n_words": 11, "vocab_size": 11 }
https://github.com/deepfakes/faceswap.git
2
_perform_invalid_key_test
def _perform_invalid_key_test(self, key, expected_warning): msg = expected_warning.replace(key, cache.make_key(key)) tests = [ ("add", [key, 1]), ("get", [key]), ("set", [key, 1]), ("incr", [key]), ("decr", [key]), ("touch", [key]), ("delete", [key]), ("get_many", [[key, "b"]]), ("set_many", [{key: 1, "b": 2}]), ("delete_many", [[key, "b"]]), ] for operation, args in tests: with self.subTest(operation=operation): with self.assertRaises(InvalidCacheKey) as cm: getattr(cache, operation)(*args) self.assertEqual(str(cm.exception), msg)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
14
tests.py
281
Refs #33476 -- Reformatted code with Black.
50,027
0
259
179
44
201,962
54
django
19
tests/cache/tests.py
Python
19
{ "docstring": "\n While other backends merely warn, memcached should raise for an invalid\n key.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
https://github.com/django/django.git
5
test_deploy
def test_deploy(self, mock_func): release = Release.objects.create( version="meow" * 10, organization_id=self.project.organization_id, date_released=timezone.now(), ) # The projects can appear out of order. projects = (self.project, self.create_project(name="battlesnake")) SLUGS_TO_PROJECT = {project.slug: project for project in projects} for project in projects: release.add_project(project) deploy = Deploy.objects.create( release=release, organization_id=self.organization.id, environment_id=self.environment.id, ) notification = ReleaseActivityNotification( Activity( project=self.project, user=self.user, type=ActivityType.RELEASE.value, data={"version": release.version, "deploy_id": deploy.id}, ) ) with self.tasks(): notification.send() attachment, text = get_attachment() assert ( text == f"Release {release.version} was deployed to {self.environment.name} for these projects" ) first_project = None for i in range(len(projects)): project = SLUGS_TO_PROJECT[attachment["actions"][i]["text"]] if not first_project: first_project = project assert ( attachment["actions"][i]["url"] == f"http://testserver/organizations/{self.organization.slug}/releases/" f"{release.version}/?project={project.id}&unselectedSeries=Healthy/" ) assert ( attachment["footer"] == f"{first_project.slug} | <http://testserver/settings/account/notifications/" f"deploy/?referrer=release_activity-slack-user|Notification Settings>" )
b9f5a910dc841b85f58d46266ec049ae5a7fd305
14
test_deploy.py
440
ref(models): `ActivityType` (#34978) ## Objective: We want to separate enum logic from Model logic. This breaks a lot of circular dependencies.
18,703
0
573
244
79
90,884
111
sentry
43
tests/sentry/integrations/slack/notifications/test_deploy.py
Python
45
{ "docstring": "\n Test that a Slack message is sent with the expected payload when a deploy happens.\n ", "language": "en", "n_whitespaces": 30, "n_words": 15, "vocab_size": 14 }
https://github.com/getsentry/sentry.git
1
test_option_then_setting
def test_option_then_setting(self): args = [ "base_command", "testlabel", "--option_a=x", "--settings=alternate_settings", ] self._test(args)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
8
tests.py
46
Refs #33476 -- Reformatted code with Black.
51,933
0
83
24
11
207,338
11
django
4
tests/admin_scripts/tests.py
Python
8
{ "docstring": "Options passed before settings are correctly handled.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/django/django.git
6
_pandas_dtype_needs_early_conversion
def _pandas_dtype_needs_early_conversion(pd_dtype): try: from pandas.api.types import ( is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_sparse, ) except ImportError: return False if is_sparse(pd_dtype) or not is_extension_array_dtype(pd_dtype): # Sparse arrays will be converted later in `check_array` # Only handle extension arrays for integer and floats return False elif is_float_dtype(pd_dtype): # Float ndarrays can normally support nans. They need to be converted # first to map pd.NA to np.nan return True elif is_integer_dtype(pd_dtype): # XXX: Warn when converting from a high integer to a float return True return False
ee5a1b69d1dfa99635a10f0a5b54ec263cedf866
9
validation.py
105
DOC, MNT Typos found by codespell (#22906)
75,712
0
224
62
61
259,327
82
scikit-learn
10
sklearn/utils/validation.py
Python
17
{ "docstring": "Return True if pandas extension pd_dtype need to be converted early.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/scikit-learn/scikit-learn.git
14
on_motion
def on_motion(self, etype, me): if self.disabled or me.dispatch_mode == MODE_DONT_DISPATCH: return if me.type_id not in self.motion_filter: return filtered = self.motion_filter[me.type_id] if filtered[0] is self and len(filtered) == 1: return if me.dispatch_mode == MODE_DEFAULT_DISPATCH: last_filtered = filtered[-1] for widget in self.children[:]: if widget.dispatch('on_motion', etype, me): return True if widget is last_filtered: return if me.dispatch_mode == MODE_FILTERED_DISPATCH: widgets = filtered[1:] if filtered[0] is self else filtered[:] for widget in widgets: if widget.dispatch('on_motion', etype, me): return True # # Default event handlers #
1830123ba3edf7290b7c6cb1c6f406ccf1d0e5d4
12
widget.py
233
Feature: EventManagerBase (#7658) * Added EventManagerBase class and event_managers attribute to WindowBase class. * Added on_motion event to Widget class. * Updated post_dispatch_input in EventLoopBase to skip non-touch events. * Using type ids in MouseMotionEventProvider. * Added on_motion method to Widget subclasses. * Updated Widget.on_motion method to dispatch to filtered widgets if 'pos' is not in me.profile. * Changed motion_filter property in Widget to store key to list values. * Updated Widget.on_motion to not dispatch event to children if widget is disabled. * Widget: Using flags to control dispatching in on_motion method. * Widget: Don't dispatch on_motion to children if only self is registered. * Widget: Removed collision on disabled check from on_motion method. * Widget: Added docstrings for motion_filter and related methods. * EventManager: Moved motion event flags to eventmanager/__init__.py module. * ScreenManager: Overrode the on_motion method. * WindowBase: Using attributes event_managers and event_managers_dict. * WindowBase: Added doc for register_event_manager and unregister_event_manager methods. * Widget: Improved default dispatch to stop after the last registered widgets. * EventManagerBase: Added initial docs class and module. * Widget: Added experimental warnings to motion_filter property and to on_motion and (un)register_for_motion_event methods. * WindowBase: Added docs for event_managers and event_managers_dict attributes. * MotionEvent: Added type_id and flags to push_attrs list. * EventManagerBase: Added versionadded tag on all flags. * EventManagerBase: Use dispatch modes instead of flags.
46,982
0
317
148
43
194,458
80
kivy
18
kivy/uix/widget.py
Python
20
{ "docstring": "Called when a motion event is received.\n\n :Parameters:\n `etype`: `str`\n Event type, one of \"begin\", \"update\" or \"end\"\n `me`: :class:`~kivy.input.motionevent.MotionEvent`\n Received motion event\n :Returns: `bool`\n `True` to stop event dispatching\n\n .. versionadded:: 2.1.0\n\n .. warning::\n This is an experimental method and it remains so while this warning\n is present as it can be changed or removed in the next versions of\n Kivy.\n ", "language": "en", "n_whitespaces": 193, "n_words": 62, "vocab_size": 53 }
https://github.com/kivy/kivy.git
9
call_ef
def call_ef(self, other_args): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="ef", description=, ) parser.add_argument( "-vs", "--value-short", dest="short_allocation", help="Amount to allocate to portfolio in short positions", type=float, default=self.params["short_allocation"] if "short_allocation" in self.params else 0.0, ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-n") parser.add_argument( "-n", "--number-portfolios", default=self.params["amount_portfolios"] if "amount_portfolios" in self.params else 100, type=check_non_negative, dest="amount_portfolios", help="Number of portfolios to simulate", ) parser.add_argument( "-se", "--seed", default=self.params["random_seed"] if "random_seed" in self.params else 123, type=check_non_negative, dest="random_seed", help="Seed used to generate random portfolios", ) parser.add_argument( "-t", "--tangency", action="store_true", dest="tangency", default=self.params["tangency"] if "tangency" in self.params else False, help="Adds the optimal line with the risk-free asset", ) parser.add_argument( "--no_plot", action="store_false", dest="plot_tickers", default=True, help="Whether or not to plot the tickers for the assets provided", ) parser = self.po_parser( parser, rm=True, mt=True, p=True, s=True, e=True, lr=True, freq=True, mn=True, th=True, r=True, a=True, v=True, ) ns_parser = self.parse_known_args_and_warn(parser, other_args) if ns_parser: if len(self.tickers) < 2: console.print( "Please have at least 2 loaded tickers to calculate weights.\n" ) return optimizer_view.display_ef( symbols=self.tickers, interval=ns_parser.historic_period, start_date=ns_parser.start_period, end_date=ns_parser.end_period, log_returns=ns_parser.log_returns, freq=ns_parser.return_frequency, maxnan=ns_parser.max_nan, threshold=ns_parser.threshold_value, method=ns_parser.nan_fill_method, risk_measure=ns_parser.risk_measure.lower(), risk_free_rate=ns_parser.risk_free, alpha=ns_parser.significance_level, value=ns_parser.long_allocation, value_short=ns_parser.short_allocation, n_portfolios=ns_parser.amount_portfolios, seed=ns_parser.random_seed, tangency=ns_parser.tangency, plot_tickers=ns_parser.plot_tickers, )
83377868b25b91b608f04cbda9383f838dff74cd
13
po_controller.py
668
PO_controller refactoring and tests (#2725) * Adding tests * Added tests to po_controller * Refactored po controller * fix custom_resets * fix load * add disable timeout * pylint Co-authored-by: DidierRLopes <[email protected]> Co-authored-by: montezdesousa <[email protected]> Co-authored-by: montezdesousa <[email protected]>
85,488
0
1,230
436
133
285,988
176
OpenBBTerminal
73
openbb_terminal/portfolio/portfolio_optimization/po_controller.py
Python
95
{ "docstring": "Process ef commandThis function plots random portfolios based on their\n risk and returns and shows the efficient frontier.", "language": "en", "n_whitespaces": 32, "n_words": 18, "vocab_size": 17 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
2
is_pinned
def is_pinned(self) -> bool:
    specifiers = self.specifier
    return len(specifiers) == 1 and next(iter(specifiers)).operator in {"==", "==="}
f3166e673fe8d40277b804d35d77dcdb760fc3b3
12
req_install.py
63
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,159
0
37
36
16
19,953
16
pipenv
9
pipenv/patched/notpip/_internal/req/req_install.py
Python
7
{ "docstring": "Return whether I am pinned to an exact version.\n\n For example, some-package==1.2 is pinned; some-package>1.2 is not.\n ", "language": "en", "n_whitespaces": 31, "n_words": 17, "vocab_size": 16 }
https://github.com/pypa/pipenv.git
1
_generate_legacy_events_context_id_query
def _generate_legacy_events_context_id_query() -> Select:
    # This can be removed once we no longer have event_ids in the states table
    return (
        select(
            *EVENT_COLUMNS,
            literal(value=None, type_=sqlalchemy.String).label("shared_data"),
            States.state,
            States.entity_id,
            States.attributes,
            StateAttributes.shared_attrs,
        )
        .outerjoin(States, (Events.event_id == States.event_id))
        .where(States.last_updated == States.last_changed)
        .where(_not_continuous_entity_matcher())
        .outerjoin(
            StateAttributes, (States.attributes_id == StateAttributes.attributes_id)
        )
    )
26177bd080b4eb6d11cfd9fbdd158be36f4983d4
22
__init__.py
151
Convert logbook to use lambda_stmt (#71624)
99,183
0
183
98
41
300,319
45
core
24
homeassistant/components/logbook/__init__.py
Python
18
{ "docstring": "Generate a legacy events context id query that also joins states.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/home-assistant/core.git
1
test_poisson_glmnet
def test_poisson_glmnet():
    # library("glmnet")
    # options(digits=10)
    # df <- data.frame(a=c(-2,-1,1,2), b=c(0,0,1,1), y=c(0,1,1,2))
    # x <- data.matrix(df[,c("a", "b")])
    # y <- df$y
    # fit <- glmnet(x=x, y=y, alpha=0, intercept=T, family="poisson",
    #     standardize=F, thresh=1e-10, nlambda=10000)
    # coef(fit, s=1)
    # (Intercept) -0.12889386979
    # a 0.29019207995
    # b 0.03741173122
    X = np.array([[-2, -1, 1, 2], [0, 0, 1, 1]]).T
    y = np.array([0, 1, 1, 2])
    glm = PoissonRegressor(
        alpha=1,
        fit_intercept=True,
        tol=1e-7,
        max_iter=300,
    )
    glm.fit(X, y)
    assert_allclose(glm.intercept_, -0.12889386979, rtol=1e-5)
    assert_allclose(glm.coef_, [0.29019207995, 0.03741173122], rtol=1e-5)
75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc
12
test_glm.py
170
ENH migrate GLMs / TweedieRegressor to linear loss (#22548) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
75,787
0
199
123
58
259,457
78
scikit-learn
17
sklearn/linear_model/_glm/tests/test_glm.py
Python
12
{ "docstring": "Compare Poisson regression with L2 regularization and LogLink to glmnet", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/scikit-learn/scikit-learn.git
2
detect_reporter
def _detect_reporter(**kwargs) -> TuneReporterBase:
    if IS_NOTEBOOK:
        kwargs.setdefault("overwrite", not has_verbosity(Verbosity.V2_TRIAL_NORM))
        progress_reporter = JupyterNotebookReporter(**kwargs)
    else:
        progress_reporter = CLIReporter(**kwargs)
    return progress_reporter
eb69c1ca286a2eec594f02ddaf546657a8127afd
13
progress_reporter.py
78
[air] Add annotation for Tune module. (#27060) Co-authored-by: Kai Fricke <[email protected]>
28,098
0
51
44
15
126,228
18
ray
11
python/ray/tune/progress_reporter.py
Python
14
{ "docstring": "Detect progress reporter class.\n\n Will return a :class:`JupyterNotebookReporter` if a IPython/Jupyter-like\n session was detected, and a :class:`CLIReporter` otherwise.\n\n Keyword arguments are passed on to the reporter class.\n ", "language": "en", "n_whitespaces": 39, "n_words": 27, "vocab_size": 23 }
https://github.com/ray-project/ray.git
5
_w_intercept_raw
def _w_intercept_raw(self, coef, X):
    if not self.base_loss.is_multiclass:
        if self.fit_intercept:
            intercept = coef[-1]
            weights = coef[:-1]
        else:
            intercept = 0.0
            weights = coef
        raw_prediction = X @ weights + intercept
    else:
        # reshape to (n_classes, n_dof)
        if coef.ndim == 1:
            weights = coef.reshape((self.base_loss.n_classes, -1), order="F")
        else:
            weights = coef
        if self.fit_intercept:
            intercept = weights[:, -1]
            weights = weights[:, :-1]
        else:
            intercept = 0.0
        raw_prediction = X @ weights.T + intercept  # ndarray, likely C-contiguous

    return weights, intercept, raw_prediction
d8d5637cfe372dd353dfc9f79dbb63c3189a9ecc
16
_linear_loss.py
216
ENH Loss module LogisticRegression (#21808) * ENH replace loss in linear logistic regression * MNT remove logistic regression's own loss functions * CLN remove comment * DOC add whatsnew * DOC more precise whatsnew * CLN restore improvements of #19571 * ENH improve fit time by separating mat-vec in multiclass * DOC update whatsnew * not only a bit ;-) * DOC note memory benefit for multiclass case * trigger CI * trigger CI * CLN rename variable to hess_prod * DOC address reviewer comments * CLN remove C/F for 1d arrays * CLN rename to gradient_per_sample * CLN rename alpha to l2_reg_strength * ENH respect F-contiguity * TST fix sag tests * CLN rename to LinearModelLoss * CLN improve comments according to review * CLN liblinear comment * TST add / move test to test_linear_loss.py * CLN comment placement * trigger CI * CLN add comment about contiguity of raw_prediction * DEBUG debian-32 * DEBUG test only linear_model module * Revert "DEBUG test only linear_model module" This reverts commit 9d6e6987ff4fbcd32fc9a07944b260688162e14b. * DEBUG test -k LogisticRegression * Revert "DEBUG test -k LogisticRegression" This reverts commit c20316704185da400857b0a3f32935ee1b56c8d9. * Revert "DEBUG debian-32" This reverts commit ef0b98f23251d1b2b0bd8801e456f258392a8d18. * DEBUG set n_jobs=1 * Revert "DEBUG set n_jobs=1" This reverts commit c7f6f72a8c1ee21299786130e097df248fc1a0fb. * CLN always use n_threads=1 * CLN address review * ENH avoid array copy * CLN simplify L2 norm * CLN rename w to weights * CLN rename to hessian_sum and hx_sum * CLN address review * CLN rename to init arg and attribute to base_loss * CLN apply review suggestion Co-authored-by: Alexandre Gramfort <[email protected]> * CLN base_loss instead of _loss attribute Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Alexandre Gramfort <[email protected]>
75,421
0
340
136
41
258,777
77
scikit-learn
15
sklearn/linear_model/_linear_loss.py
Python
21
{ "docstring": "Helper function to get coefficients, intercept and raw_prediction.\n\n Parameters\n ----------\n coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)\n Coefficients of a linear model.\n If shape (n_classes * n_dof,), the classes of one feature are contiguous,\n i.e. one reconstructs the 2d-array via\n coef.reshape((n_classes, -1), order=\"F\").\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n Returns\n -------\n weights : ndarray of shape (n_features,) or (n_classes, n_features)\n Coefficients without intercept term.\n intercept : float or ndarray of shape (n_classes,)\n Intercept terms.\n raw_prediction : ndarray of shape (n_samples,) or \\\n (n_samples, n_classes)\n ", "language": "en", "n_whitespaces": 252, "n_words": 94, "vocab_size": 63 }
https://github.com/scikit-learn/scikit-learn.git
3
_make_class_weight_map_fn
def _make_class_weight_map_fn(class_weight):
    class_ids = list(sorted(class_weight.keys()))
    expected_class_ids = list(range(len(class_ids)))
    if class_ids != expected_class_ids:
        error_msg = (
            "Expected `class_weight` to be a dict with keys from 0 to one less "
            "than the number of classes, found {}"
        ).format(class_weight)
        raise ValueError(error_msg)

    class_weight_tensor = tf.convert_to_tensor(
        [class_weight[int(c)] for c in class_ids]
    )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
13
data_adapter.py
123
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,688
0
116
76
43
271,103
48
keras
17
keras/engine/data_adapter.py
Python
14
{ "docstring": "Applies class weighting to a `Dataset`.\n\n The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where\n `y` must be a single `Tensor`.\n\n Args:\n class_weight: A map where the keys are integer class ids and values are\n the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`\n\n Returns:\n A function that can be used with `tf.data.Dataset.map` to apply class\n weighting.\n ", "language": "en", "n_whitespaces": 101, "n_words": 64, "vocab_size": 51 }
https://github.com/keras-team/keras.git
2
convert_continuos_f0
def convert_continuos_f0(f0):
    # get uv information as binary
    f0 = np.copy(f0)
    uv = np.float32(f0 != 0)
    # get start and end of f0
    if (f0 == 0).all():
        print("| all of the f0 values are 0.")
        return uv, f0
    start_f0 = f0[f0 != 0][0]
    end_f0 = f0[f0 != 0][-1]
    # padding start and end of f0 sequence
    start_idx = np.where(f0 == start_f0)[0][0]
    end_idx = np.where(f0 == end_f0)[0][-1]
    f0[:start_idx] = start_f0
    f0[end_idx:] = end_f0
    # get non-zero frame index
    nz_frames = np.where(f0 != 0)[0]
    # perform linear interpolation
    f = interp1d(nz_frames, f0[nz_frames])
    cont_f0 = f(np.arange(0, f0.shape[0]))
    return uv, cont_f0
7eef3bfde63d03acbd1fc9a15a5e56bef47c0ef7
12
cwt.py
255
Add Diffsinger Module (#2120) * add diffsinger * update README * update README
10,585
0
168
159
58
52,497
97
PaddleHub
19
modules/audio/svs/diffsinger/utils/cwt.py
Python
16
{ "docstring": "CONVERT F0 TO CONTINUOUS F0\n Args:\n f0 (ndarray): original f0 sequence with the shape (T)\n Return:\n (ndarray): continuous f0 with the shape (T)\n ", "language": "en", "n_whitespaces": 46, "n_words": 23, "vocab_size": 15 }
https://github.com/PaddlePaddle/PaddleHub.git
1
test_dates
def test_dates(self, mock_handler):
    df = pd.DataFrame([
        {'a': 1, 'b': dt.datetime(2020, 1, 1)},
        {'a': 2, 'b': dt.datetime(2020, 1, 2)},
        {'a': 1, 'b': dt.datetime(2020, 1, 3)},
    ])
    self.set_handler(mock_handler, name='pg', tables={'tasks': df})

    # --- use predictor ---
    predictor = {
        'name': 'task_model',
        'predict': 'p',
        'dtypes': {
            'p': dtype.float,
            'a': dtype.integer,
            'b': dtype.categorical
        },
        'predicted_value': 3.14
    }
    self.set_predictor(predictor)
    ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb'))
    assert ret.error_code is None
    assert len(ret.data) == 2

    # is last datetime value of a = 1
    assert ret.data[0][1].isoformat() == dt.datetime(2020, 1, 3).isoformat()
5b1cd41a6202873e49c9ec43c770cf7d1f700adb
13
test_executor.py
320
keep datetype from predictor
25,689
0
297
198
57
116,198
81
mindsdb
26
tests/unit/test_executor.py
Python
31
{ "docstring": "\n SELECT a, last(b)\n FROM (\n SELECT res.a, res.b \n FROM pg.tasks as source\n JOIN mindsdb.task_model as res\n ) \n group by 1\n order by a\n ", "language": "en", "n_whitespaces": 133, "n_words": 23, "vocab_size": 19 }
https://github.com/mindsdb/mindsdb.git
2
isfuture
def isfuture(obj):
    return (hasattr(obj.__class__, '_asyncio_future_blocking') and
            obj._asyncio_future_blocking is not None)
8198943edd73a363c266633e1aa5b2a9e9c9f526
10
base_futures.py
41
add python 3.10.4 for windows
55,982
0
27
24
10
220,370
10
XX-Net
5
python3.10.4/Lib/asyncio/base_futures.py
Python
3
{ "docstring": "Check for a Future.\n\n This returns True when obj is a Future instance or is advertising\n itself as duck-type compatible by setting _asyncio_future_blocking.\n See comment in Future for more details.\n ", "language": "en", "n_whitespaces": 42, "n_words": 30, "vocab_size": 26 }
https://github.com/XX-net/XX-Net.git
4
density
def density(w, **kwargs):
    if kwargs:
        warnings.warn(
            "Additional keyword arguments are deprecated in version 1.2 and will be"
            " removed in version 1.4.",
            FutureWarning,
        )

    if hasattr(w, "toarray"):
        d = float(w.nnz) / (w.shape[0] * w.shape[1])
    else:
        d = 0 if w is None else float((w != 0).sum()) / w.size
    return d
5d8a1994620713c2e4226fb8e40fef7e81af1103
17
extmath.py
135
API Deprecate the extra keyword arguments of utils.extmath.density (#24523) Co-authored-by: Meekail Zain <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
76,700
0
126
82
42
261,230
50
scikit-learn
13
sklearn/utils/extmath.py
Python
12
{ "docstring": "Compute density of a sparse vector.\n\n Parameters\n ----------\n w : array-like\n The sparse vector.\n **kwargs : keyword arguments\n Ignored.\n\n .. deprecated:: 1.2\n ``**kwargs`` were deprecated in version 1.2 and will be removed in\n 1.4.\n\n Returns\n -------\n float\n The density of w, between 0 and 1.\n ", "language": "en", "n_whitespaces": 119, "n_words": 45, "vocab_size": 36 }
https://github.com/scikit-learn/scikit-learn.git
1
get_faces_in_frame
def get_faces_in_frame(self, frame_name):
    logger.trace("Getting faces for frame_name: '%s'", frame_name)
    return self._data.get(frame_name, {}).get("faces", [])
5e73437be47f2410439a3c6716de96354e6a0c94
10
alignments.py
61
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
20,639
0
34
36
13
101,219
13
faceswap
7
lib/align/alignments.py
Python
3
{ "docstring": " Obtain the faces from :attr:`data` associated with a given frame_name.\n\n Parameters\n ----------\n frame_name: str\n The frame name to return faces for. This should be the base name of the frame, not the\n full path\n\n Returns\n -------\n list\n The list of face dictionaries that appear within the requested frame_name\n ", "language": "en", "n_whitespaces": 131, "n_words": 48, "vocab_size": 39 }
https://github.com/deepfakes/faceswap.git
1
test_switch_change_alarm_state
async def test_switch_change_alarm_state(hass, utcnow): helper = await setup_test_component(hass, create_security_system_service) await hass.services.async_call( "alarm_control_panel", "alarm_arm_home", {"entity_id": "alarm_control_panel.testdevice"}, blocking=True, ) helper.async_assert_service_values( ServicesTypes.SECURITY_SYSTEM, { CharacteristicsTypes.SECURITY_SYSTEM_STATE_TARGET: 0, }, ) await hass.services.async_call( "alarm_control_panel", "alarm_arm_away", {"entity_id": "alarm_control_panel.testdevice"}, blocking=True, ) helper.async_assert_service_values( ServicesTypes.SECURITY_SYSTEM, { CharacteristicsTypes.SECURITY_SYSTEM_STATE_TARGET: 1, }, ) await hass.services.async_call( "alarm_control_panel", "alarm_arm_night", {"entity_id": "alarm_control_panel.testdevice"}, blocking=True, ) helper.async_assert_service_values( ServicesTypes.SECURITY_SYSTEM, { CharacteristicsTypes.SECURITY_SYSTEM_STATE_TARGET: 2, }, ) await hass.services.async_call( "alarm_control_panel", "alarm_disarm", {"entity_id": "alarm_control_panel.testdevice"}, blocking=True, ) helper.async_assert_service_values( ServicesTypes.SECURITY_SYSTEM, { CharacteristicsTypes.SECURITY_SYSTEM_STATE_TARGET: 3, }, )
58b8c30221a6f6e5acbbe98b7e3298b03fb741f5
11
test_alarm_control_panel.py
290
Improve homekit_controller tests (#65266)
110,111
0
363
177
28
311,446
69
core
14
tests/components/homekit_controller/test_alarm_control_panel.py
Python
50
{ "docstring": "Test that we can turn a HomeKit alarm on and off again.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/home-assistant/core.git
3
test_cache_multiple_objects
def test_cache_multiple_objects(self):
    sites = Site.objects.all()
    search_backend.cache(sites)
    content_type = ContentType.objects.get_for_model(Site)
    self.assertEqual(
        CachedValue.objects.filter(object_type=content_type).count(),
        len(SiteIndex.fields) * sites.count()
    )
    for site in sites:
        for field_name, weight in SiteIndex.fields:
            self.assertTrue(
                CachedValue.objects.filter(
                    object_type=content_type,
                    object_id=site.pk,
                    field=field_name,
                    value=getattr(site, field_name),
                    weight=weight
                ),
            )
9628dead07ccef9608b32906aa8194bc948e5a09
16
test_search.py
180
Closes #10560: New global search (#10676) * Initial work on new search backend * Clean up search backends * Return only the most relevant result per object * Clear any pre-existing cached entries on cache() * #6003: Implement global search functionality for custom field values * Tweak field weights & document guidance * Extend search() to accept a lookup type * Move get_registry() out of SearchBackend * Enforce object permissions when returning search results * Add indexers for remaining models * Avoid calling remove() on non-cacheable objects * Use new search backend by default * Extend search backend to filter by object type * Clean up search view form * Enable specifying lookup logic * Add indexes for value field * Remove object type selector from search bar * Introduce SearchTable and enable HTMX for results * Enable pagination * Remove legacy search backend * Cleanup * Use a UUID for CachedValue primary key * Refactoring search methods * Define max search results limit * Extend reindex command to support specifying particular models * Add clear() and size to SearchBackend * Optimize bulk caching performance * Highlight matched portion of field value * Performance improvements for reindexing * Started on search tests * Cleanup & docs * Documentation updates * Clean up SearchIndex * Flatten search registry to register by app_label.model_name * Clean up search backend classes * Clean up RestrictedGenericForeignKey and RestrictedPrefetch * Resolve migrations conflict
78,238
0
299
116
30
265,901
34
netbox
28
netbox/netbox/tests/test_search.py
Python
19
{ "docstring": "\n Test that multiples objects are cached appropriately\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
https://github.com/netbox-community/netbox.git
4
convert
def convert(gr, raw_node):
    type, value, context, children = raw_node
    if children or type in gr.number2symbol:
        # If there's exactly one child, return that child instead of
        # creating a new node.
        if len(children) == 1:
            return children[0]
        return Node(type, children, context=context)
    else:
        return Leaf(type, value, context=context)
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
pytree.py
97
add python 3.10.4 for windows
55,497
0
104
63
38
218,844
46
XX-Net
11
python3.10.4/Lib/lib2to3/pytree.py
Python
8
{ "docstring": "\n Convert raw node information to a Node or Leaf instance.\n\n This is passed to the parser driver which calls it whenever a reduction of a\n grammar rule produces a new complete node, so that the tree is build\n strictly bottom-up.\n ", "language": "en", "n_whitespaces": 56, "n_words": 40, "vocab_size": 34 }
https://github.com/XX-net/XX-Net.git
2
has_lib
def has_lib(self):
    return (self.distribution.has_pure_modules() or
            self.distribution.has_ext_modules())
8198943edd73a363c266633e1aa5b2a9e9c9f526
10
install.py
42
add python 3.10.4 for windows
56,722
0
35
24
6
222,741
6
XX-Net
5
python3.10.4/Lib/distutils/command/install.py
Python
3
{ "docstring": "Returns true if the current distribution has any Python\n modules to install.", "language": "en", "n_whitespaces": 18, "n_words": 12, "vocab_size": 12 }
https://github.com/XX-net/XX-Net.git
1
get_all_providers
def get_all_providers() -> List[str]:
    from setup import PROVIDERS_REQUIREMENTS

    return list(PROVIDERS_REQUIREMENTS.keys())
3ed07474649b1e202f9b106105fef21f7b2cfddc
9
verify_providers.py
41
Seperate provider verification as standalone breeze command (#23454) This is another step in simplifying and converting to Python all of the CI/local development tooling. This PR separates out verification of providers as a separate breeze command `verify-provider-packages`. It was previously part of "prepare_provider_packages.py" but it has been now extracted to a separate in-container python file and it was wrapped with breeze's `verify-provider-packages` command. No longer provider verification is run with "preparing provider docs" nor "preparing provider packages" - it's a standaline command. This command is also used in CI now to run the tests: * all provider packages are built and created on CI together with   airflow version * the packages are installed inside the CI image and providers are verified * the 2.1 version of Airflow is installed together with all 2.1 - compatible providers and provider verification is run there too. This all is much simpler now - we got rediof some 500 lines of bash code again in favour of breeze python code. Fixes: #23430
9,376
0
19
23
10
48,147
10
airflow
7
scripts/in_container/verify_providers.py
Python
7
{ "docstring": "\n Returns all providers for regular packages.\n :return: list of providers that are considered for provider packages\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 14 }
https://github.com/apache/airflow.git
1
test_orderby_percentile_with_many_fields_transactions_unsupported_fields
def test_orderby_percentile_with_many_fields_transactions_unsupported_fields(self):
    response = self.get_response(
        self.organization.slug,
        field=[
            "p50(sentry.transactions.measurements.lcp)",
            "sum(user_misery)",
        ],
        statsPeriod="1h",
        interval="1h",
        datasource="snuba",
        groupBy=["project_id", "transaction"],
        orderBy="p50(sentry.transactions.measurements.lcp)",
    )
    assert response.status_code == 400
    assert (
        response.json()["detail"]
        == "Multi-field select order by queries is not supported for metric user_misery"
    )
9af098891a8243d08ee5ab6e51925a082135e3f2
11
test_organization_metrics.py
123
feat(metrics): Support multi-field orderby for performance [INGEST-805] (#31162) * feat(metrics): Support metrics multi-field orderby queries Adds support for the performance table to the metrics organization data endpoint
19,226
0
219
71
34
95,663
37
sentry
14
tests/sentry/api/endpoints/test_organization_metrics.py
Python
18
{ "docstring": "\n Test that contains a field in the `select` that is performance related but currently\n not supported should return a 400\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 18 }
https://github.com/getsentry/sentry.git
3
get_result
def get_result(self) -> Any:
    if not self._complete.is_set():
        logger.warning("Aborting attempt to retrieve result from a LongRunningTask that is "
                       "still running")
        return None

    if self.err:
        logger.debug("Error caught in thread")
        self._config.set_cursor_default(widget=self._widget)
        raise self.err[1].with_traceback(self.err[2])

    logger.debug("Getting result from thread")
    retval = self._queue.get()
    logger.debug("Got result from thread")
    self._config.set_cursor_default(widget=self._widget)
    return retval
dc18c74eea0c7837a820d27628cb12b0824fa30e
11
utils.py
177
Bugfix: Preview for extract in batch mode
20,914
0
182
102
35
101,502
45
faceswap
17
lib/gui/utils.py
Python
23
{ "docstring": " Return the result from the given task.\n\n Returns\n -------\n varies:\n The result of the thread will depend on the given task. If a call is made to\n :func:`get_result` prior to the thread completing its task then ``None`` will be\n returned\n ", "language": "en", "n_whitespaces": 102, "n_words": 40, "vocab_size": 30 }
https://github.com/deepfakes/faceswap.git
1
get_snap_revisions
def get_snap_revisions(snap, version):
    print('Getting revision numbers for', snap, version)
    cmd = ['snapcraft', 'status', snap]
    process = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, universal_newlines=True)
    pattern = f'^\s+candidate\s+{version}\s+(\d+)\s*'
    revisions = re.findall(pattern, process.stdout, re.MULTILINE)
    assert len(revisions) == SNAP_ARCH_COUNT, f'Unexpected number of snaps found for {snap} {version} (expected {SNAP_ARCH_COUNT}, found {len(revisions)})'
    return revisions
39e8d14e1b221bf37526cc05ecc83beee30a3c57
10
finish_release.py
148
Set up 2.0 pre-releases (#9400) * update credential info * update release tooling to use candidate channel * split deploy jobs * pass parameter through * add 2.0 pipeline prerelease * add comments * quote file path
45,646
0
70
79
41
186,893
46
certbot
19
tools/finish_release.py
Python
8
{ "docstring": "Finds the revisions for the snap and version in the candidate channel.\n\n If you call this function without being logged in with snapcraft, it\n will hang with no output.\n\n :param str snap: the name of the snap on the snap store\n :param str version: snap version number, e.g. 1.7.0\n\n :returns: list of revision numbers\n :rtype: `list` of `str`\n\n :raises subprocess.CalledProcessError: if the snapcraft command\n fails\n\n :raises AssertionError: if the expected snaps are not found\n\n ", "language": "en", "n_whitespaces": 108, "n_words": 74, "vocab_size": 55 }
https://github.com/certbot/certbot.git
2
dup_zz_mignotte_bound
def dup_zz_mignotte_bound(f, K):
    from sympy.functions.combinatorial.factorials import binomial

    d = dup_degree(f)
    delta = _ceil(d / 2)
    delta2 = _ceil(delta / 2)

    # euclidean-norm
    eucl_norm = K.sqrt( sum( [cf**2 for cf in f] ) )

    # biggest values of binomial coefficients (p. 538 of reference)
    t1 = binomial(delta - 1, delta2)
    t2 = binomial(delta - 1, delta2 - 1)

    lc = K.abs(dup_LC(f, K))  # leading coefficient
    bound = t1 * eucl_norm + t2 * lc  # (p. 538 of reference)
    bound += dup_max_norm(f, K)  # add max coeff for irreducible polys
    bound = _ceil(bound / 2) * 2  # round up to even integer
    return bound
e0dc14eca132f37c5f49369eb4051eae37c9b119
12
factortools.py
197
Refactored import ordering in functions
48,311
0
155
123
65
197,054
104
sympy
24
sympy/polys/factortools.py
Python
13
{ "docstring": "\n The Knuth-Cohen variant of Mignotte bound for\n univariate polynomials in `K[x]`.\n\n Examples\n ========\n\n >>> from sympy.polys import ring, ZZ\n >>> R, x = ring(\"x\", ZZ)\n\n >>> f = x**3 + 14*x**2 + 56*x + 64\n >>> R.dup_zz_mignotte_bound(f)\n 152\n\n By checking `factor(f)` we can see that max coeff is 8\n\n Also consider a case that `f` is irreducible for example `f = 2*x**2 + 3*x + 4`\n To avoid a bug for these cases, we return the bound plus the max coefficient of `f`\n\n >>> f = 2*x**2 + 3*x + 4\n >>> R.dup_zz_mignotte_bound(f)\n 6\n\n Lastly,To see the difference between the new and the old Mignotte bound\n consider the irreducible polynomial::\n\n >>> f = 87*x**7 + 4*x**6 + 80*x**5 + 17*x**4 + 9*x**3 + 12*x**2 + 49*x + 26\n >>> R.dup_zz_mignotte_bound(f)\n 744\n\n The new Mignotte bound is 744 whereas the old one (SymPy 1.5.1) is 1937664.\n\n\n References\n ==========\n\n ..[1] [Abbott2013]_\n\n ", "language": "en", "n_whitespaces": 225, "n_words": 149, "vocab_size": 90 }
https://github.com/sympy/sympy.git
1
require_accelerate
def require_accelerate(test_case):
    return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)
2fbb237967f5d1b2eb65c2131954f23a24bd29ef
10
testing_utils.py
37
Add the auto_find_batch_size capability from Accelerate into Trainer (#17068) Co-authored-by: Sylvain Gugger <[email protected]> - Adds auto_batch_size finder - Moves training loop to an inner training loop
6,873
0
13
20
7
37,824
7
transformers
5
src/transformers/testing_utils.py
Python
2
{ "docstring": "\n Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.\n ", "language": "en", "n_whitespaces": 22, "n_words": 15, "vocab_size": 15 }
https://github.com/huggingface/transformers.git
3
get_strategy_run_id
def get_strategy_run_id(strategy) -> str:
    digest = hashlib.sha1()
    config = deepcopy(strategy.config)

    # Options that have no impact on results of individual backtest.
    not_important_keys = ('strategy_list', 'original_config', 'telegram', 'api_server')
    for k in not_important_keys:
        if k in config:
            del config[k]

    digest.update(rapidjson.dumps(config, default=str, number_mode=rapidjson.NM_NATIVE).encode('utf-8'))
    with open(strategy.__file__, 'rb') as fp:
        digest.update(fp.read())
    return digest.hexdigest().lower()
16861db653ec8166f73fc8480894f186a137e7bd
13
misc.py
184
Implement previous backtest result reuse when config and strategy did not change.
34,273
0
133
107
44
148,500
48
freqtrade
23
freqtrade/misc.py
Python
18
{ "docstring": "\n Generate unique identification hash for a backtest run. Identical config and strategy file will\n always return an identical hash.\n :param strategy: strategy object.\n :return: hex string id.\n ", "language": "en", "n_whitespaces": 43, "n_words": 27, "vocab_size": 26 }
https://github.com/freqtrade/freqtrade.git
10
fit
def fit(self, X, y=None): self._validate_params() dtype = bool if self.metric in PAIRWISE_BOOLEAN_FUNCTIONS else float if dtype == bool and X.dtype != bool: msg = ( "Data will be converted to boolean for" f" metric {self.metric}, to avoid this warning," " you may convert the data prior to calling fit." ) warnings.warn(msg, DataConversionWarning) X = self._validate_data(X, dtype=dtype, accept_sparse="csr") if self.metric == "precomputed" and issparse(X): with warnings.catch_warnings(): warnings.simplefilter("ignore", SparseEfficiencyWarning) # Set each diagonal to an explicit value so each point is its # own neighbor X.setdiag(X.diagonal()) memory = check_memory(self.memory) ( self.ordering_, self.core_distances_, self.reachability_, self.predecessor_, ) = memory.cache(compute_optics_graph)( X=X, min_samples=self.min_samples, algorithm=self.algorithm, leaf_size=self.leaf_size, metric=self.metric, metric_params=self.metric_params, p=self.p, n_jobs=self.n_jobs, max_eps=self.max_eps, ) # Extract clusters from the calculated orders and reachability if self.cluster_method == "xi": labels_, clusters_ = cluster_optics_xi( reachability=self.reachability_, predecessor=self.predecessor_, ordering=self.ordering_, min_samples=self.min_samples, min_cluster_size=self.min_cluster_size, xi=self.xi, predecessor_correction=self.predecessor_correction, ) self.cluster_hierarchy_ = clusters_ elif self.cluster_method == "dbscan": if self.eps is None: eps = self.max_eps else: eps = self.eps if eps > self.max_eps: raise ValueError( "Specify an epsilon smaller than %s. Got %s." % (self.max_eps, eps) ) labels_ = cluster_optics_dbscan( reachability=self.reachability_, core_distances=self.core_distances_, ordering=self.ordering_, eps=eps, ) self.labels_ = labels_ return self
ed3172b6c5c733906ce3aa2316c4ad60b5c26d10
15
_optics.py
528
MAINT Parameters validation for OPTICS (#24326) Co-authored-by: Jérémie du Boisberranger <[email protected]>
76,498
0
908
336
132
260,794
179
scikit-learn
52
sklearn/cluster/_optics.py
Python
60
{ "docstring": "Perform OPTICS clustering.\n\n Extracts an ordered list of points and reachability distances, and\n performs initial clustering using ``max_eps`` distance specified at\n OPTICS object instantiation.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features), or \\\n (n_samples, n_samples) if metric='precomputed'\n A feature array, or array of distances between samples if\n metric='precomputed'. If a sparse matrix is provided, it will be\n converted into CSR format.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns a fitted instance of self.\n ", "language": "en", "n_whitespaces": 234, "n_words": 87, "vocab_size": 73 }
https://github.com/scikit-learn/scikit-learn.git
1
_filter_boxes
def _filter_boxes(self, patch, boxes):
    center = boxes.centers.numpy()
    mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (
        center[:, 0] < patch[2]) * (
            center[:, 1] < patch[3])
    return mask
af063a6f25ddae4de90646f86b2db824f3d00138
13
transforms.py
117
[Refactor] Refactor pipelines with boxlist. (#8562) * Refactor pipelines and data_preprocesser by boxlist * Refactor browse_dataset.py * Update * Update * Update * Update * update * Update * Change with_box_wrapped to with_boxlist * Fix comments * Fix commits * Update UT
70,808
0
86
78
21
245,502
32
mmdetection
8
mmdet/datasets/transforms/transforms.py
Python
6
{ "docstring": "Check whether the center of each box is in the patch.\n\n Args:\n patch (list[int]): The cropped area, [left, top, right, bottom].\n boxes (numpy array, (N x 4)): Ground truth boxes.\n\n Returns:\n mask (numpy array, (N,)): Each box is inside or outside the patch.\n ", "language": "en", "n_whitespaces": 97, "n_words": 43, "vocab_size": 36 }
https://github.com/open-mmlab/mmdetection.git
3
_normalize_index
def _normalize_index(index, axis_size):
    if issubdtype(_dtype(index), np.unsignedinteger):
        return index
    if core.is_constant_dim(axis_size):
        axis_size_val = _lax_const(index, axis_size)
    else:
        axis_size_val = lax.convert_element_type(core.dimension_as_value(axis_size),
                                                 _dtype(index))
    return lax.select(
        lax.lt(index, _lax_const(index, 0)),
        lax.add(index, axis_size_val),
        index)


TAKE_ALONG_AXIS_DOC = 


@_wraps(np.take_along_axis, update_doc=False, lax_description=TAKE_ALONG_AXIS_DOC)
@partial(jit, static_argnames=('axis', 'mode'))
92ca76a0395ad32423e681b6d6ce6d84c361852b
@_wraps(np.take_along_axis, update_doc=False, lax_description=TAKE_ALONG_AXIS_DOC) @partial(jit, static_argnames=('axis', 'mode'))
13
lax_numpy.py
188
Skip normalization of unsigned indices
26,806
1
107
89
29
120,261
35
jax
25
jax/_src/numpy/lax_numpy.py
Python
12
{ "docstring": "Normalizes an index value in the range [-N, N) to the range [0, N).\nUnlike :func:`numpy.take_along_axis`, :func:`jax.numpy.take_along_axis` takes\nan optional ``mode`` parameter controlling how out-of-bounds indices should be\nhandled. By default, out-of-bounds indices are clamped into range. In a future\nchange, out-of-bounds indices will return invalid (e.g., ``NaN``) values\ninstead. See :attr:`jax.numpy.ndarray.at` for more discussion\nof out-of-bounds indexing in JAX.\n", "language": "en", "n_whitespaces": 53, "n_words": 60, "vocab_size": 51 }
https://github.com/google/jax.git
2
state_checkpoint_interval
def state_checkpoint_interval(self) -> Optional[int]: if self._is_finished: return 1 return 100
27b5ba338656b9adbfc8ebd90960a200a14d5935
7
streams.py
34
:tada: Source Looker: Migrate to native CDK (#9609)
494
0
42
20
9
3,604
10
airbyte
5
airbyte-integrations/connectors/source-looker/source_looker/streams.py
Python
5
{ "docstring": "this is a workaround: the Airbyte CDK forces for save the latest state after reading of all records", "language": "en", "n_whitespaces": 17, "n_words": 18, "vocab_size": 17 }
https://github.com/airbytehq/airbyte.git
2
timezone_tag
def timezone_tag(parser, token):
    bits = token.split_contents()
    if len(bits) != 2:
        raise TemplateSyntaxError("'%s' takes one argument (timezone)" % bits[0])
    tz = parser.compile_filter(bits[1])
    nodelist = parser.parse(("endtimezone",))
    parser.delete_first_token()
    return TimezoneNode(nodelist, tz)


@register.tag("get_current_timezone")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
@register.tag("get_current_timezone")
11
tz.py
126
Refs #33476 -- Reformatted code with Black.
51,491
1
56
67
27
206,336
29
django
15
django/templatetags/tz.py
Python
8
{ "docstring": "\n Enable a given time zone just for this block.\n\n The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a\n time zone name, or ``None``. If it is ``None``, the default time zone is\n used within the block.\n\n Sample usage::\n\n {% timezone \"Europe/Paris\" %}\n It is {{ now }} in Paris.\n {% endtimezone %}\n ", "language": "en", "n_whitespaces": 99, "n_words": 55, "vocab_size": 43 }
https://github.com/django/django.git
18
update
def update(self) -> None: with self.lock: # Fetch valid stop information once if not self._origin: stops = self._pygtfs.stops_by_id(self.origin) if not stops: self._available = False _LOGGER.warning("Origin stop ID %s not found", self.origin) return self._origin = stops[0] if not self._destination: stops = self._pygtfs.stops_by_id(self.destination) if not stops: self._available = False _LOGGER.warning( "Destination stop ID %s not found", self.destination ) return self._destination = stops[0] self._available = True # Fetch next departure self._departure = get_next_departure( self._pygtfs, self.origin, self.destination, self._offset, self._include_tomorrow, ) # Define the state as a UTC timestamp with ISO 8601 format if not self._departure: self._state = None else: self._state = self._departure["departure_time"].replace( tzinfo=dt_util.UTC ) # Fetch trip and route details once, unless updated if not self._departure: self._trip = None else: trip_id = self._departure["trip_id"] if not self._trip or self._trip.trip_id != trip_id: _LOGGER.debug("Fetching trip details for %s", trip_id) self._trip = self._pygtfs.trips_by_id(trip_id)[0] route_id = self._departure["route_id"] if not self._route or self._route.route_id != route_id: _LOGGER.debug("Fetching route details for %s", route_id) self._route = self._pygtfs.routes_by_id(route_id)[0] # Fetch agency details exactly once if self._agency is None and self._route: _LOGGER.debug("Fetching agency details for %s", self._route.agency_id) try: self._agency = self._pygtfs.agencies_by_id(self._route.agency_id)[0] except IndexError: _LOGGER.warning( "Agency ID '%s' was not found in agency table, " "you may want to update the routes database table " "to fix this missing reference", self._route.agency_id, ) self._agency = False # Assign attributes, icon and name self.update_attributes() if self._agency: self._attr_attribution = self._agency.agency_name else: self._attr_attribution = None if self._route: self._icon = ICONS.get(self._route.route_type, ICON) else: self._icon = ICON name = ( f"{getattr(self._agency, 'agency_name', DEFAULT_NAME)} " f"{self.origin} to {self.destination} next departure" ) if not self._departure: name = f"{DEFAULT_NAME}" self._name = self._custom_name or name
c717fd19de01fc822d146cc5e353959dfa86d5f7
17
sensor.py
727
Move attribution to standalone attribute [e-g] (#80513)
88,424
0
1,402
420
141
289,281
259
core
46
homeassistant/components/gtfs/sensor.py
Python
72
{ "docstring": "Get the latest data from GTFS and update the states.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
https://github.com/home-assistant/core.git
3
file_exists
def file_exists(self, filename):
    if not self.hass.config.is_allowed_path(filename):
        _LOGGER.warning("Path not allowed: %s", filename)
        return False
    if not os.path.isfile(filename):
        _LOGGER.warning("Not a file: %s", filename)
        return False
    return True
aa57907c1878b5d7bdaf258e0b135c189abb8f27
10
notify.py
91
Improve logging for Discord integration (#68160)
92,734
0
97
54
17
293,677
25
core
11
homeassistant/components/discord/notify.py
Python
8
{ "docstring": "Check if a file exists on disk and is in authorized path.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/home-assistant/core.git
7
get
def get(self):
    response = {
        'learn': False,
        'predict': False,
        'analyse': False
    }

    if os.name != 'posix':
        return response

    for process_type in response:
        processes_dir = Path(tempfile.gettempdir()).joinpath(f'mindsdb/processes/{process_type}/')
        if not processes_dir.is_dir():
            continue
        process_marks = [x.name for x in processes_dir.iterdir()]
        for p_mark in process_marks:
            pid = int(p_mark.split('-')[0])
            try:
                psutil.Process(pid)
            except Exception:
                processes_dir.joinpath(p_mark).unlink()
            else:
                response[process_type] = True

    return response


@ns_conf.route('/telemetry')
44d7ef0e08e5144870ad2831ce6e221f9044c47c
@ns_conf.route('/telemetry')
16
util.py
230
'files' route
25,182
1
316
125
42
114,397
55
mindsdb
25
mindsdb/api/http/namespaces/util.py
Python
22
{ "docstring": " Checks server use native for learn or analyse.\n Will return right result only on Linux.\n ", "language": "en", "n_whitespaces": 34, "n_words": 15, "vocab_size": 15 }
https://github.com/mindsdb/mindsdb.git
2
get_task_type_filter_choices
def get_task_type_filter_choices(self): task_type_choices = [ (model, model.get_verbose_name()) for model in self.task_models ] task_type_choices.sort(key=lambda task_type: task_type[1].lower()) return task_type_choices
284380672da91b5f1d5ea35e01b5b86d7a534dbb
12
workflows.py
73
add another helper method get_task_type_filter_choices
15,532
0
74
45
16
70,612
17
wagtail
10
wagtail/admin/views/workflows.py
Python
7
{ "docstring": "\n To be called after dispatch(); returns the list of task type choices for filter on \"existing task\" tab\n ", "language": "en", "n_whitespaces": 33, "n_words": 18, "vocab_size": 18 }
https://github.com/wagtail/wagtail.git
1
test_workflow_action_via_edit_view
def test_workflow_action_via_edit_view(self):
    # Post
    self.client.post(
        reverse("wagtailadmin_pages:edit", args=(self.page.id,)),
        {
            "title": "This title was edited while approving",
            "slug": str(self.page.slug),
            "content": str(self.page.content),
            "action-workflow-action": "True",
            "workflow-action-name": "approve",
            "workflow-action-extra-data": '{"comment": "my comment"}',
        },
    )

    # Check that the workflow was approved
    workflow_state = WorkflowState.objects.get(
        page=self.page, requested_by=self.submitter
    )
    self.assertEqual(workflow_state.status, workflow_state.STATUS_APPROVED)

    # Check that the task was approved
    task_state = workflow_state.current_task_state
    self.assertEqual(task_state.status, task_state.STATUS_APPROVED)

    # Check that the comment was added to the task state correctly
    self.assertEqual(task_state.comment, "my comment")

    # Check that page edits made at the same time as the action have been saved
    page = Page.objects.get(id=self.page.id)
    self.assertEqual(
        page.get_latest_revision_as_page().title,
        "This title was edited while approving",
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
13
test_workflows.py
271
Reformat with black
15,861
0
374
160
67
72,248
99
wagtail
26
wagtail/admin/tests/test_workflows.py
Python
24
{ "docstring": "\n Posting to the 'edit' view with 'action-workflow-action' set should perform the given workflow action in addition to updating page content\n ", "language": "en", "n_whitespaces": 35, "n_words": 20, "vocab_size": 18 }
https://github.com/wagtail/wagtail.git
1
test_zeroVector
def test_zeroVector(self):
    self.assertTrue(zeroVector(10).__str__().count("0") == 10)
f0af0c43340763724f139fa68aa1e5a9ffe458b4
14
tests.py
48
refactor: clean code Signed-off-by: slowy07 <[email protected]>
4,413
0
19
26
5
22,685
5
Python
6
linear-algebra-python/src/tests.py
Python
2
{ "docstring": "\n test for the global function zeroVector(...)\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
https://github.com/geekcomputers/Python.git
1
test_user_can_supply_a_sidecar_container_and_volume
def test_user_can_supply_a_sidecar_container_and_volume(self): manifest = KubernetesJob( command=["echo", "hello"], job={ "apiVersion": "batch/v1", "kind": "Job", "metadata": {"labels": {}}, "spec": { "template": { "spec": { "parallelism": 1, "completions": 1, "restartPolicy": "Never", "containers": [ { "name": "prefect-job", "env": [], }, { "name": "my-sidecar", "image": "cool-peeps/cool-code:latest", "volumeMounts": [ {"name": "data-volume", "mountPath": "/data/"} ], }, ], "volumes": [ {"name": "data-volume", "hostPath": "/all/the/data/"} ], } } }, }, ).build_job() pod = manifest["spec"]["template"]["spec"] assert pod["volumes"] == [{"name": "data-volume", "hostPath": "/all/the/data/"}] # the prefect-job container is still populated assert pod["containers"][0]["name"] == "prefect-job" assert pod["containers"][0]["command"] == ["echo", "hello"] assert pod["containers"][1] == { "name": "my-sidecar", "image": "cool-peeps/cool-code:latest", "volumeMounts": [{"name": "data-volume", "mountPath": "/data/"}], }
daddc2985f0cba6c6e0ae3903232cbca155e7e91
26
test_kubernetes_job.py
422
Port KubernetesFlowRunner tests to KubernetesJob tests
11,673
0
989
218
65
57,498
101
prefect
8
tests/infrastructure/test_kubernetes_job.py
Python
43
{ "docstring": "The user can supply a custom base job that includes more complex\n modifications, like a sidecar container and volumes", "language": "en", "n_whitespaces": 25, "n_words": 19, "vocab_size": 18 }
https://github.com/PrefectHQ/prefect.git
3
addCondition
def addCondition(self, *fns, **kwargs):
    for fn in fns:
        self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'),
                                                       fatal=kwargs.get('fatal', False)))

    self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
    return self
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
15
pyparsing.py
107
upd; format
13,297
0
117
66
19
63,420
20
transferlearning
12
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
Python
6
{ "docstring": "Add a boolean predicate function to expression's list of parse actions. See\n :class:`setParseAction` for function call signatures. Unlike ``setParseAction``,\n functions passed to ``addCondition`` need to return boolean success/fail of the condition.\n\n Optional keyword arguments:\n - message = define a custom message to be used in the raised exception\n - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException\n\n Example::\n\n integer = Word(nums).setParseAction(lambda toks: int(toks[0]))\n year_int = integer.copy()\n year_int.addCondition(lambda toks: toks[0] >= 2000, message=\"Only support years 2000 and later\")\n date_str = year_int + '/' + integer + '/' + integer\n\n result = date_str.parseString(\"1999/12/31\") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)\n ", "language": "en", "n_whitespaces": 219, "n_words": 112, "vocab_size": 82 }
https://github.com/jindongwang/transferlearning.git
4
call_arktrades
def call_arktrades(self, other_args):
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="arktrades",
        description=,
    )
    parser.add_argument(
        "-l",
        "--limit",
        help="Limit of rows to show",
        dest="limit",
        default=10,
        type=check_positive,
    )
    parser.add_argument(
        "-s",
        "--show_ticker",
        action="store_true",
        default=False,
        help="Flag to show ticker in table",
        dest="show_ticker",
    )
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-l")
    ns_parser = parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        ark_view.display_ark_trades(
            ticker=self.ticker,
            num=ns_parser.limit,
            export=ns_parser.export,
            show_ticker=ns_parser.show_ticker,
        )
42b1acf5c3d2f9e9922ac8c4e956fffc08a5d7ae
11
dd_controller.py
229
Add an API wrapper for terminal functions to be used in python (#1170) * Initial implementation of the API wrapper for stocks/disc * Create a file for helper classes * Move models namespace class where it belongs... TO THE HELPER_CLASSES🚀 * Add backtesting api wrapper * Add ba api wrapper * Add dps api wrapper * Fix flake8 warnings in command help strings * Add comparison analysis models * Add dd api wrapper * Add fa api wrapper * Add gov api wrapper * Add ins api wrapper *note the note * Add options api wrapper *note the note * Add prediction api wrapper * Add api support for importing models from both local and common contexts * Add qa api wrapper * Add screener api wrapper *note the note * Fix typos in sia controller * Add sia api wrapper * Add ta api wrapper * Disable linting for specific lines that fail in CI * Remove deprecated parameter * Add exception handling when prediction packages are not installed
83,747
0
405
143
50
281,416
59
OpenBBTerminal
28
gamestonk_terminal/stocks/due_diligence/dd_controller.py
Python
36
{ "docstring": "Process arktrades command\n Get trades for ticker across all ARK funds.\n ", "language": "en", "n_whitespaces": 37, "n_words": 11, "vocab_size": 11 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
3
_solve_svd_design_matrix
def _solve_svd_design_matrix(self, alpha, y, sqrt_sw, X_mean, singvals_sq, U, UT_y):
    w = ((singvals_sq + alpha) ** -1) - (alpha**-1)
    if self.fit_intercept:
        # detect intercept column
        normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)
        intercept_dim = _find_smallest_angle(normalized_sw, U)
        # cancel the regularization for the intercept
        w[intercept_dim] = -(alpha**-1)
    c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha**-1) * y
    G_inverse_diag = self._decomp_diag(w, U) + (alpha**-1)
    if len(y.shape) != 1:
        # handle case where y is 2-d
        G_inverse_diag = G_inverse_diag[:, np.newaxis]
    return G_inverse_diag, c
1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe
12
_ridge.py
220
MNT Update black to stable version (#22474)
75,474
0
202
143
57
258,898
76
scikit-learn
25
sklearn/linear_model/_ridge.py
Python
11
{ "docstring": "Compute dual coefficients and diagonal of G^-1.\n\n Used when we have an SVD decomposition of X\n (n_samples > n_features and X is dense).\n ", "language": "en", "n_whitespaces": 44, "n_words": 23, "vocab_size": 20 }
https://github.com/scikit-learn/scikit-learn.git
3
test_parallel_state_with_long_tag
def test_parallel_state_with_long_tag(state, state_tree):
    short_command = "helloworld"
    long_command = short_command * 25
    sls_contents = .format(
        short_command, long_command
    )
    with pytest.helpers.temp_file("issue-49738.sls", sls_contents, state_tree):
        ret = state.sls(
            "issue-49738",
            __pub_jid="1",  # Because these run in parallel we need a fake JID
        )
    comments = sorted(x.comment for x in ret)
    expected = sorted(
        'Command "{}" run'.format(x) for x in (short_command, long_command)
    )
    assert comments == expected, "{} != {}".format(comments, expected)


@pytest.mark.skip_on_darwin(reason="Test is broken on macosx")
@pytest.mark.skip_on_windows(
    reason=(
        "Functional testing this on windows raises unicode errors. "
        "Tested in tests/pytests/integration/modules/state/test_state.py"
    )
)
f2a783643de61cac1ff3288b40241e5ce6e1ddc8
@pytest.mark.skip_on_darwin(reason="Test is broken on macosx") @pytest.mark.skip_on_windows( reason=( "Functional testing this on windows raises unicode errors. " "Tested in tests/pytests/integration/modules/state/test_state.py" ) )
12
test_state.py
209
Update to latest ``pyupgrade`` hook. Stop skipping it on CI. Signed-off-by: Pedro Algarvio <[email protected]>
54,306
1
184
97
67
215,988
86
salt
22
tests/pytests/functional/modules/state/test_state.py
Python
26
{ "docstring": "\n This tests the case where the state being executed has a long ID dec or\n name and states are being run in parallel. The filenames used for the\n parallel state cache were previously based on the tag for each chunk,\n and longer ID decs or name params can cause the cache file to be longer\n than the operating system's max file name length. To counter this we\n instead generate a SHA1 hash of the chunk's tag to use as the cache\n filename. This test will ensure that long tags don't cause caching\n failures.\n\n See https://github.com/saltstack/salt/issues/49738 for more info.\n \n test_cmd_short:\n cmd.run:\n - name: {}\n - parallel: True\n\n test_cmd_long:\n cmd.run:\n - name: {}\n - parallel: True\n ", "language": "en", "n_whitespaces": 193, "n_words": 114, "vocab_size": 80 }
https://github.com/saltstack/salt.git
2
process_downloaded_dataset
def process_downloaded_dataset(self):
    makedirs(self.processed_temp_path, exist_ok=True)
    for dataset in ["training", "testing"]:
        print(f">>> create ludwig formatted {dataset} data")
        labels, images = self.read_source_dataset(dataset, self.raw_dataset_path)
        self.write_output_dataset(labels, images, os.path.join(self.processed_temp_path, dataset))
    self.output_training_and_test_data()
    rename(self.processed_temp_path, self.processed_dataset_path)
    print(">>> completed data preparation")
89d18365c41c4ded68edd2095349ce4a6caf5d18
12
__init__.py
141
Removes skimage and imageio dependencies replacing imsave with torchvision.utils.save_image (#1694)
901
0
106
84
31
5,992
31
ludwig
18
ludwig/datasets/mnist/__init__.py
Python
9
{ "docstring": "Read the training and test directories and write out a csv containing the training path and the\n label.", "language": "en", "n_whitespaces": 24, "n_words": 18, "vocab_size": 13 }
https://github.com/ludwig-ai/ludwig.git
5
stack
def stack(self, level=-1, dropna=True):  # noqa: PR01, RT01, D200
    if not isinstance(self.columns, pandas.MultiIndex) or (
        isinstance(self.columns, pandas.MultiIndex)
        and is_list_like(level)
        and len(level) == self.columns.nlevels
    ):
        return self._reduce_dimension(
            query_compiler=self._query_compiler.stack(level, dropna)
        )
    else:
        return self.__constructor__(
            query_compiler=self._query_compiler.stack(level, dropna)
        )
b541b6c18e6fb4515e998b9b4f88528490cf69c6
14
dataframe.py
148
REFACTOR-#3948: Use `__constructor__` in `DataFrame` and `Series` classes (#5485) Signed-off-by: Anatoly Myachev <[email protected]>
36,398
0
171
95
28
155,483
35
modin
15
modin/pandas/dataframe.py
Python
13
{ "docstring": "\n Stack the prescribed level(s) from columns to index.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/modin-project/modin.git
1
set_seed
def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
5fdb54ece78b5d277fe26a3865beca8da0430495
8
igf.py
59
Add Information Gain Filtration algorithm (#16953) * Add information gain filtration algorithm * Complying with black requirements * Added author * Fixed import order * flake8 corrections Co-authored-by: Javier Turek <[email protected]>
6,997
0
21
34
6
38,629
6
transformers
8
examples/research_projects/information-gain-filtration/igf/igf.py
Python
5
{ "docstring": "\n For reproducible training\n\n Args:\n seed: A seed for reproducible training\n\n ", "language": "en", "n_whitespaces": 27, "n_words": 10, "vocab_size": 8 }
https://github.com/huggingface/transformers.git
11
test_step
def test_step(self, data): data = data_adapter.expand_1d(data) x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data) # These next two lines differ from the base method - they avoid issues when the labels are in # the input dict (and loss is computed internally) if y is None and "labels" in x: y = x["labels"] # Stops confusion with metric computations elif y is None and "input_ids" in x: # Just make any kind of dummy array to make loss work y = tf.zeros(tf.shape(x["input_ids"])[0], dtype=tf.int64) y_pred = self(x, training=False) self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses) # Updates stateful loss metrics. if isinstance(y_pred, TFSeq2SeqLMOutput) and isinstance(y, tf.Tensor): y_pred = y_pred["logits"] self.compiled_metrics.update_state(y, y_pred, sample_weight) # Collect metrics to return return_metrics = {} for metric in self.metrics: result = metric.result() if isinstance(result, dict): return_metrics.update(result) else: return_metrics[metric.name] = result # These next two lines are also not in the base method - they correct the displayed metrics # when we're using a dummy loss, to avoid a bogus "loss_loss" value being shown. if "loss" in return_metrics and "loss_loss" in return_metrics: del return_metrics["loss_loss"] return return_metrics
44eaa2b3036c5c9a83ed781e08e3dc50aae193a9
16
modeling_tf_utils.py
323
Update TF test_step to match train_step (#15111) * Update TF test_step to match train_step * Update compile() warning to be clearer about what to pass
6,190
0
425
196
108
34,046
173
transformers
31
src/transformers/modeling_tf_utils.py
Python
22
{ "docstring": "\n A modification of Keras's default test_step that cleans up the printed metrics when we use a dummy loss.\n ", "language": "en", "n_whitespaces": 33, "n_words": 18, "vocab_size": 18 }
https://github.com/huggingface/transformers.git
7
get_cloud_sync_client
def get_cloud_sync_client(remote_path): if remote_path.startswith(S3_PREFIX): if not distutils.spawn.find_executable("aws"): raise ValueError( "Upload uri starting with '{}' requires awscli tool" " to be installed".format(S3_PREFIX) ) sync_up_template = ( "aws s3 sync {source} {target} " "--only-show-errors {options}" ) sync_down_template = sync_up_template delete_template = ( "aws s3 rm {target} --recursive " "--only-show-errors {options}" ) exclude_template = "--exclude '{pattern}'" elif remote_path.startswith(GS_PREFIX): if not distutils.spawn.find_executable("gsutil"): raise ValueError( "Upload uri starting with '{}' requires gsutil tool" " to be installed".format(GS_PREFIX) ) sync_up_template = "gsutil rsync -r {options} {source} {target}" sync_down_template = sync_up_template delete_template = "gsutil rm -r {options} {target}" exclude_template = "-x '{regex_pattern}'" elif remote_path.startswith(HDFS_PREFIX): if not distutils.spawn.find_executable("hdfs"): raise ValueError( "Upload uri starting with '{}' requires hdfs tool" " to be installed".format(HDFS_PREFIX) ) sync_up_template = "hdfs dfs -put -f {source} {target}" sync_down_template = "hdfs dfs -get -f {source} {target}" delete_template = "hdfs dfs -rm -r {target}" exclude_template = None else: raise ValueError( f"Upload uri must start with one of: {ALLOWED_REMOTE_PREFIXES} " f"(is: `{remote_path}`)" ) return CommandBasedClient( sync_up_template, sync_down_template, delete_template, exclude_template ) @PublicAPI(stability="beta")
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
@PublicAPI(stability="beta")
16
sync_client.py
324
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,740
1
524
159
76
132,330
164
ray
19
python/ray/tune/sync_client.py
Python
43
{ "docstring": "Returns a CommandBasedClient that can sync to/from remote storage.\n\n Args:\n remote_path (str): Path to remote storage (S3, GS or HDFS).\n\n Raises:\n ValueError if malformed remote_dir.\n ", "language": "en", "n_whitespaces": 48, "n_words": 25, "vocab_size": 24 }
https://github.com/ray-project/ray.git
1
test_hvac_mode_vs_hvac_action_current_mode_wrong
async def test_hvac_mode_vs_hvac_action_current_mode_wrong(hass, utcnow): helper = await setup_test_component(hass, create_thermostat_service) await helper.async_update( ServicesTypes.THERMOSTAT, { CharacteristicsTypes.TEMPERATURE_CURRENT: 22, CharacteristicsTypes.TEMPERATURE_TARGET: 21, CharacteristicsTypes.HEATING_COOLING_CURRENT: 1, CharacteristicsTypes.HEATING_COOLING_TARGET: 0, CharacteristicsTypes.RELATIVE_HUMIDITY_CURRENT: 50, CharacteristicsTypes.RELATIVE_HUMIDITY_TARGET: 45, }, ) state = await helper.poll_and_get_state() assert state.state == "off" assert state.attributes["hvac_action"] == "idle"
117c12d135039797e5c00e9f1c87ece7f4be13e0
11
test_climate.py
140
Fix Eve Thermo always showing as heating in homekit_controller even when off (#80019)
87,911
0
147
89
34
288,758
39
core
19
tests/components/homekit_controller/test_climate.py
Python
16
{ "docstring": "Check that we cope with buggy HEATING_COOLING_CURRENT.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/home-assistant/core.git
8
url_params_from_lookup_dict
def url_params_from_lookup_dict(lookups): params = {} if lookups and hasattr(lookups, "items"): for k, v in lookups.items(): if callable(v): v = v() if isinstance(v, (tuple, list)): v = ",".join(str(x) for x in v) elif isinstance(v, bool): v = ("0", "1")[v] else: v = str(v) params[k] = v return params
9c19aff7c7561e3a82978a272ecdaad40dda5c00
16
widgets.py
171
Refs #33476 -- Reformatted code with Black.
50,449
0
181
103
31
203,569
47
django
15
django/contrib/admin/widgets.py
Python
14
{ "docstring": "\n Convert the type of lookups specified in a ForeignKey limit_choices_to\n attribute to a dictionary of query parameters\n ", "language": "en", "n_whitespaces": 27, "n_words": 17, "vocab_size": 15 }
https://github.com/django/django.git
6
from_biadjacency_matrix
def from_biadjacency_matrix(A, create_using=None, edge_attribute="weight"): r G = nx.empty_graph(0, create_using) n, m = A.shape # Make sure we get even the isolated nodes of the graph. G.add_nodes_from(range(n), bipartite=0) G.add_nodes_from(range(n, n + m), bipartite=1) # Create an iterable over (u, v, w) triples and for each triple, add an # edge from u to v with weight w. triples = ((u, n + v, d) for (u, v, d) in _generate_weighted_edges(A)) # If the entries in the adjacency matrix are integers and the graph is a # multigraph, then create parallel edges, each with weight 1, for each # entry in the adjacency matrix. Otherwise, create one edge for each # positive entry in the adjacency matrix and set the weight of that edge to # be the entry in the matrix. if A.dtype.kind in ("i", "u") and G.is_multigraph(): chain = itertools.chain.from_iterable triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples) G.add_weighted_edges_from(triples, weight=edge_attribute) return G
8a325d26aa7fdd3a72580c4720fa97f971bbefcb
14
matrix.py
243
Use scipy.sparse array datastructure (#6037) * Use scipy.sparse array datastructure * Add reminder to rm wrapper when scipy adds creation fns. * Rm mention of np matrix from code comment. * Update networkx/algorithms/bipartite/matrix.py Co-authored-by: Stefan van der Walt <[email protected]> Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Stefan van der Walt <[email protected]>
42,335
0
223
157
93
177,315
159
networkx
27
networkx/algorithms/bipartite/matrix.py
Python
46
{ "docstring": "Creates a new bipartite graph from a biadjacency matrix given as a\n SciPy sparse array.\n\n Parameters\n ----------\n A: scipy sparse array\n A biadjacency matrix representation of a graph\n\n create_using: NetworkX graph\n Use specified graph for result. The default is Graph()\n\n edge_attribute: string\n Name of edge attribute to store matrix numeric value. The data will\n have the same type as the matrix entry (int, float, (real,imag)).\n\n Notes\n -----\n The nodes are labeled with the attribute `bipartite` set to an integer\n 0 or 1 representing membership in part 0 or part 1 of the bipartite graph.\n\n If `create_using` is an instance of :class:`networkx.MultiGraph` or\n :class:`networkx.MultiDiGraph` and the entries of `A` are of\n type :class:`int`, then this function returns a multigraph (of the same\n type as `create_using`) with parallel edges. In this case, `edge_attribute`\n will be ignored.\n\n See Also\n --------\n biadjacency_matrix\n from_numpy_array\n\n References\n ----------\n [1] https://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph\n ", "language": "en", "n_whitespaces": 236, "n_words": 143, "vocab_size": 99 }
https://github.com/networkx/networkx.git
3
_get_default_coordinate_values
def _get_default_coordinate_values(self) -> list[float | complex]: x_numbers = self.get_x_axis().get_tick_range() y_numbers = self.get_y_axis().get_tick_range() y_numbers = [complex(0, y) for y in y_numbers if y != 0] return [*x_numbers, *y_numbers]
daf23c9d1031b12d9c119b8f6b7e60727d7f9242
10
coordinate_systems.py
99
Upgraded typehints (#2429) * Future Annotations * Delete template_twitter_post.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed broken RTD Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
46,112
0
62
61
22
189,522
27
manim
11
manim/mobject/coordinate_systems.py
Python
12
{ "docstring": "Generate a list containing the numerical values of the plane's labels.\n\n Returns\n -------\n List[Union[float, complex]]\n A list of floats representing the x-axis and complex numbers representing the y-axis.\n ", "language": "en", "n_whitespaces": 67, "n_words": 28, "vocab_size": 22 }
https://github.com/ManimCommunity/manim.git
7
update
def update(self, **kwargs): for k, v in kwargs.items(): if k in self._AllowedKeys: setattr(self, k, v) else: raise AttributeError(f"{k} is an unknown keyword") for figmanager in _pylab_helpers.Gcf.figs.values(): for ax in figmanager.canvas.figure.axes: if ax.get_subplotspec() is not None: ss = ax.get_subplotspec().get_topmost_subplotspec() if ss.get_gridspec() == self: ax._set_position( ax.get_subplotspec().get_position(ax.figure))
c73f4c455514cf5422d27bf38c93250de8316b21
19
gridspec.py
188
Merge SubplotBase into AxesBase.
23,597
0
231
114
35
109,454
44
matplotlib
24
lib/matplotlib/gridspec.py
Python
13
{ "docstring": "\n Update the subplot parameters of the grid.\n\n Parameters that are not explicitly given are not changed. Setting a\n parameter to *None* resets it to :rc:`figure.subplot.*`.\n\n Parameters\n ----------\n left, right, top, bottom : float or None, optional\n Extent of the subplots as a fraction of figure width or height.\n wspace, hspace : float, optional\n Spacing between the subplots as a fraction of the average subplot\n width / height.\n ", "language": "en", "n_whitespaces": 157, "n_words": 67, "vocab_size": 45 }
https://github.com/matplotlib/matplotlib.git
1
test_valid_multiple_ops
def test_valid_multiple_ops(self) -> None: yaml_str = self.assert_success_from_gen_backend_stubs(yaml_str)
bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d
7
test_gen_backend_stubs.py
30
Revert "Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels" (#69950) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/69950 This reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa. Test Plan: Imported from OSS Reviewed By: albanD Differential Revision: D33113545 Pulled By: bdhirsh fbshipit-source-id: d6590294662588d36c09662dea65919ad4e1e288
21,493
0
21
16
7
102,178
7
pytorch
4
tools/test/test_gen_backend_stubs.py
Python
8
{ "docstring": "\\\nbackend: XLA\ncpp_namespace: torch_xla\nsupported:\n- add.Tensor\n- abs", "language": "en", "n_whitespaces": 4, "n_words": 10, "vocab_size": 9 }
https://github.com/pytorch/pytorch.git
15
meet
def meet(self, timeout_s=180): if timeout_s <= 0: raise ValueError( "The 'timeout' argument must be positive. " "Got '{}'.".format(timeout_s) ) timeout_delta = datetime.timedelta(seconds=timeout_s) elapsed = datetime.timedelta(seconds=0) start_time = datetime.datetime.now() q, s = None, None if self._store_type == "redis" or self._store_type == "ray_internal_kv": while elapsed < timeout_delta: try: # I don't quite understand why we need gloo queue actor. q = ray.get_actor("gloo_queue") s = ray.get_actor(f"gloo_{self._group_name}_signal") break except ValueError: if self._context.rank == 0: if not q: ray.remote(gloo_util.glooQueue).options( name="gloo_queue", lifetime="detached" ).remote(1000) if not s: gloo_util.SignalActor.options( name=f"gloo_{self._group_name}_signal", lifetime="detached", ).remote(self._context.size) else: time.sleep(0.1) elapsed = datetime.datetime.now() - start_time if not q: raise RuntimeError("Unable to get gloo_queue.") if self._context.rank == 0: ray.get(q.put_nowait.remote(self._group_name)) while ray.get(q.index.remote(self._group_name)): time.sleep(0.1) self._context.connectFullMesh(self._store, self._device) ray.get(s.send.remote(self._context.rank)) if self._context.rank == 0: ray.get(s.wait.remote()) keys = [] keys += [f"rank_{i}" for i in range(self._context.size)] keys += [f"{i}" for i in range(self._context.size)] self._store.delKeys(keys) group_name = ray.get(q.get_nowait.remote()) assert group_name == self._group_name ray.kill(s)
905258dbc19753c81039f993477e7ab027960729
24
gloo_collective_group.py
631
Clean up docstyle in python modules and add LINT rule (#25272)
31,999
0
913
372
96
140,528
141
ray
46
python/ray/util/collective/collective_group/gloo_collective_group.py
Python
47
{ "docstring": "Meet at the named actor store.\n\n Args:\n timeout_s: timeout in seconds.\n\n Return:\n None\n ", "language": "en", "n_whitespaces": 56, "n_words": 13, "vocab_size": 13 }
https://github.com/ray-project/ray.git
1
test_vocolinc_vp3_setup
async def test_vocolinc_vp3_setup(hass): entity_registry = er.async_get(hass) outlet = entity_registry.async_get_or_create( "switch", "homekit_controller", "homekit-EU0121203xxxxx07-48", suggested_object_id="original_vocolinc_vp3_outlet", ) sensor = entity_registry.async_get_or_create( "sensor", "homekit_controller", "homekit-EU0121203xxxxx07-aid:1-sid:48-cid:97", suggested_object_id="original_vocolinc_vp3_power", ) accessories = await setup_accessories_from_file(hass, "vocolinc_vp3.json") await setup_test_accessories(hass, accessories) await assert_devices_and_entities_created( hass, DeviceTestInfo( unique_id=HUB_TEST_ACCESSORY_ID, name="VOCOlinc-VP3-123456", model="VP3", manufacturer="VOCOlinc", sw_version="1.101.2", hw_version="1.0.3", serial_number="EU0121203xxxxx07", devices=[], entities=[ EntityTestInfo( entity_id="switch.original_vocolinc_vp3_outlet", friendly_name="VOCOlinc-VP3-123456 Outlet", unique_id="00:00:00:00:00:00_1_48", state="on", ), EntityTestInfo( entity_id="sensor.original_vocolinc_vp3_power", friendly_name="VOCOlinc-VP3-123456 Power", unique_id="00:00:00:00:00:00_1_48_97", unit_of_measurement=POWER_WATT, capabilities={"state_class": SensorStateClass.MEASUREMENT}, state="0", ), ], ), ) assert ( entity_registry.async_get(outlet.entity_id).unique_id == "00:00:00:00:00:00_1_48" ) assert ( entity_registry.async_get(sensor.entity_id).unique_id == "00:00:00:00:00:00_1_48_97" )
f23b1750e85f07091eb896a0b12b8f95e5646338
18
test_vocolinc_vp3.py
332
Migrate HomeKit Controller to use stable identifiers (#80064)
88,035
0
582
196
57
288,886
75
core
33
tests/components/homekit_controller/specific_devices/test_vocolinc_vp3.py
Python
53
{ "docstring": "Test that a VOCOlinc VP3 can be correctly setup in HA.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/home-assistant/core.git