Dataset columns (name: dtype, observed range of values or string lengths):

complexity: int64, 1 to 139
fun_name: string, length 1 to 80
code: string, length 101 to 62.2k
commit_id: string, length 40 (fixed)
ast_errors: string, length 0 to 3.11k
ast_levels: int64, 6 to 36
file_name: string, length 5 to 79
n_ast_nodes: int64, 17 to 19.2k
commit_message: string, length 3 to 15.3k
d_id: int64, 12 to 121k
n_ast_errors: int64, 0 to 9
n_whitespaces: int64, 4 to 10.8k
token_counts: int64, 5 to 3.06k
vocab_size: int64, 4 to 1.11k
id: int64, 20 to 338k
n_words: int64, 4 to 4.82k
repo: string, length 3 to 22
n_identifiers: int64, 2 to 176
path: string, length 7 to 134
language: string, 1 distinct class
nloc: int64, 1 to 413
documentation: dict
url: string, length 31 to 59
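The sample records that follow use exactly these column names. As a minimal, hypothetical sketch (the field names come from the schema above, while the JSON Lines file name and loading approach are assumptions made only for illustration), one such record could be represented and iterated over in Python like this:

```python
# Hypothetical sketch: the keys mirror the dataset columns listed above.
# "samples.jsonl" and the JSON Lines layout are assumptions for illustration,
# not something stated by this dump.
import json
from typing import Any, Dict, Iterator, TypedDict


class CodeSample(TypedDict, total=False):
    complexity: int
    fun_name: str
    code: str
    commit_id: str
    ast_errors: str
    ast_levels: int
    file_name: str
    n_ast_nodes: int
    commit_message: str
    d_id: int
    n_ast_errors: int
    n_whitespaces: int
    token_counts: int
    vocab_size: int
    id: int
    n_words: int
    repo: str
    n_identifiers: int
    path: str
    language: str
    nloc: int
    documentation: Dict[str, Any]
    url: str


def iter_samples(jsonl_path: str) -> Iterator[CodeSample]:
    # Yield one record per non-empty line of a JSON Lines file.
    with open(jsonl_path, encoding="utf-8") as handle:
        for line in handle:
            if line.strip():
                record: CodeSample = json.loads(line)
                yield record


if __name__ == "__main__":
    for sample in iter_samples("samples.jsonl"):
        # Print a short, human-readable summary of each record.
        print(f"{sample['repo']}:{sample['path']} -> "
              f"{sample['fun_name']} (complexity {sample['complexity']})")
```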
complexity: 1 | fun_name: test_check_perms_set_owner_test_true
def test_check_perms_set_owner_test_true(test_file): expected = { "comment": "", "changes": {"owner": "Backup Operators"}, "name": str(test_file), "result": None, } with patch.dict(win_dacl.__opts__, {"test": True}): result = win_file.check_perms( path=str(test_file), owner="Backup Operators", inheritance=None ) assert result == expected
commit_id: 5550d1823e9cb571740ae9e57b25424cfe6a919e | ast_levels: 13 | file_name: test_check_perms.py | n_ast_nodes: 132
commit_message: Add changelong
d_id: 54,577 | n_ast_errors: 0 | n_whitespaces: 104 | token_counts: 74 | vocab_size: 29 | id: 216,411 | n_words: 32
repo: salt | n_identifiers: 14 | path: tests/pytests/functional/modules/win_file/test_check_perms.py | language: Python | nloc: 12
{ "docstring": "\n Test setting the owner of a file with test=True\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
https://github.com/saltstack/salt.git
complexity: 3 | fun_name: _get_comm_key_send_recv
def _get_comm_key_send_recv(my_rank, my_gpu_idx, peer_rank, peer_gpu_idx): if my_rank < peer_rank: lower_key = str(my_rank) + "_" + str(my_gpu_idx) higher_key = str(peer_rank) + "_" + str(peer_gpu_idx) elif my_rank > peer_rank: lower_key = str(peer_rank) + "_" + str(peer_gpu_idx) higher_key = str(my_rank) + "_" + str(my_gpu_idx) else: raise RuntimeError( "Send and recv happens on the same process. ray.util.collective " "does not support this case as of now. Alternatively, consider " "doing GPU to GPU memcpy?" ) comm_key = lower_key + ":" + higher_key return comm_key
commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ast_levels: 13 | file_name: nccl_collective_group.py | n_ast_nodes: 164
commit_message: [CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
d_id: 29,912 | n_ast_errors: 0 | n_whitespaces: 173 | token_counts: 92 | vocab_size: 51 | id: 133,004 | n_words: 80
repo: ray | n_identifiers: 10 | path: python/ray/util/collective/collective_group/nccl_collective_group.py | language: Python | nloc: 15
{ "docstring": "Return a key given source and destination ranks for p2p tasks.\n\n The p2p key is in the following form:\n [min_rank]_[gpu_index]:[max_rank]_[gpu_index].\n\n Args:\n my_rank (int): the rank of the source process.\n my_gpu_idx (int): the source gpu index on the process.\n peer_rank (int): the rank of the destination process.\n peer_gpu_idx (int): the destination gpu index on the process.\n\n Returns:\n comm_key (str): a string key to query the communication cache.\n ", "language": "en", "n_whitespaces": 128, "n_words": 66, "vocab_size": 38 }
https://github.com/ray-project/ray.git
complexity: 1 | fun_name: update_last_login
def update_last_login(sender, user, **kwargs): user.last_login = timezone.now() user.save(update_fields=["last_login"])
commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | ast_levels: 10 | file_name: models.py | n_ast_nodes: 52
commit_message: Refs #33476 -- Reformatted code with Black.
d_id: 50,509 | n_ast_errors: 0 | n_whitespaces: 17 | token_counts: 30 | vocab_size: 8 | id: 203,684 | n_words: 8
repo: django | n_identifiers: 9 | path: django/contrib/auth/models.py | language: Python | nloc: 3
{ "docstring": "\n A signal receiver which updates the last_login date for\n the user logging in.\n ", "language": "en", "n_whitespaces": 23, "n_words": 13, "vocab_size": 12 }
https://github.com/django/django.git
complexity: 1 | fun_name: test__create_document_index_wrong_mapping_raises
def test__create_document_index_wrong_mapping_raises(self, mocked_document_store, index): mocked_document_store.search_fields = ["age"] mocked_document_store.client.indices.exists.return_value = True mocked_document_store.client.indices.get.return_value = {self.index_name: index} with pytest.raises(Exception, match=f"The search_field 'age' of index '{self.index_name}' with type 'integer'"): mocked_document_store._create_document_index(self.index_name)
commit_id: e7627c3f8b241654b61f8523479c81f855102f0a | ast_levels: 13 | file_name: test_opensearch.py | n_ast_nodes: 115
commit_message:
Use opensearch-py in OpenSearchDocumentStore (#2691) * add Opensearch extras * let OpenSearchDocumentStore use opensearch-py * Update Documentation & Code Style * fix a bug found after adding tests Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Sara Zan <[email protected]>
d_id: 75,113 | n_ast_errors: 0 | n_whitespaces: 72 | token_counts: 66 | vocab_size: 23 | id: 257,683 | n_words: 26
repo: haystack | n_identifiers: 16 | path: test/document_stores/test_opensearch.py | language: Python | nloc: 6
{ "docstring": "\n Ensure the method raises if we specify a field in `search_fields` that's not text\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 14 }
https://github.com/deepset-ai/haystack.git
complexity: 4 | fun_name: plot
def plot(self, *args, scalex=True, scaley=True, data=None, **kwargs): kwargs = cbook.normalize_kwargs(kwargs, mlines.Line2D) lines = [*self._get_lines(*args, data=data, **kwargs)] for line in lines: self.add_line(line) if scalex: self._request_autoscale_view("x") if scaley: self._request_autoscale_view("y") return lines
commit_id: 7c6c5f6215b40a27cfefb7bf21246299fd9b3a1e | ast_levels: 11 | file_name: _axes.py | n_ast_nodes: 137
commit_message: Fix removed cross-references
d_id: 23,109 | n_ast_errors: 0 | n_whitespaces: 111 | token_counts: 86 | vocab_size: 26 | id: 108,228 | n_words: 29
repo: matplotlib | n_identifiers: 16 | path: lib/matplotlib/axes/_axes.py | language: Python | nloc: 10
{ "docstring": "\n Plot y versus x as lines and/or markers.\n\n Call signatures::\n\n plot([x], y, [fmt], *, data=None, **kwargs)\n plot([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)\n\n The coordinates of the points or line nodes are given by *x*, *y*.\n\n The optional parameter *fmt* is a convenient way for defining basic\n formatting like color, marker and linestyle. It's a shortcut string\n notation described in the *Notes* section below.\n\n >>> plot(x, y) # plot x and y using default line style and color\n >>> plot(x, y, 'bo') # plot x and y using blue circle markers\n >>> plot(y) # plot y using x as index array 0..N-1\n >>> plot(y, 'r+') # ditto, but with red plusses\n\n You can use `.Line2D` properties as keyword arguments for more\n control on the appearance. Line properties and *fmt* can be mixed.\n The following two calls yield identical results:\n\n >>> plot(x, y, 'go--', linewidth=2, markersize=12)\n >>> plot(x, y, color='green', marker='o', linestyle='dashed',\n ... linewidth=2, markersize=12)\n\n When conflicting with *fmt*, keyword arguments take precedence.\n\n\n **Plotting labelled data**\n\n There's a convenient way for plotting objects with labelled data (i.e.\n data that can be accessed by index ``obj['y']``). Instead of giving\n the data in *x* and *y*, you can provide the object in the *data*\n parameter and just give the labels for *x* and *y*::\n\n >>> plot('xlabel', 'ylabel', data=obj)\n\n All indexable objects are supported. This could e.g. be a `dict`, a\n `pandas.DataFrame` or a structured numpy array.\n\n\n **Plotting multiple sets of data**\n\n There are various ways to plot multiple sets of data.\n\n - The most straight forward way is just to call `plot` multiple times.\n Example:\n\n >>> plot(x1, y1, 'bo')\n >>> plot(x2, y2, 'go')\n\n - If *x* and/or *y* are 2D arrays a separate data set will be drawn\n for every column. If both *x* and *y* are 2D, they must have the\n same shape. If only one of them is 2D with shape (N, m) the other\n must have length N and will be used for every data set m.\n\n Example:\n\n >>> x = [1, 2, 3]\n >>> y = np.array([[1, 2], [3, 4], [5, 6]])\n >>> plot(x, y)\n\n is equivalent to:\n\n >>> for col in range(y.shape[1]):\n ... plot(x, y[:, col])\n\n - The third way is to specify multiple sets of *[x]*, *y*, *[fmt]*\n groups::\n\n >>> plot(x1, y1, 'g^', x2, y2, 'g-')\n\n In this case, any additional keyword argument applies to all\n datasets. Also this syntax cannot be combined with the *data*\n parameter.\n\n By default, each line is assigned a different style specified by a\n 'style cycle'. The *fmt* and line property parameters are only\n necessary if you want explicit deviations from these defaults.\n Alternatively, you can also change the style cycle using\n :rc:`axes.prop_cycle`.\n\n\n Parameters\n ----------\n x, y : array-like or scalar\n The horizontal / vertical coordinates of the data points.\n *x* values are optional and default to ``range(len(y))``.\n\n Commonly, these parameters are 1D arrays.\n\n They can also be scalars, or two-dimensional (in that case, the\n columns represent separate data sets).\n\n These arguments cannot be passed as keywords.\n\n fmt : str, optional\n A format string, e.g. 'ro' for red circles. See the *Notes*\n section for a full description of the format strings.\n\n Format strings are just an abbreviation for quickly setting\n basic line properties. 
All of these and more can also be\n controlled by keyword arguments.\n\n This argument cannot be passed as keyword.\n\n data : indexable object, optional\n An object with labelled data. If given, provide the label names to\n plot in *x* and *y*.\n\n .. note::\n Technically there's a slight ambiguity in calls where the\n second label is a valid *fmt*. ``plot('n', 'o', data=obj)``\n could be ``plt(x, y)`` or ``plt(y, fmt)``. In such cases,\n the former interpretation is chosen, but a warning is issued.\n You may suppress the warning by adding an empty format string\n ``plot('n', 'o', '', data=obj)``.\n\n Returns\n -------\n list of `.Line2D`\n A list of lines representing the plotted data.\n\n Other Parameters\n ----------------\n scalex, scaley : bool, default: True\n These parameters determine if the view limits are adapted to the\n data limits. The values are passed on to\n `~.axes.Axes.autoscale_view`.\n\n **kwargs : `.Line2D` properties, optional\n *kwargs* are used to specify properties like a line label (for\n auto legends), linewidth, antialiasing, marker face color.\n Example::\n\n >>> plot([1, 2, 3], [1, 2, 3], 'go-', label='line 1', linewidth=2)\n >>> plot([1, 2, 3], [1, 4, 9], 'rs', label='line 2')\n\n If you specify multiple lines with one plot call, the kwargs apply\n to all those lines. In case the label object is iterable, each\n element is used as labels for each set of data.\n\n Here is a list of available `.Line2D` properties:\n\n %(Line2D:kwdoc)s\n\n See Also\n --------\n scatter : XY scatter plot with markers of varying size and/or color (\n sometimes also called bubble chart).\n\n Notes\n -----\n **Format Strings**\n\n A format string consists of a part for color, marker and line::\n\n fmt = '[marker][line][color]'\n\n Each of them is optional. If not provided, the value from the style\n cycle is used. 
Exception: If ``line`` is given, but no ``marker``,\n the data will be a line without markers.\n\n Other combinations such as ``[color][marker][line]`` are also\n supported, but note that their parsing may be ambiguous.\n\n **Markers**\n\n ============= ===============================\n character description\n ============= ===============================\n ``'.'`` point marker\n ``','`` pixel marker\n ``'o'`` circle marker\n ``'v'`` triangle_down marker\n ``'^'`` triangle_up marker\n ``'<'`` triangle_left marker\n ``'>'`` triangle_right marker\n ``'1'`` tri_down marker\n ``'2'`` tri_up marker\n ``'3'`` tri_left marker\n ``'4'`` tri_right marker\n ``'8'`` octagon marker\n ``'s'`` square marker\n ``'p'`` pentagon marker\n ``'P'`` plus (filled) marker\n ``'*'`` star marker\n ``'h'`` hexagon1 marker\n ``'H'`` hexagon2 marker\n ``'+'`` plus marker\n ``'x'`` x marker\n ``'X'`` x (filled) marker\n ``'D'`` diamond marker\n ``'d'`` thin_diamond marker\n ``'|'`` vline marker\n ``'_'`` hline marker\n ============= ===============================\n\n **Line Styles**\n\n ============= ===============================\n character description\n ============= ===============================\n ``'-'`` solid line style\n ``'--'`` dashed line style\n ``'-.'`` dash-dot line style\n ``':'`` dotted line style\n ============= ===============================\n\n Example format strings::\n\n 'b' # blue markers with default shape\n 'or' # red circles\n '-g' # green solid line\n '--' # dashed line with default color\n '^k:' # black triangle_up markers connected by a dotted line\n\n **Colors**\n\n The supported color abbreviations are the single letter codes\n\n ============= ===============================\n character color\n ============= ===============================\n ``'b'`` blue\n ``'g'`` green\n ``'r'`` red\n ``'c'`` cyan\n ``'m'`` magenta\n ``'y'`` yellow\n ``'k'`` black\n ``'w'`` white\n ============= ===============================\n\n and the ``'CN'`` colors that index into the default property cycle.\n\n If the color is the only part of the format string, you can\n additionally use any `matplotlib.colors` spec, e.g. full names\n (``'green'``) or hex strings (``'#008000'``).\n ", "language": "en", "n_whitespaces": 2954, "n_words": 1065, "vocab_size": 527 }
https://github.com/matplotlib/matplotlib.git
complexity: 1 | fun_name: start
async def start(self, fn, *args): raise RuntimeError("`GatherTaskGroup` does not support `start`.")
commit_id: 5c53517aee853a5262fd2373a4fae5a15ad72da4 | ast_levels: 8 | file_name: asyncio.py | n_ast_nodes: 30
commit_message: Wait for task run futures concurrently
d_id: 11,051 | n_ast_errors: 0 | n_whitespaces: 25 | token_counts: 16 | vocab_size: 11 | id: 54,410 | n_words: 11
repo: prefect | n_identifiers: 5 | path: src/prefect/utilities/asyncio.py | language: Python | nloc: 2
{ "docstring": "\n Since `start` returns the result of `task_status.started()` but here we must\n return the key instead, we just won't support this method for now.\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 21 }
https://github.com/PrefectHQ/prefect.git
complexity: 2 | fun_name: test_unimplemented_dtypes_table_columns
def test_unimplemented_dtypes_table_columns(setup_path): with ensure_clean_store(setup_path) as store: dtypes = [("date", datetime.date(2001, 1, 2))] # currently not supported dtypes #### for n, f in dtypes: df = tm.makeDataFrame() df[n] = f msg = re.escape(f"[{n}] is not implemented as a table column") with pytest.raises(TypeError, match=msg): store.append(f"df1_{n}", df) # frame df = tm.makeDataFrame() df["obj1"] = "foo" df["obj2"] = "bar" df["datetime1"] = datetime.date(2001, 1, 2) df = df._consolidate() with ensure_clean_store(setup_path) as store: # this fails because we have a date in the object block...... msg = re.escape( ) with pytest.raises(TypeError, match=msg): store.append("df_unimplemented", df)
commit_id: de6a5cc21e59e61733be1b7f8ae7d3d4e15374f2 | ast_levels: 15 | file_name: test_errors.py | n_ast_nodes: 281
commit_message: REF: remove NDFrame._convert (#50026)
d_id: 40,719 | n_ast_errors: 0 | n_whitespaces: 242 | token_counts: 158 | vocab_size: 56 | id: 171,781 | n_words: 88
repo: pandas | n_identifiers: 21 | path: pandas/tests/io/pytables/test_errors.py | language: Python | nloc: 21
{ "docstring": "Cannot serialize the column [datetime1]\nbecause its data contents are not [string] but [date] object dtype", "language": "en", "n_whitespaces": 14, "n_words": 16, "vocab_size": 16 }
https://github.com/pandas-dev/pandas.git
complexity: 4 | fun_name: record_setattr
def record_setattr(context, builder, sig, args, attr): typ, valty = sig.args target, val = args context.sentry_record_alignment(typ, attr) offset = typ.offset(attr) elemty = typ.typeof(attr) if isinstance(elemty, types.NestedArray): # TODO: assert both sides are arrays dptr = cgutils.get_record_member(builder, target, offset, context.get_data_type(elemty)) dataval_ptr = context.data_model_manager[elemty].get(builder, val, 'data') dataval_ptr = builder.bitcast(dataval_ptr, context.get_data_type(elemty).as_pointer()) align = None if typ.aligned else 1 dataval = builder.load(dataval_ptr) builder.store(dataval, dptr, align=align) else: dptr = cgutils.get_record_member(builder, target, offset, context.get_data_type(elemty)) val = context.cast(builder, val, valty, elemty) align = None if typ.aligned else 1 context.pack_value(builder, elemty, val, dptr, align=align) @lower_builtin('static_getitem', types.Record, types.StringLiteral)
commit_id: 0d41a799678a9ad7cd77f1e19201d570bc40fdf8 | ast_errors: @lower_builtin('static_getitem', types.Record, types.StringLiteral) | ast_levels: 14 | file_name: arrayobj.py | n_ast_nodes: 325
commit_message: ugly poc
d_id: 39,113 | n_ast_errors: 1 | n_whitespaces: 260 | token_counts: 204 | vocab_size: 58 | id: 161,968 | n_words: 88
repo: numba | n_identifiers: 36 | path: numba/np/arrayobj.py | language: Python | nloc: 20
{ "docstring": "\n Generic setattr() implementation for records: set the given\n record member, i.e. a scalar.\n ", "language": "en", "n_whitespaces": 23, "n_words": 13, "vocab_size": 13 }
https://github.com/numba/numba.git
complexity: 2 | fun_name: getvalue
def getvalue(self): if callable(getattr(self.stream, "getvalue", None)): return self.stream.getvalue()
commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | ast_levels: 10 | file_name: base.py | n_ast_nodes: 50
commit_message: Refs #33476 -- Reformatted code with Black.
d_id: 50,864 | n_ast_errors: 0 | n_whitespaces: 33 | token_counts: 29 | vocab_size: 8 | id: 204,736 | n_words: 8
repo: django | n_identifiers: 5 | path: django/core/serializers/base.py | language: Python | nloc: 3
{ "docstring": "\n Return the fully serialized queryset (or None if the output stream is\n not seekable).\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
https://github.com/django/django.git
complexity: 2 | fun_name: itermonthdates
def itermonthdates(self, year, month): for y, m, d in self.itermonthdays3(year, month): yield datetime.date(y, m, d)
commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526 | ast_levels: 10 | file_name: calendar.py | n_ast_nodes: 55
commit_message: add python 3.10.4 for windows
d_id: 56,274 | n_ast_errors: 0 | n_whitespaces: 40 | token_counts: 37 | vocab_size: 13 | id: 221,217 | n_words: 15
repo: XX-Net | n_identifiers: 10 | path: python3.10.4/Lib/calendar.py | language: Python | nloc: 3
{ "docstring": "\n Return an iterator for one month. The iterator will yield datetime.date\n values and will always iterate through complete weeks, so it will yield\n dates outside the specified month.\n ", "language": "en", "n_whitespaces": 57, "n_words": 28, "vocab_size": 23 }
https://github.com/XX-net/XX-Net.git
complexity: 1 | fun_name: test_lazy_load
def test_lazy_load(self): with self.assertNumQueries(1): # Get the instance. The StreamField should *not* load the image yet instance = StreamModel.objects.get(pk=self.with_image.pk) with self.assertNumQueries(0): # Access the body. The StreamField should still not get the image. body = instance.body with self.assertNumQueries(1): # Access the image item from the stream. The image is fetched now body[0].value with self.assertNumQueries(0): # Everything has been fetched now, no further database queries. self.assertEqual(body[0].value, self.image) self.assertEqual(body[1].value, "foo")
commit_id: d10f15e55806c6944827d801cd9c2d53f5da4186 | ast_levels: 13 | file_name: test_streamfield.py | n_ast_nodes: 162
commit_message: Reformat with black
d_id: 16,262 | n_ast_errors: 0 | n_whitespaces: 202 | token_counts: 93 | vocab_size: 46 | id: 74,545 | n_words: 68
repo: wagtail | n_identifiers: 13 | path: wagtail/core/tests/test_streamfield.py | language: Python | nloc: 10
{ "docstring": "\n Getting a single item should lazily load the StreamField, only\n accessing the database once the StreamField is accessed\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 16 }
https://github.com/wagtail/wagtail.git
complexity: 1 | fun_name: add_print_formats
def add_print_formats(): frappe.reload_doc("regional", "print_format", "detailed_tax_invoice") frappe.reload_doc("regional", "print_format", "simplified_tax_invoice") frappe.reload_doc("regional", "print_format", "tax_invoice") frappe.db.sql( )
commit_id: 494bd9ef78313436f0424b918f200dab8fc7c20b | ast_levels: 8 | file_name: setup.py | n_ast_nodes: 83
commit_message: style: format code with black
d_id: 14,487 | n_ast_errors: 0 | n_whitespaces: 7 | token_counts: 42 | vocab_size: 9 | id: 67,308 | n_words: 13
repo: erpnext | n_identifiers: 5 | path: erpnext/regional/united_arab_emirates/setup.py | language: Python | nloc: 8
{ "docstring": " update `tabPrint Format` set disabled = 0 where\n\t\tname in('Simplified Tax Invoice', 'Detailed Tax Invoice', 'Tax Invoice') ", "language": "en", "n_whitespaces": 17, "n_words": 17, "vocab_size": 15 }
https://github.com/frappe/erpnext.git
complexity: 2 | fun_name: mixin_essential_parser
def mixin_essential_parser(parser): gp = add_arg_group(parser, title='Essential') gp.add_argument( '--name', type=str, help=, ) gp.add_argument( '--workspace', type=str, default=None, help='The working directory for any IO operations in this object. ' 'If not set, then derive from its parent `workspace`.', ) from jina import __resources_path__ gp.add_argument( '--log-config', type=str, default=os.path.join(__resources_path__, 'logging.default.yml'), help='The YAML config of the logger used in this object.', ) gp.add_argument( '--quiet', action='store_true', default=False, help='If set, then no log will be emitted from this object.', ) gp.add_argument( '--quiet-error', action='store_true', default=False, help='If set, then exception stack information will not be added to the log', ) gp.add_argument( '--workspace-id', type=str, default=random_identity(), help='the UUID for identifying the workspace. When not given a random id will be assigned.' 'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same ' '`workspace-id`.' if _SHOW_ALL_ARGS else argparse.SUPPRESS, )
commit_id: a3b71c7208b3cd48aa7bc978c3343a074947e3d9 | ast_levels: 11 | file_name: base.py | n_ast_nodes: 254
commit_message: fix(parsers): clearify flow args (#4701)
d_id: 2,214 | n_ast_errors: 0 | n_whitespaces: 370 | token_counts: 150 | vocab_size: 87 | id: 12,206 | n_words: 129
repo: jina | n_identifiers: 20 | path: jina/parsers/orchestrate/base.py | language: Python | nloc: 53
{ "docstring": "Mixing in arguments required by every module into the given parser.\n :param parser: the parser instance to which we add arguments\n \n The name of this object.\n\n This will be used in the following places:\n - how you refer to this object in Python/YAML/CLI\n - visualization\n - log message header\n - ...\n\n When not given, then the default naming strategy will apply.\n ", "language": "en", "n_whitespaces": 112, "n_words": 61, "vocab_size": 49 }
https://github.com/jina-ai/jina.git
complexity: 2 | fun_name: set_collection_path_collation
def set_collection_path_collation(apps, schema_editor): if schema_editor.connection.vendor == "postgresql": schema_editor.execute( )
commit_id: d10f15e55806c6944827d801cd9c2d53f5da4186 | ast_levels: 10 | file_name: 0027_fix_collection_path_collation.py | n_ast_nodes: 43
commit_message: Reformat with black
d_id: 16,099 | n_ast_errors: 0 | n_whitespaces: 41 | token_counts: 23 | vocab_size: 9 | id: 73,767 | n_words: 9
repo: wagtail | n_identifiers: 6 | path: wagtail/core/migrations/0027_fix_collection_path_collation.py | language: Python | nloc: 7
{ "docstring": "\n Treebeard's path comparison logic can fail on certain locales such as sk_SK, which\n sort numbers after letters. To avoid this, we explicitly set the collation for the\n 'path' column to the (non-locale-specific) 'C' collation.\n\n See: https://groups.google.com/d/msg/wagtail/q0leyuCnYWI/I9uDvVlyBAAJ\n \n ALTER TABLE wagtailcore_collection ALTER COLUMN path TYPE VARCHAR(255) COLLATE \"C\"\n ", "language": "en", "n_whitespaces": 81, "n_words": 46, "vocab_size": 42 }
https://github.com/wagtail/wagtail.git
complexity: 2 | fun_name: test_invalid_logo_upload
def test_invalid_logo_upload(self) -> None: for fname in self.corrupt_files: with self.subTest(fname=fname): self.login("iago") with get_test_image_file(fname) as fp: result = self.client_post( "/json/realm/logo", {"file": fp, "night": orjson.dumps(self.night).decode()} ) self.assert_json_error( result, "Could not decode image; did you upload an image file?" )
commit_id: b729f00fc289a731c56d7f6b4afcca2878e9309e | ast_levels: 21 | file_name: test_upload.py | n_ast_nodes: 136
commit_message: test_upload: Uncomment subTest contexts. Signed-off-by: Anders Kaseorg <[email protected]>
d_id: 17,597 | n_ast_errors: 0 | n_whitespaces: 202 | token_counts: 76 | vocab_size: 35 | id: 83,125 | n_words: 37
repo: zulip | n_identifiers: 15 | path: zerver/tests/test_upload.py | language: Python | nloc: 14
{ "docstring": "\n A PUT request to /json/realm/logo with an invalid file should fail.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
https://github.com/zulip/zulip.git
complexity: 1 | fun_name: feature_engineering_standard
def feature_engineering_standard(self, dataframe, **kwargs): dataframe["%-day_of_week"] = dataframe["date"].dt.dayofweek dataframe["%-hour_of_day"] = dataframe["date"].dt.hour return dataframe
commit_id: c2936d551b8ad6ccf7b57e2ac6cb55d8550622cf | ast_levels: 10 | file_name: FreqaiExampleStrategy.py | n_ast_nodes: 68
commit_message: improve doc, update test strats, change function names
d_id: 35,167 | n_ast_errors: 0 | n_whitespaces: 40 | token_counts: 39 | vocab_size: 11 | id: 151,925 | n_words: 12
repo: freqtrade | n_identifiers: 7 | path: freqtrade/templates/FreqaiExampleStrategy.py | language: Python | nloc: 4
{ "docstring": "\n *Only functional with FreqAI enabled strategies*\n This optional function will be called once with the dataframe of the base timeframe.\n This is the final function to be called, which means that the dataframe entering this\n function will contain all the features and columns created by all other\n freqai_feature_engineering_* functions.\n\n This function is a good place to do custom exotic feature extractions (e.g. tsfresh).\n This function is a good place for any feature that should not be auto-expanded upon\n (e.g. day of the week).\n\n All features must be prepended with `%` to be recognized by FreqAI internals.\n\n More details about feature engineering available:\n\n https://www.freqtrade.io/en/latest/freqai-feature-engineering\n\n :param df: strategy dataframe which will receive the features\n usage example: dataframe[\"%-day_of_week\"] = (dataframe[\"date\"].dt.dayofweek + 1) / 7\n ", "language": "en", "n_whitespaces": 220, "n_words": 121, "vocab_size": 80 }
https://github.com/freqtrade/freqtrade.git
complexity: 8 | fun_name: test_pack
def test_pack(self): if self.sfx not in ("b16", "b32", "b64"): return # create the vectors data = self._data() rdata = self._data(reverse=True) vdata = self._load_b(data) vrdata = self._load_b(rdata) pack_simd = getattr(self.npyv, f"pack_b8_{self.sfx}") # for scalar execution, concatenate the elements of the multiple lists # into a single list (spack) and then iterate over the elements of # the created list applying a mask to capture the first byte of them. if self.sfx == "b16": spack = [(i & 0xFF) for i in (list(rdata) + list(data))] vpack = pack_simd(vrdata, vdata) elif self.sfx == "b32": spack = [(i & 0xFF) for i in (2*list(rdata) + 2*list(data))] vpack = pack_simd(vrdata, vrdata, vdata, vdata) elif self.sfx == "b64": spack = [(i & 0xFF) for i in (4*list(rdata) + 4*list(data))] vpack = pack_simd(vrdata, vrdata, vrdata, vrdata, vdata, vdata, vdata, vdata) assert vpack == spack
commit_id: 1134be2ca463d3f5d788c4c7b1dcf3ef1f9e3d33 | ast_levels: 16 | file_name: test_simd.py | n_ast_nodes: 336
commit_message: SIMD: Use universal intrinsics to implement comparison functions
d_id: 38,613 | n_ast_errors: 0 | n_whitespaces: 352 | token_counts: 207 | vocab_size: 74 | id: 160,369 | n_words: 137
repo: numpy | n_identifiers: 17 | path: numpy/core/tests/test_simd.py | language: Python | nloc: 19
{ "docstring": "\n Pack multiple vectors into one\n Test intrinsics:\n npyv_pack_b8_b16\n npyv_pack_b8_b32\n npyv_pack_b8_b64\n ", "language": "en", "n_whitespaces": 65, "n_words": 10, "vocab_size": 10 }
https://github.com/numpy/numpy.git
complexity: 4 | fun_name: valid_color_configuration
def valid_color_configuration(config): deprecated = {CONF_COLOR_TEMP, CONF_HS, CONF_RGB, CONF_XY} if config[CONF_COLOR_MODE] and any(config.get(key) for key in deprecated): raise vol.Invalid(f"color_mode must not be combined with any of {deprecated}") return config _PLATFORM_SCHEMA_BASE = ( MQTT_RW_SCHEMA.extend( { vol.Optional(CONF_BRIGHTNESS, default=DEFAULT_BRIGHTNESS): cv.boolean, vol.Optional( CONF_BRIGHTNESS_SCALE, default=DEFAULT_BRIGHTNESS_SCALE ): vol.All(vol.Coerce(int), vol.Range(min=1)), vol.Inclusive( CONF_COLOR_MODE, "color_mode", default=DEFAULT_COLOR_MODE ): cv.boolean, vol.Optional(CONF_COLOR_TEMP, default=DEFAULT_COLOR_TEMP): cv.boolean, vol.Optional(CONF_EFFECT, default=DEFAULT_EFFECT): cv.boolean, vol.Optional(CONF_EFFECT_LIST): vol.All(cv.ensure_list, [cv.string]), vol.Optional( CONF_FLASH_TIME_LONG, default=DEFAULT_FLASH_TIME_LONG ): cv.positive_int, vol.Optional( CONF_FLASH_TIME_SHORT, default=DEFAULT_FLASH_TIME_SHORT ): cv.positive_int, vol.Optional(CONF_HS, default=DEFAULT_HS): cv.boolean, vol.Optional(CONF_MAX_MIREDS): cv.positive_int, vol.Optional(CONF_MIN_MIREDS): cv.positive_int, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean, vol.Optional(CONF_QOS, default=DEFAULT_QOS): vol.All( vol.Coerce(int), vol.In([0, 1, 2]) ), vol.Optional(CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean, vol.Optional(CONF_RGB, default=DEFAULT_RGB): cv.boolean, vol.Optional(CONF_STATE_TOPIC): valid_subscribe_topic, vol.Inclusive(CONF_SUPPORTED_COLOR_MODES, "color_mode"): vol.All( cv.ensure_list, [vol.In(VALID_COLOR_MODES)], vol.Unique(), valid_supported_color_modes, ), vol.Optional(CONF_XY, default=DEFAULT_XY): cv.boolean, }, ) .extend(MQTT_ENTITY_COMMON_SCHEMA.schema) .extend(MQTT_LIGHT_SCHEMA_SCHEMA.schema) ) # Configuring MQTT Lights under the light platform key is deprecated in HA Core 2022.6 PLATFORM_SCHEMA_JSON = vol.All( cv.PLATFORM_SCHEMA.extend(_PLATFORM_SCHEMA_BASE.schema), valid_color_configuration, ) DISCOVERY_SCHEMA_JSON = vol.All( # CONF_WHITE_VALUE is no longer supported, support was removed in 2022.9 cv.removed(CONF_WHITE_VALUE), _PLATFORM_SCHEMA_BASE.extend({}, extra=vol.REMOVE_EXTRA), valid_color_configuration, ) PLATFORM_SCHEMA_MODERN_JSON = vol.All( _PLATFORM_SCHEMA_BASE, valid_color_configuration, )
commit_id: 73001e29ff22f7a5008a0b0a0e058aae22771d16 | ast_levels: 18 | file_name: schema_json.py | n_ast_nodes: 694
commit_message:
Remove deprecated white_value support from MQTT light (#76848) * Remove deprecated white_value support from MQTT light * Remove deprecated white_value support from MQTT JSON light * Remove deprecated white_value support from MQTT template light
d_id: 102,738 | n_ast_errors: 0 | n_whitespaces: 628 | token_counts: 47 | vocab_size: 121 | id: 303,930 | n_words: 158
repo: core | n_identifiers: 73 | path: homeassistant/components/mqtt/light/schema_json.py | language: Python | nloc: 5
{ "docstring": "Test color_mode is not combined with deprecated config.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
complexity: 1 | fun_name: test_decrypt_pillar_invalid_renderer
def test_decrypt_pillar_invalid_renderer(salt_master, grains, pillar_homedir): opts = salt_master.config.copy() opts["decrypt_pillar"] = [{"secrets:vault": "gpg"}] opts["decrypt_pillar_default"] = "foo" opts["decrypt_pillar_renderers"] = ["foo", "bar"] pillar_obj = salt.pillar.Pillar(opts, grains, "test", "base") ret = pillar_obj.compile_pillar() expected = copy.deepcopy(GPG_PILLAR_ENCRYPTED) expected["_errors"] = [ "Failed to decrypt pillar key 'secrets:vault': 'gpg' is not a valid decryption" " renderer. Valid choices are: foo, bar" ] assert ret["_errors"] == expected["_errors"] assert ret["secrets"]["vault"]["foo"] == expected["secrets"]["vault"]["foo"] assert ret["secrets"]["vault"]["bar"] == expected["secrets"]["vault"]["bar"] assert ret["secrets"]["vault"]["baz"] == expected["secrets"]["vault"]["baz"] assert ret["secrets"]["vault"]["qux"] == expected["secrets"]["vault"]["qux"]
commit_id: b856d3225ef1003cbe94499dc8bd82efffabb661 | ast_levels: 10 | file_name: test_gpg.py | n_ast_nodes: 346
commit_message:
Add tests for gpg decryption failure option Test that: 1. Pillar registers an error when `gpg_decrypt_must_succeed` is `True` and decryption fails 2. The GPG renderer fails silently when `gpg_decrypt_must_succeed` is `False` Also mock `__opts__["gpg_decrypt_must_succeed"]` for gpg renderer unit pytests.
d_id: 54,344 | n_ast_errors: 0 | n_whitespaces: 132 | token_counts: 185 | vocab_size: 56 | id: 216,038 | n_words: 73
repo: salt | n_identifiers: 16 | path: tests/pytests/functional/pillar/test_gpg.py | language: Python | nloc: 17
{ "docstring": "\n Test decryption using a renderer which is not permitted. It should\n fail, leaving the encrypted keys intact, and add an error to the pillar\n dictionary.\n\n decrypt_pillar_default: foo\n decrypt_pillar_renderers:\n - foo\n - bar\n decrypt_pillar:\n - 'secrets:vault': gpg\n ", "language": "en", "n_whitespaces": 97, "n_words": 36, "vocab_size": 32 }
https://github.com/saltstack/salt.git
complexity: 2 | fun_name: test_unreadable
def test_unreadable(self, message_mock, editor, caplog, qtbot): editor.edit("") filename = pathlib.Path(editor._filename) assert filename.exists() filename.chmod(0o277) if os.access(str(filename), os.R_OK): # Docker container or similar pytest.skip("File was still readable") with caplog.at_level(logging.ERROR): editor._proc._proc.finished.emit(0, QProcess.ExitStatus.NormalExit) assert not filename.exists() msg = message_mock.getmsg(usertypes.MessageLevel.error) assert msg.text.startswith("Failed to read back edited file: ")
commit_id: 0877fb0d78635692e481c8bde224fac5ad0dd430 | ast_levels: 12 | file_name: test_editor.py | n_ast_nodes: 197
commit_message: Run scripts/dev/rewrite_enums.py
d_id: 117,695 | n_ast_errors: 0 | n_whitespaces: 146 | token_counts: 119 | vocab_size: 39 | id: 321,395 | n_words: 43
repo: qutebrowser | n_identifiers: 35 | path: tests/unit/misc/test_editor.py | language: Python | nloc: 12
{ "docstring": "Test file handling when closing with an unreadable file.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/qutebrowser/qutebrowser.git
complexity: 1 | fun_name: RecurrenceOperators
def RecurrenceOperators(base, generator): ring = RecurrenceOperatorAlgebra(base, generator) return (ring, ring.shift_operator)
commit_id: 498015021131af4dbb07eb110e5badaba8250c7b | ast_levels: 8 | file_name: recurrence.py | n_ast_nodes: 38
commit_message: Updated import locations
d_id: 47,825 | n_ast_errors: 0 | n_whitespaces: 19 | token_counts: 24 | vocab_size: 10 | id: 196,325 | n_words: 10
repo: sympy | n_identifiers: 6 | path: sympy/holonomic/recurrence.py | language: Python | nloc: 3
{ "docstring": "\n Returns an Algebra of Recurrence Operators and the operator for\n shifting i.e. the `Sn` operator.\n The first argument needs to be the base polynomial ring for the algebra\n and the second argument must be a generator which can be either a\n noncommutative Symbol or a string.\n\n Examples\n ========\n\n >>> from sympy import ZZ\n >>> from sympy import symbols\n >>> from sympy.holonomic.recurrence import RecurrenceOperators\n >>> n = symbols('n', integer=True)\n >>> R, Sn = RecurrenceOperators(ZZ.old_poly_ring(n), 'Sn')\n ", "language": "en", "n_whitespaces": 114, "n_words": 74, "vocab_size": 53 }
https://github.com/sympy/sympy.git
complexity: 2 | fun_name: _patch_faces
def _patch_faces(self, queue_in, queue_out, sample_size): logger.trace("Patching faces") self._converter.process(queue_in, queue_out) swapped = [] idx = 0 while idx < sample_size: logger.trace("Patching image %s of %s", idx + 1, sample_size) item = queue_out.get() swapped.append(item[1]) logger.trace("Patched image %s of %s", idx + 1, sample_size) idx += 1 logger.trace("Patched faces") return swapped
commit_id: 71c20252c2e747f692289cdefe80ad0d5a456ea6 | ast_levels: 10 | file_name: preview.py | n_ast_nodes: 150
commit_message: bugfix: Preview Tool, ensure all config items are written
d_id: 19,975 | n_ast_errors: 0 | n_whitespaces: 159 | token_counts: 91 | vocab_size: 31 | id: 100,506 | n_words: 48
repo: faceswap | n_identifiers: 14 | path: tools/preview/preview.py | language: Python | nloc: 13
{ "docstring": " Patch faces.\n\n Run the convert process on the swapped faces and return the patched faces.\n\n patch_queue_in: :class:`queue.Queue`\n The input queue for the patching process\n queue_out: :class:`queue.Queue`\n The output queue from the patching process\n sample_size: int\n The number of samples to be displayed\n\n Returns\n -------\n list\n The swapped faces patched with the selected convert settings\n ", "language": "en", "n_whitespaces": 155, "n_words": 54, "vocab_size": 36 }
https://github.com/deepfakes/faceswap.git
complexity: 5 | fun_name: get_cv2_image
def get_cv2_image(self, image): if isinstance(image, str): if os.path.isfile(image): image = cv2.imread(image, cv2.IMREAD_COLOR).astype('float32') else: raise FileNotFoundError("Cannot find {}".format(image)) elif isinstance(image, np.ndarray): image = image.astype('float32') elif isinstance(image, PIL.Image.Image): image = np.asarray(image)[:, :, ::-1] else: raise TypeError("Unsupport image format. Only path-to-file, opencv BGR image, and PIL image are supported.") return image
commit_id: 803b90729d25fda253011c505d0189e8e63cc039 | ast_levels: 15 | file_name: DBNet.py | n_ast_nodes: 186
commit_message: add dbnet
d_id: 27,298 | n_ast_errors: 0 | n_whitespaces: 175 | token_counts: 111 | vocab_size: 35 | id: 123,121 | n_words: 48
repo: EasyOCR | n_identifiers: 20 | path: easyocr/DBNet/DBNet.py | language: Python | nloc: 13
{ "docstring": "\n Load or convert input to OpenCV BGR image numpy array.\n\n Parameters\n ----------\n image : str, PIL.Image, or np.ndarray\n Image to load or convert.\n\n Raises\n ------\n FileNotFoundError\n Raised when the input is a path to file (str), but the file is not found.\n TypeError\n Raised when the data type of the input is not supported.\n\n Returns\n -------\n image : np.ndarray\n OpenCV BGR image.\n ", "language": "en", "n_whitespaces": 191, "n_words": 62, "vocab_size": 41 }
https://github.com/JaidedAI/EasyOCR.git
complexity: 3 | fun_name: serialize_model_as_bytecode
def serialize_model_as_bytecode(model): # Note: we don't use a RAM path for this because zipfile cannot write # to such paths. temp_dir = tempfile.mkdtemp() try: filepath = os.path.join(temp_dir, "model.keras") saving_lib.save_model(model, filepath) with open(filepath, "rb") as f: data = f.read() except Exception as e: raise e else: return data finally: tf.io.gfile.rmtree(temp_dir)
commit_id: 2ed044d06d0ae552477672aa8b778f8edafb52f1 | ast_levels: 13 | file_name: pickle_utils.py | n_ast_nodes: 134
commit_message:
Use new saving logic for pickling. This is somewhat cleaner since it restores the exact same model (no usage of traces). It may however be less convenient since it requires get_config() to be implemented and the use of a custom_object_scope. PiperOrigin-RevId: 474146108
d_id: 83,134 | n_ast_errors: 0 | n_whitespaces: 126 | token_counts: 75 | vocab_size: 44 | id: 279,795 | n_words: 49
repo: keras | n_identifiers: 21 | path: keras/saving/pickle_utils.py | language: Python | nloc: 13
{ "docstring": "Convert a Keras Model into a bytecode representation for pickling.\n\n Args:\n model: Keras Model instance.\n\n Returns:\n Tuple that can be read by `deserialize_from_bytecode`.\n ", "language": "en", "n_whitespaces": 46, "n_words": 23, "vocab_size": 20 }
https://github.com/keras-team/keras.git
complexity: 7 | fun_name: boilerplate_gen
def boilerplate_gen(): _figure_commands = ( 'figimage', 'figtext:text', 'gca', 'gci:_gci', 'ginput', 'subplots_adjust', 'suptitle', 'tight_layout', 'waitforbuttonpress', ) # These methods are all simple wrappers of Axes methods by the same name. _axes_commands = ( 'acorr', 'angle_spectrum', 'annotate', 'arrow', 'autoscale', 'axhline', 'axhspan', 'axis', 'axline', 'axvline', 'axvspan', 'bar', 'barbs', 'barh', 'bar_label', 'boxplot', 'broken_barh', 'clabel', 'cohere', 'contour', 'contourf', 'csd', 'errorbar', 'eventplot', 'fill', 'fill_between', 'fill_betweenx', 'grid', 'hexbin', 'hist', 'stairs', 'hist2d', 'hlines', 'imshow', 'legend', 'locator_params', 'loglog', 'magnitude_spectrum', 'margins', 'minorticks_off', 'minorticks_on', 'pcolor', 'pcolormesh', 'phase_spectrum', 'pie', 'plot', 'plot_date', 'psd', 'quiver', 'quiverkey', 'scatter', 'semilogx', 'semilogy', 'specgram', 'spy', 'stackplot', 'stem', 'step', 'streamplot', 'table', 'text', 'tick_params', 'ticklabel_format', 'tricontour', 'tricontourf', 'tripcolor', 'triplot', 'violinplot', 'vlines', 'xcorr', # pyplot name : real name 'sci:_sci', 'title:set_title', 'xlabel:set_xlabel', 'ylabel:set_ylabel', 'xscale:set_xscale', 'yscale:set_yscale', ) cmappable = { 'contour': 'if __ret._A is not None: sci(__ret) # noqa', 'contourf': 'if __ret._A is not None: sci(__ret) # noqa', 'hexbin': 'sci(__ret)', 'scatter': 'sci(__ret)', 'pcolor': 'sci(__ret)', 'pcolormesh': 'sci(__ret)', 'hist2d': 'sci(__ret[-1])', 'imshow': 'sci(__ret)', 'spy': 'if isinstance(__ret, cm.ScalarMappable): sci(__ret) # noqa', 'quiver': 'sci(__ret)', 'specgram': 'sci(__ret[-1])', 'streamplot': 'sci(__ret.lines)', 'tricontour': 'if __ret._A is not None: sci(__ret) # noqa', 'tricontourf': 'if __ret._A is not None: sci(__ret) # noqa', 'tripcolor': 'sci(__ret)', } for spec in _figure_commands: if ':' in spec: name, called_name = spec.split(':') else: name = called_name = spec yield generate_function(name, f'Figure.{called_name}', FIGURE_METHOD_TEMPLATE) for spec in _axes_commands: if ':' in spec: name, called_name = spec.split(':') else: name = called_name = spec template = (AXES_CMAPPABLE_METHOD_TEMPLATE if name in cmappable else AXES_METHOD_TEMPLATE) yield generate_function(name, f'Axes.{called_name}', template, sci_command=cmappable.get(name)) cmaps = ( 'autumn', 'bone', 'cool', 'copper', 'flag', 'gray', 'hot', 'hsv', 'jet', 'pink', 'prism', 'spring', 'summer', 'winter', 'magma', 'inferno', 'plasma', 'viridis', "nipy_spectral" ) # add all the colormaps (autumn, hsv, ....) for name in cmaps: yield AUTOGEN_MSG yield CMAP_TEMPLATE.format(name=name)
commit_id: 032316bc6c7798fca6c82de24167c975f237687f | ast_levels: 13 | file_name: boilerplate.py | n_ast_nodes: 780
commit_message:
Cleanup documentation generation for pyplot - remove the awkward `pyplot.plotting()` function, which only served as a namespace to take up the docs for pyplot and output them via `.. autofunction` - Instead generate the same information using `.. autosummary::`. We have to list the desired methods here explicitly. I've added a test that these are the same as previously auto-generated in the `plotting()` docstring. If we change anything in pyplot, we'll be notified through the test failure that we have to adapt the autosummary list. - Removed the docstring generation logic `_setup_pyplot_info_docstrings()`. Apart from generating the `plotting()` docstring, this added docstrings to the pyplot colormap setters. Instead, we now add these docstrings directly via boilerplate.py Co-authored-by: Elliott Sales de Andrade <[email protected]>
d_id: 23,247 | n_ast_errors: 0 | n_whitespaces: 1,350 | token_counts: 398 | vocab_size: 190 | id: 108,536 | n_words: 275
repo: matplotlib | n_identifiers: 19 | path: tools/boilerplate.py | language: Python | nloc: 147
{ "docstring": "Generator of lines for the automated part of pyplot.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
https://github.com/matplotlib/matplotlib.git
complexity: 14 | fun_name: get_data_by_territory
def get_data_by_territory(filters, common_columns): columns = [ { "label": "Territory", "fieldname": "territory", "fieldtype": "Link", "options": "Territory", "width": 150, } ] columns += common_columns customers_in = get_customer_stats(filters, tree_view=True) territory_dict = {} for t in frappe.db.sql( , as_dict=1 ): territory_dict.update({t.name: {"parent": t.parent_territory, "is_group": t.is_group}}) depth_map = frappe._dict() for name, info in territory_dict.items(): default = depth_map.get(info["parent"]) + 1 if info["parent"] else 0 depth_map.setdefault(name, default) data = [] for name, indent in depth_map.items(): condition = customers_in.get(name) new = customers_in[name]["new"] if condition else [0, 0.0] repeat = customers_in[name]["repeat"] if condition else [0, 0.0] temp = { "territory": name, "parent_territory": territory_dict[name]["parent"], "indent": indent, "new_customers": new[0], "repeat_customers": repeat[0], "total": new[0] + repeat[0], "new_customer_revenue": new[1], "repeat_customer_revenue": repeat[1], "total_revenue": new[1] + repeat[1], "bold": 0 if indent else 1, } data.append(temp) loop_data = sorted(data, key=lambda k: k["indent"], reverse=True) for ld in loop_data: if ld["parent_territory"]: parent_data = [x for x in data if x["territory"] == ld["parent_territory"]][0] for key in parent_data.keys(): if key not in ["indent", "territory", "parent_territory", "bold"]: parent_data[key] += ld[key] return columns, data, None, None, None, 1
commit_id: 494bd9ef78313436f0424b918f200dab8fc7c20b | ast_levels: 16 | file_name: customer_acquisition_and_loyalty.py | n_ast_nodes: 608
commit_message: style: format code with black
d_id: 14,514 | n_ast_errors: 0 | n_whitespaces: 119 | token_counts: 382 | vocab_size: 112 | id: 67,398 | n_words: 166
repo: erpnext | n_identifiers: 40 | path: erpnext/selling/report/customer_acquisition_and_loyalty/customer_acquisition_and_loyalty.py | language: Python | nloc: 47
{ "docstring": "SELECT name, lft, parent_territory, is_group FROM `tabTerritory` ORDER BY lft", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/frappe/erpnext.git
complexity: 2 | fun_name: test_multipartite_layout_layer_order
def test_multipartite_layout_layer_order(): G = nx.Graph() for node, layer in zip(("a", "b", "c", "d", "e"), (2, 3, 1, 2, 4)): G.add_node(node, subset=layer) # Horizontal alignment, therefore y-coord determines layers pos = nx.multipartite_layout(G, align="horizontal") # Nodes "a" and "d" are in the same layer assert pos["a"][-1] == pos["d"][-1] # positions should be sorted according to layer assert pos["c"][-1] < pos["a"][-1] < pos["b"][-1] < pos["e"][-1] # Make sure that multipartite_layout still works when layers are not sortable G.nodes["a"]["subset"] = "layer_0" # Can't sort mixed strs/ints pos_nosort = nx.multipartite_layout(G) # smoke test: this should not raise assert pos_nosort.keys() == pos.keys()
commit_id: 23fc7568a5179c54dccd0c6f68760877f88aa4b6 | ast_levels: 10 | file_name: test_layout.py | n_ast_nodes: 257
commit_message:
Recover order of layers in multipartite_layout when layers are sortable (#5705) * Add test for sorted layers. * Implement layer sorting when possible. Co-authored-by: Dan Schult <[email protected]> * Add test case for non-sortable layers. Co-authored-by: Dan Schult <[email protected]>
d_id: 42,138 | n_ast_errors: 0 | n_whitespaces: 144 | token_counts: 151 | vocab_size: 75 | id: 176,846 | n_words: 96
repo: networkx | n_identifiers: 15 | path: networkx/drawing/tests/test_layout.py | language: Python | nloc: 10
{ "docstring": "Return the layers in sorted order if the layers of the multipartite\n graph are sortable. See gh-5691", "language": "en", "n_whitespaces": 19, "n_words": 17, "vocab_size": 14 }
https://github.com/networkx/networkx.git
complexity: 2 | fun_name: trace_basic_info
def trace_basic_info(finder): # type: (PackageFinder) -> None # Display where finder is looking for packages search_scope = finder.search_scope locations = search_scope.get_formatted_locations() if locations: logger.info(locations)
commit_id: f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | ast_levels: 9 | file_name: req_command.py | n_ast_nodes: 50
commit_message: upd; format
d_id: 12,210 | n_ast_errors: 0 | n_whitespaces: 77 | token_counts: 27 | vocab_size: 22 | id: 60,561 | n_words: 24
repo: transferlearning | n_identifiers: 7 | path: .venv/lib/python3.8/site-packages/pip/_internal/cli/req_command.py | language: Python | nloc: 5
{ "docstring": "\n Trace basic information about the provided objects.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
https://github.com/jindongwang/transferlearning.git
complexity: 4 | fun_name: draw
def draw(self, pictorial=True): if not numpy: raise ImportError("To use this function numpy module is required") x = self.variable # checking whether length is an expression in terms of any Symbol. if isinstance(self.length, Expr): l = list(self.length.atoms(Symbol)) # assigning every Symbol a default value of 10 l = {i:10 for i in l} length = self.length.subs(l) else: l = {} length = self.length height = length/10 rectangles = [] rectangles.append({'xy':(0, 0), 'width':length, 'height': height, 'facecolor':"brown"}) annotations, markers, load_eq,load_eq1, fill = self._draw_load(pictorial, length, l) support_markers, support_rectangles = self._draw_supports(length, l) rectangles += support_rectangles markers += support_markers sing_plot = plot(height + load_eq, height + load_eq1, (x, 0, length), xlim=(-height, length + height), ylim=(-length, 1.25*length), annotations=annotations, markers=markers, rectangles=rectangles, line_color='brown', fill=fill, axis=False, show=False) return sing_plot
commit_id: 24f1e7730119fe958cc8e28411f790c9a5ec04eb | ast_levels: 13 | file_name: beam.py | n_ast_nodes: 344
commit_message:
Fix various typos Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`
d_id: 49,636 | n_ast_errors: 0 | n_whitespaces: 318 | token_counts: 226 | vocab_size: 91 | id: 200,421 | n_words: 119
repo: sympy | n_identifiers: 35 | path: sympy/physics/continuum_mechanics/beam.py | language: Python | nloc: 22
{ "docstring": "\n Returns a plot object representing the beam diagram of the beam.\n\n .. note::\n The user must be careful while entering load values.\n The draw function assumes a sign convention which is used\n for plotting loads.\n Given a right handed coordinate system with XYZ coordinates,\n the beam's length is assumed to be along the positive X axis.\n The draw function recognizes positive loads(with n>-2) as loads\n acting along negative Y direction and positive moments acting\n along positive Z direction.\n\n Parameters\n ==========\n\n pictorial: Boolean (default=True)\n Setting ``pictorial=True`` would simply create a pictorial (scaled) view\n of the beam diagram not with the exact dimensions.\n Although setting ``pictorial=False`` would create a beam diagram with\n the exact dimensions on the plot\n\n Examples\n ========\n\n .. plot::\n :context: close-figs\n :format: doctest\n :include-source: True\n\n >>> from sympy.physics.continuum_mechanics.beam import Beam\n >>> from sympy import symbols\n >>> R1, R2 = symbols('R1, R2')\n >>> E, I = symbols('E, I')\n >>> b = Beam(50, 20, 30)\n >>> b.apply_load(10, 2, -1)\n >>> b.apply_load(R1, 10, -1)\n >>> b.apply_load(R2, 30, -1)\n >>> b.apply_load(90, 5, 0, 23)\n >>> b.apply_load(10, 30, 1, 50)\n >>> b.apply_support(50, \"pin\")\n >>> b.apply_support(0, \"fixed\")\n >>> b.apply_support(20, \"roller\")\n >>> p = b.draw()\n >>> p\n Plot object containing:\n [0]: cartesian line: 25*SingularityFunction(x, 5, 0) - 25*SingularityFunction(x, 23, 0)\n + SingularityFunction(x, 30, 1) - 20*SingularityFunction(x, 50, 0)\n - SingularityFunction(x, 50, 1) + 5 for x over (0.0, 50.0)\n [1]: cartesian line: 5 for x over (0.0, 50.0)\n >>> p.show()\n\n ", "language": "en", "n_whitespaces": 694, "n_words": 234, "vocab_size": 153 }
https://github.com/sympy/sympy.git
complexity: 1 | fun_name: get_number_of_leave_days
def get_number_of_leave_days(from_date, to_date, holiday_list): number_of_days = date_diff(to_date, from_date) + 1 holidays = frappe.db.sql( , (from_date, to_date, holiday_list), )[0][0] number_of_days = flt(number_of_days) - flt(holidays) return number_of_days
commit_id: 494bd9ef78313436f0424b918f200dab8fc7c20b | ast_levels: 11 | file_name: student_leave_application.py | n_ast_nodes: 85
commit_message: style: format code with black
d_id: 14,052 | n_ast_errors: 0 | n_whitespaces: 17 | token_counts: 57 | vocab_size: 20 | id: 65,905 | n_words: 25
repo: erpnext | n_identifiers: 11 | path: erpnext/education/doctype/student_leave_application/student_leave_application.py | language: Python | nloc: 15
{ "docstring": "\n\t\tSELECT\n\t\t\tCOUNT(DISTINCT holiday_date)\n\t\tFROM `tabHoliday` h1,`tabHoliday List` h2\n\t\tWHERE\n\t\t\th1.parent = h2.name and\n\t\t\th1.holiday_date between %s and %s and\n\t\t\th2.name = %s", "language": "en", "n_whitespaces": 15, "n_words": 22, "vocab_size": 16 }
https://github.com/frappe/erpnext.git
complexity: 3 | fun_name: getgeneratorlocals
def getgeneratorlocals(generator): if not isgenerator(generator): raise TypeError("{!r} is not a Python generator".format(generator)) frame = getattr(generator, "gi_frame", None) if frame is not None: return generator.gi_frame.f_locals else: return {} # ------------------------------------------------ coroutine introspection CORO_CREATED = 'CORO_CREATED' CORO_RUNNING = 'CORO_RUNNING' CORO_SUSPENDED = 'CORO_SUSPENDED' CORO_CLOSED = 'CORO_CLOSED'
commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526 | ast_levels: 12 | file_name: inspect.py | n_ast_nodes: 115
commit_message: add python 3.10.4 for windows
d_id: 55,315 | n_ast_errors: 0 | n_whitespaces: 74 | token_counts: 50 | vocab_size: 33 | id: 218,447 | n_words: 43
repo: XX-Net | n_identifiers: 13 | path: python3.10.4/Lib/inspect.py | language: Python | nloc: 8
{ "docstring": "\n Get the mapping of generator local variables to their current values.\n\n A dict is returned, with the keys the local variable names and values the\n bound values.", "language": "en", "n_whitespaces": 36, "n_words": 27, "vocab_size": 22 }
https://github.com/XX-net/XX-Net.git
complexity: 3 | fun_name: _remove_invalid_user
def _remove_invalid_user(self, request): try: stored_backend = load_backend( request.session.get(auth.BACKEND_SESSION_KEY, "") ) except ImportError: # backend failed to load auth.logout(request) else: if isinstance(stored_backend, RemoteUserBackend): auth.logout(request)
commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | ast_levels: 13 | file_name: middleware.py | n_ast_nodes: 89
commit_message: Refs #33476 -- Reformatted code with Black.
d_id: 50,501 | n_ast_errors: 0 | n_whitespaces: 136 | token_counts: 52 | vocab_size: 22 | id: 203,668 | n_words: 23
repo: django | n_identifiers: 13 | path: django/contrib/auth/middleware.py | language: Python | nloc: 10
{ "docstring": "\n Remove the current authenticated user in the request which is invalid\n but only if the user is authenticated via the RemoteUserBackend.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 15 }
https://github.com/django/django.git
complexity: 5 | fun_name: batch_test
def batch_test(num_threads, delay): with mock.patch( "ray.autoscaler._private.aws.node_provider.make_ec2_client" ), mock.patch.object(AWSNodeProvider, "_create_tags", mock_create_tags): provider = AWSNodeProvider( provider_config={"region": "nowhere"}, cluster_name="default" ) provider.batch_counter = 0 provider.tag_update_counter = 0 provider.tag_cache = {str(x): {} for x in range(num_threads)} threads = [] for x in range(num_threads): thread = threading.Thread( target=provider.set_node_tags, args=(str(x), {"foo": "bar"}) ) threads.append(thread) for thread in threads: thread.start() time.sleep(delay) for thread in threads: thread.join() return provider.batch_counter, provider.tag_update_counter
commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ast_levels: 17 | file_name: test_aws_batch_tag_update.py | n_ast_nodes: 256
commit_message: [CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
d_id: 29,474 | n_ast_errors: 0 | n_whitespaces: 239 | token_counts: 154 | vocab_size: 43 | id: 131,087 | n_words: 61
repo: ray | n_identifiers: 29 | path: python/ray/tests/aws/test_aws_batch_tag_update.py | language: Python | nloc: 22
{ "docstring": "Run AWSNodeProvider.set_node_tags in several threads, with a\n specified delay between thread launches.\n\n Return the number of batches of tag updates and the number of tags\n updated.\n ", "language": "en", "n_whitespaces": 38, "n_words": 26, "vocab_size": 22 }
https://github.com/ray-project/ray.git
complexity: 13 | fun_name: sample_to_features_text
def sample_to_features_text(sample, tasks, max_seq_len, tokenizer): if tokenizer.is_fast: text = sample.clear_text["text"] # Here, we tokenize the sample for the second time to get all relevant ids # This should change once we git rid of FARM's tokenize_with_metadata() inputs = tokenizer( text, return_token_type_ids=True, truncation=True, truncation_strategy="longest_first", max_length=max_seq_len, return_special_tokens_mask=True, ) if (len(inputs["input_ids"]) - inputs["special_tokens_mask"].count(1)) != len(sample.tokenized["tokens"]): logger.error( f"FastTokenizer encoded sample {sample.clear_text['text']} to " f"{len(inputs['input_ids']) - inputs['special_tokens_mask'].count(1)} tokens, which differs " f"from number of tokens produced in tokenize_with_metadata(). \n" f"Further processing is likely to be wrong." ) else: # TODO It might be cleaner to adjust the data structure in sample.tokenized tokens_a = sample.tokenized["tokens"] tokens_b = sample.tokenized.get("tokens_b", None) inputs = tokenizer.encode_plus( tokens_a, tokens_b, add_special_tokens=True, truncation=False, # truncation_strategy is deprecated return_token_type_ids=True, is_split_into_words=False, ) input_ids, segment_ids = inputs["input_ids"], inputs["token_type_ids"] # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. padding_mask = [1] * len(input_ids) # Padding up to the sequence length. # Normal case: adding multiple 0 to the right # Special cases: # a) xlnet pads on the left and uses "4" for padding token_type_ids if tokenizer.__class__.__name__ == "XLNetTokenizer": pad_on_left = True segment_ids = pad(segment_ids, max_seq_len, 4, pad_on_left=pad_on_left) else: pad_on_left = False segment_ids = pad(segment_ids, max_seq_len, 0, pad_on_left=pad_on_left) input_ids = pad(input_ids, max_seq_len, tokenizer.pad_token_id, pad_on_left=pad_on_left) padding_mask = pad(padding_mask, max_seq_len, 0, pad_on_left=pad_on_left) assert len(input_ids) == max_seq_len assert len(padding_mask) == max_seq_len assert len(segment_ids) == max_seq_len feat_dict = { "input_ids": input_ids, "padding_mask": padding_mask, "segment_ids": segment_ids, } # Add Labels for different tasks for task_name, task in tasks.items(): try: label_name = task["label_name"] label_raw = sample.clear_text[label_name] label_list = task["label_list"] if task["task_type"] == "classification": # id of label try: label_ids = [label_list.index(label_raw)] except ValueError as e: raise ValueError(f"[Task: {task_name}] Observed label {label_raw} not in defined label_list") elif task["task_type"] == "multilabel_classification": # multi-hot-format label_ids = [0] * len(label_list) for l in label_raw.split(","): if l != "": label_ids[label_list.index(l)] = 1 elif task["task_type"] == "regression": label_ids = [float(label_raw)] else: raise ValueError(task["task_type"]) except KeyError: # For inference mode we don't expect labels label_ids = None if label_ids is not None: feat_dict[task["label_tensor_name"]] = label_ids return [feat_dict]
commit_id: a59bca366174d9c692fa19750c24d65f47660ef7 | ast_levels: 20 | file_name: input_features.py | n_ast_nodes: 778
commit_message:
Apply black formatting (#2115) * Testing black on ui/ * Applying black on docstores * Add latest docstring and tutorial changes * Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too * Remove comments * Relax constraints on pydoc-markdown * Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade * Fix a couple of bugs * Add a type: ignore that was missing somehow * Give path to black * Apply Black * Apply Black * Relocate a couple of type: ignore * Update documentation * Make Linux CI run after applying Black * Triggering Black * Apply Black * Remove dependency, does not work well * Remove manually double trailing commas * Update documentation Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
d_id: 74,800 | n_ast_errors: 0 | n_whitespaces: 1,081 | token_counts: 439 | vocab_size: 214 | id: 256,204 | n_words: 338
repo: haystack | n_identifiers: 48 | path: haystack/modeling/data_handler/input_features.py | language: Python | nloc: 71
{ "docstring": "\n Generates a dictionary of features for a given input sample that is to be consumed by a text classification model.\n\n :param sample: Sample object that contains human readable text and label fields from a single text classification data sample\n :type sample: Sample\n :param tasks: A dictionary where the keys are the names of the tasks and the values are the details of the task (e.g. label_list, metric, tensor name)\n :type tasks: dict\n :param max_seq_len: Sequences are truncated after this many tokens\n :type max_seq_len: int\n :param tokenizer: A tokenizer object that can turn string sentences into a list of tokens\n :return: A list with one dictionary containing the keys \"input_ids\", \"padding_mask\" and \"segment_ids\" (also \"label_ids\" if not\n in inference mode). The values are lists containing those features.\n :rtype: list\n ", "language": "en", "n_whitespaces": 174, "n_words": 128, "vocab_size": 84 }
https://github.com/deepset-ai/haystack.git
complexity: 1 | fun_name: available
def available(self) -> bool: return (self.ecowitt.last_update_m + 5 * 60) > time.monotonic()
commit_id: 105bb3e08264c753012f10fd35f8358c8683646d | ast_levels: 10 | file_name: entity.py | n_ast_nodes: 44
commit_message:
Ecowitt integration (#77441) * Add ecowitt integration * add tests * use total * use total * test coverage * Update homeassistant/components/ecowitt/__init__.py Co-authored-by: Paulus Schoutsen <[email protected]> * Update homeassistant/components/ecowitt/binary_sensor.py Co-authored-by: Paulus Schoutsen <[email protected]> * Update homeassistant/components/ecowitt/entity.py Co-authored-by: Paulus Schoutsen <[email protected]> * Update homeassistant/components/ecowitt/diagnostics.py Co-authored-by: Paulus Schoutsen <[email protected]> * add to async_on_unload * remove attr_name / unload callback * support unload platforms * using replace * address mapping * update type * mark final * Apply suggestions from code review Co-authored-by: Martin Hjelmare <[email protected]> * Fix bracket * Fix another bracket * Address comment * Add strings * update tests * Update homeassistant/components/ecowitt/strings.json Co-authored-by: Martin Hjelmare <[email protected]> * update text * Update homeassistant/components/ecowitt/strings.json Co-authored-by: Martin Hjelmare <[email protected]> Co-authored-by: Paulus Schoutsen <[email protected]> Co-authored-by: Martin Hjelmare <[email protected]>
d_id: 104,111 | n_ast_errors: 0 | n_whitespaces: 26 | token_counts: 26 | vocab_size: 12 | id: 305,321 | n_words: 12
repo: core | n_identifiers: 7 | path: homeassistant/components/ecowitt/entity.py | language: Python | nloc: 3
{ "docstring": "Return whether the state is based on actual reading from device.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/home-assistant/core.git
7
_get_selection_override
def _get_selection_override(self, prev, next_, opposite): cmdutils.check_exclusive((prev, next_, opposite), 'pno') if prev: return QTabBar.SelectionBehavior.SelectLeftTab elif next_: return QTabBar.SelectionBehavior.SelectRightTab elif opposite: conf_selection = config.val.tabs.select_on_remove if conf_selection == QTabBar.SelectionBehavior.SelectLeftTab: return QTabBar.SelectionBehavior.SelectRightTab elif conf_selection == QTabBar.SelectionBehavior.SelectRightTab: return QTabBar.SelectionBehavior.SelectLeftTab elif conf_selection == QTabBar.SelectionBehavior.SelectPreviousTab: raise cmdutils.CommandError( "-o is not supported with 'tabs.select_on_remove' set to " "'last-used'!") else: # pragma: no cover raise ValueError("Invalid select_on_remove value " "{!r}!".format(conf_selection)) return None
0877fb0d78635692e481c8bde224fac5ad0dd430
17
commands.py
194
Run scripts/dev/rewrite_enums.py
117,512
0
313
118
45
321,077
63
qutebrowser
20
qutebrowser/browser/commands.py
Python
20
{ "docstring": "Helper function for tab_close to get the tab to select.\n\n Args:\n prev: Force selecting the tab before the current tab.\n next_: Force selecting the tab after the current tab.\n opposite: Force selecting the tab in the opposite direction of\n what's configured in 'tabs.select_on_remove'.\n\n Return:\n QTabBar.SelectionBehavior.SelectLeftTab, QTabBar.SelectionBehavior.SelectRightTab, or None if no change\n should be made.\n ", "language": "en", "n_whitespaces": 151, "n_words": 54, "vocab_size": 37 }
https://github.com/qutebrowser/qutebrowser.git
42
resolve_type_hint
def resolve_type_hint(hint) -> Any: origin, args = _get_type_hint_origin(hint) excluded_fields = get_override(hint, "exclude_fields", []) if origin is None and is_basic_type(hint, allow_none=False): return build_basic_type(hint) elif origin is None and inspect.isclass(hint) and issubclass(hint, tuple): # a convoluted way to catch NamedTuple. suggestions welcome. if get_type_hints(hint): properties = {k: resolve_type_hint(v) for k, v in get_type_hints(hint).items()} else: properties = {k: build_basic_type(OpenApiTypes.ANY) for k in hint._fields} return build_object_type(properties=properties, required=properties.keys()) elif origin is list or hint is list: return build_array_type( resolve_type_hint(args[0]) if args else build_basic_type(OpenApiTypes.ANY) ) elif origin is tuple: return build_array_type( schema=build_basic_type(args[0]), max_length=len(args), min_length=len(args), ) elif origin is dict or origin is defaultdict: schema = build_basic_type(OpenApiTypes.OBJECT) if args and args[1] is not typing.Any: schema["additionalProperties"] = resolve_type_hint(args[1]) return schema elif origin is set: return build_array_type(resolve_type_hint(args[0])) elif origin is frozenset: return build_array_type(resolve_type_hint(args[0])) elif origin is Literal: # Literal only works for python >= 3.8 despite typing_extensions, because it # behaves slightly different w.r.t. __origin__ schema = {"enum": list(args)} if all(type(args[0]) is type(choice) for choice in args): schema.update(build_basic_type(type(args[0]))) return schema elif inspect.isclass(hint) and issubclass(hint, Enum): schema = {"enum": [item.value for item in hint]} mixin_base_types = [t for t in hint.__mro__ if is_basic_type(t)] if mixin_base_types: schema.update(build_basic_type(mixin_base_types[0])) return schema elif isinstance(hint, _TypedDictMeta): return build_object_type( properties={ k: resolve_type_hint(v) for k, v in get_type_hints(hint).items() if k not in excluded_fields }, description=inspect.cleandoc(hint.__doc__ or ""), required=[h for h in hint.__required_keys__ if h not in excluded_fields], ) elif origin is Union: type_args = [arg for arg in args if arg is not type(None)] # noqa: E721 if len(type_args) > 1: schema = {"oneOf": [resolve_type_hint(arg) for arg in type_args]} else: schema = resolve_type_hint(type_args[0]) if type(None) in args: schema["nullable"] = True return schema elif origin is collections.abc.Iterable: return build_array_type(resolve_type_hint(args[0])) elif isinstance(hint, typing._TypedDictMeta): raise UnableToProceedError("Wrong TypedDict class, please use typing_extensions.TypedDict") else: raise UnableToProceedError(hint)
286bf2ae7ecfdd6698d8fb1cd4753f107159d4d2
17
spectacular_ports.py
895
ref: use dict instead of OrderedDict since sentry is >python3.6 (#39695) partially automated (especially the fixtures) also via `\(([^]+), (.*)\),$` -> `\1: \2,`
18,119
0
788
569
148
86,529
284
sentry
63
src/sentry/apidocs/spectacular_ports.py
Python
67
{ "docstring": "drf-spectacular library method modified as described above", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/getsentry/sentry.git
1
edge_centers
def edge_centers(self): x0, y0, width, height = self._rect_bbox w = width / 2. h = height / 2. xe = x0, x0 + w, x0 + width, x0 + w ye = y0 + h, y0, y0 + h, y0 + height transform = self._get_rotation_transform() coords = transform.transform(np.array([xe, ye]).T).T return coords[0], coords[1]
1504c4d7d4ed3121c6aa0e8060325ddf1bd10d06
13
widgets.py
144
DOC: fix various typos
22,906
0
115
97
28
107,770
52
matplotlib
17
lib/matplotlib/widgets.py
Python
9
{ "docstring": "\n Midpoint of rectangle edges in data coordinates from left,\n moving anti-clockwise.\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 11 }
https://github.com/matplotlib/matplotlib.git
2
get_event_type
def get_event_type(self) -> str: column = self._get_column_name(Columns.TYPE) if column in self._snuba_data: return cast(str, self._snuba_data[column]) return cast(str, self.data.get("type", "default"))
6aaaf5089b2c39757883179df5a8512db3b0c716
11
models.py
87
feat(perf_issues): Add `GroupEvent` and split some functionality in `Event` into a base class. (#38143) Since we can now have events with multiple groups, we can no longer rely on the `Event.group` property. This pr adds in a `GroupEvent` subclass that should be passed around wherever we expect an event to have a single `Group` associated with it. `Event` has been split up into `BaseEvent` and `Event`. We will deprecate and remove uses of `group_id` and `group` in the `Event` class going forward. If we need an event with a `Group`, we can use `build_group_events` to fetch all `GroupEvents` associated with the `Event`, or `for_group` if we just need a specific `Event`/`Group` pairing. Going forward, the plan is to store all groups in the `groups` property. This means that error events being sent via eventstream will have their group included in `groups` as well. We'll need to update the errors processor in snuba to look there instead of `group_id`. This seems cleaner long term, instead of having both `group_id` and `group_ids` passed through. To figure out where we need to use `build_group_events` and `for_group` we can do a mix of searching the codebase and commenting out the `group_id` and `group` properties and see how CI goes.
17,984
0
57
53
15
85,390
18
sentry
11
src/sentry/eventstore/models.py
Python
10
{ "docstring": "\n Return the type of this event.\n\n See ``sentry.eventtypes``.\n ", "language": "en", "n_whitespaces": 30, "n_words": 8, "vocab_size": 8 }
https://github.com/getsentry/sentry.git
1
_dequantize
def _dequantize(self, quantized_val, scale, zero_point): real_val = scale * (quantized_val - zero_point) return real_val
d68c786ff81bad19c04619d6a999ff34aaa724e7
9
qat_quantizer.py
36
[Compression] remove pruning v1 & refactor directory (#5228)
24,993
0
35
23
13
113,659
14
nni
6
nni/compression/pytorch/quantization/qat_quantizer.py
Python
3
{ "docstring": "\n dequantize quantized value.\n Because we simulate quantization in training process, all the computations still happen as float point computations, which means we\n first quantize tensors then dequantize them. For more details, please refer to the paper.\n\n Parameters\n ----------\n quantized_val : torch.Tensor\n the quantized value to be de-quantized\n scale : torch.Tensor\n quantization scale\n zero_point : torch.Tensor\n quantization zero point\n\n Returns\n -------\n Tensor\n ", "language": "en", "n_whitespaces": 179, "n_words": 61, "vocab_size": 47 }
https://github.com/microsoft/nni.git
1
get_attributes
def get_attributes(self) -> dict[str, str]: return _attributes( message=self.message, type=self.type, )
871b2ca73adcba3a35551247cf839246cf121231
9
_junit_xml.py
45
Simplify existing type hints.
78,616
0
53
29
10
266,836
10
ansible
7
lib/ansible/utils/_junit_xml.py
Python
6
{ "docstring": "Return a dictionary of attributes for this instance.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/ansible/ansible.git
8
node_attribute_xy
def node_attribute_xy(G, attribute, nodes=None): if nodes is None: nodes = set(G) else: nodes = set(nodes) Gnodes = G.nodes for u, nbrsdict in G.adjacency(): if u not in nodes: continue uattr = Gnodes[u].get(attribute, None) if G.is_multigraph(): for v, keys in nbrsdict.items(): vattr = Gnodes[v].get(attribute, None) for _ in keys: yield (uattr, vattr) else: for v in nbrsdict: vattr = Gnodes[v].get(attribute, None) yield (uattr, vattr)
34d9d630bb02426d297d3e20fedb7da8c3ced03a
16
pairs.py
210
MAINT: Cleanup assortativity module, remove unused variables (#5301) Remove unused variables, sort imports, raise errors instead of accepting invalid arguments silently Co-authored-by: Dan Schult <[email protected]>
41,837
0
232
135
39
176,323
63
networkx
17
networkx/algorithms/assortativity/pairs.py
Python
19
{ "docstring": "Returns iterator of node-attribute pairs for all edges in G.\n\n Parameters\n ----------\n G: NetworkX graph\n\n attribute: key\n The node attribute key.\n\n nodes: list or iterable (optional)\n Use only edges that are incident to specified nodes.\n The default is all nodes.\n\n Returns\n -------\n (x, y): 2-tuple\n Generates 2-tuple of (attribute, attribute) values.\n\n Examples\n --------\n >>> G = nx.DiGraph()\n >>> G.add_node(1, color=\"red\")\n >>> G.add_node(2, color=\"blue\")\n >>> G.add_edge(1, 2)\n >>> list(nx.node_attribute_xy(G, \"color\"))\n [('red', 'blue')]\n\n Notes\n -----\n For undirected graphs each edge is produced twice, once for each edge\n representation (u, v) and (v, u), with the exception of self-loop edges\n which only appear once.\n ", "language": "en", "n_whitespaces": 194, "n_words": 101, "vocab_size": 83 }
https://github.com/networkx/networkx.git
1
test_apply_cache_factor_from_config
def test_apply_cache_factor_from_config(self): config = {"caches": {"event_cache_size": "10k"}} self.config.read_config(config, config_dir_path="", data_dir_path="") self.config.resize_all_caches() cache = LruCache( max_size=self.config.event_cache_size, apply_cache_factor_from_config=False, ) add_resizable_cache("event_cache", cache_resize_callback=cache.set_cache_factor) self.assertEqual(cache.max_size, 10240)
d38d242411b8910dfacde1e61fd3a0ec5cbcaa66
11
test_cache.py
132
Reload cache factors from disk on SIGHUP (#12673)
72,179
0
99
77
20
248,248
21
synapse
16
tests/config/test_cache.py
Python
10
{ "docstring": "Caches can disable applying cache factor updates, mainly used by\n event cache size.\n ", "language": "en", "n_whitespaces": 27, "n_words": 13, "vocab_size": 12 }
https://github.com/matrix-org/synapse.git
1
remove
def remove(self) -> AwaitRemove: await_remove = self.app._remove_nodes([self]) return await_remove
f07684438f1d4f9f18022b1e0d21b908ca299a23
9
widget.py
38
fix remove freeze
45,233
0
30
22
8
185,889
9
textual
6
src/textual/widget.py
Python
8
{ "docstring": "Remove the Widget from the DOM (effectively deleting it)\n\n Returns:\n AwaitRemove: An awaitable object that waits for the widget to be removed.\n ", "language": "en", "n_whitespaces": 47, "n_words": 22, "vocab_size": 20 }
https://github.com/Textualize/textual.git
3
binaries_check
def binaries_check(app_configs, **kwargs): error = "Paperless can't find {}. Without it, consumption is impossible." hint = "Either it's not in your ${PATH} or it's not installed." binaries = (settings.CONVERT_BINARY, settings.OPTIPNG_BINARY, "tesseract") check_messages = [] for binary in binaries: if shutil.which(binary) is None: check_messages.append(Warning(error.format(binary), hint)) return check_messages @register()
fc695896dd8b0169001c438054a79e347053fac6
@register()
15
checks.py
117
Format Python code with black
116,916
1
85
65
39
318,781
47
paperless-ngx
17
src/paperless/checks.py
Python
9
{ "docstring": "\n Paperless requires the existence of a few binaries, so we do some checks\n for those here.\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 16 }
https://github.com/paperless-ngx/paperless-ngx.git
7
generate_gray
def generate_gray(self, **hints): bits = self.n start = None if "start" in hints: start = hints["start"] elif "rank" in hints: start = GrayCode.unrank(self.n, hints["rank"]) if start is not None: self._current = start current = self.current graycode_bin = gray_to_bin(current) if len(graycode_bin) > self.n: raise ValueError('Gray code start has length %i but should ' 'not be greater than %i' % (len(graycode_bin), bits)) self._current = int(current, 2) graycode_int = int(''.join(graycode_bin), 2) for i in range(graycode_int, 1 << bits): if self._skip: self._skip = False else: yield self.current bbtc = (i ^ (i + 1)) gbtc = (bbtc ^ (bbtc >> 1)) self._current = (self._current ^ gbtc) self._current = 0
498015021131af4dbb07eb110e5badaba8250c7b
13
graycode.py
303
Updated import locations
47,600
0
336
186
71
196,100
105
sympy
22
sympy/combinatorics/graycode.py
Python
25
{ "docstring": "\n Generates the sequence of bit vectors of a Gray Code.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import GrayCode\n >>> a = GrayCode(3)\n >>> list(a.generate_gray())\n ['000', '001', '011', '010', '110', '111', '101', '100']\n >>> list(a.generate_gray(start='011'))\n ['011', '010', '110', '111', '101', '100']\n >>> list(a.generate_gray(rank=4))\n ['110', '111', '101', '100']\n\n See Also\n ========\n\n skip\n\n References\n ==========\n\n .. [1] Knuth, D. (2011). The Art of Computer Programming,\n Vol 4, Addison Wesley\n\n ", "language": "en", "n_whitespaces": 206, "n_words": 65, "vocab_size": 49 }
https://github.com/sympy/sympy.git
3
not_in_timeout
def not_in_timeout(cls, last_triggered, timeout): return ( last_triggered is None or timeout is None or (time.time() - last_triggered > timeout) )
8c2428c9d355ca5fbc3dd90e9820ceb1cc795837
12
watchdog.py
51
[autofix.ci] apply automated fixes
74,072
0
74
32
16
253,415
20
mitmproxy
5
examples/contrib/webscanner_helper/watchdog.py
Python
6
{ "docstring": "Checks if current error lies not in timeout after last trigger (potential reset of connection).", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 15 }
https://github.com/mitmproxy/mitmproxy.git
8
_get_spec
def _get_spec(cls, fullname, path, target=None): # If this ends up being a namespace package, namespace_path is # the list of paths that will become its __path__ namespace_path = [] for entry in path: if not isinstance(entry, (str, bytes)): continue finder = cls._path_importer_cache(entry) if finder is not None: if hasattr(finder, 'find_spec'): spec = finder.find_spec(fullname, target) else: spec = cls._legacy_get_spec(fullname, finder) if spec is None: continue if spec.loader is not None: return spec portions = spec.submodule_search_locations if portions is None: raise ImportError('spec missing loader') # This is possibly part of a namespace package. # Remember these path entries (if any) for when we # create a namespace package, and continue iterating # on path. namespace_path.extend(portions) else: spec = _bootstrap.ModuleSpec(fullname, None) spec.submodule_search_locations = namespace_path return spec
8198943edd73a363c266633e1aa5b2a9e9c9f526
15
_bootstrap_external.py
224
add python 3.10.4 for windows
55,141
0
510
137
76
218,115
123
XX-Net
23
python3.10.4/Lib/importlib/_bootstrap_external.py
Python
23
{ "docstring": "Find the loader or namespace_path for this module/package name.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/XX-net/XX-Net.git
7
test_list_rooms_pagination
def test_list_rooms_pagination(self) -> None: # Create 5 test rooms total_rooms = 5 room_ids = [] for _ in range(total_rooms): room_id = self.helper.create_room_as( self.admin_user, tok=self.admin_user_tok ) room_ids.append(room_id) # Set the name of the rooms so we get a consistent returned ordering for idx, room_id in enumerate(room_ids): self.helper.send_state( room_id, "m.room.name", {"name": str(idx)}, tok=self.admin_user_tok, ) # Request the list of rooms returned_room_ids = [] start = 0 limit = 2 run_count = 0 should_repeat = True while should_repeat: run_count += 1 url = "/_synapse/admin/v1/rooms?from=%d&limit=%d&order_by=%s" % ( start, limit, "name", ) channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertTrue("rooms" in channel.json_body) for r in channel.json_body["rooms"]: returned_room_ids.append(r["room_id"]) # Check that the correct number of total rooms was returned self.assertEqual(channel.json_body["total_rooms"], total_rooms) # Check that the offset is correct # We're only getting 2 rooms each page, so should be 2 * last run_count self.assertEqual(channel.json_body["offset"], 2 * (run_count - 1)) if run_count > 1: # Check the value of prev_batch is correct self.assertEqual(channel.json_body["prev_batch"], 2 * (run_count - 2)) if "next_batch" not in channel.json_body: # We have reached the end of the list should_repeat = False else: # Make another query with an updated start value start = channel.json_body["next_batch"] # We should've queried the endpoint 3 times self.assertEqual( run_count, 3, msg="Should've queried 3 times for 5 rooms with limit 2 per query", ) # Check that we received all of the room ids self.assertEqual(room_ids, returned_room_ids) url = "/_synapse/admin/v1/rooms?from=%d&limit=%d" % (start, limit) channel = self.make_request( "GET", url.encode("ascii"), access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body)
c97042f7eef3748e17c90e48a4122389a89c4735
14
test_room.py
542
Use literals in place of `HTTPStatus` constants in tests (#13469)
72,648
0
974
329
142
249,141
246
synapse
33
tests/rest/admin/test_room.py
Python
58
{ "docstring": "Test that we can get a full list of rooms through pagination", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/matrix-org/synapse.git
3
draw_background
def draw_background(self, face): x_offset = RACK_ELEVATION_BORDER_WIDTH + self.legend_width url_string = '{}?{}&position={{}}'.format( reverse('dcim:device_add'), urlencode({ 'site': self.rack.site.pk, 'location': self.rack.location.pk if self.rack.location else '', 'rack': self.rack.pk, 'face': face, }) ) for ru in range(0, self.rack.u_height): y_offset = RACK_ELEVATION_BORDER_WIDTH + ru * self.unit_height text_coords = ( x_offset + self.unit_width / 2, y_offset + self.unit_height / 2 ) link = Hyperlink(href=url_string.format(ru), target='_blank') link.add(Rect((x_offset, y_offset), (self.unit_width, self.unit_height), class_='slot')) link.add(self.drawing.text('add device', insert=text_coords, class_='add-device')) self.drawing.add(link)
0c915f7de9612c7485da3713cc6d63f368698a5d
16
svg.py
300
Clean up rack elevation rendering
77,988
0
302
187
53
265,101
67
netbox
31
netbox/dcim/svg.py
Python
21
{ "docstring": "\n Draw the rack unit placeholders which form the \"background\" of the rack elevation.\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 10 }
https://github.com/netbox-community/netbox.git
2
update
def update(self) -> None: self._data.update() self._attr_native_value = self._data.get_value(self._type) if self._attr_native_value is None: _LOGGER.debug("Could not get data for %s", self._type)
bf7239c25db06f1377a895244a906b43242c9963
10
sensor.py
78
Improve entity type hints [d] (#77031)
103,157
0
58
46
17
304,350
19
core
8
homeassistant/components/danfoss_air/sensor.py
Python
10
{ "docstring": "Update the new state of the sensor.\n\n This is done through the DanfossAir object that does the actual\n communication with the Air CCM.\n ", "language": "en", "n_whitespaces": 44, "n_words": 23, "vocab_size": 19 }
https://github.com/home-assistant/core.git
14
compatible_platforms
def compatible_platforms(provided, required): if provided is None or required is None or provided == required: # easy case return True # Mac OS X special cases reqMac = macosVersionString.match(required) if reqMac: provMac = macosVersionString.match(provided) # is this a Mac package? if not provMac: # this is backwards compatibility for packages built before # setuptools 0.6. All packages built after this point will # use the new macosx designation. provDarwin = darwinVersionString.match(provided) if provDarwin: dversion = int(provDarwin.group(1)) macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) if dversion == 7 and macosversion >= "10.3" or \ dversion == 8 and macosversion >= "10.4": return True # egg isn't macosx or legacy darwin return False # are they the same major version and machine type? if provMac.group(1) != reqMac.group(1) or \ provMac.group(3) != reqMac.group(3): return False # is the required OS major update >= the provided one? if int(provMac.group(2)) > int(reqMac.group(2)): return False return True # XXX Linux and other platforms' special cases should go here return False
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
16
__init__.py
281
upd; format
13,149
0
455
168
95
63,105
163
transferlearning
13
.venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py
Python
22
{ "docstring": "Can code for the `provided` platform run on the `required` platform?\n\n Returns true if either platform is ``None``, or the platforms are equal.\n\n XXX Needs compatibility checks for Linux and other unixy OSes.\n ", "language": "en", "n_whitespaces": 42, "n_words": 33, "vocab_size": 29 }
https://github.com/jindongwang/transferlearning.git
2
option_list_all
def option_list_all(self): # type: () -> List[optparse.Option] res = self.option_list[:] for i in self.option_groups: res.extend(i.option_list) return res
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
10
parser.py
53
upd; format
12,198
0
63
31
16
60,536
17
transferlearning
7
.venv/lib/python3.8/site-packages/pip/_internal/cli/parser.py
Python
5
{ "docstring": "Get a list of all options, including those in option groups.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/jindongwang/transferlearning.git
2
is_private
def is_private(self): return (self.network_address.is_private and self.broadcast_address.is_private)
8198943edd73a363c266633e1aa5b2a9e9c9f526
9
ipaddress.py
34
add python 3.10.4 for windows
55,379
0
35
20
6
218,544
6
XX-Net
4
python3.10.4/Lib/ipaddress.py
Python
3
{ "docstring": "Test if this address is allocated for private networks.\n\n Returns:\n A boolean, True if the address is reserved per\n iana-ipv4-special-registry or iana-ipv6-special-registry.\n\n ", "language": "en", "n_whitespaces": 58, "n_words": 22, "vocab_size": 19 }
https://github.com/XX-net/XX-Net.git
5
_get_tune_run_arguments
def _get_tune_run_arguments(self) -> Dict[str, Any]: return dict( mode=self._tune_config.mode, metric=self._tune_config.metric, callbacks=self._run_config.callbacks, sync_config=self._run_config.sync_config, stop=self._run_config.stop, max_failures=( self._run_config.failure_config.max_failures if self._run_config.failure_config else 0 ), keep_checkpoints_num=( self._run_config.checkpoint_config.num_to_keep if self._run_config.checkpoint_config else None ), checkpoint_score_attr=( self._run_config.checkpoint_config._tune_legacy_checkpoint_score_attr if self._run_config.checkpoint_config else None ), _experiment_checkpoint_dir=self._experiment_checkpoint_dir, raise_on_failed_trial=False, fail_fast=( self._run_config.failure_config.fail_fast if self._run_config.failure_config else False ), verbose=self._run_config.verbose, )
b3878e26d765e28dd7c69abadbd856181037db97
13
tuner_internal.py
220
[AIR] Fix `ResourceChangingScheduler` not working with AIR (#26307) This PR ensures that the new trial resources set by `ResourceChangingScheduler` are respected by the train loop logic by modifying the scaling config to match. Previously, even though trials had their resources updated, the scaling config was not modified which lead to eg. new workers not being spawned in the `DataParallelTrainer` even though resources were available. In order to accomplish this, `ScalingConfigDataClass` is updated to allow equality comparisons with other `ScalingConfigDataClass`es (using the underlying PGF) and to create a `ScalingConfigDataClass` from a PGF. Please note that this is an internal only change intended to actually make `ResourceChangingScheduler` work. In the future, `ResourceChangingScheduler` should be updated to operate on `ScalingConfigDataClass`es instead of PGFs as it is now. That will require a deprecation cycle.
27,646
0
421
155
32
124,651
44
ray
24
python/ray/tune/impl/tuner_internal.py
Python
32
{ "docstring": "Get tune.run arguments common for both new and resumed runs.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/ray-project/ray.git
11
step
def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() device = self.param_groups[0]['params'][0].device one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly for group in self.param_groups: weight_decay = group['weight_decay'] momentum = group['momentum'] dampening = group['dampening'] nesterov = group['nesterov'] trust_coeff = group['trust_coeff'] eps = group['eps'] for p in group['params']: if p.grad is None: continue grad = p.grad # apply LARS LR adaptation, LARC clipping, weight decay # ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py if weight_decay != 0 or group['always_adapt']: w_norm = p.norm(2.0) g_norm = grad.norm(2.0) trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps) # FIXME nested where required since logical and/or not working in PT XLA trust_ratio = torch.where( w_norm > 0, torch.where(g_norm > 0, trust_ratio, one_tensor), one_tensor, ) if group['trust_clip']: trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor) grad.add_(p, alpha=weight_decay) grad.mul_(trust_ratio) # apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 if momentum != 0: param_state = self.state[p] if 'momentum_buffer' not in param_state: buf = param_state['momentum_buffer'] = torch.clone(grad).detach() else: buf = param_state['momentum_buffer'] buf.mul_(momentum).add_(grad, alpha=1. - dampening) if nesterov: grad = grad.add(buf, alpha=momentum) else: grad = buf p.add_(grad, alpha=-group['lr']) return loss
cdcd0a92ca8a3dc120336a5dde1b7d6ecd5e9186
19
lars.py
534
fix lars
119,869
0
947
331
118
331,584
182
pytorch-image-models
34
timm/optim/lars.py
Python
44
{ "docstring": "Performs a single optimization step.\n\n Args:\n closure (callable, optional): A closure that reevaluates the model and returns the loss.\n ", "language": "en", "n_whitespaces": 44, "n_words": 19, "vocab_size": 17 }
https://github.com/huggingface/pytorch-image-models.git
1
_get
def _get(self, *args, **kwargs): return ( self.deserialize_messages(self.request.session.get(self.session_key)), True, )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
session.py
55
Refs #33476 -- Reformatted code with Black.
50,674
0
52
35
9
204,183
9
django
9
django/contrib/messages/storage/session.py
Python
5
{ "docstring": "\n Retrieve a list of messages from the request's session. This storage\n always stores everything it is given, so return True for the\n all_retrieved flag.\n ", "language": "en", "n_whitespaces": 53, "n_words": 24, "vocab_size": 23 }
https://github.com/django/django.git
9
parse_known_args
def parse_known_args(self, args=None, namespace=None, nohelp=False): if args is None: # args default to the system args args = _sys.argv[1:] args = fix_underscores(args) # handle the single dash stuff. See _handle_single_dash_addarg for info actions = set() for action in self._actions: actions.update(action.option_strings) args = self._handle_single_dash_parsearg(args, actions) if nohelp: # ignore help args = [ a for a in args if a != '-h' and a != '--help' and a != '--helpall' and a != '--h' ] return super().parse_known_args(args, namespace)
4291c8a63a3ae9e7107dda0f90fff8da3b31d29b
15
params.py
177
python 3.8 parser fix on args_that_override (#4507) * single dash * handle args during parsing
47,160
0
251
107
48
195,034
77
ParlAI
17
parlai/core/params.py
Python
15
{ "docstring": "\n Parse known args to ignore help flag.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
https://github.com/facebookresearch/ParlAI.git
1
test_uptime
def test_uptime(monkeypatch, qapp): monkeypatch.setattr(objects, 'qapp', qapp) launch_time = datetime.datetime(1, 1, 1, 1, 1, 1, 1) monkeypatch.setattr(qapp, "launch_time", launch_time, raising=False)
6c4e2810285af0698538aed9d46a99de085eb310
8
test_version.py
77
pylint: Fix new unnecessary-lambda-assignment
117,395
0
31
90
15
320,852
19
qutebrowser
8
tests/unit/utils/test_version.py
Python
10
{ "docstring": "Test _uptime runs and check if microseconds are dropped.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/qutebrowser/qutebrowser.git
4
get_address_territory
def get_address_territory(address_name): territory = None if address_name: address_fields = frappe.db.get_value("Address", address_name, ["city", "state", "country"]) for value in address_fields: territory = frappe.db.get_value("Territory", value) if territory: break return territory
494bd9ef78313436f0424b918f200dab8fc7c20b
13
cart.py
95
style: format code with black
14,022
0
18
55
22
65,820
27
erpnext
8
erpnext/e_commerce/shopping_cart/cart.py
Python
9
{ "docstring": "Tries to match city, state and country of address to existing territory", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
https://github.com/frappe/erpnext.git
4
testDotsInLogdir
def testDotsInLogdir(self): local_dir_path = Path("/tmp/test_rel_dots") local_dir = str(local_dir_path) if local_dir_path.exists(): local_dir = tempfile.mkdtemp(prefix=str(local_dir_path) + "_") trial = Trial(trainable_name="rel_logdir", local_dir=local_dir) with self.assertRaises(ValueError): trial.logdir = "/tmp/test_rel/../dots" with self.assertRaises(ValueError): trial.logdir = local_dir + "/../" if shutil.rmtree.avoids_symlink_attacks: if local_dir_path.exists(): shutil.rmtree(local_dir)
2a5d322e705df080e9254c9c9a3e187c1ea41c4e
14
test_trial_relative_logdir.py
179
[tune] Relative logdir paths in trials for ExperimentAnalysis in remote buckets (#25063) When running an experiment for example in the cloud and syncing to a bucket the logdir path in the trials will be changed when working with the checkpoints in the bucket. There are some workarounds, but the easier solution is to also add a rel_logdir containing the relative path to the trials/checkpoints that can handle any changes in the location of experiment results. As discussed with @Yard1 and @krfricke Co-authored-by: Antoni Baum <[email protected]> Co-authored-by: Kai Fricke <[email protected]>
32,293
0
151
100
22
141,204
36
ray
19
python/ray/tune/tests/test_trial_relative_logdir.py
Python
13
{ "docstring": "This should result in errors as dots in paths are not allowed.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
https://github.com/ray-project/ray.git
1
_compute_tree_reduce_metadata
def _compute_tree_reduce_metadata(self, axis, new_parts): new_axes, new_axes_lengths = [0, 0], [0, 0] new_axes[axis] = ["__reduced__"] new_axes[axis ^ 1] = self.axes[axis ^ 1] new_axes_lengths[axis] = [1] new_axes_lengths[axis ^ 1] = self._axes_lengths[axis ^ 1] new_dtypes = None result = self.__constructor__( new_parts, *new_axes, *new_axes_lengths, new_dtypes, ) return result
58bbcc37477866d19c8b092a0e1974a4f0baa586
9
dataframe.py
140
REFACTOR-#2656: Update modin to fit algebra (code only) (#3717) Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Vasily Litvinov <[email protected]> Co-authored-by: Alexey Prutskov <[email protected]> Co-authored-by: Devin Petersohn <[email protected]> Signed-off-by: Rehan Durrani <[email protected]>
35,232
0
158
93
30
153,048
44
modin
11
modin/core/dataframe/pandas/dataframe/dataframe.py
Python
14
{ "docstring": "\n Compute the metadata for the result of reduce function.\n\n Parameters\n ----------\n axis : int\n The axis on which reduce function was applied.\n new_parts : NumPy 2D array\n Partitions with the result of applied function.\n\n Returns\n -------\n PandasDataframe\n Modin series (1xN frame) containing the reduced data.\n ", "language": "en", "n_whitespaces": 142, "n_words": 45, "vocab_size": 36 }
https://github.com/modin-project/modin.git
3
as_instanceof_cause
def as_instanceof_cause(self): cause_cls = self.cause.__class__ if issubclass(RayTaskError, cause_cls): return self # already satisfied if issubclass(cause_cls, RayError): return self # don't try to wrap ray internal errors error_msg = str(self)
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
8
exceptions.py
66
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,335
0
88
74
24
130,722
29
ray
10
python/ray/exceptions.py
Python
15
{ "docstring": "Returns an exception that is an instance of the cause's class.\n\n The returned exception will inherit from both RayTaskError and the\n cause class and will contain all of the attributes of the cause\n exception.\n ", "language": "en", "n_whitespaces": 62, "n_words": 34, "vocab_size": 24 }
https://github.com/ray-project/ray.git
2
vf2pp_is_isomorphic
def vf2pp_is_isomorphic(G1, G2, node_label=None, default_label=None): if vf2pp_isomorphism(G1, G2, node_label, default_label) is not None: return True return False
a796f526c7ce6a7f182aee4b81b8499feabe1a45
8
vf2pp.py
51
VF2++ for Directed Graphs (#5972) Modify vf2pp implementation to support directed graphs. Updates all helper functions and state/parameter objects to account for in/out degree. Includes other changes such as renaming the keyword argument from node_labels to node_label to better reflect the fact that the label kwarg expects a single value. Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Dan Schult <[email protected]>
42,325
0
33
35
15
177,286
17
networkx
6
networkx/algorithms/isomorphism/vf2pp.py
Python
4
{ "docstring": "Examines whether G1 and G2 are isomorphic.\n\n Parameters\n ----------\n G1, G2 : NetworkX Graph or MultiGraph instances.\n The two graphs to check for isomorphism.\n\n node_label : str, optional\n The name of the node attribute to be used when comparing nodes.\n The default is `None`, meaning node attributes are not considered\n in the comparison. Any node that doesn't have the `node_label`\n attribute uses `default_label` instead.\n\n default_label : scalar\n Default value to use when a node doesn't have an attribute\n named `node_label`. Default is `None`.\n\n Returns\n -------\n bool\n True if the two graphs are isomorphic, False otherwise.\n ", "language": "en", "n_whitespaces": 178, "n_words": 95, "vocab_size": 71 }
https://github.com/networkx/networkx.git
1
test_predict_proba_6
def test_predict_proba_6(): est = TPOTClassifier(generations=1,population_size=1, template='LogisticRegression') est.fit(training_features, training_target) assert hasattr(est, "predict_proba") #This model has predict_proba est.predict_proba(training_features) est = TPOTClassifier(generations=1,population_size=1, template='LinearSVC') est.fit(training_features, training_target) assert not hasattr(est, "predict_proba") #This model does not have predict_proba, but it hasattr shouldn't raise an error
d39f3be7b3f49e6178f29fb03461a920103caa35
10
tpot_tests.py
124
added test to hasattr
43,372
0
63
74
28
181,580
39
tpot
11
tests/tpot_tests.py
Python
8
{ "docstring": "Assert that TPOT's predict_proba is exposed when available, and hidden when not.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
https://github.com/EpistasisLab/tpot.git
7
lu
def lu(self, fn_luid, ignorekeys=[], luName=None, frameID=None, frameName=None): # look for this LU in cache if not self._lu_idx: self._buildluindex() OOV = object() luinfo = self._lu_idx.get(fn_luid, OOV) if luinfo is OOV: # LU not in the index. We create a placeholder by falling back to # luName, frameID, and frameName. However, this will not be listed # among the LUs for its frame. self._warn( "LU ID not found: {} ({}) in {} ({})".format( luName, fn_luid, frameName, frameID ) ) luinfo = AttrDict( { "_type": "lu", "ID": fn_luid, "name": luName, "frameID": frameID, "status": "Problem", } ) f = self.frame_by_id(luinfo.frameID) assert f.name == frameName, (f.name, frameName) luinfo["frame"] = f self._lu_idx[fn_luid] = luinfo elif "_type" not in luinfo: # we only have an index entry for the LU. loading the frame will replace this. f = self.frame_by_id(luinfo.frameID) luinfo = self._lu_idx[fn_luid] if ignorekeys: return AttrDict({k: v for k, v in luinfo.items() if k not in ignorekeys}) return luinfo
8a4cf5d94eb94b6427c5d1d7907ba07b119932c5
13
framenet.py
314
Docstring tests (#3050) * fixed pytests * fixed more pytests * fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py * fixed pytests (mainly multiline or rounding issues) * fixed treebank pytests, removed test for return_string=True (deprecated) * fixed destructive.py pytests, removed test for return_string=True (deprecated) * fixed pytest (rounding issues) * fixed pytest (initialised missing object) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * added pytest +SKIP for deprecated module stanford * updated AUTHORS.md * changed docstring corrections by usage of ELLIPSIS and different roundings * fixed AUTHORS.md to be consistent * Fix framenet doctest formatting with pprint * Change docstring on MultiListBox.__init__ I believe the original typo was misinterpreted and changed to something that was not originally intended. Co-authored-by: Jan Lennartz <[email protected]> Co-authored-by: Tom Aarsen <[email protected]> Co-authored-by: Tom Aarsen <[email protected]>
7,602
0
565
193
99
42,540
152
nltk
22
nltk/corpus/reader/framenet.py
Python
30
{ "docstring": "\n Access a lexical unit by its ID. luName, frameID, and frameName are used\n only in the event that the LU does not have a file in the database\n (which is the case for LUs with \"Problem\" status); in this case,\n a placeholder LU is created which just contains its name, ID, and frame.\n\n\n Usage examples:\n\n >>> from nltk.corpus import framenet as fn\n >>> fn.lu(256).name\n 'foresee.v'\n >>> fn.lu(256).definition\n 'COD: be aware of beforehand; predict.'\n >>> fn.lu(256).frame.name\n 'Expectation'\n >>> list(map(PrettyDict, fn.lu(256).lexemes))\n [{'POS': 'V', 'breakBefore': 'false', 'headword': 'false', 'name': 'foresee', 'order': 1}]\n\n >>> fn.lu(227).exemplars[23] # doctest: +NORMALIZE_WHITESPACE\n exemplar sentence (352962):\n [sentNo] 0\n [aPos] 59699508\n <BLANKLINE>\n [LU] (227) guess.v in Coming_to_believe\n <BLANKLINE>\n [frame] (23) Coming_to_believe\n <BLANKLINE>\n [annotationSet] 2 annotation sets\n <BLANKLINE>\n [POS] 18 tags\n <BLANKLINE>\n [POS_tagset] BNC\n <BLANKLINE>\n [GF] 3 relations\n <BLANKLINE>\n [PT] 3 phrases\n <BLANKLINE>\n [Other] 1 entry\n <BLANKLINE>\n [text] + [Target] + [FE]\n <BLANKLINE>\n When he was inside the house , Culley noticed the characteristic\n ------------------\n Content\n <BLANKLINE>\n he would n't have guessed at .\n -- ******* --\n Co C1 [Evidence:INI]\n (Co=Cognizer, C1=Content)\n <BLANKLINE>\n <BLANKLINE>\n\n The dict that is returned from this function will contain most of the\n following information about the LU. Note that some LUs do not contain\n all of these pieces of information - particularly 'totalAnnotated' and\n 'incorporatedFE' may be missing in some LUs:\n\n - 'name' : the name of the LU (e.g. 'merger.n')\n - 'definition' : textual definition of the LU\n - 'ID' : the internal ID number of the LU\n - '_type' : 'lu'\n - 'status' : e.g. 'Created'\n - 'frame' : Frame that this LU belongs to\n - 'POS' : the part of speech of this LU (e.g. 'N')\n - 'totalAnnotated' : total number of examples annotated with this LU\n - 'incorporatedFE' : FE that incorporates this LU (e.g. 'Ailment')\n - 'sentenceCount' : a dict with the following two keys:\n - 'annotated': number of sentences annotated with this LU\n - 'total' : total number of sentences with this LU\n\n - 'lexemes' : a list of dicts describing the lemma of this LU.\n Each dict in the list contains these keys:\n\n - 'POS' : part of speech e.g. 'N'\n - 'name' : either single-lexeme e.g. 'merger' or\n multi-lexeme e.g. 'a little'\n - 'order': the order of the lexeme in the lemma (starting from 1)\n - 'headword': a boolean ('true' or 'false')\n - 'breakBefore': Can this lexeme be separated from the previous lexeme?\n Consider: \"take over.v\" as in::\n\n Germany took over the Netherlands in 2 days.\n Germany took the Netherlands over in 2 days.\n\n In this case, 'breakBefore' would be \"true\" for the lexeme\n \"over\". 
Contrast this with \"take after.v\" as in::\n\n Mary takes after her grandmother.\n *Mary takes her grandmother after.\n\n In this case, 'breakBefore' would be \"false\" for the lexeme \"after\"\n\n - 'lemmaID' : Can be used to connect lemmas in different LUs\n - 'semTypes' : a list of semantic type objects for this LU\n - 'subCorpus' : a list of subcorpora\n - Each item in the list is a dict containing the following keys:\n - 'name' :\n - 'sentence' : a list of sentences in the subcorpus\n - each item in the list is a dict with the following keys:\n - 'ID':\n - 'sentNo':\n - 'text': the text of the sentence\n - 'aPos':\n - 'annotationSet': a list of annotation sets\n - each item in the list is a dict with the following keys:\n - 'ID':\n - 'status':\n - 'layer': a list of layers\n - each layer is a dict containing the following keys:\n - 'name': layer name (e.g. 'BNC')\n - 'rank':\n - 'label': a list of labels for the layer\n - each label is a dict containing the following keys:\n - 'start': start pos of label in sentence 'text' (0-based)\n - 'end': end pos of label in sentence 'text' (0-based)\n - 'name': name of label (e.g. 'NN1')\n\n Under the hood, this implementation looks up the lexical unit information\n in the *frame* definition file. That file does not contain\n corpus annotations, so the LU files will be accessed on demand if those are\n needed. In principle, valence patterns could be loaded here too,\n though these are not currently supported.\n\n :param fn_luid: The id number of the lexical unit\n :type fn_luid: int\n :param ignorekeys: The keys to ignore. These keys will not be\n included in the output. (optional)\n :type ignorekeys: list(str)\n :return: All information about the lexical unit\n :rtype: dict\n ", "language": "en", "n_whitespaces": 2239, "n_words": 723, "vocab_size": 328 }
https://github.com/nltk/nltk.git
1
combine_expression
def combine_expression(self, connector, sub_expressions): conn = " %s " % connector return conn.join(sub_expressions)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
8
operations.py
38
Refs #33476 -- Reformatted code with Black.
50,952
0
34
22
12
204,879
13
django
6
django/db/backends/base/operations.py
Python
3
{ "docstring": "\n Combine a list of subexpressions into a single expression, using\n the provided connecting operator. This is required because operators\n can vary between backends (e.g., Oracle with %% and &) and between\n subexpression types (e.g., date expressions).\n ", "language": "en", "n_whitespaces": 72, "n_words": 36, "vocab_size": 32 }
https://github.com/django/django.git
7
_map_infrequent_categories
def _map_infrequent_categories(self, X_int, X_mask): if not self._infrequent_enabled: return for col_idx in range(X_int.shape[1]): infrequent_idx = self._infrequent_indices[col_idx] if infrequent_idx is None: continue X_int[~X_mask[:, col_idx], col_idx] = infrequent_idx[0] if self.handle_unknown == "infrequent_if_exist": # All the unknown values are now mapped to the # infrequent_idx[0], which makes the unknown values valid # This is needed in `transform` when the encoding is formed # using `X_mask`. X_mask[:, col_idx] = True # Remaps encoding in `X_int` where the infrequent categories are # grouped together. for i, mapping in enumerate(self._default_to_infrequent_mappings): if mapping is None: continue X_int[:, i] = np.take(mapping, X_int[:, i])
7f0006c8aad1a09621ad19c3db19c3ff0555a183
12
_encoders.py
182
ENH Adds infrequent categories to OneHotEncoder (#16018) * ENH Completely adds infrequent categories * STY Linting * STY Linting * DOC Improves wording * DOC Lint * BUG Fixes * CLN Address comments * CLN Address comments * DOC Uses math to description float min_frequency * DOC Adds comment regarding drop * BUG Fixes method name * DOC Clearer docstring * TST Adds more tests * FIX Fixes mege * CLN More pythonic * CLN Address comments * STY Flake8 * CLN Address comments * DOC Fix * MRG * WIP * ENH Address comments * STY Fix * ENH Use functiion call instead of property * ENH Adds counts feature * CLN Rename variables * DOC More details * CLN Remove unneeded line * CLN Less lines is less complicated * CLN Less diffs * CLN Improves readiabilty * BUG Fix * CLN Address comments * TST Fix * CLN Address comments * CLN Address comments * CLN Move docstring to userguide * DOC Better wrapping * TST Adds test to handle_unknown='error' * ENH Spelling error in docstring * BUG Fixes counter with nan values * BUG Removes unneeded test * BUG Fixes issue * ENH Sync with main * DOC Correct settings * DOC Adds docstring * DOC Immprove user guide * DOC Move to 1.0 * DOC Update docs * TST Remove test * DOC Update docstring * STY Linting * DOC Address comments * ENH Neater code * DOC Update explaination for auto * Update sklearn/preprocessing/_encoders.py Co-authored-by: Roman Yurchak <[email protected]> * TST Uses docstring instead of comments * TST Remove call to fit * TST Spelling error * ENH Adds support for drop + infrequent categories * ENH Adds infrequent_if_exist option * DOC Address comments for user guide * DOC Address comments for whats_new * DOC Update docstring based on comments * CLN Update test with suggestions * ENH Adds computed property infrequent_categories_ * DOC Adds where the infrequent column is located * TST Adds more test for infrequent_categories_ * DOC Adds docstring for _compute_drop_idx * CLN Moves _convert_to_infrequent_idx into its own method * TST Increases test coverage * TST Adds failing test * CLN Careful consideration of dropped and inverse_transform * STY Linting * DOC Adds docstrinb about dropping infrequent * DOC Uses only * DOC Numpydoc * TST Includes test for get_feature_names_out * DOC Move whats new * DOC Address docstring comments * DOC Docstring changes * TST Better comments * TST Adds check for handle_unknown='ignore' for infrequent * CLN Make _infrequent_indices private * CLN Change min_frequency default to None * DOC Adds comments * ENH adds support for max_categories=1 * ENH Describe lexicon ordering for ties * DOC Better docstring * STY Fix * CLN Error when explicity dropping an infrequent category * STY Grammar Co-authored-by: Joel Nothman <[email protected]> Co-authored-by: Roman Yurchak <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
75,641
0
318
114
62
259,203
94
scikit-learn
17
sklearn/preprocessing/_encoders.py
Python
14
{ "docstring": "Map infrequent categories to integer representing the infrequent category.\n\n This modifies X_int in-place. Values that were invalid based on `X_mask`\n are mapped to the infrequent category if there was an infrequent\n category for that feature.\n\n Parameters\n ----------\n X_int: ndarray of shape (n_samples, n_features)\n Integer encoded categories.\n\n X_mask: ndarray of shape (n_samples, n_features)\n Bool mask for valid values in `X_int`.\n ", "language": "en", "n_whitespaces": 137, "n_words": 59, "vocab_size": 46 }
https://github.com/scikit-learn/scikit-learn.git
1
test_is_wis_on_estimate_on_dataset
def test_is_wis_on_estimate_on_dataset(self): config = self.dqn_on_fake_ds.copy() config = config.evaluation( off_policy_estimation_methods={ "is": {"type": ImportanceSampling}, "wis": {"type": WeightedImportanceSampling}, }, ) num_actions = config.action_space.n algo = config.build() evaluated_results = algo.evaluate() ope_results = evaluated_results["evaluation"]["off_policy_estimator"] policy = algo.get_policy() wis_gain, wis_ste = compute_expected_is_or_wis_estimator( self.train_df, policy, num_actions=num_actions, is_wis=True ) is_gain, is_ste = compute_expected_is_or_wis_estimator( self.train_df, policy, num_actions=num_actions, is_wis=False ) check(wis_gain, ope_results["wis"]["v_gain_mean"]) check(wis_ste, ope_results["wis"]["v_gain_ste"]) check(is_gain, ope_results["is"]["v_gain_mean"]) check(is_ste, ope_results["is"]["v_gain_ste"])
e368dd9b4e10026767df66d1811a92bd8ca2d8f9
14
test_ope.py
281
[RLlib] By-pass Evaluation workers when doing OPE (#30135) Signed-off-by: Kourosh Hakhamaneshi <[email protected]>
30,916
0
251
168
42
136,429
58
ray
27
rllib/offline/estimators/tests/test_ope.py
Python
23
{ "docstring": "Test that the IS and WIS estimators work.\n\n First we compute the estimates with RLlib's algorithm and then compare the\n results to the estimates that are manually computed on raw data frame version\n of the dataset to check correctness.\n ", "language": "en", "n_whitespaces": 67, "n_words": 39, "vocab_size": 31 }
https://github.com/ray-project/ray.git
1
f2cexpr
def f2cexpr(expr): # TODO: support Fortran `len` function with optional kind parameter expr = re.sub(r'\blen\b', 'f2py_slen', expr) return expr
d4e11c7a2eb64861275facb076d47ccd135fa28c
9
capi_maps.py
38
ENH: Support character string arrays TST: added test for issue #18684 ENH: f2py opens files with correct encoding, fixes #635 TST: added test for issue #6308 TST: added test for issue #4519 TST: added test for issue #3425 ENH: Implement user-defined hooks support for post-processing f2py data structure. Implement character BC hook. ENH: Add support for detecting utf-16 and utf-32 encodings.
38,648
0
31
21
18
160,521
19
numpy
4
numpy/f2py/capi_maps.py
Python
3
{ "docstring": "Rewrite Fortran expression as f2py supported C expression.\n\n Due to the lack of a proper expression parser in f2py, this\n function uses a heuristic approach that assumes that Fortran\n arithmetic expressions are valid C arithmetic expressions when\n mapping Fortran function calls to the corresponding C function/CPP\n macros calls.\n\n ", "language": "en", "n_whitespaces": 66, "n_words": 48, "vocab_size": 36 }
https://github.com/numpy/numpy.git
2
_setup_connection
def _setup_connection(self): # noqa cur = self.connection.cursor() if ('store',) not in list(cur.execute("SELECT name FROM sqlite_master WHERE type='table';")): cur.execute( ) self.internal_registry.commit()
27a34a6a706a06e1241671d29c8cab93d77a19c1
11
storage_handler.py
83
feat: add docs, improve base class signatures
25,164
0
79
45
20
114,363
20
mindsdb
9
mindsdb/integrations/libs/storage_handler.py
Python
6
{ "docstring": " Checks that a key-value table exists, otherwise creates it. create table store (key text, value text)", "language": "en", "n_whitespaces": 16, "n_words": 16, "vocab_size": 15 }
https://github.com/mindsdb/mindsdb.git
5
export_table
def export_table(self, table, columns=None): exclude_columns = {'pk', 'actions'} if columns: all_columns = [col_name for col_name, _ in table.selected_columns + table.available_columns] exclude_columns.update({ col for col in all_columns if col not in columns }) exporter = TableExport( export_format=TableExport.CSV, table=table, exclude_columns=exclude_columns ) return exporter.response( filename=f'netbox_{self.queryset.model._meta.verbose_name_plural}.csv' )
1024adca72570f58ac899850c5ca66bf782ee528
14
object_views.py
147
Exclude actions column from export
77,639
0
184
84
33
264,189
43
netbox
22
netbox/netbox/views/generic/object_views.py
Python
15
{ "docstring": "\n Export all table data in CSV format.\n\n :param table: The Table instance to export\n :param columns: A list of specific columns to include. If not specified, all columns will be exported.\n ", "language": "en", "n_whitespaces": 60, "n_words": 31, "vocab_size": 27 }
https://github.com/netbox-community/netbox.git
1
euler_poly
def euler_poly(n, x=None, polys=False): return appell_poly(n, [[1], [1, QQ(-1,2)]], 1, lambda p, i: -p / 2, QQ, x, polys) @public
e875bdb804b0285e4a9bd8de0158436e792c03cb
@public
12
appellseqs.py
81
Initial definition of Appell sequences
49,296
1
25
55
20
199,618
20
sympy
9
sympy/polys/appellseqs.py
Python
2
{ "docstring": "Generates the Euler polynomial of degree `n` in `x`.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "language": "en", "n_whitespaces": 67, "n_words": 35, "vocab_size": 29 }
https://github.com/sympy/sympy.git
1
test_retention_event_purged_without_state_event
def test_retention_event_purged_without_state_event(self) -> None: room_id = self.helper.create_room_as(self.user_id, tok=self.token) self._test_retention_event_purged(room_id, one_day_ms * 2)
1901cb1d4a8b7d9af64493fbd336e9aa2561c20c
10
test_retention.py
58
Add type hints to `tests/rest/client` (#12084)
71,470
0
33
36
12
247,060
12
synapse
10
tests/rest/client/test_retention.py
Python
6
{ "docstring": "Tests that expired events are correctly purged when the room's retention policy\n is defined by the server's configuration's default retention policy.\n ", "language": "en", "n_whitespaces": 35, "n_words": 21, "vocab_size": 19 }
https://github.com/matrix-org/synapse.git
3
test_reconnect
async def test_reconnect(hass, caplog, config): patch_key, entity_id, config_entry = _setup(config) config_entry.add_to_hass(hass) with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[ patch_key ], patchers.patch_shell(SHELL_RESPONSE_OFF)[ patch_key ], patchers.PATCH_KEYGEN, patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER: assert await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() await async_update_entity(hass, entity_id) state = hass.states.get(entity_id) assert state is not None assert state.state == STATE_OFF caplog.clear() caplog.set_level(logging.WARNING) with patchers.patch_connect(False)[patch_key], patchers.patch_shell(error=True)[ patch_key ], patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER: for _ in range(5): await async_update_entity(hass, entity_id) state = hass.states.get(entity_id) assert state is not None assert state.state == STATE_UNAVAILABLE assert len(caplog.record_tuples) == 2 assert caplog.record_tuples[0][1] == logging.ERROR assert caplog.record_tuples[1][1] == logging.WARNING caplog.set_level(logging.DEBUG) with patchers.patch_connect(True)[patch_key], patchers.patch_shell( SHELL_RESPONSE_STANDBY )[patch_key], patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER: await async_update_entity(hass, entity_id) state = hass.states.get(entity_id) assert state is not None assert state.state == STATE_STANDBY if patch_key == "python": assert ( "ADB connection to 127.0.0.1:5555 successfully established" in caplog.record_tuples[2] ) else: assert ( "ADB connection to 127.0.0.1:5555 via ADB server 127.0.0.1:5037 successfully established" in caplog.record_tuples[2] ) @pytest.mark.parametrize( "config", [ CONFIG_ANDROIDTV_PYTHON_ADB, CONFIG_FIRETV_PYTHON_ADB, CONFIG_ANDROIDTV_ADB_SERVER, CONFIG_FIRETV_ADB_SERVER, ], )
d645e80ccd5acb92c5ee6bce30c20bc634fc3e77
@pytest.mark.parametrize( "config", [ CONFIG_ANDROIDTV_PYTHON_ADB, CONFIG_FIRETV_PYTHON_ADB, CONFIG_ANDROIDTV_ADB_SERVER, CONFIG_FIRETV_ADB_SERVER, ], )
13
test_media_player.py
536
Clean up async_update_entity helper usage (#68641)
93,474
1
455
320
78
294,438
145
core
47
tests/components/androidtv/test_media_player.py
Python
45
{ "docstring": "Test that the error and reconnection attempts are logged correctly.\n\n \"Handles device/service unavailable. Log a warning once when\n unavailable, log once when reconnected.\"\n\n https://developers.home-assistant.io/docs/en/integration_quality_scale_index.html\n ", "language": "en", "n_whitespaces": 36, "n_words": 24, "vocab_size": 22 }
https://github.com/home-assistant/core.git
1
test_missing_required_field
def test_missing_required_field(self): cf3 = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name='baz', required=True) cf3.save() cf3.content_types.set([ContentType.objects.get_for_model(Site)]) site = Site(name='Test Site', slug='test-site') # Set custom field data with a required field omitted site.custom_field_data['foo'] = 'abc' with self.assertRaises(ValidationError): site.clean() site.custom_field_data['baz'] = 'def' site.clean()
ea6d86e6c4bb6037465410db6205a7471bc81a6c
11
test_customfields.py
165
Closes #10052: The cf attribute now returns deserialized custom field data
78,274
0
115
92
28
266,037
34
netbox
22
netbox/extras/tests/test_customfields.py
Python
10
{ "docstring": "\n Check that a ValidationError is raised if any required custom fields are not present.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 14 }
https://github.com/netbox-community/netbox.git
5
get_account_type_based_gl_data
def get_account_type_based_gl_data(company, start_date, end_date, account_type, filters=None): cond = "" filters = frappe._dict(filters or {}) if filters.include_default_book_entries: company_fb = frappe.db.get_value("Company", company, "default_finance_book") cond = % ( frappe.db.escape(filters.finance_book), frappe.db.escape(company_fb), ) else: cond = " AND (finance_book in (%s, '') OR finance_book IS NULL)" % ( frappe.db.escape(cstr(filters.finance_book)) ) gl_sum = frappe.db.sql_list( .format( cond=cond ), (company, start_date, end_date, account_type), ) return gl_sum[0] if gl_sum and gl_sum[0] else 0
494bd9ef78313436f0424b918f200dab8fc7c20b
16
cash_flow.py
214
style: format code with black
13,820
0
45
137
48
65,186
64
erpnext
19
erpnext/accounts/report/cash_flow/cash_flow.py
Python
27
{ "docstring": " AND (finance_book in (%s, %s, '') OR finance_book IS NULL)\n\t\t\t\n\t\tselect sum(credit) - sum(debit)\n\t\tfrom `tabGL Entry`\n\t\twhere company=%s and posting_date >= %s and posting_date <= %s\n\t\t\tand voucher_type != 'Period Closing Voucher'\n\t\t\tand account in ( SELECT name FROM tabAccount WHERE account_type = %s) {cond}\n\t", "language": "en", "n_whitespaces": 41, "n_words": 46, "vocab_size": 40 }
https://github.com/frappe/erpnext.git
7
get_customer_stats
def get_customer_stats(filters, tree_view=False): company_condition = "" if filters.get("company"): company_condition = " and company=%(company)s" customers = [] customers_in = {} for si in frappe.db.sql( .format( company_condition=company_condition ), filters, as_dict=1, ): key = si.territory if tree_view else si.posting_date.strftime("%Y-%m") new_or_repeat = "new" if si.customer not in customers else "repeat" customers_in.setdefault(key, {"new": [0, 0.0], "repeat": [0, 0.0]}) # if filters.from_date <= si.posting_date.strftime('%Y-%m-%d'): if getdate(filters.from_date) <= getdate(si.posting_date): customers_in[key][new_or_repeat][0] += 1 customers_in[key][new_or_repeat][1] += si.base_grand_total if new_or_repeat == "new": customers.append(si.customer) return customers_in
494bd9ef78313436f0424b918f200dab8fc7c20b
13
customer_acquisition_and_loyalty.py
272
style: format code with black
14,515
0
52
170
56
67,399
75
erpnext
24
erpnext/selling/report/customer_acquisition_and_loyalty/customer_acquisition_and_loyalty.py
Python
24
{ "docstring": "Calculates number of new and repeated customers and revenue.select territory, posting_date, customer, base_grand_total from `tabSales Invoice`\n\t\twhere docstatus=1 and posting_date <= %(to_date)s\n\t\t{company_condition} order by posting_date", "language": "en", "n_whitespaces": 23, "n_words": 26, "vocab_size": 23 }
https://github.com/frappe/erpnext.git
2
_safe_read
def _safe_read(self, amt): data = self.fp.read(amt) if len(data) < amt: raise IncompleteRead(data, amt-len(data)) return data
8198943edd73a363c266633e1aa5b2a9e9c9f526
12
client.py
65
add python 3.10.4 for windows
54,889
0
54
40
14
217,703
15
XX-Net
8
python3.10.4/Lib/http/client.py
Python
5
{ "docstring": "Read the number of bytes requested.\n\n This function should be used when <amt> bytes \"should\" be present for\n reading. If the bytes are truly not available (due to EOF), then the\n IncompleteRead exception can be used to detect the problem.\n ", "language": "en", "n_whitespaces": 68, "n_words": 40, "vocab_size": 31 }
https://github.com/XX-net/XX-Net.git
2
cell_length
def cell_length(self) -> int: # Done on demand and cached, as this is an O(n) operation if self._cell_length is None: self._cell_length = Segment.get_line_length(self._segments) return self._cell_length
6f82ad9c4a2e17812a68d3c76d7eae89aee3a515
11
strip.py
53
adds Strip primitive
45,438
0
64
31
22
186,299
25
textual
7
src/textual/strip.py
Python
5
{ "docstring": "Get the number of cells required to render this object.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/Textualize/textual.git
1
add_metadata_summerizer
def add_metadata_summerizer(): docs = [ Document( content=, meta={ "sub_content": "Pegasus Example", "topic": "California's Electricity", "context": "Dummy - PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires.", }, ), Document( content=, meta={"sub_content": "Paris best tour best tour", "topic": "Eiffel tower"}, ), ] # Original input is overwrote after the "predict". So adding the same input as check_output to assess the output check_output = deepcopy(docs) summarizer = TransformersSummarizer(model_name_or_path="google/pegasus-xsum") summary = summarizer.predict(documents=docs) assert len(summary[0].meta) == len(check_output[0].meta) assert len(summary[1].meta) - 1 == len(check_output[1].meta) assert ( summary[0].meta["context"] == ) summary = summarizer.predict(documents=docs, generate_single_summary=True) assert len(summary) == 1 assert not summary[0].meta # Metadata is not returned in case of a single summary
4d8f40425bc4e7346359b7609720a50ac10b8af9
13
test_summarizer.py
273
Passing the meta-data in the summerizer response (#2179) * Passing the all the meta-data in the summerizer * Disable metadata forwarding if `generate_single_summary` is `True` * Update Documentation & Code Style * simplify tests * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
75,094
0
304
162
88
257,543
122
haystack
15
test/nodes/test_summarizer.py
Python
27
{ "docstring": "PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", "language": "en", "n_whitespaces": 221, "n_words": 222, "vocab_size": 117 }
https://github.com/deepset-ai/haystack.git
1
_draw_linenumber
def _draw_linenumber(self, posno, lineno): self._draw_text( self._get_linenumber_pos(posno), str(lineno).rjust(self.line_number_chars), font=self.fonts.get_font(self.line_number_bold, self.line_number_italic), text_fg=self.line_number_fg, text_bg=None, )
f3166e673fe8d40277b804d35d77dcdb760fc3b3
11
img.py
91
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,336
0
124
61
12
20,345
12
pipenv
17
pipenv/patched/notpip/_vendor/pygments/formatters/img.py
Python
9
{ "docstring": "\n Remember a line number drawable to paint later.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/pypa/pipenv.git
12
qr
def qr(a, mode='reduced'): if mode not in ('reduced', 'complete', 'r', 'raw'): if mode in ('f', 'full'): # 2013-04-01, 1.8 msg = "".join(( "The 'full' option is deprecated in favor of 'reduced'.\n", "For backward compatibility let mode default.")) warnings.warn(msg, DeprecationWarning, stacklevel=3) mode = 'reduced' elif mode in ('e', 'economic'): # 2013-04-01, 1.8 msg = "The 'economic' option is deprecated." warnings.warn(msg, DeprecationWarning, stacklevel=3) mode = 'economic' else: raise ValueError(f"Unrecognized mode '{mode}'") a, wrap = _makearray(a) _assert_stacked_2d(a) m, n = a.shape[-2:] t, result_t = _commonType(a) a = a.astype(t, copy=True) a = _to_native_byte_order(a) mn = min(m, n) if m <= n: gufunc = _umath_linalg.qr_r_raw_m else: gufunc = _umath_linalg.qr_r_raw_n signature = 'D->D' if isComplexType(t) else 'd->d' extobj = get_linalg_error_extobj(_raise_linalgerror_qr) tau = gufunc(a, signature=signature, extobj=extobj) # handle modes that don't return q if mode == 'r': r = triu(a[..., :mn, :]) r = r.astype(result_t, copy=False) return wrap(r) if mode == 'raw': q = transpose(a) q = q.astype(result_t, copy=False) tau = tau.astype(result_t, copy=False) return wrap(q), tau if mode == 'economic': a = a.astype(result_t, copy=False) return wrap(a) # mc is the number of columns in the resulting q # matrix. If the mode is complete then it is # same as number of rows, and if the mode is reduced, # then it is the minimum of number of rows and columns. if mode == 'complete' and m > n: mc = m gufunc = _umath_linalg.qr_complete else: mc = mn gufunc = _umath_linalg.qr_reduced signature = 'DD->D' if isComplexType(t) else 'dd->d' extobj = get_linalg_error_extobj(_raise_linalgerror_qr) q = gufunc(a, tau, signature=signature, extobj=extobj) r = triu(a[..., :mc, :]) q = q.astype(result_t, copy=False) r = r.astype(result_t, copy=False) return wrap(q), wrap(r) # Eigenvalues @array_function_dispatch(_unary_dispatcher)
7dd53bce20121b3818a5960371dfdd1f138296bd
@array_function_dispatch(_unary_dispatcher)
14
linalg.py
694
DOC: Update linalg.qr docstring with numerically stable example (#21149) Co-authored-by: Melissa Weber Mendonça <[email protected]>
38,478
1
625
407
130
160,078
270
numpy
43
numpy/linalg/linalg.py
Python
53
{ "docstring": "\n Compute the qr factorization of a matrix.\n\n Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is\n upper-triangular.\n\n Parameters\n ----------\n a : array_like, shape (..., M, N)\n An array-like object with the dimensionality of at least 2.\n mode : {'reduced', 'complete', 'r', 'raw'}, optional\n If K = min(M, N), then\n\n * 'reduced' : returns q, r with dimensions\n (..., M, K), (..., K, N) (default)\n * 'complete' : returns q, r with dimensions (..., M, M), (..., M, N)\n * 'r' : returns r only with dimensions (..., K, N)\n * 'raw' : returns h, tau with dimensions (..., N, M), (..., K,)\n\n The options 'reduced', 'complete, and 'raw' are new in numpy 1.8,\n see the notes for more information. The default is 'reduced', and to\n maintain backward compatibility with earlier versions of numpy both\n it and the old default 'full' can be omitted. Note that array h\n returned in 'raw' mode is transposed for calling Fortran. The\n 'economic' mode is deprecated. The modes 'full' and 'economic' may\n be passed using only the first letter for backwards compatibility,\n but all others must be spelled out. See the Notes for more\n explanation.\n\n\n Returns\n -------\n q : ndarray of float or complex, optional\n A matrix with orthonormal columns. When mode = 'complete' the\n result is an orthogonal/unitary matrix depending on whether or not\n a is real/complex. The determinant may be either +/- 1 in that\n case. In case the number of dimensions in the input array is\n greater than 2 then a stack of the matrices with above properties\n is returned.\n r : ndarray of float or complex, optional\n The upper-triangular matrix or a stack of upper-triangular\n matrices if the number of dimensions in the input array is greater\n than 2.\n (h, tau) : ndarrays of np.double or np.cdouble, optional\n The array h contains the Householder reflectors that generate q\n along with r. The tau array contains scaling factors for the\n reflectors. In the deprecated 'economic' mode only h is returned.\n\n Raises\n ------\n LinAlgError\n If factoring fails.\n\n See Also\n --------\n scipy.linalg.qr : Similar function in SciPy.\n scipy.linalg.rq : Compute RQ decomposition of a matrix.\n\n Notes\n -----\n This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``,\n ``dorgqr``, and ``zungqr``.\n\n For more information on the qr factorization, see for example:\n https://en.wikipedia.org/wiki/QR_factorization\n\n Subclasses of `ndarray` are preserved except for the 'raw' mode. So if\n `a` is of type `matrix`, all the return values will be matrices too.\n\n New 'reduced', 'complete', and 'raw' options for mode were added in\n NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In\n addition the options 'full' and 'economic' were deprecated. Because\n 'full' was the previous default and 'reduced' is the new default,\n backward compatibility can be maintained by letting `mode` default.\n The 'raw' option was added so that LAPACK routines that can multiply\n arrays by q using the Householder reflectors can be used. Note that in\n this case the returned arrays are of type np.double or np.cdouble and\n the h array is transposed to be FORTRAN compatible. 
No routines using\n the 'raw' return are currently exposed by numpy, but some are available\n in lapack_lite and just await the necessary work.\n\n Examples\n --------\n >>> a = np.random.randn(9, 6)\n >>> q, r = np.linalg.qr(a)\n >>> np.allclose(a, np.dot(q, r)) # a does equal qr\n True\n >>> r2 = np.linalg.qr(a, mode='r')\n >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'\n True\n >>> a = np.random.normal(size=(3, 2, 2)) # Stack of 2 x 2 matrices as input\n >>> q, r = np.linalg.qr(a)\n >>> q.shape\n (3, 2, 2)\n >>> r.shape\n (3, 2, 2)\n >>> np.allclose(a, np.matmul(q, r))\n True\n\n Example illustrating a common use of `qr`: solving of least squares\n problems\n\n What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for\n the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points\n and you'll see that it should be y0 = 0, m = 1.) The answer is provided\n by solving the over-determined matrix equation ``Ax = b``, where::\n\n A = array([[0, 1], [1, 1], [1, 1], [2, 1]])\n x = array([[y0], [m]])\n b = array([[1], [0], [2], [1]])\n\n If A = qr such that q is orthonormal (which is always possible via\n Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,\n however, we simply use `lstsq`.)\n\n >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])\n >>> A\n array([[0, 1],\n [1, 1],\n [1, 1],\n [2, 1]])\n >>> b = np.array([1, 2, 2, 3])\n >>> q, r = np.linalg.qr(A)\n >>> p = np.dot(q.T, b)\n >>> np.dot(np.linalg.inv(r), p)\n array([ 1., 1.])\n\n ", "language": "en", "n_whitespaces": 1269, "n_words": 761, "vocab_size": 356 }
https://github.com/numpy/numpy.git
3
members
def members(self) -> list[ZHAGroupMember]: return [ ZHAGroupMember(self, self._zha_gateway.devices[member_ieee], endpoint_id) for (member_ieee, endpoint_id) in self._zigpy_group.members.keys() if member_ieee in self._zha_gateway.devices ]
fb108533580d5f4c326ca970d8e6fd4998cc5593
11
group.py
80
Fix mypy issues in zha core modules (#74028) * Fix mypy issues in zha gateway, group and helpers * Cleanup device * Apply suggestion * Raise ValueError * Use hass.config.path
113,184
0
73
53
17
314,578
19
core
10
homeassistant/components/zha/core/group.py
Python
7
{ "docstring": "Return the ZHA devices that are members of this group.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/home-assistant/core.git
1
get_dummy_message
def get_dummy_message(doc): return frappe.render_template( , dict(doc=doc, payment_url="{{ payment_url }}"), ) @frappe.whitelist()
494bd9ef78313436f0424b918f200dab8fc7c20b
@frappe.whitelist()
11
payment_request.py
51
style: format code with black
13,757
1
5
24
11
64,929
11
erpnext
7
erpnext/accounts/doctype/payment_request/payment_request.py
Python
17
{ "docstring": "{% if doc.contact_person -%}\n<p>Dear {{ doc.contact_person }},</p>\n{%- else %}<p>Hello,</p>{% endif %}\n\n<p>{{ _(\"Requesting payment against {0} {1} for amount {2}\").format(doc.doctype,\n\tdoc.name, doc.get_formatted(\"grand_total\")) }}</p>\n\n<a href=\"{{ payment_url }}\">{{ _(\"Make Payment\") }}</a>\n\n<p>{{ _(\"If you have any questions, please get back to us.\") }}</p>\n\n<p>{{ _(\"Thank you for your business!\") }}</p>\n", "language": "en", "n_whitespaces": 43, "n_words": 51, "vocab_size": 44 }
https://github.com/frappe/erpnext.git
4
call_find
def call_find(self, other_args): parser = argparse.ArgumentParser( prog="find", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( "-c", "--coin", help="Symbol Name or Id of Coin", dest="coin", required="-h" not in other_args, type=str, ) parser.add_argument( "-k", "--key", dest="key", help="Specify by which column you would like to search: symbol, name, id", type=str, choices=FIND_KEYS, default="symbol", ) parser.add_argument( "-l", "--limit", default=10, dest="limit", help="Number of records to display", type=check_positive, ) parser.add_argument( "--source", dest="source", choices=CRYPTO_SOURCES.keys(), default="cg", help="Source of data.", type=str, ) parser.add_argument( "-s", "--skip", default=0, dest="skip", help="Skip n of records", type=check_positive, ) if other_args and not other_args[0][0] == "-": other_args.insert(0, "-c") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) # TODO: merge find + display_all_coins if ns_parser: find( coin=ns_parser.coin, source=ns_parser.source, key=ns_parser.key, top=ns_parser.limit, export=ns_parser.export, ) display_all_coins( coin=ns_parser.coin, source=ns_parser.source, top=ns_parser.limit, skip=ns_parser.skip, show_all=bool("ALL" in other_args), export=ns_parser.export, )
4501dfd442d371150b8785d379c5354095b6954b
14
crypto_controller.py
446
Crypto features: Replace coingecko scrapping (#1156) * replaced cgcategories with api * added coingecko categories * refactoring commands to use api, added coins to cryptocontroller and merged find and coins * autocompletion for coins * removed unused vars * added dappradar features * refactoring commands position * refactoring commands position * adding visual commands and fixed report * skipped tests for now * lint notebook * correct report * black formatter keeps crying because notebook * removed unused imports * Fixed black * Keep kernel metadata 'cause it's required by papermill * Change jupyter cleanup hook to one based on nbconvert * Try fix the hook I just broke * Fix trailing commas in the crypto notebook * Change the jupyter hook to a one that's featured on pre-commit's page * Format report notebook and test new notebook hook * Black the notebook * Remove deleted functions from the crypto discovery API * Remove deleted functions from the crypto overview API * replaced print for console print and removed print from table * replaced print for console print and removed print from table * auto completion + sort for all discovery commands * replacing help messages * fix linting * added docs and removed unused commands * added todos and fixed help messages * lint * pr issues fixed * updated tests * tests merge * replaced with new rich table function Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: Theodore Aptekarev <[email protected]>
84,022
0
863
281
93
282,059
121
OpenBBTerminal
38
gamestonk_terminal/cryptocurrency/crypto_controller.py
Python
86
{ "docstring": "Process find command\n Find similar coin by coin name,symbol or id. If you don't remember exact name or id of the Coin at CoinGecko,\n Binance, Coinbase or CoinPaprika you can use this command to display coins with similar name, symbol or id\n to your search query.\n Example of usage: coin name is something like \"polka\". So I can try: find -c polka -k name -t 25\n It will search for coin that has similar name to polka and display top 25 matches.\n -c, --coin stands for coin - you provide here your search query\n -k, --key it's a searching key. You can search by symbol, id or name of coin\n -l, --limit it displays top N number of records.\n coins: Shows list of coins available on CoinGecko, CoinPaprika and Binance.If you provide name of\n coin then in result you will see ids of coins with best match for all mentioned services.\n If you provide ALL keyword in your search query, then all coins will be displayed. To move over coins you\n can use pagination mechanism with skip, top params. E.g. coins ALL --skip 100 --limit 30 then all coins\n from 100 to 130 will be displayed. By default skip = 0, limit = 10.\n If you won't provide source of the data everything will be displayed (CoinGecko, CoinPaprika, Binance).\n If you want to search only in given source then use --source flag. E.g. if you want to find coin with name\n uniswap on CoinPaprika then use: coins uniswap --source cp --limit 10\n ", "language": "en", "n_whitespaces": 439, "n_words": 252, "vocab_size": 140 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
4
_needs_cache_invalidation
def _needs_cache_invalidation(self, command): invalidate = False cfg_cmds = [] try: # AnsiblePlugin base class in Ansible 2.9 does not have has_option() method. # TO-DO: use has_option() when we drop 2.9 support. cfg_cmds = self.cliconf.get_option("config_commands") except AttributeError: cfg_cmds = [] if (self._is_in_config_mode()) or (to_text(command) in cfg_cmds): invalidate = True return invalidate
76b746655a36807fa9198064ca9fe7c6cc00083a
11
network_cli.py
100
Add `use_rsa_sha2_algorithms` option for paramiko (#78789) Fixes #76737 Fixes #77673 Co-authored-by: Matt Clay <[email protected]>
79,529
0
154
57
37
268,500
50
ansible
10
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/network_cli.py
Python
10
{ "docstring": "\n This method determines if it is necessary to invalidate\n the existing cache based on whether the device has entered\n configuration mode or if the last command sent to the device\n is potentially capable of making configuration changes.\n\n :param command: The last command sent to the target device.\n :returns: A boolean indicating if cache invalidation is required or not.\n ", "language": "en", "n_whitespaces": 108, "n_words": 58, "vocab_size": 41 }
https://github.com/ansible/ansible.git
4
transform
def transform(self, X): if self.solver == "lsqr": raise NotImplementedError( "transform not implemented for 'lsqr' solver (use 'svd' or 'eigen')." ) check_is_fitted(self) X = self._validate_data(X, reset=False) if self.solver == "svd": X_new = np.dot(X - self.xbar_, self.scalings_) elif self.solver == "eigen": X_new = np.dot(X, self.scalings_) return X_new[:, : self._max_components]
ab08e4dba5f1f87b8c3395f32469a6ddb5e34f89
12
discriminant_analysis.py
147
DOC Add documentation on output shape of LDA.transform (#22238)
75,274
0
155
88
38
258,522
47
scikit-learn
14
sklearn/discriminant_analysis.py
Python
12
{ "docstring": "Project data to maximize class separation.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components) or \\\n (n_samples, min(rank, n_components))\n Transformed data. In the case of the 'svd' solver, the shape\n is (n_samples, min(rank, n_components)).\n ", "language": "en", "n_whitespaces": 139, "n_words": 46, "vocab_size": 34 }
https://github.com/scikit-learn/scikit-learn.git
2
get_object_with_snapshot
def get_object_with_snapshot(self): obj = super().get_object() if hasattr(obj, 'snapshot'): obj.snapshot() return obj
efd5a73a187f1183a278595d7345046abee5800b
10
__init__.py
55
Refactor API views
77,754
0
50
30
10
264,557
11
netbox
7
netbox/netbox/api/viewsets/__init__.py
Python
5
{ "docstring": "\n Save a pre-change snapshot of the object immediately after retrieving it. This snapshot will be used to\n record the \"before\" data in the changelog.\n ", "language": "en", "n_whitespaces": 46, "n_words": 24, "vocab_size": 21 }
https://github.com/netbox-community/netbox.git
1
prepare_all_coins_df
def prepare_all_coins_df() -> pd.DataFrame: gecko_coins_df = load_coins_list("coingecko_coins.json") paprika_coins_df = load_coins_list("coinpaprika_coins.json") paprika_coins_df = paprika_coins_df[paprika_coins_df["is_active"]] paprika_coins_df = paprika_coins_df[["rank", "id", "name", "symbol", "type"]] yahoofinance_coins_df = load_coins_list("yahoofinance_coins.json") # TODO: Think about scheduled job, that once a day will update data binance_coins_df = load_binance_map().rename(columns={"symbol": "Binance"}) coinbase_coins_df = load_coinbase_map().rename(columns={"symbol": "Coinbase"}) gecko_paprika_coins_df = pd.merge( gecko_coins_df, paprika_coins_df, on="name", how="left" ) df_merged = pd.merge( left=gecko_paprika_coins_df, right=binance_coins_df, left_on="id_x", right_on="id", how="left", ) df_merged.rename( columns={ "id_x": "CoinGecko", "symbol_x": "Symbol", "id_y": "CoinPaprika", }, inplace=True, ) df_merged = pd.merge( left=df_merged, right=coinbase_coins_df, left_on="CoinGecko", right_on="id", how="left", ) yahoofinance_coins_df.rename( columns={ "symbol": "Symbol", }, inplace=True, ) df_merged = pd.merge( left=df_merged, right=yahoofinance_coins_df[["Symbol", "id"]], on="Symbol", how="left", ) df_merged.rename( columns={ "id": "YahooFinance", }, inplace=True, ) return df_merged[ ["CoinGecko", "CoinPaprika", "Binance", "Coinbase", "YahooFinance", "Symbol"] ]
9923c6974cdb659164d7aefd5523de4bfd563553
12
cryptocurrency_helpers.py
464
Add YahooFinance to crypto load (#1533) * Add yf to crypto load * silence pylint too many branches * Add yf chart * Add exception handling when coins are not found * Fix tests failing * Address PR Comments * Change the backwards yf crypto chart and volume label * fix gst file Co-authored-by: jmaslek <[email protected]> Co-authored-by: Theodore Aptekarev <[email protected]>
84,446
0
418
265
76
283,133
113
OpenBBTerminal
23
gamestonk_terminal/cryptocurrency/cryptocurrency_helpers.py
Python
71
{ "docstring": "Helper method which loads coins from all sources: CoinGecko, CoinPaprika,\n Binance, Yahoo Finance and merge those coins on keys:\n\n CoinGecko - > name < - CoinPaprika\n CoinGecko - > id <- Binance\n\n Returns\n -------\n pd.DataFrame\n CoinGecko - id for coin in CoinGecko API: uniswap\n CoinPaprika - id for coin in CoinPaprika API: uni-uniswap\n Binance - symbol (baseAsset) for coin in Binance API: UNI\n Coinbase - symbol for coin in Coinbase Pro API e.g UNI\n Yahoo Finance - symbol for coin in Yahoo Finance e.g. UNI1-USD\n\n Symbol: uni\n ", "language": "en", "n_whitespaces": 158, "n_words": 87, "vocab_size": 47 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
_get_autoscaling_config_with_overrides
def _get_autoscaling_config_with_overrides() -> dict: config = _get_basic_autoscaling_config() config["available_node_types"]["small-group"]["resources"]["memory"] = 300000000 config["available_node_types"]["small-group"]["resources"]["GPU"] = 100 config["available_node_types"]["small-group"]["resources"]["CPU"] = 100 config["available_node_types"]["gpu-group"]["resources"]["GPU"] = 100 return config
7d3ceb222c8af98a5c101b1c28ab37ffcb0a3793
11
test_autoscaling_config.py
143
[kuberay][autoscaler] Improve CPU, GPU, and memory detection. (#26219) This PR improves the autoscaler's resource detection logic
27,528
0
42
74
14
124,153
21
ray
4
python/ray/tests/kuberay/test_autoscaling_config.py
Python
8
{ "docstring": "Autoscaling config with memory and gpu annotations.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/ray-project/ray.git
1
test_launcher_ensures_stdio
def test_launcher_ensures_stdio(self): from kitty.constants import kitty_exe import subprocess exe = kitty_exe() cp = subprocess.run([exe, '+runpy', ]) self.assertEqual(cp.returncode, 0)
6604e0d015fbd7a3e5602a6f3831d786b4ed659d
10
check_build.py
71
Fix regression in 0.26.0 that caused launching kitty without working STDIO handles to result in high CPU usage and prewarming failing Fixes #5444
21,711
0
52
42
16
103,727
18
kitty
11
kitty_tests/check_build.py
Python
15
{ "docstring": "\\\nimport os, sys\nif sys.stdin:\n os.close(sys.stdin.fileno())\nif sys.stdout:\n os.close(sys.stdout.fileno())\nif sys.stderr:\n os.close(sys.stderr.fileno())\nos.execlp('kitty', 'kitty', '+runpy', 'import sys; raise SystemExit(1 if sys.stdout is None or sys.stdin is None or sys.stderr is None else 0)')\n", "language": "en", "n_whitespaces": 37, "n_words": 34, "vocab_size": 26 }
https://github.com/kovidgoyal/kitty.git
2
_get_device_coords
def _get_device_coords(self, position, height): x = self.legend_width + RACK_ELEVATION_BORDER_WIDTH y = RACK_ELEVATION_BORDER_WIDTH if self.rack.desc_units: y += int((position - 1) * self.unit_height) else: y += int((self.rack.u_height - position + 1) * self.unit_height) - int(height * self.unit_height) return x, y
0c915f7de9612c7485da3713cc6d63f368698a5d
18
svg.py
121
Clean up rack elevation rendering
77,991
0
102
76
24
265,105
38
netbox
13
netbox/dcim/svg.py
Python
8
{ "docstring": "\n Return the X, Y coordinates of the top left corner for a device in the specified rack unit.\n ", "language": "en", "n_whitespaces": 33, "n_words": 18, "vocab_size": 16 }
https://github.com/netbox-community/netbox.git
1
test_notset_idval
def test_notset_idval(self) -> None: assert IdMaker([], [], None, None, None, None)._idval(NOTSET, "a", 0) == "a0"
b21b008118fc8cf65b4bcd9b059f1cd704e05c68
11
metafunc.py
57
Refactor idmaker functions into class IdMaker This commit only refactors, it does not change or add functionality yet. Public API is retained. Reason or refactoring: User provided parameter IDs (e.g. Metafunc.parametrize(ids=...)) had so far only been used to calculate a unique test ID for each test invocation. That test ID was a joined string where each parameter contributed some partial ID. We're soon going to reuse functionality to generate parameter keys for reorder_items and FixtureDef cache. We will be interested in the partial IDs, and only if they originate from explicit user information. Refactoring makes logic and data accessible for reuse, and increases cohesion in general.
46,371
0
29
36
13
190,662
15
pytest
5
testing/python/metafunc.py
Python
7
{ "docstring": "Test that a NOTSET value (used by an empty parameterset) generates\n a proper ID.\n\n Regression test for #7686.\n ", "language": "en", "n_whitespaces": 39, "n_words": 18, "vocab_size": 17 }
https://github.com/pytest-dev/pytest.git
1
fork_env
def fork_env(self, prev_eid, eid): assert isstr(prev_eid), "prev_eid should be a string" assert isstr(eid), "eid should be a string" return self._send(msg={"prev_eid": prev_eid, "eid": eid}, endpoint="fork_env")
5b8b7f267cfaf76a2a39a727ef31a62b3909a093
11
__init__.py
77
apply black py to all python files
22,475
0
52
45
18
106,852
24
visdom
8
py/visdom/__init__.py
Python
4
{ "docstring": "This function allows the user to fork environments.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/fossasia/visdom.git
3
update_event_summary
def update_event_summary(self): avail_resources = self.load_metrics.resources_avail_summary() if not self.readonly_config and avail_resources != self.last_avail_resources: self.event_summarizer.add( "Resized to {}.", # e.g., Resized to 100 CPUs, 4 GPUs. quantity=avail_resources, aggregate=lambda old, new: new, ) self.last_avail_resources = avail_resources
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
12
monitor.py
89
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,289
0
133
54
29
130,510
33
ray
13
python/ray/autoscaler/_private/monitor.py
Python
9
{ "docstring": "Report the current size of the cluster.\n\n To avoid log spam, only cluster size changes (CPU or GPU count change)\n are reported to the event summarizer. The event summarizer will report\n only the latest cluster size per batch.\n ", "language": "en", "n_whitespaces": 66, "n_words": 38, "vocab_size": 30 }
https://github.com/ray-project/ray.git
8
get_formatter_for_filename
def get_formatter_for_filename(fn, **options): fn = basename(fn) for modname, name, _, filenames, _ in FORMATTERS.values(): for filename in filenames: if _fn_matches(fn, filename): if name not in _formatter_cache: _load_formatters(modname) return _formatter_cache[name](**options) for cls in find_plugin_formatters(): for filename in cls.filenames: if _fn_matches(fn, filename): return cls(**options) raise ClassNotFound("no formatter found for file name %r" % fn)
f3166e673fe8d40277b804d35d77dcdb760fc3b3
15
__init__.py
155
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,311
0
167
99
37
20,288
52
pipenv
17
pipenv/patched/notpip/_vendor/pygments/formatters/__init__.py
Python
13
{ "docstring": "Lookup and instantiate a formatter by filename pattern.\n\n Raises ClassNotFound if not found.\n ", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
https://github.com/pypa/pipenv.git
4
get_work_queues
async def get_work_queues(self) -> Iterator[WorkQueue]: for name in self.work_queues: try: work_queue = await self.client.read_work_queue_by_name(name) except ObjectNotFound: # if the work queue wasn't found, create it try: work_queue = await self.client.create_work_queue(name=name) # if creating it raises an exception, it was probably just # created by some other agent; rather than entering a re-read # loop with new error handling, we log the exception and # continue. except Exception as exc: self.logger.exception(exc) continue yield work_queue
ab657b5b3e2235e836ef017d9c58d580e1c254c6
17
agent.py
116
Handle errors gracefully and yield queues
11,764
0
297
65
59
58,397
73
prefect
15
src/prefect/agent.py
Python
15
{ "docstring": "\n Loads the work queue objects corresponding to the agent's target work\n queues. If any of them don't exist, they are created.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 19 }
https://github.com/PrefectHQ/prefect.git
2
cancel
def cancel(self, msg=None): self.__log_traceback = False if self._state != _PENDING: return False self._state = _CANCELLED self._cancel_message = msg self.__schedule_callbacks() return True
8198943edd73a363c266633e1aa5b2a9e9c9f526
7
futures.py
68
add python 3.10.4 for windows
56,036
0
81
41
16
220,529
21
XX-Net
9
python3.10.4/Lib/asyncio/futures.py
Python
8
{ "docstring": "Cancel the future and schedule callbacks.\n\n If the future is already done or cancelled, return False. Otherwise,\n change the future's state to cancelled, schedule the callbacks and\n return True.\n ", "language": "en", "n_whitespaces": 58, "n_words": 29, "vocab_size": 21 }
https://github.com/XX-net/XX-Net.git
1
test_fed_caching
def test_fed_caching(self): fed_hostname = self.hs.hostname + "2" fed_subspace = "#space:" + fed_hostname fed_room = "#room:" + fed_hostname # Add a room to the space which is on another server. self._add_child(self.space, fed_subspace, self.token, via=[fed_hostname]) federation_requests = 0
af13a3be29dd2d84d9255f8e613ca70c16819436
9
test_room_summary.py
83
Fix a bug that corrupted the cache of federated space hierarchies (#11775) `FederationClient.get_room_hierarchy()` caches its return values, so refactor the code to avoid modifying the returned room summary.
70,985
0
85
238
29
246,073
36
synapse
12
tests/handlers/test_room_summary.py
Python
33
{ "docstring": "\n Federation `/hierarchy` responses should be cached.\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
https://github.com/matrix-org/synapse.git