complexity (int64: 1–139) | fun_name (stringlengths: 1–80) | code (stringlengths: 101–62.2k) | commit_id (stringlengths: 40–40) | ast_errors (stringlengths: 0–3.11k) | ast_levels (int64: 6–36) | file_name (stringlengths: 5–79) | n_ast_nodes (int64: 17–19.2k) | commit_message (stringlengths: 3–15.3k) | d_id (int64: 12–121k) | n_ast_errors (int64: 0–9) | n_whitespaces (int64: 4–10.8k) | token_counts (int64: 5–3.06k) | vocab_size (int64: 4–1.11k) | id (int64: 20–338k) | n_words (int64: 4–4.82k) | repo (stringlengths: 3–22) | n_identifiers (int64: 2–176) | path (stringlengths: 7–134) | language (stringclasses: 1 value) | nloc (int64: 1–413) | documentation (dict) | url (stringlengths: 31–59) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
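Each record below corresponds to one extracted function: its source code and location (repo, path, file_name, fun_name), commit metadata (commit_id, commit_message), simple size and AST statistics (nloc, complexity, token_counts, n_ast_nodes, and so on), the extracted docstring under the documentation column, and a source url. As a rough illustration only, the sketch below shows how an export with these columns might be loaded and filtered with the Hugging Face datasets library; the Parquet file name and the split are assumptions, not part of this dump.

```python
# Minimal sketch, assuming the records shown below are available as a local
# Parquet export with the same column names (file name and split are hypothetical).
from datasets import load_dataset

ds = load_dataset("parquet", data_files="code_samples.parquet", split="train")

# Keep short, low-complexity functions that still carry a non-empty docstring.
small = ds.filter(
    lambda row: row["complexity"] <= 3
    and row["nloc"] <= 20
    and row["documentation"]["docstring"].strip() != ""
)

# Print a few matches: repository, file path, function name, and a code preview.
for row in small.select(range(min(3, len(small)))):
    print(row["repo"], row["path"], row["fun_name"])
    print(row["code"][:200], "...")
```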
4 | spatial_3d_padding | def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError("Unknown data_format: " + str(data_format))
if data_format == "channels_first":
pattern = [
[0, 0],
[0, 0],
[padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]],
[padding[2][0], padding[2][1]],
]
else:
pattern = [
[0, 0],
[padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]],
[padding[2][0], padding[2][1]],
[0, 0],
]
return tf.compat.v1.pad(x, pattern)
@keras_export("keras.backend.stack")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | @keras_export("keras.backend.stack")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs | 13 | backend.py | 396 | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | 80,148 | 1 | 262 | 259 | 50 | 269,517 | 83 | keras | 19 | keras/backend.py | Python | 49 | {
"docstring": "Pads 5D tensor with zeros along the depth, height, width dimensions.\n\n Pads these dimensions with respectively\n \"padding[0]\", \"padding[1]\" and \"padding[2]\" zeros left and right.\n\n For 'channels_last' data_format,\n the 2nd, 3rd and 4th dimension will be padded.\n For 'channels_first' data_format,\n the 3rd, 4th and 5th dimension will be padded.\n\n Args:\n x: Tensor or variable.\n padding: Tuple of 3 tuples, padding pattern.\n data_format: One of `channels_last` or `channels_first`.\n\n Returns:\n A padded 5D tensor.\n\n Raises:\n ValueError: if `data_format` is neither\n `channels_last` or `channels_first`.\n\n ",
"language": "en",
"n_whitespaces": 156,
"n_words": 80,
"vocab_size": 59
} | https://github.com/keras-team/keras.git |
2 | list_local | def list_local():
result = []
for dist_name in get_hub_packages_dir().glob(r'*/v*.dist-info'):
result.append(dist_name)
return result
| a5fd192b186c66aa983137ca3a179caac7f6b786 | 9 | hubapi.py | 53 | feat: optimize create hub root (#4388) | 2,043 | 0 | 31 | 30 | 11 | 11,454 | 12 | jina | 6 | jina/hubble/hubapi.py | Python | 5 | {
"docstring": "List the locally-available executor packages.\n\n :return: the list of local executors (if found)\n ",
"language": "en",
"n_whitespaces": 19,
"n_words": 13,
"vocab_size": 12
} | https://github.com/jina-ai/jina.git |
|
1 | test_render_mention_stream_api | def test_render_mention_stream_api(self) -> None:
content = "This mentions #**Denmark** and @**King Hamlet**."
result = self.api_post(
self.example_user("othello"),
"/api/v1/messages/render",
dict(content=content),
)
response_dict = self.assert_json_success(result)
user_id = self.example_user("hamlet").id
stream_id = get_stream("Denmark", get_realm("zulip")).id
self.assertEqual(
response_dict["rendered"],
f'<p>This mentions <a class="stream" data-stream-id="{stream_id}" href="/#narrow/stream/{stream_id}-Denmark">#Denmark</a> and <span class="user-mention" data-user-id="{user_id}">@King Hamlet</span>.</p>',
)
| a142fbff85302c5e3acb2e204eca2e9c75dbc74b | 12 | test_markdown.py | 149 | tests: Refactor away result.json() calls with helpers.
Signed-off-by: Zixuan James Li <[email protected]> | 17,769 | 0 | 161 | 78 | 36 | 84,081 | 43 | zulip | 15 | zerver/tests/test_markdown.py | Python | 15 | {
"docstring": "Determines whether we're correctly passing the realm context",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | https://github.com/zulip/zulip.git |
|
4 | strip_accents_unicode | def strip_accents_unicode(s):
try:
# If `s` is ASCII-compatible, then it does not contain any accented
# characters and we can avoid an expensive list comprehension
s.encode("ASCII", errors="strict")
return s
except UnicodeEncodeError:
normalized = unicodedata.normalize("NFKD", s)
return "".join([c for c in normalized if not unicodedata.combining(c)])
| c925b7e24c4b58e7191eec935d724e23d351c58b | 15 | text.py | 97 | DOC Ensures that strip_accents_unicode passes numpydoc (#24232)
Co-authored-by: Thomas J. Fan <[email protected]> | 76,422 | 0 | 95 | 54 | 40 | 260,694 | 44 | scikit-learn | 11 | sklearn/feature_extraction/text.py | Python | 7 | {
"docstring": "Transform accentuated unicode symbols into their simple counterpart.\n\n Warning: the python-level loop and join operations make this\n implementation 20 times slower than the strip_accents_ascii basic\n normalization.\n\n Parameters\n ----------\n s : str\n The string to strip.\n\n Returns\n -------\n s : str\n The stripped string.\n\n See Also\n --------\n strip_accents_ascii : Remove accentuated char for any unicode symbol that\n has a direct ASCII equivalent.\n ",
"language": "en",
"n_whitespaces": 121,
"n_words": 61,
"vocab_size": 52
} | https://github.com/scikit-learn/scikit-learn.git |
|
1 | test_default_float_converter_exception | def test_default_float_converter_exception(self):
c = TextIO("qrs tuv") # Invalid values for default float converter
with pytest.raises(ValueError,
match="could not convert string 'qrs' to float64"):
np.loadtxt(c)
| b8c82404855d317a9ac77b4743d3db39f009c6aa | 11 | test_io.py | 58 | TST: Fixup current loadtxt tests for changes | 38,416 | 0 | 71 | 30 | 23 | 159,748 | 23 | numpy | 10 | numpy/lib/tests/test_io.py | Python | 5 | {
"docstring": "\n Ensure that the exception message raised during failed floating point\n conversion is correct. Regression test related to gh-19598.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 18
} | https://github.com/numpy/numpy.git |
|
5 | _check_axes_shape | def _check_axes_shape(self, axes, axes_num=None, layout=None, figsize=None):
from pandas.plotting._matplotlib.tools import flatten_axes
if figsize is None:
figsize = (6.4, 4.8)
visible_axes = self._flatten_visible(axes)
if axes_num is not None:
assert len(visible_axes) == axes_num
for ax in visible_axes:
# check something drawn on visible axes
assert len(ax.get_children()) > 0
if layout is not None:
result = self._get_axes_layout(flatten_axes(axes))
assert result == layout
tm.assert_numpy_array_equal(
visible_axes[0].figure.get_size_inches(),
np.array(figsize, dtype=np.float64),
)
| 03fef5f0e35200aa5828975b62782bcf11faa0d2 | 14 | common.py | 196 | TST: Clean tests/plotting (#45992) | 39,622 | 0 | 225 | 133 | 46 | 164,926 | 62 | pandas | 26 | pandas/tests/plotting/common.py | Python | 16 | {
"docstring": "\n Check expected number of axes is drawn in expected layout\n\n Parameters\n ----------\n axes : matplotlib Axes object, or its list-like\n axes_num : number\n expected number of axes. Unnecessary axes should be set to\n invisible.\n layout : tuple\n expected layout, (expected number of rows , columns)\n figsize : tuple\n expected figsize. default is matplotlib default\n ",
"language": "en",
"n_whitespaces": 155,
"n_words": 54,
"vocab_size": 35
} | https://github.com/pandas-dev/pandas.git |
|
9 | cg_command | async def cg_command(ctx, ticker="", length="14", start="", end=""):
try:
# Debug
if cfg.DEBUG:
logger.debug(
"!stocks.ta.cg %s %s %s %s",
ticker,
length,
start,
end,
)
# Check for argument
if ticker == "":
raise Exception("Stock ticker is required")
if start == "":
start = datetime.now() - timedelta(days=365)
else:
start = datetime.strptime(start, cfg.DATE_FORMAT)
if end == "":
end = datetime.now()
else:
end = datetime.strptime(end, cfg.DATE_FORMAT)
if not length.lstrip("-").isnumeric():
raise Exception("Number has to be an integer")
length = float(length)
ticker = ticker.upper()
df_stock = discordbot.helpers.load(ticker, start)
if df_stock.empty:
raise Exception("Stock ticker is invalid")
# Retrieve Data
df_stock = df_stock.loc[(df_stock.index >= start) & (df_stock.index < end)]
df_ta = momentum_model.cg("1440min", df_stock, length)
# Output Data
fig, axes = plt.subplots(2, 1, figsize=plot_autoscale(), dpi=PLOT_DPI)
ax = axes[0]
ax.set_title(f"{ticker} Centre of Gravity")
ax.plot(df_stock.index, df_stock["Adj Close"].values, "k", lw=1)
ax.set_xlim(df_stock.index[0], df_stock.index[-1])
ax.set_ylabel("Share Price ($)")
ax.grid(b=True, which="major", color="#666666", linestyle="-")
ax2 = axes[1]
ax2.plot(df_ta.index, df_ta.values, "b", lw=2, label="CG")
# shift cg 1 bar forward for signal
signal = df_ta.values
signal = np.roll(signal, 1)
ax2.plot(df_ta.index, signal, "g", lw=1, label="Signal")
ax2.set_xlim(df_stock.index[0], df_stock.index[-1])
ax2.grid(b=True, which="major", color="#666666", linestyle="-")
plt.gcf().autofmt_xdate()
fig.tight_layout(pad=1)
plt.legend()
plt.savefig("ta_cg.png")
uploaded_image = gst_imgur.upload_image("ta_cg.png", title="something")
image_link = uploaded_image.link
if cfg.DEBUG:
logger.debug("Image URL: %s", image_link)
title = "Stocks: Center-of-Gravity " + ticker
embed = discord.Embed(title=title, colour=cfg.COLOR)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
embed.set_image(url=image_link)
os.remove("ta_cg.png")
await ctx.send(embed=embed)
except Exception as e:
embed = discord.Embed(
title="ERROR Stocks: Center-of-Gravity",
colour=cfg.COLOR,
description=e,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed)
| f40ba0d256a78ab2b8461f0df3a9a52ca7dc5704 | 14 | cg.py | 948 | Bot logging fix (#1105)
* Write bot logs to stdout instead of a file
Heroku's logging uses the stdout and has problems with files
* Send "you snooze you lose" only if debug flag is enabled
* Replace print statements with logger entries in the economy menu
* Add logging to bot menu command calls
* Silence bandit warnings about the REPLACE_ME token
* Organize imports and update logging in economy menu
* Organize imports and update logging in dps menu
* Organize imports and update logging in dd menu
* Organize imports and update logging in gov menu
* Organize imports and update logging in options menu
* Organize imports and update logging in screener menu
* Organize imports and update logging in ta menu
* Revert automatic import sorting
* Add logging to the options reaction helper | 83,602 | 0 | 854 | 574 | 155 | 281,196 | 226 | OpenBBTerminal | 85 | discordbot/stocks/technical_analysis/cg.py | Python | 71 | {
"docstring": "Displays chart with centre of gravity [Yahoo Finance]",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | https://github.com/OpenBB-finance/OpenBBTerminal.git |
|
1 | csv_io_kwargs | def csv_io_kwargs(mode):
# type: (str) -> Dict[str, Any]
return {'mode': mode, 'newline': '', 'encoding': 'utf-8'}
| f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | 8 | wheel.py | 43 | upd; format | 12,349 | 0 | 24 | 20 | 15 | 60,934 | 15 | transferlearning | 2 | .venv/lib/python3.8/site-packages/pip/_internal/operations/install/wheel.py | Python | 2 | {
"docstring": "Return keyword arguments to properly open a CSV file\n in the given mode.\n ",
"language": "en",
"n_whitespaces": 19,
"n_words": 13,
"vocab_size": 13
} | https://github.com/jindongwang/transferlearning.git |
|
12 | change_node_label | def change_node_label(self, label, new_label):
if label not in self._node_labels:
raise ValueError("No such node exists for the Truss")
else:
for node in self._nodes:
if node[0] == label:
if self._supports[label] == 'pinned':
if 'R_'+str(label)+'_x' in list(self._reaction_loads) and 'R_'+str(label)+'_y' in list(self._reaction_loads):
self._reaction_loads['R_'+str(new_label)+'_x'] = self._reaction_loads['R_'+str(label)+'_x']
self._reaction_loads['R_'+str(new_label)+'_y'] = self._reaction_loads['R_'+str(label)+'_y']
self._reaction_loads.pop('R_'+str(label)+'_x')
self._reaction_loads.pop('R_'+str(label)+'_y')
self._loads['R_'+str(new_label)+'_x'] = self._loads['R_'+str(label)+'_x']
self._loads['R_'+str(new_label)+'_y'] = self._loads['R_'+str(label)+'_y']
self._loads.pop('R_'+str(label)+'_x')
self._loads.pop('R_'+str(label)+'_y')
elif self._supports[label] == 'roller':
if 'R_'+str(label)+'_y' in list(self._reaction_loads):
self._reaction_loads['R_'+str(new_label)+'_y'] = self._reaction_loads['R_'+str(label)+'_y']
self._reaction_loads.pop('R_'+str(label)+'_y')
self._loads['R_'+str(new_label)+'_y'] = self._loads['R_'+str(label)+'_y']
self._loads.pop('R_'+str(label)+'_y')
for member in self._members:
if member[1] == node[0]:
member[1] = new_label
self._member_nodes[member[0]] = [new_label, member[2]]
self._nodes_occupied[(new_label, member[2])] = True
self._nodes_occupied[(member[2], new_label)] = True
self._nodes_occupied.pop(tuple([label, member[2]]))
self._nodes_occupied.pop(tuple([member[2], label]))
elif member[2] == node[0]:
member[2] = new_label
self._member_nodes[member[0]] = [member[1], new_label]
self._nodes_occupied[(member[1], new_label)] = True
self._nodes_occupied[(new_label, member[1])] = True
self._nodes_occupied.pop(tuple([member[1], label]))
self._nodes_occupied.pop(tuple([label, member[1]]))
self._nodes[self._nodes.index((label, node[1], node[2]))] = (new_label, node[1], node[2])
self._node_labels[self._node_labels.index(node[0])] = new_label
self._loads[new_label] = self._loads[node[0]]
self._loads.pop(node[0])
self._supports[new_label] = self._supports[node[0]]
self._supports.pop(node[0])
| 99ede53223eafb56b2c2b4ab7b8a6764b628c9d9 | 23 | truss.py | 1,114 | remove_load method added along with other changes | 48,980 | 0 | 1,084 | 682 | 80 | 198,528 | 138 | sympy | 20 | sympy/physics/continuum_mechanics/truss.py | Python | 43 | {
"docstring": "\n This method changes the label of a node.\n\n Parameters\n ==========\n label: String or Symbol\n The label of the node for which the label has\n to be changed.\n\n new_label: String or Symbol\n The new label of the node.\n\n Examples\n ========\n\n >>> from sympy.physics.continuum_mechanics.truss import Truss\n >>> t = Truss()\n >>> t.add_node('A', 0, 0)\n >>> t.add_node('B', 3, 0)\n >>> t.nodes\n [('A', 0, 0), ('B', 3, 0)]\n >>> t.change_node_label('A', 'C')\n >>> t.nodes\n [('C', 0, 0), ('B', 3, 0)]\n >>> t.add_member('BC', 'B', 'C')\n >>> t.members\n [['BC', 'B', 'C']]\n >>> t.change_member_label('BC', 'BC_new')\n >>> t.members\n [['BC_new', 'B', 'C']]\n ",
"language": "en",
"n_whitespaces": 287,
"n_words": 92,
"vocab_size": 55
} | https://github.com/sympy/sympy.git |
|
1 | get_cat_ids | def get_cat_ids(self, idx):
return self.get_ann_info(idx)['labels'].astype(np.int).tolist()
| 4aaaf4dccfe26ac2265f77069f09aa8204f23337 | 12 | openimages.py | 50 | [Feature] Support Class Aware Sampler (#7436)
* [Feature] Support Class Aware Sampler
* minor fix
* minor fix
* rename get_label_dict to get_index_dict
* fix cas logic
* minor fix
* minor fix
* minor fix
* minor fix
* minor fix | 70,290 | 0 | 19 | 29 | 5 | 244,226 | 5 | mmdetection | 8 | mmdet/datasets/openimages.py | Python | 2 | {
"docstring": "Get category ids by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n list[int]: All categories in the image of specified index.\n ",
"language": "en",
"n_whitespaces": 64,
"n_words": 21,
"vocab_size": 19
} | https://github.com/open-mmlab/mmdetection.git |
|
1 | print_stack | def print_stack(self, *, limit=None, file=None):
return base_tasks._task_print_stack(self, limit, file)
| 8198943edd73a363c266633e1aa5b2a9e9c9f526 | 7 | tasks.py | 41 | add python 3.10.4 for windows | 56,120 | 0 | 23 | 27 | 9 | 220,797 | 9 | XX-Net | 6 | python3.10.4/Lib/asyncio/tasks.py | Python | 2 | {
"docstring": "Print the stack or traceback for this task's coroutine.\n\n This produces output similar to that of the traceback module,\n for the frames retrieved by get_stack(). The limit argument\n is passed to get_stack(). The file argument is an I/O stream\n to which the output is written; by default output is written\n to sys.stderr.\n ",
"language": "en",
"n_whitespaces": 96,
"n_words": 52,
"vocab_size": 35
} | https://github.com/XX-net/XX-Net.git |
|
13 | _get_aligned_offsets | def _get_aligned_offsets(hd_list, height, align="baseline"):
if height is None:
height = max(h for h, d in hd_list)
_api.check_in_list(
["baseline", "left", "top", "right", "bottom", "center"], align=align)
if align == "baseline":
height_descent = max(h - d for h, d in hd_list)
descent = max(d for h, d in hd_list)
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left", "bottom"]:
descent = 0.
offsets = [d for h, d in hd_list]
elif align in ["right", "top"]:
descent = 0.
offsets = [height - h + d for h, d in hd_list]
elif align == "center":
descent = 0.
offsets = [(height - h) * .5 + d for h, d in hd_list]
return height, descent, offsets
| b51c471f0bea8ad2bd3e295ebf896cba0efbb5ef | 14 | offsetbox.py | 294 | FIX: VPacker and HPacker bottom/top alignment
The bottom and top alignments were incorrectly defined before,
this updates them to have the expected alignment. | 24,176 | 0 | 228 | 186 | 50 | 110,473 | 120 | matplotlib | 12 | lib/matplotlib/offsetbox.py | Python | 20 | {
"docstring": "\n Align boxes each specified by their ``(height, descent)`` pair.\n\n For simplicity of the description, the terminology used here assumes a\n horizontal layout (i.e., vertical alignment), but the function works\n equally for a vertical layout.\n\n Parameters\n ----------\n hd_list\n List of (height, xdescent) of boxes to be aligned.\n height : float or None\n Intended total height. If None, the maximum of the heights in *hd_list*\n is used.\n align : {'baseline', 'left', 'top', 'right', 'bottom', 'center'}\n The alignment anchor of the boxes.\n\n Returns\n -------\n height\n The total height of the packing (if a value was originally passed in,\n it is returned without checking that it is actually large enough).\n descent\n The descent of the packing.\n offsets\n The bottom offsets of the boxes.\n ",
"language": "en",
"n_whitespaces": 221,
"n_words": 119,
"vocab_size": 87
} | https://github.com/matplotlib/matplotlib.git |
|
1 | duplicate_interleave | def duplicate_interleave(m):
dim0 = m.shape[0]
m = m.view(-1, 1) # flatten the matrix
m = m.repeat(1, 2) # repeat all elements into the 2nd dimension
m = m.view(dim0, -1) # reshape into a matrix, interleaving the copy
return m
# Copied from transformers.models.gptj.modeling_gptj.apply_rotary_pos_emb | d6b6fb9963e094216daa30ebf61224ca1c46921e | 9 | modeling_codegen.py | 81 | Add CodeGen model (#17443)
* Add CodeGen model
* Add missing key and switch order of super()
* Fix torch.ones init with uint8 instead of bool
* Address comments: copy statements and doc
* update tests
* remove old model parallel
* fix batch gen tests
* fix batch gen test
* update test_gpt2_sample_max_time
* fix codgen test and revert gpt2 test change
* Fix incorrect tie_word_embedding value, typo, URL
* Fix model order in README and styling
* Reorder model list alphabetically
* Set tie_word_embedding to False by default
* Apply suggestions from code review
* Better attn mask name & remove attn masked_bias
* add tokenizer for codegen
* quality
* doc tokenizer
* fix-copies
* add CodeGenTokenizer in converter
* make truncation optional
* add test for truncation
* add copyright
* fix-copies
* fix fast tokenizer decode
* Update src/transformers/models/codegen/tokenization_codegen.py
Co-authored-by: Patrick von Platen <[email protected]>
* increase vocab_size in tests
Co-authored-by: patil-suraj <[email protected]>
Co-authored-by: Patrick von Platen <[email protected]> | 5,777 | 0 | 63 | 48 | 31 | 31,640 | 43 | transformers | 6 | src/transformers/models/codegen/modeling_codegen.py | Python | 6 | {
"docstring": "\n A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy.\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 13,
"vocab_size": 13
} | https://github.com/huggingface/transformers.git |
|
1 | get_granger_causality | def get_granger_causality(time_series_y, time_series_x, lags):
granger_set = pd.concat([time_series_y, time_series_x], axis=1)
granger = grangercausalitytests(granger_set, [lags], verbose=False)
return granger
| f2ca215132de40804667feb4deaa0c6b8bfc3d25 | 9 | econometrics_model.py | 63 | Econometrics Menu (#1403)
* Add Statistics Menu
* Add Granger Causality test
* Apply Black formatting
* Add Cointegration Tests
* Adjust plotting for Cointegration test
* Add Significant parameter to Cointegration tests
* Move regression functions to seperate .py files
* Progress on Panel Data
* A lot of progress for Panel Data
* Make functions robust and improve documentation
* Re-enable Breusch-Godfrey
* Add modify functionality
* Improve modify functionality
* Add Breusch-Pagan heteroscedasticity test
* Capitalize a word
* Include documentatin for the Statistics Menu
* Update _index.md
* Update _index.md
* Update _index.md
* Fix export statements and add Example datasets
* Update example with Longley's dataset
* Update documentation with a legit source
* Compare the results from each regression models based on the wage_panel dataset
* Updated with all relevant types of regression tables
* Update with all relevant regression types for Panel data
* Update _index.md
* Add change column type, improve OLS, add time and entity effects for FE
* Update documentation and fix a small bug
* Add in Statistics menu, replacing Custom menu
* Remove custom menu
* Add in documentation
* Add in gst files
* Cointegration can be used on any amount of columns
* Add Tests
* Make tests time invariant
* Update Poetry and Requirements
* Change name of Statistics menu to Econometrics menu
* Rename scripts
* Add type in Documentation
* Change names from Statistics to Econometrics
* Add graph
* Update tests with rounding and naming
* Make minor adjustments to fix the tests
* Updating tests : allow extra args for capture
* Apply recorder formatting
* Adding some minor formatting
* Fix error with MyPy
* Attempt to fix MyPy annoyance
* super small style things
* Fix small bugs and add plot command to dwat
* Small description mistake
* Update documentation with missing argument
* Update styling
* Add error handling and add improve show functionality
* Fix merge issue
* Allow import from custom_imports
Co-authored-by: Jeroen Bouma <[email protected]>
Co-authored-by: jmaslek <[email protected]>
Co-authored-by: Chavithra PARANA <[email protected]> | 84,280 | 0 | 28 | 42 | 14 | 282,731 | 16 | OpenBBTerminal | 11 | gamestonk_terminal/econometrics/econometrics_model.py | Python | 4 | {
"docstring": "Calculate granger tests\n\n Parameters\n ----------\n time_series_y : Series\n The series you want to test Granger Causality for.\n time_series_x : Series\n The series that you want to test whether it Granger-causes time_series_y\n lags : int\n The amoiunt of lags for the Granger test. By default, this is set to 3.\n ",
"language": "en",
"n_whitespaces": 88,
"n_words": 49,
"vocab_size": 35
} | https://github.com/OpenBB-finance/OpenBBTerminal.git |
|
5 | _load_bboxes | def _load_bboxes(self, results):
ann_info = results['ann_info']
results['gt_bboxes'] = ann_info['bboxes'].copy()
if self.denorm_bbox:
h, w = results['img_shape'][:2]
bbox_num = results['gt_bboxes'].shape[0]
if bbox_num != 0:
results['gt_bboxes'][:, 0::2] *= w
results['gt_bboxes'][:, 1::2] *= h
results['gt_bboxes'] = results['gt_bboxes'].astype(np.float32)
gt_bboxes_ignore = ann_info.get('bboxes_ignore', None)
if gt_bboxes_ignore is not None:
results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy()
results['bbox_fields'].append('gt_bboxes_ignore')
results['bbox_fields'].append('gt_bboxes')
gt_is_group_ofs = ann_info.get('gt_is_group_ofs', None)
if gt_is_group_ofs is not None:
results['gt_is_group_ofs'] = gt_is_group_ofs.copy()
return results
| 1516986a616fee8bb741d0ab2be40683045efccd | 13 | loading.py | 303 | [Feature] Support OpenImages Dataset (#6331)
* [Feature] support openimage group of eval
* [Feature] support openimage group of eval
* support openimage dataset
* support openimage challenge dataset
* fully support OpenImages-V6 and OpenImages Challenge 2019
* Fix some logic error
* update config file
* fix get data_infos error
* fully support OpenImages evaluation
* update OpenImages config files
* [Feature] support OpenImages datasets
* fix bug
* support load image metas from pipeline
* fix bug
* fix get classes logic error
* update code
* support get image metas
* support openimags
* support collect image metas
* support Open Images
* fix openimages logic
* minor fix
* add a new function to compute openimages tpfp
* minor fix
* fix ci error
* minor fix
* fix indication
* minor fix
* fix returns
* fix returns
* fix returns
* fix returns
* fix returns
* minor fix
* update readme
* support loading image level labels and fix some logic
* minor fix
* minor fix
* add class names
* minor fix
* minor fix
* minor fix
* add openimages test unit
* minor fix
* minor fix
* fix test unit
* minor fix
* fix logic error
* minor fix
* fully support openimages
* minor fix
* fix docstring
* fix docstrings in readthedocs
* update get image metas script
* label_description_file -> label_file
* update openimages readme
* fix test unit
* fix test unit
* minor fix
* update readme file
* Update get_image_metas.py | 70,196 | 0 | 238 | 176 | 39 | 244,007 | 61 | mmdetection | 17 | mmdet/datasets/pipelines/loading.py | Python | 19 | {
"docstring": "Private function to load bounding box annotations.\n\n Args:\n results (dict): Result dict from :obj:`mmdet.CustomDataset`.\n\n Returns:\n dict: The dict contains loaded bounding box annotations.\n ",
"language": "en",
"n_whitespaces": 66,
"n_words": 23,
"vocab_size": 19
} | https://github.com/open-mmlab/mmdetection.git |
|
5 | hstack | def hstack(tup, *, dtype=None, casting="same_kind"):
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(tup, stacklevel=2)
arrs = atleast_1d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs and arrs[0].ndim == 1:
return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting)
else:
return _nx.concatenate(arrs, 1, dtype=dtype, casting=casting)
| 126046f84449fffeb0c75ae88657ce6b90236eee | 11 | shape_base.py | 154 | ENH: adding casting option to numpy.stack. (#21627)
np.concatenate and np.stack are similar methods, but only np.concatenate has the casting option.
This PR puts the casting option into the np.stack method to control what kind of data casting may occur
Closes gh-20959
* ENH: adding casting option to numpy.stack.
See #20959
* ENH: adding dtype option to numpy.stack.
See #20959
* REV: removing auto-generated file loops_modulo.dispatch.c
See numpy#20959
* REV: removing auto-generated file loops_modulo.dispatch.c
See numpy#20959
* REV: removing inserted newlines
See numpy#20959
Co-authored-by: alescrocaro <[email protected]>
Co-authored-by: JessePires <[email protected]>
Co-authored-by: patriarka <[email protected]>
* DOC: inserting versionadded info in dtype and casting parameters.
See numpy#20959
Co-authored-by: alescrocaro <[email protected]>
Co-authored-by: JessePires <[email protected]>
Co-authored-by: patriarka <[email protected]>
* TST: writing tests to stack method with dtype and casting options
See numpy#20959
Co-authored-by: alescrocaro <[email protected]>
Co-authored-by: JessePires <[email protected]>
Co-authored-by: patriarka <[email protected]>
* DOC: adding upcoming_change file for new options casting and dtype in method stack.
See numpy#20959
Co-authored-by: alescrocaro <[email protected]>
Co-authored-by: JessePires <[email protected]>
Co-authored-by: patriarka <[email protected]>
* REV: reverting lint errors.
See numpy#20959
Co-authored-by: alescrocaro <[email protected]>
Co-authored-by: JessePires <[email protected]>
Co-authored-by: patriarka <[email protected]>
* DOC: inserting hstack and vstack methods in upcoming changes
See numpy#20959
Co-authored-by: alescrocaro <[email protected]>
Co-authored-by: JessePires <[email protected]>
Co-authored-by: patriarka <[email protected]>
* ENH: adding dtype and casting keyword arguments to numpy.vstack and numpy.hstack.
See numpy#20959
Co-authored-by: alescrocaro <[email protected]>
Co-authored-by: JessePires <[email protected]>
Co-authored-by: patriarka <[email protected]>
* TST: writing tests to vstack and hstack methods with dtype and casting keyword arguments.
See numpy#20959
Co-authored-by: alescrocaro <[email protected]>
Co-authored-by: JessePires <[email protected]>
Co-authored-by: patriarka <[email protected]>
* REV: reverting the 'out' option type in stack method.
See numpy#20959
Co-authored-by: alescrocaro <[email protected]>
Co-authored-by: JessePires <[email protected]>
Co-authored-by: patriarka <[email protected]>
* REV: Reverting out type changes in overload of shape_base.pyi file.
See numpy#20959
Co-authored-by: alescrocaro <[email protected]>
Co-authored-by: jhonatancunha <[email protected]>
Co-authored-by: patriarka <[email protected]>
* DOC: correcting some english erros in upcoming_changes file.
See numpy#20959
Co-authored-by: alescrocaro <[email protected]>
Co-authored-by: JessePires <[email protected]>
Co-authored-by: patriarka <[email protected]>
Co-authored-by: JessePires <[email protected]>
Co-authored-by: alescrocaro <[email protected]>
Co-authored-by: JessePires <[email protected]>
Co-authored-by: patriarka <[email protected]> | 38,661 | 0 | 110 | 99 | 42 | 160,598 | 54 | numpy | 15 | numpy/core/shape_base.py | Python | 10 | {
"docstring": "\n Stack arrays in sequence horizontally (column wise).\n\n This is equivalent to concatenation along the second axis, except for 1-D\n arrays where it concatenates along the first axis. Rebuilds arrays divided\n by `hsplit`.\n\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). The functions `concatenate`, `stack` and\n `block` provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the second axis,\n except 1-D arrays which can be any length.\n\n dtype : str or dtype\n If provided, the destination array will have this dtype. Cannot be\n provided together with `out`.\n\n .. versionadded:: 1.24\n\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Defaults to 'same_kind'.\n\n .. versionadded:: 1.24\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays.\n\n See Also\n --------\n concatenate : Join a sequence of arrays along an existing axis.\n stack : Join a sequence of arrays along a new axis.\n block : Assemble an nd-array from nested lists of blocks.\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third axis).\n column_stack : Stack 1-D arrays as columns into a 2-D array.\n hsplit : Split an array into multiple sub-arrays horizontally (column-wise).\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((4,5,6))\n >>> np.hstack((a,b))\n array([1, 2, 3, 4, 5, 6])\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[4],[5],[6]])\n >>> np.hstack((a,b))\n array([[1, 4],\n [2, 5],\n [3, 6]])\n\n ",
"language": "en",
"n_whitespaces": 447,
"n_words": 270,
"vocab_size": 172
} | https://github.com/numpy/numpy.git |
|
3 | d2_tweedie_score | def d2_tweedie_score(y_true, y_pred, *, sample_weight=None, power=0):
y_type, y_true, y_pred, _ = _check_reg_targets(
y_true, y_pred, None, dtype=[np.float64, np.float32]
)
if y_type == "continuous-multioutput":
raise ValueError("Multioutput not supported in d2_tweedie_score")
if _num_samples(y_pred) < 2:
msg = "D^2 score is not well-defined with less than two samples."
warnings.warn(msg, UndefinedMetricWarning)
return float("nan")
y_true, y_pred = np.squeeze(y_true), np.squeeze(y_pred)
numerator = mean_tweedie_deviance(
y_true, y_pred, sample_weight=sample_weight, power=power
)
y_avg = np.average(y_true, weights=sample_weight)
denominator = _mean_tweedie_deviance(
y_true, y_avg, sample_weight=sample_weight, power=power
)
return 1 - numerator / denominator
| 75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc | 11 | _regression.py | 225 | ENH migrate GLMs / TweedieRegressor to linear loss (#22548)
Co-authored-by: Olivier Grisel <[email protected]>
Co-authored-by: Thomas J. Fan <[email protected]> | 75,790 | 0 | 164 | 147 | 58 | 259,460 | 79 | scikit-learn | 27 | sklearn/metrics/_regression.py | Python | 19 | {
"docstring": "D^2 regression score function, percentage of Tweedie deviance explained.\n\n Best possible score is 1.0 and it can be negative (because the model can be\n arbitrarily worse). A model that always uses the empirical mean of `y_true` as\n constant prediction, disregarding the input features, gets a D^2 score of 0.0.\n\n Read more in the :ref:`User Guide <d2_tweedie_score>`.\n\n .. versionadded:: 1.0\n\n Parameters\n ----------\n y_true : array-like of shape (n_samples,)\n Ground truth (correct) target values.\n\n y_pred : array-like of shape (n_samples,)\n Estimated target values.\n\n sample_weight : array-like of shape (n_samples,), optional\n Sample weights.\n\n power : float, default=0\n Tweedie power parameter. Either power <= 0 or power >= 1.\n\n The higher `p` the less weight is given to extreme\n deviations between true and predicted targets.\n\n - power < 0: Extreme stable distribution. Requires: y_pred > 0.\n - power = 0 : Normal distribution, output corresponds to r2_score.\n y_true and y_pred can be any real numbers.\n - power = 1 : Poisson distribution. Requires: y_true >= 0 and\n y_pred > 0.\n - 1 < p < 2 : Compound Poisson distribution. Requires: y_true >= 0\n and y_pred > 0.\n - power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0.\n - power = 3 : Inverse Gaussian distribution. Requires: y_true > 0\n and y_pred > 0.\n - otherwise : Positive stable distribution. Requires: y_true > 0\n and y_pred > 0.\n\n Returns\n -------\n z : float or ndarray of floats\n The D^2 score.\n\n Notes\n -----\n This is not a symmetric function.\n\n Like R^2, D^2 score may be negative (it need not actually be the square of\n a quantity D).\n\n This metric is not well-defined for single samples and will return a NaN\n value if n_samples is less than two.\n\n References\n ----------\n .. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.\n Wainwright. \"Statistical Learning with Sparsity: The Lasso and\n Generalizations.\" (2015). https://trevorhastie.github.io\n\n Examples\n --------\n >>> from sklearn.metrics import d2_tweedie_score\n >>> y_true = [0.5, 1, 2.5, 7]\n >>> y_pred = [1, 1, 5, 3.5]\n >>> d2_tweedie_score(y_true, y_pred)\n 0.285...\n >>> d2_tweedie_score(y_true, y_pred, power=1)\n 0.487...\n >>> d2_tweedie_score(y_true, y_pred, power=2)\n 0.630...\n >>> d2_tweedie_score(y_true, y_true, power=2)\n 1.0\n ",
"language": "en",
"n_whitespaces": 630,
"n_words": 353,
"vocab_size": 196
} | https://github.com/scikit-learn/scikit-learn.git |
|
11 | find_requirement | def find_requirement(self, req, upgrade):
# type: (InstallRequirement, bool) -> Optional[InstallationCandidate]
hashes = req.hashes(trust_internet=False)
best_candidate_result = self.find_best_candidate(
req.name, specifier=req.specifier, hashes=hashes,
)
best_candidate = best_candidate_result.best_candidate
installed_version = None # type: Optional[_BaseVersion]
if req.satisfied_by is not None:
installed_version = parse_version(req.satisfied_by.version)
| f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | 12 | package_finder.py | 106 | upd; format | 12,265 | 0 | 118 | 214 | 30 | 60,728 | 37 | transferlearning | 15 | .venv/lib/python3.8/site-packages/pip/_internal/index/package_finder.py | Python | 55 | {
"docstring": "Try to find a Link matching req\n\n Expects req, an InstallRequirement and upgrade, a boolean\n Returns a InstallationCandidate if found,\n Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 25,
"vocab_size": 23
} | https://github.com/jindongwang/transferlearning.git |
|
1 | add_handler | def add_handler(self, handler):
sympy_deprecation_warning(
,
deprecated_since_version="1.8",
active_deprecations_target='deprecated-askhandler',
)
self.handlers.append(handler)
| ad766d1c02943e86f50559abfd0c72e582c9ca6a | 9 | assume.py | 48 | Update the AskHandler deprecation warnings
n.b., the issue number in the original warning message was wrong. It should
have been #20837. | 48,153 | 0 | 62 | 28 | 9 | 196,757 | 9 | sympy | 8 | sympy/assumptions/assume.py | Python | 10 | {
"docstring": "\n The AskHandler system is deprecated. Predicate.add_handler()\n should be replaced with the multipledispatch handler of Predicate.\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 15,
"vocab_size": 15
} | https://github.com/sympy/sympy.git |
|
1 | get_revision | def get_revision(cls, location):
# type: (str) -> str
raise NotImplementedError
| f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | 6 | versioncontrol.py | 19 | upd; format | 12,574 | 0 | 31 | 10 | 10 | 61,435 | 10 | transferlearning | 4 | .venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py | Python | 2 | {
"docstring": "\n Return the current commit id of the files at the given location.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 10
} | https://github.com/jindongwang/transferlearning.git |
|
1 | get_config_profile_type_map | def get_config_profile_type_map() -> t.Dict[t.Type[HostConfig], t.Type[HostProfile]]:
return get_type_map(HostProfile, HostConfig)
| 3eb0485dd92c88cc92152d3656d94492db44b183 | 7 | host_profiles.py | 48 | ansible-test - Use more native type hints. (#78435)
* ansible-test - Use more native type hints.
Simple search and replace to switch from comments to native type hints for return types of functions with no arguments.
* ansible-test - Use more native type hints.
Conversion of simple single-line function annotation type comments to native type hints.
* ansible-test - Use more native type hints.
Conversion of single-line function annotation type comments with default values to native type hints.
* ansible-test - Use more native type hints.
Manual conversion of type annotation comments for functions which have pylint directives. | 79,291 | 0 | 14 | 31 | 8 | 268,017 | 8 | ansible | 7 | test/lib/ansible_test/_internal/host_profiles.py | Python | 3 | {
"docstring": "Create and return a mapping of HostConfig types to HostProfile types.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | https://github.com/ansible/ansible.git |
|
2 | query_task | def query_task(doctype, txt, searchfield, start, page_len, filters):
from frappe.desk.reportview import build_match_conditions
search_string = "%%%s%%" % txt
order_by_string = "%s%%" % txt
match_conditions = build_match_conditions("Task")
match_conditions = ("and" + match_conditions) if match_conditions else ""
return frappe.db.sql(
% (searchfield, "%s", "%s", match_conditions, "%s", searchfield, "%s", searchfield, "%s", "%s"),
(search_string, search_string, order_by_string, order_by_string, page_len, start),
)
| 00ef499739959630cd7cf97419fbb6ca59be05f2 | 10 | utils.py | 150 | refactor: use db independent offset syntax (#31345)
* chore: use db independent offset syntax
* fix: typo
* style: reformat code to black spec
Co-authored-by: Ankush Menat <[email protected]> | 14,887 | 0 | 43 | 96 | 37 | 68,806 | 53 | erpnext | 16 | erpnext/projects/utils.py | Python | 18 | {
"docstring": "select name, subject from `tabTask`\n\t\twhere (`%s` like %s or `subject` like %s) %s\n\t\torder by\n\t\t\tcase when `subject` like %s then 0 else 1 end,\n\t\t\tcase when `%s` like %s then 0 else 1 end,\n\t\t\t`%s`,\n\t\t\tsubject\n\t\tlimit %s offset %s",
"language": "en",
"n_whitespaces": 34,
"n_words": 42,
"vocab_size": 25
} | https://github.com/frappe/erpnext.git |
|
3 | _project_out | def _project_out(basis, U):
# See Sec. 6.9 of The Symmetric Eigenvalue Problem by Beresford Parlett [1]
# which motivates two loop iterations for basis subtraction. This
# "twice is enough" approach is due to Kahan. See also a practical note
# by SLEPc developers [2].
#
# Interspersing with orthonormalization isn't directly grounded in the
# original analysis, but taken from Algorithm 5 of [3]. In practice, due to
# normalization, I have noticed that that the orthonormalized basis
# does not always end up as a subspace of the starting basis in practice.
# There may be room to refine this procedure further, but the adjustment
# in the subsequent block handles this edge case well enough for now.
#
# [1]: https://epubs.siam.org/doi/abs/10.1137/1.9781611971163
# [2]: http://slepc.upv.es/documentation/reports/str1.pdf
# [3]: https://arxiv.org/abs/1704.07458
for _ in range(2):
U -= _mm(basis, _mm(basis.T, U))
U = _orthonormalize(U)
# It's crucial to end on a subtraction of the original basis.
# This seems to be a detail not present in [2], possibly because of
# of reliance on soft locking.
#
# Near convergence, if the residuals R are 0 and our last
# operation when projecting (X, P) out from R is the orthonormalization
# done above, then due to catastrophic cancellation we may re-introduce
# (X, P) subspace components into U, which can ruin the Rayleigh-Ritz
# conditioning.
#
# We zero out any columns that are even remotely suspicious, so the invariant
# that [basis, U] is zero-or-orthogonal is ensured.
for _ in range(2):
U -= _mm(basis, _mm(basis.T, U))
normU = jnp.linalg.norm(U, ord=2, axis=0, keepdims=True)
U *= (normU >= 0.99).astype(U.dtype)
return U
| 76fcf63fb4e53fd82faece677ed46db8b0c71707 | 13 | linalg.py | 176 | Add initial LOBPCG top-k eigenvalue solver (#3112)
This initial version is f32-only for accelerators, since it relies on an eigh call (which itself is f32 at most) in its inner loop.
For details, see jax.experimental.linalg.standard_lobpcg documentation.
This is a partial implementation of the similar [scipy lobpcg
function](https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lobpcg.html). | 27,026 | 0 | 311 | 101 | 171 | 121,063 | 269 | jax | 17 | jax/experimental/sparse/linalg.py | Python | 9 | {
"docstring": "Derives component of U in the orthogonal complement of basis.\n\n This method iteratively subtracts out the basis component and orthonormalizes\n the remainder. To an extent, these two operations can oppose each other\n when the remainder norm is near-zero (since normalization enlarges a vector\n which may possibly lie in the subspace `basis` to be subtracted).\n\n We make sure to prioritize orthogonality between `basis` and `U`, favoring\n to return a lower-rank space thank `rank(U)`, in this tradeoff.\n\n Args:\n basis : An `(n, m)` array which describes a linear subspace of R^n, this\n is assumed to be orthonormal but zero columns are allowed.\n U : An `(n, k)` array representing another subspace of R^n, whose `basis`\n component is to be projected out.\n\n Returns:\n An `(n, k)` array, with some columns possibly zeroed out, representing\n the component of `U` in the complement of `basis`. The nonzero columns\n are mutually orthonormal.\n ",
"language": "en",
"n_whitespaces": 184,
"n_words": 146,
"vocab_size": 96
} | https://github.com/google/jax.git |
|
7 | filter_empty_contents | def filter_empty_contents(self, ocr_info):
new_ocr_info = []
empty_index = []
for idx, info in enumerate(ocr_info):
if len(info["transcription"]) > 0:
new_ocr_info.append(copy.deepcopy(info))
else:
empty_index.append(info["id"])
for idx, info in enumerate(new_ocr_info):
new_link = []
for link in info["linking"]:
if link[0] in empty_index or link[1] in empty_index:
continue
new_link.append(link)
new_ocr_info[idx]["linking"] = new_link
return new_ocr_info
| 7a99588dd8b678233eff0391aac13ebd0f7000f6 | 14 | label_ops.py | 188 | add more dataset yamls and fix re exceptions (#6791)
* add more dataset yamls and fix re exceptions | 4,713 | 0 | 224 | 114 | 31 | 24,246 | 48 | PaddleOCR | 14 | ppocr/data/imaug/label_ops.py | Python | 16 | {
"docstring": "\n find out the empty texts and remove the links\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 8
} | https://github.com/PaddlePaddle/PaddleOCR.git |
|
1 | test_performance_issue_alert_user | def test_performance_issue_alert_user(self, mock_func):
event = self.create_performance_issue()
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "Member",
"targetIdentifier": str(self.user.id),
}
rule = Rule.objects.create(
project=self.project,
label="ja rule",
data={
"match": "all",
"actions": [action_data],
},
)
notification = AlertRuleNotification(
Notification(event=event, rule=rule), ActionTargetType.MEMBER, self.user.id
)
with self.feature("organizations:performance-issues"), self.tasks():
notification.send()
attachment, text = get_attachment()
assert attachment["title"] == "N+1 Query"
assert (
attachment["text"]
== "db - SELECT `books_author`.`id`, `books_author`.`name` FROM `books_author` WHERE `books_author`.`id` = %s LIMIT 21"
)
assert (
attachment["footer"]
== f"{self.project.slug} | production | <http://testserver/settings/account/notifications/alerts/?referrer=issue_alert-slack-user|Notification Settings>"
)
| 495d45c6547e398a5d4d3c1fa8cb97e69b1751f8 | 12 | test_issue_alert.py | 268 | ref(slack): Update workflow alerts for perf issues (#40463)
Slack workflow alerts for performance issues are showing a text value of
"no value". This PR adds feature parity with error issues for workflow
alerts so that they are shown with the proper data. | 18,279 | 0 | 357 | 149 | 65 | 87,324 | 79 | sentry | 28 | tests/sentry/integrations/slack/notifications/test_issue_alert.py | Python | 30 | {
"docstring": "Test that performance issue alerts are sent to a Slack user.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | https://github.com/getsentry/sentry.git |
|
1 | save_pretrained | def save_pretrained(self, save_directory):
self.feature_extractor.save_pretrained(save_directory)
self.tokenizer.save_pretrained(save_directory)
| ac227093e41cecb07c7e0f2fc9a504850907bd06 | 8 | processing_vilt.py | 41 | Add ViLT (#14895)
* First commit
* Add conversion script
* Make conversion script work for base model
* More improvements
* Update conversion script, works for vqa
* Add indexing argument to meshgrid
* Make conversion script work for ViltForPreTraining
* Add ViltForPreTraining to docs
* Fix device issue
* Add processor
* Add MinMaxResize to feature extractor
* Implement call method of ViltProcessor
* Fix tests
* Add integration test
* Add loss calculation for VQA
* Improve tests
* Improve some more tests
* Debug tests
* Small improvements
* Add support for attention_mask
* Remove mask_it
* Add pixel_mask
* Add tests for ViltFeatureExtractor
* Improve tests
* Add ViltForNaturalLanguageVisualReasoning
* Add ViltForNaturalLanguageVisualReasoning to conversion script
* Minor fixes
* Add support for image_embeds, update docstrings to markdown
* Update docs to markdown
* Improve conversion script
* Rename ViltForPreTraining to ViltForMaskedLM
* Improve conversion script
* Convert docstrings to markdown
* Fix code example of retrieval model
* Properly convert masked language model
* Add integration test for nlvr
* Fix code quality
* Apply suggestions from code review
* Add copied from statements
* Fix pretrained_config_archive_map
* Fix docs
* Add model to README
* Apply suggestions from code review
Co-authored-by: Sylvain Gugger <[email protected]>
* Apply more suggestions from code review
* Make code more readable
* Add ViltForNaturalLanguageVisualReasoning to the tests
* Rename ViltForVisualQuestionAnswering to ViltForQuestionAnswering
* Replace pixel_values_2 by single tensor
* Add hidden_states and attentions
* Fix one more test
* Fix all tests
* Update year
* Fix rebase issues
* Fix another rebase issue
* Remove ViltForPreTraining from auto mapping
* Rename ViltForImageRetrievalTextRetrieval to ViltForImageAndTextRetrieval
* Make it possible to use BertTokenizerFast in the processor
* Use BertTokenizerFast by default
* Rename ViltForNaturalLanguageVisualReasoning, define custom model output
Co-authored-by: Sylvain Gugger <[email protected]> | 6,259 | 0 | 26 | 24 | 5 | 34,328 | 5 | transformers | 5 | src/transformers/models/vilt/processing_vilt.py | Python | 3 | {
"docstring": "\n Save a ViLT feature_extractor object and BERT tokenizer object to the directory `save_directory`, so that it\n can be re-loaded using the [`~ViltProcessor.from_pretrained`] class method.\n\n <Tip>\n\n This class method is simply calling [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] and\n [`~tokenization_utils_base.PreTrainedTokenizer.save_pretrained`]. Please refer to the docstrings of the methods\n above for more information.\n\n </Tip>\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will\n be created if it does not exist).\n ",
"language": "en",
"n_whitespaces": 180,
"n_words": 75,
"vocab_size": 60
} | https://github.com/huggingface/transformers.git |
|
1 | client_entity_removed_fixture | def client_entity_removed_fixture(hass):
with patch(
"homeassistant.components.webostv.WebOsClient", autospec=True
) as mock_client_class:
client = mock_client_class.return_value
client.hello_info = {"deviceUUID": "some-fake-uuid"}
client.connected = False
| eb487480381bff3aa87f4d80145b162863dc0a27 | 11 | conftest.py | 70 | Add webostv 100% tests coverage for init (#64801) | 109,493 | 0 | 56 | 64 | 17 | 310,821 | 19 | core | 9 | tests/components/webostv/conftest.py | Python | 12 | {
"docstring": "Patch of client library, entity removed waiting for connect.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | https://github.com/home-assistant/core.git |
|
1 | test_calling_start_ray_head | def test_calling_start_ray_head(call_ray_stop_only):
# Test that we can call ray start with various command line
# parameters.
# Test starting Ray with a redis port specified.
check_call_ray(["start", "--head", "--port", "0"])
check_call_ray(["stop"])
# Test starting Ray with a node IP address specified.
check_call_ray(
["start", "--head", "--node-ip-address", "127.0.0.1", "--port", "0"])
check_call_ray(["stop"])
# Test starting Ray with a system config parameter set.
check_call_ray([
"start", "--head", "--system-config",
"{\"metrics_report_interval_ms\":100}", "--port", "0"
])
check_call_ray(["stop"])
# Test starting Ray with the object manager and node manager ports
# specified.
check_call_ray([
"start", "--head", "--object-manager-port", "22345",
"--node-manager-port", "54321", "--port", "0"
])
check_call_ray(["stop"])
# Test starting Ray with the worker port range specified.
check_call_ray([
"start", "--head", "--min-worker-port", "51000", "--max-worker-port",
"51050", "--port", "0"
])
check_call_ray(["stop"])
# Test starting Ray with a worker port list.
check_call_ray(["start", "--head", "--worker-port-list", "10002,10003"])
check_call_ray(["stop"])
# Test starting Ray with a non-int in the worker port list.
with pytest.raises(subprocess.CalledProcessError):
check_call_ray(["start", "--head", "--worker-port-list", "10002,a"])
check_call_ray(["stop"])
# Test starting Ray with an invalid port in the worker port list.
with pytest.raises(subprocess.CalledProcessError):
check_call_ray(["start", "--head", "--worker-port-list", "100"])
check_call_ray(["stop"])
# Test starting Ray with the number of CPUs specified.
check_call_ray(["start", "--head", "--num-cpus", "2", "--port", "0"])
check_call_ray(["stop"])
# Test starting Ray with the number of GPUs specified.
check_call_ray(["start", "--head", "--num-gpus", "100", "--port", "0"])
check_call_ray(["stop"])
# Test starting Ray with redis shard ports specified.
check_call_ray([
"start", "--head", "--redis-shard-ports", "6380,6381,6382", "--port",
"0"
])
check_call_ray(["stop"])
# Test starting Ray with all arguments specified.
check_call_ray([
"start", "--head", "--redis-shard-ports", "6380,6381,6382",
"--object-manager-port", "22345", "--num-cpus", "2", "--num-gpus", "0",
"--resources", "{\"Custom\": 1}", "--port", "0"
])
check_call_ray(["stop"])
# Test starting Ray with invalid external address.
# It will fall back to creating a new one.
check_call_ray(
["start", "--head", "--address", "127.0.0.1:6379", "--port", "0"])
check_call_ray(["stop"])
# Test starting Ray with RAY_REDIS_ADDRESS env.
os.environ["RAY_REDIS_ADDRESS"] = "127.0.0.1:6379"
check_call_ray(["start", "--head", "--port", "0"])
check_call_ray(["stop"])
del os.environ["RAY_REDIS_ADDRESS"]
# Test --block. Killing a child process should cause the command to exit.
blocked = subprocess.Popen(
["ray", "start", "--head", "--block", "--port", "0"])
blocked.poll()
assert blocked.returncode is None
# Make sure ray cluster is up
run_string_as_driver()
# Make sure ray cluster is up
run_string_as_driver()
kill_process_by_name("raylet", SIGKILL=True)
wait_for_children_of_pid_to_exit(blocked.pid, timeout=30)
blocked.wait()
assert blocked.returncode != 0, "ray start shouldn't return 0 on bad exit"
# Test --block. Killing the command should clean up all child processes.
blocked = subprocess.Popen(
["ray", "start", "--head", "--block", "--port", "0"])
blocked.poll()
assert blocked.returncode is None
wait_for_children_of_pid(blocked.pid, num_children=7, timeout=30)
blocked.terminate()
wait_for_children_of_pid_to_exit(blocked.pid, timeout=30)
blocked.wait()
assert blocked.returncode != 0, "ray start shouldn't return 0 on bad exit"
| 8cc268096cb79ededff67b90806b4e4d996ca775 | 11 | test_multi_node_3.py | 970 | [GCS][Bootstrap 3/n] Refactor to support GCS bootstrap (#21295)
This PR refactors several components to support switching to GCS address bootstrapping later:
- Treat address from `ray.init()` and `ray` CLI as bootstrap address instead of assuming it is Redis address.
- Ray client servers support `--address` flag instead of `--redis-address`.
- A few other miscellaneous cleanup.
Also, add a test for starting non-head node with `ray start`. | 28,858 | 0 | 732 | 495 | 143 | 128,960 | 392 | ray | 23 | python/ray/tests/test_multi_node_3.py | Python | 88 | {
"docstring": "\nimport ray\nfrom time import sleep\nfor i in range(0, 5):\n try:\n ray.init(address='auto')\n break\n except:\n sleep(1)\n\nimport ray\nfrom time import sleep\nfor i in range(0, 5):\n try:\n ray.init(address='auto')\n break\n except:\n sleep(1)\n",
"language": "en",
"n_whitespaces": 80,
"n_words": 32,
"vocab_size": 15
} | https://github.com/ray-project/ray.git |
|
3 | check_task_fail_for_duplicates | def check_task_fail_for_duplicates(session):
metadata = reflect_tables([TaskFail], session)
task_fail = metadata.tables.get(TaskFail.__tablename__) # type: ignore
if task_fail is None: # table not there
return
if "run_id" in task_fail.columns: # upgrade already applied
return
yield from check_table_for_duplicates(
table_name=task_fail.name,
uniqueness=['dag_id', 'task_id', 'execution_date'],
session=session,
)
| f06b3955b1d937138fb38021a6a373b94ae8f9e8 | 11 | db.py | 115 | Add map_index and run_id to TaskFail (#22260)
TaskFail entities always belong to a TaskInstance. The PK for TaskInstance has changed, so we need to update TaskFail to have the new columns. | 8,873 | 0 | 98 | 67 | 33 | 46,402 | 39 | airflow | 14 | airflow/utils/db.py | Python | 12 | {
"docstring": "Check that there are no duplicates in the task_fail table before creating FK",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | https://github.com/apache/airflow.git |
|
1 | add_to_apply_calls | def add_to_apply_calls(self, func, length=None, width=None, *args, **kwargs):
return cuDFOnRayDataframePartition(
self.gpu_manager,
self.apply(func, *args, **kwargs),
length=length,
width=width,
)
| d6d503ac7c3028d871c34d9e99e925ddb0746df6 | 10 | partition.py | 73 | FIX-#4597: Refactor Partition handling of func, args, kwargs (#4715)
Co-authored-by: Iaroslav Igoshev <[email protected]>
Signed-off-by: Jonathan Shi <[email protected]> | 36,023 | 0 | 81 | 51 | 15 | 154,500 | 16 | modin | 10 | modin/core/execution/ray/implementations/cudf_on_ray/partitioning/partition.py | Python | 7 | {
"docstring": "\n Apply `func` to this partition and create new.\n\n Parameters\n ----------\n func : callable\n A function to apply.\n length : ray.ObjectRef or int, optional\n Length, or reference to length, of wrapped ``pandas.DataFrame``.\n width : ray.ObjectRef or int, optional\n Width, or reference to width, of wrapped ``pandas.DataFrame``.\n *args : tuple\n Positional arguments to be passed in `func`.\n **kwargs : dict\n Additional keywords arguments to be passed in `func`.\n\n Returns\n -------\n cuDFOnRayDataframePartition\n New partition based on result of `func`.\n\n Notes\n -----\n We eagerly schedule the apply `func` and produce a new ``cuDFOnRayDataframePartition``.\n ",
"language": "en",
"n_whitespaces": 261,
"n_words": 89,
"vocab_size": 60
} | https://github.com/modin-project/modin.git |
|
13 | get_feature_names_out | def get_feature_names_out(self, input_features=None):
check_is_fitted(self)
input_features = _check_feature_names_in(self, input_features)
# List of tuples (name, feature_names_out)
transformer_with_feature_names_out = []
for name, trans, column, _ in self._iter(fitted=True):
feature_names_out = self._get_feature_name_out_for_transformer(
name, trans, column, input_features
)
if feature_names_out is None:
continue
transformer_with_feature_names_out.append((name, feature_names_out))
if not transformer_with_feature_names_out:
# No feature names
return np.array([], dtype=object)
if self.verbose_feature_names_out:
# Prefix the feature names out with the transformers name
names = list(
chain.from_iterable(
(f"{name}__{i}" for i in feature_names_out)
for name, feature_names_out in transformer_with_feature_names_out
)
)
return np.asarray(names, dtype=object)
# verbose_feature_names_out is False
# Check that names are all unique without a prefix
feature_names_count = Counter(
chain.from_iterable(s for _, s in transformer_with_feature_names_out)
)
top_6_overlap = [
name for name, count in feature_names_count.most_common(6) if count > 1
]
top_6_overlap.sort()
if top_6_overlap:
if len(top_6_overlap) == 6:
# There are more than 5 overlapping names, we only show the 5
# of the feature names
names_repr = str(top_6_overlap[:5])[:-1] + ", ...]"
else:
names_repr = str(top_6_overlap)
raise ValueError(
f"Output feature names: {names_repr} are not unique. Please set "
"verbose_feature_names_out=True to add prefixes to feature names"
)
return np.concatenate(
[name for _, name in transformer_with_feature_names_out],
)
| 279388d9ed2ea83194dd45a2d78161be30b43aa7 | 16 | _column_transformer.py | 380 | DOC Improve get_feature_names_out docstrings (#22718)
Co-authored-by: Thomas J. Fan <[email protected]> | 75,571 | 0 | 683 | 231 | 113 | 259,112 | 182 | scikit-learn | 38 | sklearn/compose/_column_transformer.py | Python | 40 | {
"docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n ",
"language": "en",
"n_whitespaces": 221,
"n_words": 76,
"vocab_size": 53
} | https://github.com/scikit-learn/scikit-learn.git |
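
A minimal usage sketch for the `get_feature_names_out` behaviour described in the scikit-learn record above (assumes scikit-learn >= 1.1; the columns and transformers are illustrative only):

import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

X = pd.DataFrame({"age": [25, 32, 47], "city": ["NY", "SF", "NY"]})

ct = ColumnTransformer(
    [("num", StandardScaler(), ["age"]),
     ("cat", OneHotEncoder(), ["city"])],
    verbose_feature_names_out=True,  # prefix each output with its transformer name
)
ct.fit(X)
print(ct.get_feature_names_out())
# e.g. ['num__age' 'cat__city_NY' 'cat__city_SF']

With `verbose_feature_names_out=False` the un-prefixed names must be unique, which is exactly the duplicate check the method performs.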
|
7 | parse_query_string | def parse_query_string(query_string, operator=None, zero_terms=MATCH_NONE):
filters, query_string = separate_filters_from_query(query_string)
is_phrase = False
tokens = []
for part in query_string.split('"'):
part = part.strip()
if part:
if is_phrase:
tokens.append(Phrase(part))
else:
tokens.append(
PlainText(part, operator=operator or PlainText.DEFAULT_OPERATOR)
)
is_phrase = not is_phrase
if tokens:
if operator == "or":
search_query = OR(tokens)
else:
search_query = AND(tokens)
else:
search_query = zero_terms
return filters, search_query
| d10f15e55806c6944827d801cd9c2d53f5da4186 | 19 | utils.py | 193 | Reformat with black | 16,438 | 0 | 231 | 115 | 38 | 75,890 | 57 | wagtail | 19 | wagtail/search/utils.py | Python | 22 | {
"docstring": "\n This takes a query string typed in by a user and extracts the following:\n\n - Quoted terms (for phrase search)\n - Filters\n\n For example, the following query:\n\n `hello \"this is a phrase\" live:true` would be parsed into:\n\n filters: {'live': 'true'}\n tokens: And([PlainText('hello'), Phrase('this is a phrase')])\n ",
"language": "en",
"n_whitespaces": 75,
"n_words": 46,
"vocab_size": 40
} | https://github.com/wagtail/wagtail.git |
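
A self-contained sketch of the quote-splitting idea from the `parse_query_string` record above; plain tuples stand in for wagtail's `PlainText`/`Phrase` objects:

# Text outside double quotes becomes plain-text terms, text inside becomes phrases.
def split_phrases(query_string):
    tokens = []
    is_phrase = False
    for part in query_string.split('"'):
        part = part.strip()
        if part:
            tokens.append(("phrase" if is_phrase else "plain", part))
        is_phrase = not is_phrase
    return tokens

print(split_phrases('hello "this is a phrase" world'))
# [('plain', 'hello'), ('phrase', 'this is a phrase'), ('plain', 'world')]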
|
1 | preprocess_input | def preprocess_input(x, data_format=None):
return x
@keras_export("keras.applications.resnet_rs.decode_predictions") | 9c24fc4057303172ad977cebd626da2b7adb63d4 | @keras_export("keras.applications.resnet_rs.decode_predictions") | 7 | resnet_rs.py | 32 | Add ResNet-RS to keras.applications - code refactor | 79,882 | 1 | 11 | 12 | 6 | 269,080 | 6 | keras | 4 | keras/applications/resnet_rs.py | Python | 2 | {
"docstring": "A placeholder method for backward compatibility.\n\n The preprocessing logic has been included in the ResnetRS model\n implementation. Users are no longer required to call this method to normalize\n the input data. This method does nothing and only kept as a placeholder to\n align the API surface between old and new version of model.\n\n Args:\n x: A floating point `numpy.array` or a `tf.Tensor`.\n data_format: Optional data format of the image tensor/array. Defaults to\n None, in which case the global setting\n `tf.keras.backend.image_data_format()` is used (unless you changed it, it\n defaults to \"channels_last\").{mode}\n\n Returns:\n Unchanged `numpy.array` or `tf.Tensor`.\n ",
"language": "en",
"n_whitespaces": 152,
"n_words": 95,
"vocab_size": 76
} | https://github.com/keras-team/keras.git |
10 | check_credit_limit | def check_credit_limit(customer, company, ignore_outstanding_sales_order=False, extra_amount=0):
credit_limit = get_credit_limit(customer, company)
if not credit_limit:
return
customer_outstanding = get_customer_outstanding(customer, company, ignore_outstanding_sales_order)
if extra_amount > 0:
customer_outstanding += flt(extra_amount)
if credit_limit > 0 and flt(customer_outstanding) > credit_limit:
msgprint(
_("Credit limit has been crossed for customer {0} ({1}/{2})").format(
customer, customer_outstanding, credit_limit
)
)
# If not authorized person raise exception
credit_controller_role = frappe.db.get_single_value("Accounts Settings", "credit_controller")
if not credit_controller_role or credit_controller_role not in frappe.get_roles():
# form a list of emails for the credit controller users
credit_controller_users = get_users_with_role(credit_controller_role or "Sales Master Manager")
# form a list of emails and names to show to the user
credit_controller_users_formatted = [
get_formatted_email(user).replace("<", "(").replace(">", ")")
for user in credit_controller_users
]
if not credit_controller_users_formatted:
frappe.throw(
_("Please contact your administrator to extend the credit limits for {0}.").format(customer)
)
message = .format(
customer, "<li>".join(credit_controller_users_formatted)
)
# if the current user does not have permissions to override credit limit,
# prompt them to send out an email to the controller users
frappe.msgprint(
message,
title="Notify",
raise_exception=1,
primary_action={
"label": "Send Email",
"server_action": "erpnext.selling.doctype.customer.customer.send_emails",
"args": {
"customer": customer,
"customer_outstanding": customer_outstanding,
"credit_limit": credit_limit,
"credit_controller_users_list": credit_controller_users,
},
},
)
@frappe.whitelist() | 494bd9ef78313436f0424b918f200dab8fc7c20b | @frappe.whitelist() | 18 | customer.py | 383 | style: format code with black | 14,497 | 1 | 133 | 218 | 118 | 67,329 | 181 | erpnext | 31 | erpnext/selling/doctype/customer/customer.py | Python | 43 | {
"docstring": "Please contact any of the following users to extend the credit limits for {0}:\n\t\t\t\t<br><br><ul><li>{1}</li></ul>",
"language": "en",
"n_whitespaces": 13,
"n_words": 15,
"vocab_size": 14
} | https://github.com/frappe/erpnext.git |
1 | test__linux_lvm_no_logical_volumes | def test__linux_lvm_no_logical_volumes(self):
vgs_out = {"pid": 123, "retcode": 0, "stdout": " vg00\n vg01", "stderr": ""}
lvs_out = {"pid": 456, "retcode": 0, "stdout": "", "stderr": ""}
cmd_out = MagicMock(autospec=True, side_effect=[vgs_out, lvs_out, lvs_out])
patch_which = patch(
"salt.utils.path.which", autospec=True, return_value="/usr/sbin/lvm"
)
patch_cmd_lvm = patch.dict(lvm.__salt__, {"cmd.run_all": cmd_out})
with patch_which, patch_cmd_lvm:
ret = lvm._linux_lvm()
assert ret == {"lvm": {"vg00": [], "vg01": []}}, ret
| 637e74f0f2e80723fa30eb8f83a86af440c6ba4e | 11 | test_lvm.py | 222 | Uses only command's stdout to populate lvm grain | 53,878 | 0 | 144 | 124 | 44 | 215,192 | 57 | salt | 17 | tests/unit/grains/test_lvm.py | Python | 11 | {
"docstring": "\n Test grains._linux_lvm, lvm is installed, volume groups created but\n no logical volumes present.\n Should return a dictionary only with the header\n ",
"language": "en",
"n_whitespaces": 50,
"n_words": 21,
"vocab_size": 21
} | https://github.com/saltstack/salt.git |
|
4 | jaxpr_collectives | def jaxpr_collectives(jaxpr):
for eqn in jaxpr.eqns:
if eqn.primitive in _collective_primitives:
yield eqn.primitive
for subjaxpr in core.subjaxprs(jaxpr): yield from jaxpr_collectives(subjaxpr)
### xla_call underlying jit
| 4354f355a858e6e99a0853dde90812bf8e846ee5 | 11 | xla.py | 67 | prototyping dynamic shapes
Co-authored-by: Dougal Maclaurin <[email protected]> | 26,784 | 0 | 33 | 40 | 18 | 120,148 | 23 | jax | 9 | jax/interpreters/xla.py | Python | 5 | {
"docstring": "Generates all the collective primitives anywhere inside a Jaxpr.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | https://github.com/google/jax.git |
|
3 | _conda_version | def _conda_version(self):
if not self._is_conda:
return "N/A"
with Popen("conda --version", shell=True, stdout=PIPE, stderr=PIPE) as conda:
stdout, stderr = conda.communicate()
if stderr:
return "Conda is used, but version not found"
version = stdout.decode(self._encoding, errors="replace").splitlines()
return "\n".join(version)
| 48c886b3dce3d3117ad16edaf35c8abd28dc51f5 | 12 | sysinfo.py | 130 | Allow decoding errors | 21,436 | 0 | 110 | 73 | 29 | 102,071 | 35 | faceswap | 16 | lib/sysinfo.py | Python | 9 | {
"docstring": " str: The installed version of Conda, or `N/A` if Conda is not installed. ",
"language": "en",
"n_whitespaces": 14,
"n_words": 13,
"vocab_size": 13
} | https://github.com/deepfakes/faceswap.git |
|
2 | _multi_dot | def _multi_dot(arrays, order, i, j, precision):
if i == j:
return arrays[i]
else:
return jnp.dot(_multi_dot(arrays, order, i, order[i, j], precision),
_multi_dot(arrays, order, order[i, j] + 1, j, precision),
precision=precision)
| 2416d154355f19e77b5c1ddf1de1f8552e4a98ad | 14 | linalg.py | 99 | Call _check_arraylike for jnp.linalg & jnp.fft functions | 27,181 | 0 | 74 | 73 | 21 | 122,412 | 29 | jax | 8 | jax/_src/third_party/numpy/linalg.py | Python | 7 | {
"docstring": "Actually do the multiplication with the given order.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 7
} | https://github.com/google/jax.git |
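
The `_multi_dot` helper above evaluates a matrix chain in a precomputed optimal order; NumPy exposes the same idea as `np.linalg.multi_dot`. A rough illustration of why the order matters:

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((10, 100))
B = rng.standard_normal((100, 5))
C = rng.standard_normal((5, 50))

out = np.linalg.multi_dot([A, B, C])   # picks the cheapest parenthesization
assert np.allclose(out, A @ B @ C)

Here (A @ B) @ C costs about 7,500 scalar multiplications versus roughly 75,000 for A @ (B @ C), which is why computing the order up front pays off.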
|
2 | unique_id | def unique_id(self):
if self.serial is None:
return f"{self._bridge_unique_id}_{self.device_id}"
return super().unique_id
| 8b1713a691bd0c90824261be785f1998ad89f66f | 10 | __init__.py | 53 | Add support for non-serialized devices (light, switch, cover, fan in RA3 Zones) (#75323)
Co-authored-by: J. Nick Koston <[email protected]> | 103,304 | 0 | 42 | 22 | 9 | 304,497 | 10 | core | 6 | homeassistant/components/lutron_caseta/__init__.py | Python | 4 | {
"docstring": "Return a unique identifier if serial number is None.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | https://github.com/home-assistant/core.git |
|
7 | eval | def eval(self, args, assumptions=True):
# Support for deprecated design
# When old design is removed, this will always return None
sympy_deprecation_warning(
,
deprecated_since_version="1.8",
active_deprecations_target='deprecated-askhandler',
stacklevel=5,
)
expr, = args
res, _res = None, None
mro = inspect.getmro(type(expr))
for handler in self.handlers:
cls = get_class(handler)
for subclass in mro:
eval_ = getattr(cls, subclass.__name__, None)
if eval_ is None:
continue
res = eval_(expr, assumptions)
# Do not stop if value returned is None
# Try to check for higher classes
if res is None:
continue
if _res is None:
_res = res
else:
# only check consistency if both resolutors have concluded
if _res != res:
raise ValueError('incompatible resolutors')
break
return res
@contextmanager | ad766d1c02943e86f50559abfd0c72e582c9ca6a | @contextmanager | 17 | assume.py | 206 | Update the AskHandler deprecation warnings
n.b., the issue number in the original warning message was wrong. It should
have been #20837. | 48,154 | 1 | 491 | 123 | 73 | 196,758 | 111 | sympy | 25 | sympy/assumptions/assume.py | Python | 30 | {
"docstring": "\n The AskHandler system is deprecated. Evaluating UndefinedPredicate\n objects should be replaced with the multipledispatch handler of\n Predicate.\n ",
"language": "en",
"n_whitespaces": 62,
"n_words": 17,
"vocab_size": 17
} | https://github.com/sympy/sympy.git |
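
The deprecated handler lookup above walks the expression's MRO and calls the method named after each class until one returns a verdict. A generic sketch of that dispatch pattern (the class and method names here are made up for illustration):

import inspect

class Handler:
    @staticmethod
    def int(obj):
        return obj > 0
    @staticmethod
    def object(obj):
        return None  # no opinion at this level

def dispatch(handler_cls, obj):
    # Try the most specific class first, fall back along the MRO.
    for klass in inspect.getmro(type(obj)):
        method = getattr(handler_cls, klass.__name__, None)
        if method is None:
            continue
        result = method(obj)
        if result is not None:
            return result
    return None

print(dispatch(Handler, 5))   # True, resolved by Handler.int
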
9 | _translate_tick_params | def _translate_tick_params(kw, reverse=False):
kw_ = {**kw}
# The following lists may be moved to a more accessible location.
allowed_keys = [
'size', 'width', 'color', 'tickdir', 'pad',
'labelsize', 'labelcolor', 'zorder', 'gridOn',
'tick1On', 'tick2On', 'label1On', 'label2On',
'length', 'direction', 'left', 'bottom', 'right', 'top',
'labelleft', 'labelbottom', 'labelright', 'labeltop',
'labelrotation',
*_gridline_param_names]
keymap = {
# tick_params key -> axis key
'length': 'size',
'direction': 'tickdir',
'rotation': 'labelrotation',
'left': 'tick1On',
'bottom': 'tick1On',
'right': 'tick2On',
'top': 'tick2On',
'labelleft': 'label1On',
'labelbottom': 'label1On',
'labelright': 'label2On',
'labeltop': 'label2On',
}
if reverse:
kwtrans = {
oldkey: kw_.pop(newkey)
for oldkey, newkey in keymap.items() if newkey in kw_
}
else:
kwtrans = {
newkey: kw_.pop(oldkey)
for oldkey, newkey in keymap.items() if oldkey in kw_
}
if 'colors' in kw_:
c = kw_.pop('colors')
kwtrans['color'] = c
kwtrans['labelcolor'] = c
# Maybe move the checking up to the caller of this method.
for key in kw_:
if key not in allowed_keys:
raise ValueError(
"keyword %s is not recognized; valid keywords are %s"
% (key, allowed_keys))
kwtrans.update(kw_)
return kwtrans
| 6c88dd95935bcc5125ebaa81cd8202c347f3941c | 13 | axis.py | 426 | Add translation from internal kw to ones in tick_params() | 23,860 | 0 | 663 | 230 | 109 | 109,967 | 162 | matplotlib | 16 | lib/matplotlib/axis.py | Python | 44 | {
"docstring": "\n Translate the kwargs supported by `.Axis.set_tick_params` to kwargs\n supported by `.Tick._apply_params`.\n\n In particular, this maps axis specific names like 'top', 'left'\n to the generic tick1, tick2 logic of the axis. Additionally, there\n are some other name translations.\n\n Returns a new dict of translated kwargs.\n\n Note: Use reverse=True to translate from those supported by\n `.Tick._apply_params` back to those supported by\n `.Axis.set_tick_params`.\n ",
"language": "en",
"n_whitespaces": 131,
"n_words": 60,
"vocab_size": 46
} | https://github.com/matplotlib/matplotlib.git |
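
The translation above is essentially a dictionary remap of keyword names, made reversible by inverting the map. A stripped-down sketch:

keymap = {"length": "size", "direction": "tickdir", "rotation": "labelrotation"}

def translate(kw, reverse=False):
    mapping = {v: k for k, v in keymap.items()} if reverse else keymap
    return {mapping.get(k, k): v for k, v in kw.items()}

print(translate({"length": 4, "color": "red"}))   # {'size': 4, 'color': 'red'}
print(translate({"size": 4}, reverse=True))       # {'length': 4}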
|
14 | get_order_by | def get_order_by(self):
result = []
seen = set()
for expr, is_ref in self._order_by_pairs():
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if self.query.combinator and self.select:
src = resolved.get_source_expressions()[0]
expr_src = expr.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias and not (
isinstance(expr_src, F) and col_alias == expr_src.name
):
continue
if src == sel_expr:
resolved.set_source_expressions([RawSQL("%d" % (idx + 1), ())])
break
else:
if col_alias:
raise DatabaseError(
"ORDER BY term does not match any column in the result set."
)
# Add column used in ORDER BY clause to the selected
# columns and to each combined query.
order_by_idx = len(self.query.select) + 1
col_name = f"__orderbycol{order_by_idx}"
for q in self.query.combined_queries:
q.add_annotation(expr_src, col_name)
self.query.add_select_col(resolved, col_name)
resolved.set_source_expressions([RawSQL(f"{order_by_idx}", ())])
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql)[1]
params_hash = make_hashable(params)
if (without_ordering, params_hash) in seen:
continue
seen.add((without_ordering, params_hash))
result.append((resolved, (sql, params, is_ref)))
return result
| 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | 21 | compiler.py | 469 | Refs #33476 -- Reformatted code with Black. | 51,231 | 0 | 948 | 288 | 147 | 205,832 | 222 | django | 48 | django/db/models/sql/compiler.py | Python | 37 | {
"docstring": "\n Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for\n the ORDER BY clause.\n\n The order_by clause can alter the select clause (for example it can add\n aliases to clauses that do not yet have one, or it can add totally new\n select clauses).\n ",
"language": "en",
"n_whitespaces": 90,
"n_words": 47,
"vocab_size": 38
} | https://github.com/django/django.git |
|
6 | engine | async def engine(self) -> AsyncEngine:
if sqlite3.sqlite_version_info < self.MIN_SQLITE_VERSION:
required = ".".join(str(v) for v in self.MIN_SQLITE_VERSION)
raise RuntimeError(
f"Orion requires sqlite >= {required} but we found version "
f"{sqlite3.sqlite_version}"
)
kwargs = {}
loop = get_running_loop()
cache_key = (
loop,
self.connection_url,
self.echo,
self.timeout,
)
if cache_key not in self.ENGINES:
# apply database timeout
if self.timeout is not None:
kwargs["connect_args"] = dict(timeout=self.timeout)
# use `named` paramstyle because of edge cases where `qmark`
# results in params being sent in the wrong positional order
# https://github.com/PrefectHQ/prefect/pull/6645
kwargs["paramstyle"] = "named"
# ensure a long-lasting pool is used with in-memory databases
# because they disappear when the last connection closes
if ":memory:" in self.connection_url:
kwargs.update(poolclass=sa.pool.SingletonThreadPool)
engine = create_async_engine(self.connection_url, echo=self.echo, **kwargs)
sa.event.listen(engine.sync_engine, "engine_connect", self.setup_sqlite)
self.ENGINES[cache_key] = engine
await self.schedule_engine_disposal(cache_key)
return self.ENGINES[cache_key]
| 6ec2dcd6d424da1d069b2ecf378b4e4ddfdd43e3 | 14 | configurations.py | 305 | Resolve SQLite param ordering issue | 11,816 | 0 | 462 | 178 | 99 | 58,817 | 126 | prefect | 32 | src/prefect/orion/database/configurations.py | Python | 39 | {
"docstring": "Retrieves an async SQLAlchemy engine.\n\n Args:\n connection_url (str, optional): The database connection string.\n Defaults to self.connection_url\n echo (bool, optional): Whether to echo SQL sent\n to the database. Defaults to self.echo\n timeout (float, optional): The database statement timeout, in seconds.\n Defaults to self.timeout\n\n Returns:\n AsyncEngine: a SQLAlchemy engine\n ",
"language": "en",
"n_whitespaces": 157,
"n_words": 47,
"vocab_size": 35
} | https://github.com/PrefectHQ/prefect.git |
|
2 | test_highlighted | def test_highlighted(qtbot):
doc = QTextDocument()
completiondelegate._Highlighter(doc, 'Hello', Qt.GlobalColor.red)
doc.setPlainText('Hello World')
# Needed so the highlighting actually works.
edit = QTextEdit()
qtbot.add_widget(edit)
edit.setDocument(doc)
colors = [f.foreground().color() for f in doc.allFormats()]
assert QColor('red') in colors
| 0877fb0d78635692e481c8bde224fac5ad0dd430 | 11 | test_completiondelegate.py | 133 | Run scripts/dev/rewrite_enums.py | 117,670 | 0 | 63 | 76 | 29 | 321,337 | 33 | qutebrowser | 20 | tests/unit/completion/test_completiondelegate.py | Python | 9 | {
"docstring": "Make sure highlighting works.\n\n Note that with Qt > 5.12.1 we need to call setPlainText *after*\n creating the highlighter for highlighting to work. Ideally, we'd test\n whether CompletionItemDelegate._get_textdoc() works properly, but testing\n that is kind of hard, so we just test it in isolation here.\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 45,
"vocab_size": 40
} | https://github.com/qutebrowser/qutebrowser.git |
|
10 | topological_sort | def topological_sort(cls, assignments):
if not all(isinstance(i, Assignment) for i in assignments):
# Will support more things later
raise NotImplementedError("CodeBlock.topological_sort only supports Assignments")
if any(isinstance(i, AugmentedAssignment) for i in assignments):
raise NotImplementedError("CodeBlock.topological_sort does not yet work with AugmentedAssignments")
# Create a graph where the nodes are assignments and there is a directed edge
# between nodes that use a variable and nodes that assign that
# variable, like
# [(x := 1, y := x + 1), (x := 1, z := y + z), (y := x + 1, z := y + z)]
# If we then topologically sort these nodes, they will be in
# assignment order, like
# x := 1
# y := x + 1
# z := y + z
# A = The nodes
#
# enumerate keeps nodes in the same order they are already in if
# possible. It will also allow us to handle duplicate assignments to
# the same variable when those are implemented.
A = list(enumerate(assignments))
# var_map = {variable: [nodes for which this variable is assigned to]}
# like {x: [(1, x := y + z), (4, x := 2 * w)], ...}
var_map = defaultdict(list)
for node in A:
i, a = node
var_map[a.lhs].append(node)
# E = Edges in the graph
E = []
for dst_node in A:
i, a = dst_node
for s in a.rhs.free_symbols:
for src_node in var_map[s]:
E.append((src_node, dst_node))
ordered_assignments = topological_sort([A, E])
# De-enumerate the result
return cls(*[a for i, a in ordered_assignments])
| 65be461082dda54c8748922f9c29a19af1279fe1 | 14 | ast.py | 254 | Remove abbreviations in documentation | 48,439 | 0 | 558 | 150 | 130 | 197,296 | 251 | sympy | 26 | sympy/codegen/ast.py | Python | 18 | {
"docstring": "\n Return a CodeBlock with topologically sorted assignments so that\n variables are assigned before they are used.\n\n Examples\n ========\n\n The existing order of assignments is preserved as much as possible.\n\n This function assumes that variables are assigned to only once.\n\n This is a class constructor so that the default constructor for\n CodeBlock can error when variables are used before they are assigned.\n\n Examples\n ========\n\n >>> from sympy import symbols\n >>> from sympy.codegen.ast import CodeBlock, Assignment\n >>> x, y, z = symbols('x y z')\n\n >>> assignments = [\n ... Assignment(x, y + z),\n ... Assignment(y, z + 1),\n ... Assignment(z, 2),\n ... ]\n >>> CodeBlock.topological_sort(assignments)\n CodeBlock(\n Assignment(z, 2),\n Assignment(y, z + 1),\n Assignment(x, y + z)\n )\n\n ",
"language": "en",
"n_whitespaces": 315,
"n_words": 115,
"vocab_size": 71
} | https://github.com/sympy/sympy.git |
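
The same ordering problem can be sketched with the standard library's `graphlib` (Python 3.9+), mapping each assigned symbol to the symbols its right-hand side uses:

from graphlib import TopologicalSorter

# variable -> variables its right-hand side depends on
deps = {"x": {"y", "z"}, "y": {"z"}, "z": set()}

print(list(TopologicalSorter(deps).static_order()))
# ['z', 'y', 'x']  i.e. z = 2; y = z + 1; x = y + z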
|
1 | adapt | def adapt(self, data, batch_size=None, steps=None):
super().adapt(data, batch_size=batch_size, steps=steps)
| 84afc5193d38057e2e2badf9c889ea87d80d8fbf | 9 | discretization.py | 49 | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | 81,075 | 0 | 22 | 32 | 8 | 272,918 | 8 | keras | 6 | keras/layers/preprocessing/discretization.py | Python | 2 | {
"docstring": "Computes bin boundaries from quantiles in a input dataset.\n\n Calling `adapt()` on a `Discretization` layer is an alternative to passing\n in a `bin_boundaries` argument during construction. A `Discretization` layer\n should always be either adapted over a dataset or passed `bin_boundaries`.\n\n During `adapt()`, the layer will estimate the quantile boundaries of the\n input dataset. The number of quantiles can be controlled via the `num_bins`\n argument, and the error tolerance for quantile boundaries can be controlled\n via the `epsilon` argument.\n\n In order to make `Discretization` efficient in any distribution context, the\n computed boundaries are kept static with respect to any compiled `tf.Graph`s\n that call the layer. As a consequence, if the layer is adapted a second\n time, any models using the layer should be re-compiled. For more information\n see `tf.keras.layers.experimental.preprocessing.PreprocessingLayer.adapt`.\n\n `adapt()` is meant only as a single machine utility to compute layer state.\n To analyze a dataset that cannot fit on a single machine, see\n [Tensorflow Transform](https://www.tensorflow.org/tfx/transform/get_started)\n for a multi-machine, map-reduce solution.\n\n Arguments:\n data: The data to train on. It can be passed either as a\n `tf.data.Dataset`, or as a numpy array.\n batch_size: Integer or `None`.\n Number of samples per state update.\n If unspecified, `batch_size` will default to 32.\n Do not specify the `batch_size` if your data is in the\n form of datasets, generators, or `keras.utils.Sequence` instances\n (since they generate batches).\n steps: Integer or `None`.\n Total number of steps (batches of samples)\n When training with input tensors such as\n TensorFlow data tensors, the default `None` is equal to\n the number of samples in your dataset divided by\n the batch size, or 1 if that cannot be determined. If x is a\n `tf.data` dataset, and 'steps' is None, the epoch will run until\n the input dataset is exhausted. When passing an infinitely\n repeating dataset, you must specify the `steps` argument. This\n argument is not supported with array inputs.\n ",
"language": "en",
"n_whitespaces": 653,
"n_words": 305,
"vocab_size": 175
} | https://github.com/keras-team/keras.git |
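
A minimal `adapt()` usage sketch for the record above (assumes a TensorFlow 2.x install where `tf.keras.layers.Discretization` is available):

import numpy as np
import tensorflow as tf

data = np.array([[-1.5], [-0.2], [0.1], [1.3], [2.4]], dtype="float32")

layer = tf.keras.layers.Discretization(num_bins=3)
layer.adapt(data)            # estimates quantile bin boundaries from the data
print(layer(data).numpy())   # each value replaced by its bin index (0..2)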
|
10 | parse_datetime | def parse_datetime(value):
try:
return datetime.datetime.fromisoformat(value)
except ValueError:
if match := datetime_re.match(value):
kw = match.groupdict()
kw["microsecond"] = kw["microsecond"] and kw["microsecond"].ljust(6, "0")
tzinfo = kw.pop("tzinfo")
if tzinfo == "Z":
tzinfo = utc
elif tzinfo is not None:
offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
offset = 60 * int(tzinfo[1:3]) + offset_mins
if tzinfo[0] == "-":
offset = -offset
tzinfo = get_fixed_timezone(offset)
kw = {k: int(v) for k, v in kw.items() if v is not None}
return datetime.datetime(**kw, tzinfo=tzinfo)
| 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | 20 | dateparse.py | 282 | Refs #33476 -- Reformatted code with Black. | 51,588 | 0 | 272 | 170 | 53 | 206,617 | 78 | django | 21 | django/utils/dateparse.py | Python | 18 | {
"docstring": "Parse a string and return a datetime.datetime.\n\n This function supports time zone offsets. When the input contains one,\n the output uses a timezone with a fixed offset from UTC.\n\n Raise ValueError if the input is well formatted but not a valid datetime.\n Return None if the input isn't well formatted.\n ",
"language": "en",
"n_whitespaces": 65,
"n_words": 50,
"vocab_size": 39
} | https://github.com/django/django.git |
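
For well-formed inputs, the fixed-offset behaviour can be sketched with the standard library alone; `fromisoformat` accepts "+HH:MM" offsets directly, while a trailing "Z" needs normalising on Python < 3.11, which is one reason the regex fallback above exists:

from datetime import datetime, timedelta

def parse_iso(value):
    if value.endswith("Z"):
        value = value[:-1] + "+00:00"
    return datetime.fromisoformat(value)

dt = parse_iso("2012-04-23T10:20:30.400+02:30")
print(dt.tzinfo)                                          # UTC+02:30
print(dt.utcoffset() == timedelta(hours=2, minutes=30))   # True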
|
1 | create_module | def create_module(self, spec):
# By default, defer to default semantics for the new module.
return None
# We don't define exec_module() here since that would break
# hasattr checks we do to support backward compatibility.
| 8198943edd73a363c266633e1aa5b2a9e9c9f526 | 6 | _abc.py | 21 | add python 3.10.4 for windows | 55,084 | 0 | 62 | 10 | 32 | 218,022 | 35 | XX-Net | 3 | python3.10.4/Lib/importlib/_abc.py | Python | 2 | {
"docstring": "Return a module to initialize and into which to load.\n\n This method should raise ImportError if anything prevents it\n from creating a new module. It may return None to indicate\n that the spec should create the new module.\n ",
"language": "en",
"n_whitespaces": 67,
"n_words": 38,
"vocab_size": 31
} | https://github.com/XX-net/XX-Net.git |
|
3 | responder | def responder(request):
# Find an available port
with socket.socket() as sock:
sock.bind(("localhost", 0))
port = sock.getsockname()[1]
server_process = multiprocessing.Process(
target=process_server, args=(request.param, port)
)
server_process.start()
yield port
server_process.join(10)
server_process.terminate()
kill_time = 5
wait_time = 0
while server_process.is_alive():
if wait_time > kill_time:
server_process.kill()
break
else:
wait_time += 0.1
time.sleep(0.1)
server_process.close()
@pytest.mark.parametrize(
"responder, read_method, parquet_engine",
[
(CSVUserAgentResponder, pd.read_csv, None),
(JSONUserAgentResponder, pd.read_json, None),
(ParquetPyArrowUserAgentResponder, pd.read_parquet, "pyarrow"),
pytest.param(
ParquetFastParquetUserAgentResponder,
pd.read_parquet,
"fastparquet",
# TODO(ArrayManager) fastparquet
marks=[
td.skip_array_manager_not_yet_implemented,
pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10"),
],
),
(PickleUserAgentResponder, pd.read_pickle, None),
(StataUserAgentResponder, pd.read_stata, None),
(GzippedCSVUserAgentResponder, pd.read_csv, None),
(GzippedJSONUserAgentResponder, pd.read_json, None),
],
indirect=["responder"],
) | c5ff649b11bd625ca36ad218539badb1c2057668 | @pytest.mark.parametrize(
"responder, read_method, parquet_engine",
[
(CSVUserAgentResponder, pd.read_csv, None),
(JSONUserAgentResponder, pd.read_json, None),
(ParquetPyArrowUserAgentResponder, pd.read_parquet, "pyarrow"),
pytest.param(
ParquetFastParquetUserAgentResponder,
pd.read_parquet,
"fastparquet",
# TODO(ArrayManager) fastparquet
marks=[
td.skip_array_manager_not_yet_implemented,
pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10"),
],
),
(PickleUserAgentResponder, pd.read_pickle, None),
(StataUserAgentResponder, pd.read_stata, None),
(GzippedCSVUserAgentResponder, pd.read_csv, None),
(GzippedJSONUserAgentResponder, pd.read_json, None),
],
indirect=["responder"],
) | 15 | test_user_agent.py | 366 | CI/TST: Call join on server process test (#45628) | 39,497 | 1 | 380 | 117 | 75 | 163,775 | 93 | pandas | 48 | pandas/tests/io/test_user_agent.py | Python | 21 | {
"docstring": "\n Fixture that starts a local http server in a separate process on localhost\n and returns the port.\n\n Running in a separate process instead of a thread to allow termination/killing\n of http server upon cleanup.\n ",
"language": "en",
"n_whitespaces": 50,
"n_words": 34,
"vocab_size": 25
} | https://github.com/pandas-dev/pandas.git |
3 | _dirmatch | def _dirmatch(path, matchwith):
matchlen = len(matchwith)
if (path.startswith(matchwith)
and path[matchlen:matchlen + 1] in [os.sep, '']):
return True
return False
| 4c73560b313821fbfbb8c943e02c8b298b7c1731 | 11 | _clonevirtualenv.py | 73 | [runtime env] Support clone `virtualenv` from an existing `virtualenv` (#22309)
Before this PR, we can't run ray in virtualenv, cause `runtime_env` does not support create a new virtualenv from an existing virtualenv.
More details:https://github.com/ray-project/ray/pull/21801#discussion_r796848499
Co-authored-by: ๆ็ <[email protected]> | 33,344 | 0 | 45 | 45 | 18 | 144,928 | 19 | ray | 8 | python/ray/_private/runtime_env/_clonevirtualenv.py | Python | 6 | {
"docstring": "Check if path is within matchwith's tree.\n >>> _dirmatch('/home/foo/bar', '/home/foo/bar')\n True\n >>> _dirmatch('/home/foo/bar/', '/home/foo/bar')\n True\n >>> _dirmatch('/home/foo/bar/etc', '/home/foo/bar')\n True\n >>> _dirmatch('/home/foo/bar2', '/home/foo/bar')\n False\n >>> _dirmatch('/home/foo/bar2/etc', '/home/foo/bar')\n False\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 27,
"vocab_size": 16
} | https://github.com/ray-project/ray.git |
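
The `os.sep` check in `_dirmatch` above guards against false prefix matches between sibling directories; `pathlib` (Python 3.9+) expresses the same test:

from pathlib import PurePosixPath

print("/home/foo/bar2".startswith("/home/foo/bar"))                        # True (wrong)
print(PurePosixPath("/home/foo/bar2").is_relative_to("/home/foo/bar"))     # False
print(PurePosixPath("/home/foo/bar/etc").is_relative_to("/home/foo/bar"))  # True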
|
2 | max_mireds | def max_mireds(self) -> int:
if color_temp := self.resource.color_temperature:
return color_temp.mirek_schema.mirek_maximum
# return a fallback value to prevent issues with mired->kelvin conversions
return FALLBACK_MAX_MIREDS
| 10e796e9d5916ce214d23e6aeaf5d757638b07b1 | 9 | light.py | 43 | Fix issues with Color temperature conversions in Hue (#83982) | 96,740 | 0 | 62 | 25 | 21 | 297,779 | 23 | core | 9 | homeassistant/components/hue/v2/light.py | Python | 5 | {
"docstring": "Return the warmest color_temp that this light supports.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | https://github.com/home-assistant/core.git |
|
5 | prde_no_cancel_b_large | def prde_no_cancel_b_large(b, Q, n, DE):
db = b.degree(DE.t)
m = len(Q)
H = [Poly(0, DE.t)]*m
for N, i in itertools.product(range(n, -1, -1), range(m)): # [n, ..., 0]
si = Q[i].nth(N + db)/b.LC()
sitn = Poly(si*DE.t**N, DE.t)
H[i] = H[i] + sitn
Q[i] = Q[i] - derivation(sitn, DE) - b*sitn
if all(qi.is_zero for qi in Q):
dc = -1
M = zeros(0, 2, DE.t)
else:
dc = max([qi.degree(DE.t) for qi in Q])
M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i), DE.t)
A, u = constant_system(M, zeros(dc + 1, 1, DE.t), DE)
c = eye(m, DE.t)
A = A.row_join(zeros(A.rows, m, DE.t)).col_join(c.row_join(-c))
return (H, A)
| e94a7b45d7b033ccbd57395dca28b654f875c54c | 15 | prde.py | 421 | Improve loop performance | 48,922 | 0 | 194 | 281 | 70 | 198,413 | 104 | sympy | 39 | sympy/integrals/prde.py | Python | 19 | {
"docstring": "\n Parametric Poly Risch Differential Equation - No cancellation: deg(b) large enough.\n\n Explanation\n ===========\n\n Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with\n b != 0 and either D == d/dt or deg(b) > max(0, deg(D) - 1), returns\n h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that\n if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and\n Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where\n d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.\n ",
"language": "en",
"n_whitespaces": 137,
"n_words": 106,
"vocab_size": 75
} | https://github.com/sympy/sympy.git |
|
5 | _discrete_log_shanks_steps | def _discrete_log_shanks_steps(n, a, b, order=None):
a %= n
b %= n
if order is None:
order = n_order(b, n)
m = isqrt(order) + 1
T = {}
x = 1
for i in range(m):
T[x] = i
x = x * b % n
z = mod_inverse(b, n)
z = pow(z, m, n)
x = a
for i in range(m):
if x in T:
return i * m + T[x]
x = x * z % n
raise ValueError("Log does not exist")
| 9d58006fc0a23afcba38f641c9472917c436428a | 11 | residue_ntheory.py | 194 | Code cleanup | 48,963 | 0 | 167 | 126 | 41 | 198,500 | 82 | sympy | 16 | sympy/ntheory/residue_ntheory.py | Python | 19 | {
"docstring": "\n Baby-step giant-step algorithm for computing the discrete logarithm of\n ``a`` to the base ``b`` modulo ``n``.\n\n The algorithm is a time-memory trade-off of the method of exhaustive\n search. It uses `O(sqrt(m))` memory, where `m` is the group order.\n\n Examples\n ========\n\n >>> from sympy.ntheory.residue_ntheory import _discrete_log_shanks_steps\n >>> _discrete_log_shanks_steps(41, 15, 7)\n 3\n\n See Also\n ========\n\n discrete_log\n\n References\n ==========\n\n .. [1] \"Handbook of applied cryptography\", Menezes, A. J., Van, O. P. C., &\n Vanstone, S. A. (1997).\n ",
"language": "en",
"n_whitespaces": 130,
"n_words": 74,
"vocab_size": 63
} | https://github.com/sympy/sympy.git |
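
A self-contained sketch of the same baby-step giant-step idea, using Python 3.8+'s modular inverse via `pow(b, -m, n)` instead of the SymPy helpers:

from math import isqrt

def bsgs(n, a, b, order):
    """Solve b**x == a (mod n) in O(sqrt(order)) time and memory."""
    m = isqrt(order) + 1
    table = {}
    x = 1
    for j in range(m):            # baby steps: store b**j
        table.setdefault(x, j)
        x = x * b % n
    giant = pow(b, -m, n)         # b**(-m) mod n
    x = a % n
    for i in range(m):            # giant steps: a * b**(-i*m)
        if x in table:
            return i * m + table[x]
        x = x * giant % n
    raise ValueError("log does not exist")

print(bsgs(41, 15, 7, 40))   # 3, since 7**3 % 41 == 15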
|
7 | get_lead_data | def get_lead_data(filters, based_on):
based_on_field = frappe.scrub(based_on)
conditions = get_filter_conditions(filters)
lead_details = frappe.db.sql(
.format(
based_on_field=based_on_field, conditions=conditions
),
filters,
as_dict=1,
)
lead_map = frappe._dict()
for d in lead_details:
lead_map.setdefault(d.get(based_on_field), []).append(d.name)
data = []
for based_on_value, leads in lead_map.items():
row = {based_on_field: based_on_value, "lead_count": len(leads)}
row["quot_count"] = get_lead_quotation_count(leads)
row["opp_count"] = get_lead_opp_count(leads)
row["order_count"] = get_quotation_ordered_count(leads)
row["order_value"] = get_order_amount(leads) or 0
row["opp_lead"] = flt(row["opp_count"]) / flt(row["lead_count"] or 1.0) * 100.0
row["quot_lead"] = flt(row["quot_count"]) / flt(row["lead_count"] or 1.0) * 100.0
row["order_quot"] = flt(row["order_count"]) / flt(row["quot_count"] or 1.0) * 100.0
data.append(row)
return data
| 494bd9ef78313436f0424b918f200dab8fc7c20b | 15 | campaign_efficiency.py | 381 | style: format code with black | 14,000 | 0 | 61 | 241 | 58 | 65,748 | 86 | erpnext | 31 | erpnext/crm/report/campaign_efficiency/campaign_efficiency.py | Python | 29 | {
"docstring": "\n\t\tselect {based_on_field}, name\n\t\tfrom `tabLead`\n\t\twhere {based_on_field} is not null and {based_on_field} != '' {conditions}\n\t",
"language": "en",
"n_whitespaces": 12,
"n_words": 15,
"vocab_size": 14
} | https://github.com/frappe/erpnext.git |
|
5 | gen_flat_decoded_field_dicts | def gen_flat_decoded_field_dicts(self) -> Generator[Dict, None, None]:
selected_decoding, decoded_val = self.safe_decode_as(self.preferred_decoding, self.try_unpack)
field_desc_dict = {
"tag": self._gen_tag_str(),
"wireType": self._wire_type_str(),
"decoding": self._decoding_str(selected_decoding),
"name": self.name,
}
if isinstance(decoded_val, list):
if (
selected_decoding == ProtoParser.DecodedTypes.message # field is a message with subfields
and not self.is_packed_parent # field is a message, but replaced by packed fields
):
# Field is a message, not packed, thus include it as message header
field_desc_dict["val"] = ""
yield field_desc_dict
# add sub-fields of messages or packed fields
for f in decoded_val:
yield from f.gen_flat_decoded_field_dicts()
else:
field_desc_dict["val"] = decoded_val
yield field_desc_dict
| 9d1e3107e851b3187c1270df189da74236e447f7 | 12 | grpc.py | 204 | `pyupgrade --keep-runtime-typing --py38-plus` | 73,562 | 0 | 423 | 120 | 68 | 250,874 | 91 | mitmproxy | 21 | mitmproxy/contentviews/grpc.py | Python | 28 | {
"docstring": "\n Returns a generator which passes the field as a dict.\n\n In order to return the field value it gets decoded (based on a failover strategy and\n provided ParserRules).\n If the field holds a nested message, the fields contained in the message are appended.\n Ultimately this flattens all fields recursively.\n ",
"language": "en",
"n_whitespaces": 116,
"n_words": 49,
"vocab_size": 39
} | https://github.com/mitmproxy/mitmproxy.git |
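
A generic sketch of the flatten-with-`yield from` pattern used above: emit one dict per node, then recurse into its children (the nested-dict message format here is made up for illustration):

def flatten(node, prefix=""):
    name = f"{prefix}{node['name']}"
    yield {"name": name, "val": "" if node.get("children") else node["val"]}
    for child in node.get("children", []):
        yield from flatten(child, prefix=name + ".")

msg = {"name": "user", "children": [
    {"name": "id", "val": 42},
    {"name": "email", "val": "a@b"},
]}
print(list(flatten(msg)))
# [{'name': 'user', 'val': ''}, {'name': 'user.id', 'val': 42},
#  {'name': 'user.email', 'val': 'a@b'}]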
|
3 | test_change_view | def test_change_view(self):
change_dict = {
"title": "Ikke fordรธmt",
"content": "<p>edited article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
article_change_url = reverse(
"admin:admin_views_article_change", args=(self.a1.pk,)
)
article_changelist_url = reverse("admin:admin_views_article_changelist")
# add user should not be able to view the list of article or change any of them
self.client.force_login(self.adduser)
response = self.client.get(article_changelist_url)
self.assertEqual(response.status_code, 403)
response = self.client.get(article_change_url)
self.assertEqual(response.status_code, 403)
post = self.client.post(article_change_url, change_dict)
self.assertEqual(post.status_code, 403)
self.client.get(reverse("admin:logout"))
# view user can view articles but not make changes.
self.client.force_login(self.viewuser)
response = self.client.get(article_changelist_url)
self.assertContains(
response,
"<title>Select article to view | Django site admin</title>",
)
self.assertContains(response, "<h1>Select article to view</h1>")
self.assertEqual(response.context["title"], "Select article to view")
response = self.client.get(article_change_url)
self.assertContains(response, "<title>View article | Django site admin</title>")
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(response, "<label>Extra form field:</label>")
self.assertContains(
response,
'<a href="/test_admin/admin/admin_views/article/" class="closelink">Close</a>',
)
self.assertEqual(response.context["title"], "View article")
post = self.client.post(article_change_url, change_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(
Article.objects.get(pk=self.a1.pk).content, "<p>Middle content</p>"
)
self.client.get(reverse("admin:logout"))
# change user can view all items and edit them
self.client.force_login(self.changeuser)
response = self.client.get(article_changelist_url)
self.assertEqual(response.context["title"], "Select article to change")
self.assertContains(
response,
"<title>Select article to change | Django site admin</title>",
)
self.assertContains(response, "<h1>Select article to change</h1>")
response = self.client.get(article_change_url)
self.assertEqual(response.context["title"], "Change article")
self.assertContains(
response,
"<title>Change article | Django site admin</title>",
)
self.assertContains(response, "<h1>Change article</h1>")
post = self.client.post(article_change_url, change_dict)
self.assertRedirects(post, article_changelist_url)
self.assertEqual(
Article.objects.get(pk=self.a1.pk).content, "<p>edited article</p>"
)
# one error in form should produce singular error message, multiple errors plural
change_dict["title"] = ""
post = self.client.post(article_change_url, change_dict)
self.assertContains(
post,
"Please correct the error below.",
msg_prefix="Singular error message not found in response to post with one error",
)
change_dict["content"] = ""
post = self.client.post(article_change_url, change_dict)
self.assertContains(
post,
"Please correct the errors below.",
msg_prefix="Plural error message not found in response to post with multiple errors",
)
self.client.get(reverse("admin:logout"))
# Test redirection when using row-level change permissions. Refs #11513.
r1 = RowLevelChangePermissionModel.objects.create(id=1, name="odd id")
r2 = RowLevelChangePermissionModel.objects.create(id=2, name="even id")
r3 = RowLevelChangePermissionModel.objects.create(id=3, name="odd id mult 3")
r6 = RowLevelChangePermissionModel.objects.create(id=6, name="even id mult 3")
change_url_1 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r1.pk,)
)
change_url_2 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r2.pk,)
)
change_url_3 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r3.pk,)
)
change_url_6 = reverse(
"admin:admin_views_rowlevelchangepermissionmodel_change", args=(r6.pk,)
)
logins = [
self.superuser,
self.viewuser,
self.adduser,
self.changeuser,
self.deleteuser,
]
for login_user in logins:
with self.subTest(login_user.username):
self.client.force_login(login_user)
response = self.client.get(change_url_1)
self.assertEqual(response.status_code, 403)
response = self.client.post(change_url_1, {"name": "changed"})
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=1).name, "odd id"
)
self.assertEqual(response.status_code, 403)
response = self.client.get(change_url_2)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_2, {"name": "changed"})
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=2).name, "changed"
)
self.assertRedirects(response, self.index_url)
response = self.client.get(change_url_3)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_3, {"name": "changed"})
self.assertEqual(response.status_code, 403)
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=3).name,
"odd id mult 3",
)
response = self.client.get(change_url_6)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_6, {"name": "changed"})
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=6).name, "changed"
)
self.assertRedirects(response, self.index_url)
self.client.get(reverse("admin:logout"))
for login_user in [self.joepublicuser, self.nostaffuser]:
with self.subTest(login_user.username):
self.client.force_login(login_user)
response = self.client.get(change_url_1, follow=True)
self.assertContains(response, "login-form")
response = self.client.post(
change_url_1, {"name": "changed"}, follow=True
)
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=1).name, "odd id"
)
self.assertContains(response, "login-form")
response = self.client.get(change_url_2, follow=True)
self.assertContains(response, "login-form")
response = self.client.post(
change_url_2, {"name": "changed again"}, follow=True
)
self.assertEqual(
RowLevelChangePermissionModel.objects.get(id=2).name, "changed"
)
self.assertContains(response, "login-form")
self.client.get(reverse("admin:logout"))
| 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | 15 | tests.py | 1,865 | Refs #33476 -- Reformatted code with Black. | 52,122 | 0 | 2,165 | 1,131 | 202 | 207,831 | 462 | django | 49 | tests/admin_views/tests.py | Python | 156 | {
"docstring": "Change view should restrict access and allow users to edit items.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | https://github.com/django/django.git |
|
5 | nested_concat | def nested_concat(tensors, new_tensors, padding_index=-100):
assert type(tensors) == type(
new_tensors
), f"Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}."
if isinstance(tensors, (list, tuple)):
return type(tensors)(nested_concat(
t, n, padding_index=padding_index)
for t, n in zip(tensors, new_tensors))
elif isinstance(tensors, paddle.Tensor):
return paddle_pad_and_concatenate(
tensors, new_tensors, padding_index=padding_index)
elif isinstance(tensors, np.ndarray):
return numpy_pad_and_concatenate(
tensors, new_tensors, padding_index=padding_index)
else:
raise TypeError(
f"Unsupported type for concatenation: got {type(tensors)}")
| 44a290e94d1becd1f09fddc3d873f9e19c9d6919 | 14 | helper.py | 200 | [Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761)
* add some datasets for finetune.
* support fine tune for all tastks.
* add trainer prototype.
* init verison for paddlenlp trainer.
* refine trainer.
* update for some details.
* support multi-cards training evaluation.
* support load from ckpt.
* support for export inference model.
* first version of trainer.
* seq cls support clue.
* trainer support for token classification and question answersing tasks.
* fix as reviews.
Co-authored-by: Zeyu Chen <[email protected]> | 118,400 | 0 | 192 | 116 | 50 | 323,181 | 64 | PaddleNLP | 18 | paddlenlp/trainer/utils/helper.py | Python | 17 | {
"docstring": "\n Concat the `new_tensors` to `tensors` on the first dim and pad them on the second if needed. Works for tensors or\n nested list/tuples of tensors.\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 25,
"vocab_size": 22
} | https://github.com/PaddlePaddle/PaddleNLP.git |
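
A NumPy-only sketch of the pad-and-concatenate step that `nested_concat` above delegates to: batches whose trailing dimension differs are padded with `padding_index` before stacking:

import numpy as np

def pad_and_concat(a, b, padding_index=-100):
    width = max(a.shape[1], b.shape[1])
    def pad(x):
        out = np.full((x.shape[0], width), padding_index, dtype=x.dtype)
        out[:, : x.shape[1]] = x
        return out
    return np.concatenate([pad(a), pad(b)], axis=0)

print(pad_and_concat(np.array([[1, 2, 3]]), np.array([[4, 5]])))
# [[   1    2    3]
#  [   4    5 -100]]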
|
1 | test_light_none_color_value | async def test_light_none_color_value(hass, light_color_null_values, integration):
entity_id = "light.repeater"
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_ON
assert state.attributes[ATTR_SUPPORTED_FEATURES] == LightEntityFeature.TRANSITION
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["hs"]
| fe0120b65a5e685b1aed06e8bd3cf10b561a710b | 9 | test_light.py | 87 | Use light enums in zwave_js (#70791) | 97,970 | 0 | 46 | 53 | 18 | 299,032 | 25 | core | 14 | tests/components/zwave_js/test_light.py | Python | 7 | {
"docstring": "Test the light entity can handle None value in current color Value.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | https://github.com/home-assistant/core.git |
|
2 | contains_points | def contains_points(self, points, transform=None, radius=0.0):
if transform is not None:
transform = transform.frozen()
result = _path.points_in_path(points, radius, self, transform)
return result.astype('bool')
| 03a0b5ea238014ba87f74ef766928287726aa00a | 10 | path.py | 78 | Doc: Fix grammar and spelling | 24,047 | 0 | 60 | 52 | 19 | 110,307 | 21 | matplotlib | 10 | lib/matplotlib/path.py | Python | 5 | {
"docstring": "\n Return whether the area enclosed by the path contains the given points.\n\n The path is always treated as closed; i.e. if the last code is not\n CLOSEPOLY an implicit segment connecting the last vertex to the first\n vertex is assumed.\n\n Parameters\n ----------\n points : (N, 2) array\n The points to check. Columns contain x and y values.\n transform : `matplotlib.transforms.Transform`, optional\n If not ``None``, *points* will be compared to ``self`` transformed\n by *transform*; i.e. for a correct check, *transform* should\n transform the path into the coordinate system of *points*.\n radius : float, default: 0\n Additional margin on the path in coordinates of *points*.\n The path is extended tangentially by *radius/2*; i.e. if you would\n draw the path with a linewidth of *radius*, all points on the line\n would still be considered to be contained in the area. Conversely,\n negative values shrink the area: Points on the imaginary line\n will be considered outside the area.\n\n Returns\n -------\n length-N bool array\n\n Notes\n -----\n The current algorithm has some limitations:\n\n - The result is undefined for points exactly at the boundary\n (i.e. at the path shifted by *radius/2*).\n - The result is undefined if there is no enclosed area, i.e. all\n vertices are on a straight line.\n - If bounding lines start to cross each other due to *radius* shift,\n the result is not guaranteed to be correct.\n ",
"language": "en",
"n_whitespaces": 496,
"n_words": 225,
"vocab_size": 137
} | https://github.com/matplotlib/matplotlib.git |
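
A minimal usage sketch of `contains_points` with a unit square, using matplotlib's public `Path` API:

import numpy as np
from matplotlib.path import Path

square = Path([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])
pts = np.array([[0.5, 0.5], [2.0, 2.0]])
print(square.contains_points(pts))   # [ True False]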
|
1 | test_advanced_customization | def test_advanced_customization(scene):
chart = BarChart(values=[10, 40, 10, 20], bar_names=["one", "two", "three", "four"])
c_x_lbls = chart.x_axis.labels
c_x_lbls.set_color_by_gradient(GREEN, RED, YELLOW)
c_y_nums = chart.y_axis.numbers
c_y_nums.set_color_by_gradient(BLUE, WHITE).shift(LEFT)
c_y_axis = chart.y_axis
c_y_axis.ticks.set_color(YELLOW)
c_bar_lbls = chart.get_bar_labels()
scene.add(chart, c_bar_lbls)
@frames_comparison | 149479f9132daf2266c27caa7a3e11ce06be501d | @frames_comparison | 11 | test_probability.py | 162 | Refactored :class:`~.BarChart` and made it inherit from :class:`~.Axes`. (#2387)
* rebase
* fixed None bar_names
* fixed scale issues
* fixed to accept negative bar values
* fixed some bugs
* Added docs for parameters (DRAFT)
* clean up parameters
* more clean up
* clean up __init__
* replace add_x_labels with built-in functionality
* adjust default font_size for labels
* Update docs descriptions
* Add bar_width and adjust get_bar_labels
* Add bar_width and adjust get_bar_labels
* Add docs to class and methods
* remove unecessary imports
* remove getters
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Attempt to fix duplicated parameters section
* adjust BarChart example to include title
* switch order around
* change_bar_values
* back to get_bar_values
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* add docs for _update_default_config and fix method
* remove print(dicts)
* allow negative_numbers to work with bar chart
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* allow negative numbers to work with change_bar_values
* add test_probability.py
* add control data
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* change example
* update examples again
* rewrite test
* rewrite other test
* remove comma after list in example
* improve wording in docs
* add parameter/docs for label_constructor
* change create_label_tex and update methods
* update docs
* use decimal number
* switch default to Tex
* update instances of create_label_tex in coordinate_systems.py
* hardcode for add_labels
* add TODO
* use label_constructor
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* fix indentation in docs
* Fix minor doc typo
Co-authored-by: Led Me Explain <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> | 46,049 | 1 | 62 | 99 | 29 | 189,420 | 33 | manim | 27 | tests/test_graphical_units/test_probability.py | Python | 10 | {
"docstring": "Tests to make sure advanced customization can be done through :class:`~.BarChart`",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | https://github.com/ManimCommunity/manim.git |
3 | newer | def newer(source, target):
if not os.path.exists(source):
raise ValueError("file '%s' does not exist" % os.path.abspath(source))
if not os.path.exists(target):
return 1
mtime1 = os.stat(source)[ST_MTIME]
mtime2 = os.stat(target)[ST_MTIME]
return mtime1 > mtime2
| f2ccee6761ddcdde2c6502146ca1b37730d46d6d | 13 | _generate_pyx.py | 118 | MAINT: Remove `distutils` usage and add `newer` | 69,778 | 0 | 61 | 72 | 22 | 242,078 | 29 | scipy | 12 | scipy/linalg/_generate_pyx.py | Python | 8 | {
"docstring": "\n Return true if 'source' exists and is more recently modified than\n 'target', or if 'source' exists and 'target' doesn't. Return false if\n both exist and 'target' is the same age or younger than 'source'.\n ",
"language": "en",
"n_whitespaces": 48,
"n_words": 34,
"vocab_size": 23
} | https://github.com/scipy/scipy.git |
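
A quick standard-library demonstration of the mtime comparison `newer` above performs: regenerate the target only when the source is more recent or the target is missing:

import os
import tempfile
import time

with tempfile.TemporaryDirectory() as d:
    src = os.path.join(d, "module.pyx")
    target = os.path.join(d, "module.c")
    open(target, "w").close()
    time.sleep(1)                 # give the source a strictly later mtime
    open(src, "w").close()
    print(os.stat(src).st_mtime > os.stat(target).st_mtime)   # True -> rebuild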
|
1 | test_list_invalid_query_parameter | def test_list_invalid_query_parameter(self) -> None:
channel = self.make_request(
"GET",
self.url + "?valid=x",
{},
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
| 2281427175e4c93a30c39607fb4ac23c2a1f399f | 10 | test_registration_tokens.py | 77 | Use literals in place of `HTTPStatus` constants in tests (#13488)
* Use literals in place of `HTTPStatus` constants in tests
* newsfile
* code style
* code style | 72,831 | 0 | 89 | 48 | 17 | 249,328 | 17 | synapse | 11 | tests/rest/admin/test_registration_tokens.py | Python | 9 | {
"docstring": "Test with `valid` query parameter not `true` or `false`.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | https://github.com/matrix-org/synapse.git |
|
1 | test_get_parsed_simple_text_mail | def test_get_parsed_simple_text_mail(self):
# Parse Test file and check relevant content
parsed1 = self.parser.get_parsed(
os.path.join(self.SAMPLE_FILES, "simple_text.eml"),
)
self.assertEqual(parsed1.date.year, 2022)
self.assertEqual(parsed1.date.month, 10)
self.assertEqual(parsed1.date.day, 12)
self.assertEqual(parsed1.date.hour, 21)
self.assertEqual(parsed1.date.minute, 40)
self.assertEqual(parsed1.date.second, 43)
self.assertEqual(parsed1.date.tzname(), "UTC+02:00")
self.assertEqual(parsed1.from_, "[email protected]")
self.assertEqual(parsed1.subject, "Simple Text Mail")
self.assertEqual(parsed1.text, "This is just a simple Text Mail.\n")
self.assertEqual(parsed1.to, ("[email protected]",))
| 00f39d8b581c358f2484680275222f6ad909758c | 11 | test_parsers.py | 254 | add test comments | 117,103 | 0 | 162 | 157 | 45 | 320,273 | 46 | paperless-ngx | 22 | src/paperless_mail/tests/test_parsers.py | Python | 15 | {
"docstring": "\n GIVEN:\n - Fresh parser\n WHEN:\n - A .eml file should be parsed\n THEN:\n - The content of the mail should be available in the parse result.\n ",
"language": "en",
"n_whitespaces": 88,
"n_words": 26,
"vocab_size": 21
} | https://github.com/paperless-ngx/paperless-ngx.git |
|
1 | from_pydict | def from_pydict(cls, *args, **kwargs):
return cls(pa.Table.from_pydict(*args, **kwargs))
| e35be138148333078284b942ccc9ed7b1d826f97 | 10 | table.py | 46 | Update docs to new frontend/UI (#3690)
* WIP: update docs to new UI
* make style
* Rm unused
* inject_arrow_table_documentation __annotations__
* hasattr(arrow_table_method, "__annotations__")
* Update task_template.rst
* Codeblock PT-TF-SPLIT
* Convert loading scripts
* Convert docs to mdx
* Fix mdx
* Add <Tip>
* Convert mdx tables
* Fix codeblock
* Rm unneded hashlinks
* Update index.mdx
* Redo dev change
* Rm circle ci `build_doc` & `deploy_doc`
* Rm unneeded files
* Update docs reamde
* Standardize to `Example::`
* mdx logging levels doc
* Table properties inject_arrow_table_documentation
* ``` to ```py mdx
* Add Tips mdx
* important,None -> <Tip warning={true}>
* More misc
* Center imgs
* Update instllation page
* `setup.py` docs section
* Rm imgs since they are in hf.co
* Update docs/source/access.mdx
Co-authored-by: Steven Liu <[email protected]>
* Update index mdx
* Update docs/source/access.mdx
Co-authored-by: Steven Liu <[email protected]>
* just `Dataset` obj
* Addedversion just italics
* Update ReadInstruction doc example syntax
* Change docstring for `prepare_for_task`
* Chore
* Remove `code` syntax from headings
* Rm `code` syntax from headings
* Hashlink backward compatability
* S3FileSystem doc
* S3FileSystem doc updates
* index.mdx updates
* Add darkmode gifs
* Index logo img css classes
* Index mdx dataset logo img size
* Docs for DownloadMode class
* Doc DownloadMode table
* format docstrings
* style
* Add doc builder scripts (#3790)
* add doc builder scripts
* fix docker image
* Docs new UI actions no self hosted (#3793)
* No self hosted
* replace doc injection by actual docstrings
* Docstring formatted
Co-authored-by: Quentin Lhoest <[email protected]>
Co-authored-by: Mishig Davaadorj <[email protected]>
Co-authored-by: Lysandre Debut <[email protected]>
Co-authored-by: Mishig Davaadorj <[email protected]>
* Rm notebooks from docs actions since they dont exi
* Update tsting branch
* More docstring
* Chore
* bump up node version
* bump up node
* ``` -> ```py for audio_process.mdx
* Update .github/workflows/build_documentation.yml
Co-authored-by: Quentin Lhoest <[email protected]>
* Uodate dev doc build
* remove run on PR
* fix action
* Fix gh doc workflow
* forgot this change when merging master
* Update build doc
Co-authored-by: Steven Liu <[email protected]>
Co-authored-by: Quentin Lhoest <[email protected]>
Co-authored-by: Quentin Lhoest <[email protected]>
Co-authored-by: Lysandre Debut <[email protected]> | 21,830 | 0 | 21 | 28 | 7 | 104,393 | 7 | datasets | 6 | src/datasets/table.py | Python | 2 | {
"docstring": "\n Construct a Table from Arrow arrays or columns\n\n Args:\n mapping (:obj:`Union[dict, Mapping]`):\n A mapping of strings to Arrays or Python lists.\n schema (:obj:`Schema`, defaults to :obj:`None`):\n If not passed, will be inferred from the Mapping values\n metadata (:obj:`Union[dict, Mapping]`, default None):\n Optional metadata for the schema (if inferred).\n\n Returns:\n :class:`datasets.table.Table`:\n ",
"language": "en",
"n_whitespaces": 168,
"n_words": 50,
"vocab_size": 42
} | https://github.com/huggingface/datasets.git |
|
3 | _check_deprecated_resample_kwargs | def _check_deprecated_resample_kwargs(kwargs, origin):
# Deprecation warning of `base` and `loffset` since v1.1.0:
# we are raising the warning here to be able to set the `stacklevel`
# properly since we need to raise the `base` and `loffset` deprecation
# warning from three different cases:
# core/generic.py::NDFrame.resample
# core/groupby/groupby.py::GroupBy.resample
# core/groupby/grouper.py::Grouper
# raising these warnings from TimeGrouper directly would fail the test:
# tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base
if kwargs.get("base", None) is not None:
warnings.warn(
"'base' in .resample() and in Grouper() is deprecated.\n"
"The new arguments that you should use are 'offset' or 'origin'.\n"
'\n>>> df.resample(freq="3s", base=2)\n'
"\nbecomes:\n"
'\n>>> df.resample(freq="3s", offset="2s")\n',
FutureWarning,
stacklevel=find_stack_level(inspect.currentframe()),
)
if kwargs.get("loffset", None) is not None:
warnings.warn(
"'loffset' in .resample() and in Grouper() is deprecated.\n"
'\n>>> df.resample(freq="3s", loffset="8H")\n'
"\nbecomes:\n"
"\n>>> from pandas.tseries.frequencies import to_offset"
'\n>>> df = df.resample(freq="3s").mean()'
'\n>>> df.index = df.index.to_timestamp() + to_offset("8H")\n',
FutureWarning,
stacklevel=find_stack_level(inspect.currentframe()),
)
| 2f8d0a36703e81e4dca52ca9fe4f58c910c1b304 | 14 | grouper.py | 176 | PERF cache find_stack_level (#48023)
cache stacklevel | 40,239 | 0 | 373 | 83 | 85 | 168,224 | 136 | pandas | 11 | pandas/core/groupby/grouper.py | Python | 22 | {
"docstring": "\n Check for use of deprecated parameters in ``resample`` and related functions.\n\n Raises the appropriate warnings if these parameters are detected.\n Only sets an approximate ``stacklevel`` for the warnings (see #37603, #36629).\n\n Parameters\n ----------\n kwargs : dict\n Dictionary of keyword arguments to check for deprecated parameters.\n origin : object\n From where this function is being called; either Grouper or TimeGrouper. Used\n to determine an approximate stacklevel.\n ",
"language": "en",
"n_whitespaces": 111,
"n_words": 65,
"vocab_size": 54
} | https://github.com/pandas-dev/pandas.git |
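The warning text in this record points users from the deprecated `base`/`loffset` arguments to `offset`/`origin`. A minimal sketch of the replacement call, assuming pandas >= 1.1; the sample index and values are made up for illustration.

```python
import pandas as pd

# Toy frame with a one-second DatetimeIndex (values are illustrative).
idx = pd.date_range("2024-01-01", periods=10, freq="s")
df = pd.DataFrame({"value": range(10)}, index=idx)

# Old style: df.resample("3s", base=2)   -> deprecated
# New style: shift the bin edges with `offset` (or anchor them with `origin`).
print(df.resample("3s", offset="2s").sum())
```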
|
2 | is_platform_arm | def is_platform_arm() -> bool:
return platform.machine() in ("arm64", "aarch64") or platform.machine().startswith(
"armv"
)
| c7da9ea5b089ebd0a57a62309e63dff08d26b2c8 | 10 | __init__.py | 57 | TST: Create is_ci_environment helper (#45812) | 39,565 | 0 | 29 | 30 | 13 | 164,509 | 13 | pandas | 5 | pandas/compat/__init__.py | Python | 12 | {
"docstring": "\n Checking if the running platform use ARM architecture.\n\n Returns\n -------\n bool\n True if the running platform uses ARM architecture.\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 19,
"vocab_size": 13
} | https://github.com/pandas-dev/pandas.git |
|
2 | broadcast_shapes | def broadcast_shapes(*shapes):
# NOTE: We have both cached and uncached versions to handle Tracers in shapes.
try:
return _broadcast_shapes_cached(*shapes)
except:
return _broadcast_shapes_uncached(*shapes)
@cache() | 78ed03c4c2970e5e0d11f14a8d4fc968a4efbca2 | @cache() | 11 | lax.py | 52 | [typing] add annotations to jax.numpy.linalg | 27,122 | 1 | 32 | 23 | 22 | 122,213 | 23 | jax | 5 | jax/_src/lax/lax.py | Python | 5 | {
"docstring": "Returns the shape that results from NumPy broadcasting of `shapes`.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | https://github.com/google/jax.git |
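`lax.broadcast_shapes` follows the standard NumPy broadcasting rule; the equivalent public helper in NumPy (available since 1.20) behaves the same way, as in this small sketch:

```python
import numpy as np

print(np.broadcast_shapes((3, 1, 5), (4, 5)))  # (3, 4, 5)
print(np.broadcast_shapes((2, 1), (1, 7)))     # (2, 7)

# Incompatible trailing dimensions raise an error:
try:
    np.broadcast_shapes((3, 2), (4,))
except ValueError as err:
    print("incompatible:", err)
```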
1 | subscription_channel_updated_webhook | def subscription_channel_updated_webhook(subscription_webhook):
return subscription_webhook(
CHANNEL_UPDATED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.CHANNEL_UPDATED
)
CHANNEL_DELETED_SUBSCRIPTION_QUERY =
@pytest.fixture | e5d78c63edd2620e67671e713ef594e924b0e1c9 | @pytest.fixture | 8 | fixtures.py | 36 | New events for changes related to channels (#9570)
* Channel webhooks events added
* use isActive instead od channel status property
* correct CHOICES value for channel_created event | 5,063 | 1 | 21 | 14 | 10 | 26,788 | 10 | saleor | 8 | saleor/plugins/webhook/tests/subscription_webhooks/fixtures.py | Python | 4 | {
"docstring": "\n subscription{\n event{\n ...on ChannelDeleted{\n channel{\n id\n }\n }\n }\n }\n",
"language": "en",
"n_whitespaces": 69,
"n_words": 10,
"vocab_size": 7
} | https://github.com/saleor/saleor.git |
1 | mixin_worker_runtime_parser | def mixin_worker_runtime_parser(parser):
gp = add_arg_group(parser, title='WorkerRuntime')
from jina import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help=,
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help=,
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help=,
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help=,
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help=,
)
gp.add_argument(
'--port-in',
type=int,
default=helper.random_port(),
dest='port',
help='The port for input data to bind to, default a random port between [49152, 65535]',
)
gp.add_argument(
'--host-in',
type=str,
default=__default_host__,
help=f'The host address for binding to, by default it is {__default_host__}',
)
gp.add_argument(
'--native',
action='store_true',
default=False,
help='If set, only native Executors is allowed, and the Executor is always run inside WorkerRuntime.',
)
gp.add_argument(
'--output-array-type',
type=str,
default=None,
help=,
)
| ceb51082b4ec6f31811945ffc67b734acbbebac2 | 10 | worker.py | 363 | feat: convert embedding/tensor array type at executor level (#4484) | 2,127 | 0 | 460 | 216 | 71 | 11,824 | 110 | jina | 21 | jina/parsers/orchestrate/runtimes/worker.py | Python | 92 | {
"docstring": "Mixing in arguments required by :class:`WorkerRuntime` into the given parser.\n :param parser: the parser instance to which we add arguments\n \n The config of the executor, it could be one of the followings:\n * an Executor YAML file (.yml, .yaml, .jaml)\n * a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)\n * a docker image (must start with `docker://`)\n * the string literal of a YAML config (must start with `!` or `jtype: `)\n * the string literal of a JSON config\n\n When use it under Python, one can use the following values additionally:\n - a Python dict that represents the config\n - a text file stream has `.read()` interface\n \n Dictionary of keyword arguments that will override the `with` configuration in `uses`\n \n Dictionary of keyword arguments that will override the `metas` configuration in `uses`\n \n Dictionary of keyword arguments that will override the `requests` configuration in `uses`\n \nThe customized python modules need to be imported before loading the executor\n\nNote that the recommended way is to only import a single module - a simple python file, if your\nexecutor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,\nwhich should be structured as a python package. For more details, please see the\n`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__\n\nThe type of array `tensor` and `embedding` will be serialized to.\n\nSupports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found \n`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.\nDefaults to retaining whatever type is returned by the Executor.\n",
"language": "en",
"n_whitespaces": 343,
"n_words": 245,
"vocab_size": 138
} | https://github.com/jina-ai/jina.git |
|
6 | _sparsemax_threshold_and_support | def _sparsemax_threshold_and_support(X, dim=-1, k=None):
if k is None or k >= X.shape[dim]: # do full sort
topk, _ = torch.sort(X, dim=dim, descending=True)
else:
topk, _ = torch.topk(X, k=k, dim=dim)
topk_cumsum = topk.cumsum(dim) - 1
rhos = _make_ix_like(topk, dim)
support = rhos * topk > topk_cumsum
support_size = support.sum(dim=dim).unsqueeze(dim)
tau = topk_cumsum.gather(dim, support_size - 1)
tau /= support_size.to(X.dtype)
if k is not None and k < X.shape[dim]:
unsolved = (support_size == k).squeeze(dim)
if torch.any(unsolved):
in_ = _roll_last(X, dim)[unsolved]
tau_, ss_ = _sparsemax_threshold_and_support(in_, dim=-1, k=2 * k)
_roll_last(tau, dim)[unsolved] = tau_
_roll_last(support_size, dim)[unsolved] = ss_
return tau, support_size
| 20a8a6fdb516e543d4598c852063ba0fb407f3ba | 14 | activations.py | 336 | Removes dependency on entmax from PyPI, adds entmax source to utils (#1778)
* Removes dependency on entmax from PyPi, add entmax source code into utils instead.
* Removes build status and image from README
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Fix python formatting in docs for pre-commit.
* Removes __main__ from test_losses.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Update entmax imports.
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
Co-authored-by: Daniel Treiman <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> | 947 | 0 | 202 | 221 | 64 | 6,266 | 96 | ludwig | 29 | ludwig/utils/entmax/activations.py | Python | 19 | {
"docstring": "Core computation for sparsemax: optimal threshold and support size.\n\n Parameters\n ----------\n X : torch.Tensor\n The input tensor to compute thresholds over.\n\n dim : int\n The dimension along which to apply sparsemax.\n\n k : int or None\n number of largest elements to partial-sort over. For optimal\n performance, should be slightly bigger than the expected number of\n nonzeros in the solution. If the solution is more than k-sparse,\n this function is recursively called with a 2*k schedule.\n If `None`, full sorting is performed from the beginning.\n\n Returns\n -------\n tau : torch.Tensor like `X`, with all but the `dim` dimension intact\n the threshold value for each vector\n support_size : torch LongTensor, shape like `tau`\n the number of nonzeros in each vector.\n ",
"language": "en",
"n_whitespaces": 211,
"n_words": 118,
"vocab_size": 85
} | https://github.com/ludwig-ai/ludwig.git |
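A minimal NumPy sketch of the same full-sort threshold computation for a single 1-D vector. The names below are illustrative and not part of the vendored entmax code, which additionally handles batches, the `dim` argument, and the partial top-k recursion.

```python
import numpy as np

def sparsemax_threshold(z):
    z_sorted = np.sort(z)[::-1]            # sort descending
    cumsum = np.cumsum(z_sorted) - 1.0
    rho = np.arange(1, z.size + 1)
    support = rho * z_sorted > cumsum      # entries that stay nonzero
    k = int(support.sum())                 # support size
    tau = cumsum[k - 1] / k                # threshold
    return tau, k

z = np.array([1.5, 0.2, 0.1])
tau, k = sparsemax_threshold(z)
p = np.maximum(z - tau, 0.0)               # sparsemax probabilities
print(tau, k, p, p.sum())                  # 0.5 1 [1. 0. 0.] 1.0
```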
|
23 | bulk_update | def bulk_update(self, objs, fields, batch_size=None):
if batch_size is not None and batch_size < 0:
raise ValueError('Batch size must be a positive integer.')
if not fields:
raise ValueError('Field names must be given to bulk_update().')
objs = tuple(objs)
if any(obj.pk is None for obj in objs):
raise ValueError('All bulk_update() objects must have a primary key set.')
fields = [self.model._meta.get_field(name) for name in fields]
if any(not f.concrete or f.many_to_many for f in fields):
raise ValueError('bulk_update() can only be used with concrete fields.')
if any(f.primary_key for f in fields):
raise ValueError('bulk_update() cannot be used with primary key fields.')
if not objs:
return 0
for obj in objs:
obj._prepare_related_fields_for_save(operation_name='bulk_update', fields=fields)
# PK is used twice in the resulting update query, once in the filter
# and once in the WHEN. Each field will also have one CAST.
connection = connections[self.db]
max_batch_size = connection.ops.bulk_batch_size(['pk', 'pk'] + fields, objs)
batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
requires_casting = connection.features.requires_casted_case_in_updates
batches = (objs[i:i + batch_size] for i in range(0, len(objs), batch_size))
updates = []
for batch_objs in batches:
update_kwargs = {}
for field in fields:
when_statements = []
for obj in batch_objs:
attr = getattr(obj, field.attname)
if not hasattr(attr, 'resolve_expression'):
attr = Value(attr, output_field=field)
when_statements.append(When(pk=obj.pk, then=attr))
case_statement = Case(*when_statements, output_field=field)
if requires_casting:
case_statement = Cast(case_statement, output_field=field)
update_kwargs[field.attname] = case_statement
updates.append(([obj.pk for obj in batch_objs], update_kwargs))
rows_updated = 0
with transaction.atomic(using=self.db, savepoint=False):
for pks, update_kwargs in updates:
rows_updated += self.filter(pk__in=pks).update(**update_kwargs)
return rows_updated
bulk_update.alters_data = True
| 0af9a5fc7d765aa05ea784e2c3237675f3bb4b49 | 17 | query.py | 609 | Fixed #33463 -- Fixed QuerySet.bulk_update() with F() expressions. | 50,240 | 0 | 704 | 381 | 135 | 203,160 | 237 | django | 61 | django/db/models/query.py | Python | 42 | {
"docstring": "\n Update the given fields in each of the given objects in the database.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 13,
"vocab_size": 9
} | https://github.com/django/django.git |
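A hypothetical usage sketch of the public `QuerySet.bulk_update()` entry point shown above; it assumes a configured Django project and an invented `Article` model with a `headline` field, so it is not runnable on its own.

```python
from myapp.models import Article  # hypothetical app and model

articles = list(Article.objects.all())
for article in articles:
    article.headline = article.headline.strip().title()

# Issues one UPDATE per batch (CASE/WHEN per field) instead of one query per object.
updated = Article.objects.bulk_update(articles, ["headline"], batch_size=500)
print(f"{updated} rows updated")
```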
|
4 | call_hm | def call_hm(self, other_args):
parser = argparse.ArgumentParser(
prog="hm",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=,
)
parser.add_argument(
"-l",
"--limit",
dest="limit",
type=int,
help="Display N items",
default=10,
)
parser.add_argument(
"-c",
"--category",
default="",
dest="category",
help="Category (e.g., stablecoins). Empty for no category",
)
if other_args and not other_args[0][0] == "-":
other_args.insert(0, "-c")
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_FIGURES_ALLOWED
)
if ns_parser:
pycoingecko_view.display_crypto_heatmap(
category=ns_parser.category,
top=ns_parser.limit,
export=ns_parser.export,
)
| a5848af9088466ae711ce403c9f344b964d581b8 | 11 | overview_controller.py | 222 | Crypto heatmaps (#1416)
* added hm feature
* updated requirements
* updated tests
* updated tests
* updated charts convention and removed duplicated autocompletion
* added percentage
* lint
Co-authored-by: jmaslek <[email protected]> | 84,248 | 0 | 388 | 137 | 50 | 282,685 | 57 | OpenBBTerminal | 27 | gamestonk_terminal/cryptocurrency/overview/overview_controller.py | Python | 37 | {
"docstring": "Process hm commandDisplay cryptocurrencies heatmap [Source: https://coingecko.com]\n Accepts --category or -c to display only coins of a certain category\n (default no category to display all coins ranked by market cap).\n You can look on only top N number of records with --limit.\n ",
"language": "en",
"n_whitespaces": 86,
"n_words": 42,
"vocab_size": 36
} | https://github.com/OpenBB-finance/OpenBBTerminal.git |
|
3 | parsedate_tz | def parsedate_tz(data):
res = _parsedate_tz(data)
if not res:
return
if res[9] is None:
res[9] = 0
return tuple(res)
| 8198943edd73a363c266633e1aa5b2a9e9c9f526 | 9 | _parseaddr.py | 61 | add python 3.10.4 for windows | 57,010 | 0 | 47 | 36 | 14 | 223,619 | 18 | XX-Net | 5 | python3.10.4/Lib/email/_parseaddr.py | Python | 7 | {
"docstring": "Convert a date string to a time tuple.\n\n Accounts for military timezones.\n ",
"language": "en",
"n_whitespaces": 18,
"n_words": 12,
"vocab_size": 11
} | https://github.com/XX-net/XX-Net.git |
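The public entry point for this helper is `email.utils.parsedate_tz`; a quick standard-library example:

```python
from email.utils import parsedate_tz, mktime_tz

t = parsedate_tz("Mon, 20 Nov 1995 19:12:08 -0500")
# Last item of the tuple is the UTC offset in seconds.
print(t)             # (1995, 11, 20, 19, 12, 8, 0, 1, -1, -18000)
print(mktime_tz(t))  # POSIX timestamp with the offset applied
```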
|
10 | get_type | def get_type(self) -> str:
# values of the dict are functions evaluating whether components of this pipeline match the pipeline type
# specified by dict keys
pipeline_types = {
"GenerativeQAPipeline": lambda x: {"Generator", "Retriever"} <= set(x.keys()),
"FAQPipeline": lambda x: {"Docs2Answers"} <= set(x.keys()),
"ExtractiveQAPipeline": lambda x: {"Reader", "Retriever"} <= set(x.keys()),
"SearchSummarizationPipeline": lambda x: {"Retriever", "Summarizer"} <= set(x.keys()),
"TranslationWrapperPipeline": lambda x: {"InputTranslator", "OutputTranslator"} <= set(x.keys()),
"RetrieverQuestionGenerationPipeline": lambda x: {"Retriever", "QuestionGenerator"} <= set(x.keys()),
"QuestionAnswerGenerationPipeline": lambda x: {"QuestionGenerator", "Reader"} <= set(x.keys()),
"DocumentSearchPipeline": lambda x: {"Retriever"} <= set(x.keys()),
"QuestionGenerationPipeline": lambda x: {"QuestionGenerator"} <= set(x.keys()),
"MostSimilarDocumentsPipeline": lambda x: len(x.values()) == 1
and isinstance(list(x.values())[0], BaseDocumentStore),
}
retrievers = [type(comp).__name__ for comp in self.components.values() if isinstance(comp, BaseRetriever)]
doc_stores = [type(comp).__name__ for comp in self.components.values() if isinstance(comp, BaseDocumentStore)]
pipeline_type = next(
(p_type for p_type, eval_f in pipeline_types.items() if eval_f(self.components)), "Unknown pipeline"
)
retrievers_used = retrievers if retrievers else "None"
doc_stores_used = doc_stores if doc_stores else "None"
return f"{pipeline_type} (retriever: {retrievers_used}, doc_store: {doc_stores_used})"
| 938e6fda5b686ec49c52cb23f786a74d9321e048 | 17 | base.py | 556 | Classify pipeline's type based on its components (#3132)
* Add pipeline get_type mehod
* Add pipeline uptime
* Add pipeline telemetry event sending
* Send pipeline telemetry once a day (at most)
* Add pipeline invocation counter, change invocation counter logic
* Update allowed telemetry parameters - allow pipeline parameters
* PR review: add unit test | 75,161 | 0 | 369 | 317 | 89 | 257,868 | 153 | haystack | 26 | haystack/pipelines/base.py | Python | 25 | {
"docstring": "\n Returns the type of the pipeline.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 6,
"vocab_size": 5
} | https://github.com/deepset-ai/haystack.git |
|
1 | record_states | def record_states(hass):
mp = "media_player.test"
mp2 = "media_player.test2"
mp3 = "media_player.test3"
therm = "thermostat.test"
therm2 = "thermostat.test2"
zone = "zone.home"
script_c = "script.can_cancel_this_one"
| 29bda196b5e0a90a2bea7e1797742236114afc1c | 7 | test_init.py | 62 | Break apart recorder into tasks and core modules (#71222) | 98,729 | 0 | 47 | 446 | 17 | 299,827 | 23 | core | 9 | tests/components/history/test_init.py | Python | 60 | {
"docstring": "Record some test states.\n\n We inject a bunch of state updates from media player, zone and\n thermostat.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 17,
"vocab_size": 17
} | https://github.com/home-assistant/core.git |
|
3 | labels_to_dataset | def labels_to_dataset(labels, label_mode, num_classes):
label_ds = tf.data.Dataset.from_tensor_slices(labels)
if label_mode == 'binary':
label_ds = label_ds.map(
lambda x: tf.expand_dims(tf.cast(x, 'float32'), axis=-1),
num_parallel_calls=tf.data.AUTOTUNE)
elif label_mode == 'categorical':
label_ds = label_ds.map(lambda x: tf.one_hot(x, num_classes),
num_parallel_calls=tf.data.AUTOTUNE)
return label_ds
| 3073e00912838454359079a35f1638ccf06e855f | 16 | dataset_utils.py | 154 | Fix the issue in the other two places it occurs | 79,896 | 0 | 85 | 96 | 24 | 269,098 | 33 | keras | 17 | keras/preprocessing/dataset_utils.py | Python | 10 | {
"docstring": "Create a tf.data.Dataset from the list/tuple of labels.\n\n Args:\n labels: list/tuple of labels to be converted into a tf.data.Dataset.\n label_mode: String describing the encoding of `labels`. Options are:\n - 'binary' indicates that the labels (there can be only 2) are encoded as\n `float32` scalars with values 0 or 1 (e.g. for `binary_crossentropy`).\n - 'categorical' means that the labels are mapped into a categorical vector.\n (e.g. for `categorical_crossentropy` loss).\n num_classes: number of classes of labels.\n\n Returns:\n A `Dataset` instance.\n ",
"language": "en",
"n_whitespaces": 109,
"n_words": 78,
"vocab_size": 58
} | https://github.com/keras-team/keras.git |
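A small sketch of what the helper does in `'categorical'` mode, using only public `tf.data` ops (assumes TensorFlow is installed; the labels below are made up):

```python
import tensorflow as tf

labels = [0, 2, 1, 2]
num_classes = 3

label_ds = tf.data.Dataset.from_tensor_slices(labels).map(
    lambda x: tf.one_hot(x, num_classes),
    num_parallel_calls=tf.data.AUTOTUNE,
)
for one_hot in label_ds:
    print(one_hot.numpy())  # [1. 0. 0.], [0. 0. 1.], [0. 1. 0.], [0. 0. 1.]
```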
|
1 | test_change_view_without_object_change_permission | def test_change_view_without_object_change_permission(self):
change_url = reverse("admin9:admin_views_article_change", args=(self.a1.pk,))
self.client.force_login(self.viewuser)
response = self.client.get(change_url)
self.assertEqual(response.context["title"], "View article")
self.assertContains(response, "<title>View article | Django site admin</title>")
self.assertContains(response, "<h1>View article</h1>")
self.assertContains(
response,
'<a href="/test_admin/admin9/admin_views/article/" class="closelink">Close</a>',
)
| 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | 12 | tests.py | 138 | Refs #33476 -- Reformatted code with Black. | 52,110 | 0 | 114 | 81 | 27 | 207,805 | 29 | django | 15 | tests/admin_views/tests.py | Python | 11 | {
"docstring": "\n The object should be read-only if the user has permission to view it\n and change objects of that type but not to change the current object.\n ",
"language": "en",
"n_whitespaces": 48,
"n_words": 26,
"vocab_size": 23
} | https://github.com/django/django.git |
|
3 | get_party_type | def get_party_type(doctype, txt, searchfield, start, page_len, filters):
cond = ""
if filters and filters.get("account"):
account_type = frappe.db.get_value("Account", filters.get("account"), "account_type")
cond = "and account_type = '%s'" % account_type
return frappe.db.sql(
.format(
key=searchfield, cond=cond
),
{"txt": "%" + txt + "%", "start": start, "page_len": page_len},
)
| 494bd9ef78313436f0424b918f200dab8fc7c20b | 13 | party_type.py | 155 | style: format code with black | 14,544 | 0 | 33 | 91 | 36 | 67,491 | 44 | erpnext | 16 | erpnext/setup/doctype/party_type/party_type.py | Python | 13 | {
"docstring": "select name from `tabParty Type`\n\t\t\twhere `{key}` LIKE %(txt)s {cond}\n\t\t\torder by name limit %(start)s, %(page_len)s",
"language": "en",
"n_whitespaces": 13,
"n_words": 16,
"vocab_size": 15
} | https://github.com/frappe/erpnext.git |
|
2 | get_collection_path_regexes | def get_collection_path_regexes() -> t.Tuple[t.Optional[t.Pattern], t.Optional[t.Pattern]]:
if data_context().content.collection:
collection_search_re = re.compile(r'/%s/' % data_context().content.collection.directory)
collection_sub_re = re.compile(r'^.*?/%s/' % data_context().content.collection.directory)
else:
collection_search_re = None
collection_sub_re = None
return collection_search_re, collection_sub_re
| 3eb0485dd92c88cc92152d3656d94492db44b183 | 16 | __init__.py | 141 | ansible-test - Use more native type hints. (#78435)
* ansible-test - Use more native type hints.
Simple search and replace to switch from comments to native type hints for return types of functions with no arguments.
* ansible-test - Use more native type hints.
Conversion of simple single-line function annotation type comments to native type hints.
* ansible-test - Use more native type hints.
Conversion of single-line function annotation type comments with default values to native type hints.
* ansible-test - Use more native type hints.
Manual conversion of type annotation comments for functions which have pylint directives. | 79,100 | 0 | 67 | 87 | 18 | 267,819 | 27 | ansible | 13 | test/lib/ansible_test/_internal/commands/coverage/__init__.py | Python | 9 | {
"docstring": "Return a pair of regexes used for identifying and manipulating collection paths.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | https://github.com/ansible/ansible.git |
|
5 | available | def available(self) -> bool:
return (
self.poe_mode is not None
and self.controller.available
and self.client.switch_port
and self.client.switch_mac
and self.client.switch_mac in self.controller.api.devices
)
| 3798d28bec6dc257da8387a6751949d47fb29a29 | 12 | switch.py | 77 | Improve entity type hints [u] (#77884) | 105,376 | 0 | 97 | 49 | 17 | 306,592 | 21 | core | 10 | homeassistant/components/unifi/switch.py | Python | 13 | {
"docstring": "Return if switch is available.\n\n Poe_mode None means its POE state is unknown.\n Sw_mac unavailable means restored client.\n ",
"language": "en",
"n_whitespaces": 39,
"n_words": 18,
"vocab_size": 16
} | https://github.com/home-assistant/core.git |
|
2 | async_shutdown | async def async_shutdown(self):
if self.task:
self.task.cancel()
await asyncio.wait((self.task,))
self._unschedule_refresh()
await self.connection.stop()
| 551fb449752e1c3f55eb688d24509876020852d1 | 12 | __init__.py | 76 | Stop coordinator before connection in nibe_heatpump (#80396)
Stop coordinator in nibe_heatpump | 88,359 | 0 | 61 | 43 | 10 | 289,215 | 11 | core | 9 | homeassistant/components/nibe_heatpump/__init__.py | Python | 6 | {
"docstring": "Make sure a coordinator is shut down as well as it's connection.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | https://github.com/home-assistant/core.git |
|
3 | autorun_get_interactive_session | def autorun_get_interactive_session(cmds, **kargs):
# type: (str, **Any) -> Tuple[str, Any]
sstdout, sstderr, sexcepthook = sys.stdout, sys.stderr, sys.excepthook
sw = StringWriter()
h_old = log_scapy.handlers[0]
log_scapy.removeHandler(h_old)
log_scapy.addHandler(logging.StreamHandler(stream=sw))
try:
try:
sys.stdout = sys.stderr = sw
sys.excepthook = sys.__excepthook__ # type: ignore
res = autorun_commands_timeout(cmds, **kargs)
except StopAutorun as e:
e.code_run = sw.s
raise
finally:
sys.stdout, sys.stderr, sys.excepthook = sstdout, sstderr, sexcepthook
log_scapy.removeHandler(log_scapy.handlers[0])
log_scapy.addHandler(h_old)
return sw.s, res
| b754f97d346e2db6e4a9e9cc6ff88010f502db89 | 13 | autorun.py | 228 | Update Mypy version | 52,584 | 0 | 184 | 142 | 43 | 209,035 | 63 | scapy | 27 | scapy/autorun.py | Python | 19 | {
"docstring": "Create an interactive session and execute the\n commands passed as \"cmds\" and return all output\n\n :param cmds: a list of commands to run\n :param timeout: timeout in seconds\n :returns: (output, returned) contains both sys.stdout and sys.stderr logs\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 37,
"vocab_size": 33
} | https://github.com/secdev/scapy.git |
|
1 | generate_gexf | def generate_gexf(G, encoding="utf-8", prettyprint=True, version="1.2draft"):
writer = GEXFWriter(encoding=encoding, prettyprint=prettyprint, version=version)
writer.add_graph(G)
yield from str(writer).splitlines()
@open_file(0, mode="rb") | 54e36acb36c75e09bc53dfcb81c73386b82a20c9 | @open_file(0, mode="rb") | 10 | gexf.py | 99 | Update gexf website link in documentation (#5275)
Hi, we've recently put the GEXF website again into its own domain http://gexf.net/ so this documentation should be updated. Thanks! | 41,768 | 1 | 27 | 50 | 16 | 176,215 | 16 | networkx | 12 | networkx/readwrite/gexf.py | Python | 4 | {
"docstring": "Generate lines of GEXF format representation of G.\n\n \"GEXF (Graph Exchange XML Format) is a language for describing\n complex networks structures, their associated data and dynamics\" [1]_.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n encoding : string (optional, default: 'utf-8')\n Encoding for text data.\n prettyprint : bool (optional, default: True)\n If True use line breaks and indenting in output XML.\n version : string (default: 1.2draft)\n Version of GEFX File Format (see http://gexf.net/schema.html)\n Supported values: \"1.1draft\", \"1.2draft\"\n\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> linefeed = chr(10) # linefeed=\\n\n >>> s = linefeed.join(nx.generate_gexf(G))\n >>> for line in nx.generate_gexf(G): # doctest: +SKIP\n ... print(line)\n\n Notes\n -----\n This implementation does not support mixed graphs (directed and undirected\n edges together).\n\n The node id attribute is set to be the string of the node label.\n If you want to specify an id use set it as node data, e.g.\n node['a']['id']=1 to set the id of node 'a' to 1.\n\n References\n ----------\n .. [1] GEXF File Format, https://gephi.org/gexf/format/\n ",
"language": "en",
"n_whitespaces": 262,
"n_words": 163,
"vocab_size": 120
} | https://github.com/networkx/networkx.git |
2 | _can_use_libjoin | def _can_use_libjoin(self) -> bool:
if type(self) is Index:
# excludes EAs
return isinstance(self.dtype, np.dtype)
return not is_interval_dtype(self.dtype)
# --------------------------------------------------------------------
# Uncategorized Methods
| 4248b23371a70b339a2c16b8e5caca9c2e5897f8 | 10 | base.py | 61 | ENH: ExtensionEngine (#45514) | 39,495 | 0 | 71 | 35 | 19 | 163,773 | 22 | pandas | 9 | pandas/core/indexes/base.py | Python | 7 | {
"docstring": "\n Whether we can use the fastpaths implement in _libs.join\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | https://github.com/pandas-dev/pandas.git |
|
18 | remount | def remount(name, device, mkmnt=False, fstype="", opts="defaults", user=None):
force_mount = False
if __grains__["os"] in ["MacOS", "Darwin"]:
if opts == "defaults":
opts = "noowners"
if fstype == "smbfs":
force_mount = True
if "AIX" in __grains__["os"]:
if opts == "defaults":
opts = []
if isinstance(opts, str):
opts = opts.split(",")
mnts = active()
if name in mnts:
# The mount point is mounted, attempt to remount it with the given data
if "remount" not in opts and __grains__["os"] not in [
"OpenBSD",
"MacOS",
"Darwin",
]:
opts.append("remount")
if force_mount:
# We need to force the mount but first we should unmount
umount(name, device, user=user)
args = ""
if opts:
lopts = ",".join(opts)
args = "-o {}".format(lopts)
if fstype:
# use of fstype on AIX differs from typical Linux use of
# -t functionality AIX uses -v vfsname, -t fstype mounts
# all with fstype in /etc/filesystems
if "AIX" in __grains__["os"]:
args += " -v {}".format(fstype)
elif "solaris" in __grains__["os"].lower():
args += " -F {}".format(fstype)
else:
args += " -t {}".format(fstype)
if __grains__["os"] not in ["OpenBSD", "MacOS", "Darwin"] or force_mount:
cmd = "mount {} {} {} ".format(args, device, name)
else:
cmd = "mount -u {} {} {} ".format(args, device, name)
out = __salt__["cmd.run_all"](cmd, runas=user, python_shell=False)
if out["retcode"]:
return out["stderr"]
return True
# Mount a filesystem that isn't already
return mount(name, device, mkmnt, fstype, opts, user=user)
| 9354c15e0818715d055242d14b1308643a6918d7 | 16 | mount.py | 529 | Convert Py 2'isms to Python 3, and add tests for set_filesystems on AIX | 54,317 | 0 | 623 | 299 | 124 | 216,005 | 219 | salt | 27 | salt/modules/mount.py | Python | 42 | {
"docstring": "\n Attempt to remount a device, if the device is not already mounted, mount\n is called\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' mount.remount /mnt/foo /dev/sdz1 True\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 26,
"vocab_size": 25
} | https://github.com/saltstack/salt.git |
|
3 | close | async def close(self):
if not self._is_closed:
await asyncio.gather(*[q.close() for q in self._all_batch_queues()])
self._executor.close()
self._is_closed = True
| 46d7973043e2e599149812cc6fc7671b935c13f8 | 15 | request_handling.py | 80 | feat: dynamic batching (#5410)
Co-authored-by: Johannes Messner <[email protected]>
Co-authored-by: Alaeddine Abdessalem <[email protected]> | 2,779 | 0 | 63 | 46 | 16 | 13,871 | 16 | jina | 8 | jina/serve/runtimes/worker/request_handling.py | Python | 5 | {
"docstring": "Close the data request handler, by closing the executor and the batch queues.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 11
} | https://github.com/jina-ai/jina.git |
|
3 | filter_symbols | def filter_symbols(iterator, exclude):
exclude = set(exclude)
for s in iterator:
if s not in exclude:
yield s
| f3b08522003f40868afb20304fc0fa5b16d13f6a | 10 | iterables.py | 45 | Cleanup documentation | 48,434 | 0 | 44 | 27 | 14 | 197,287 | 17 | sympy | 5 | sympy/utilities/iterables.py | Python | 5 | {
"docstring": "\n Only yield elements from `iterator` that do not occur in `exclude`.\n\n Parameters\n ==========\n\n iterator : iterable\n iterator to take elements from\n\n exclude : iterable\n elements to exclude\n\n Returns\n =======\n\n iterator : iterator\n filtered iterator\n ",
"language": "en",
"n_whitespaces": 83,
"n_words": 34,
"vocab_size": 22
} | https://github.com/sympy/sympy.git |
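A typical use is skipping names that already exist when drawing fresh symbols from `numbered_symbols` (assumes SymPy is installed):

```python
from sympy import symbols, numbered_symbols
from sympy.utilities.iterables import filter_symbols

x0, x1 = symbols("x0 x1")
fresh = filter_symbols(numbered_symbols("x"), exclude=[x0, x1])
print([next(fresh) for _ in range(3)])  # [x2, x3, x4]
```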
|
4 | edit_focus_options | def edit_focus_options(self) -> typing.Sequence[str]:
flow = self.master.view.focus.flow
focus_options = []
if isinstance(flow, tcp.TCPFlow):
focus_options = ["tcp-message"]
elif isinstance(flow, http.HTTPFlow):
focus_options = [
"cookies",
"urlencoded form",
"multipart form",
"path",
"method",
"query",
"reason",
"request-headers",
"response-headers",
"request-body",
"response-body",
"status_code",
"set-cookies",
"url",
]
elif isinstance(flow, dns.DNSFlow):
raise exceptions.CommandError("Cannot edit DNS flows yet, please submit a patch.")
return focus_options
| fab7016b318d7c37fc30cef9c0567b9b620b883e | 11 | consoleaddons.py | 181 | beautify flowtable dns entries
this isn't perfect (the whole table needs to be refactored properly),
but good enough for now. | 73,587 | 0 | 357 | 104 | 44 | 251,073 | 54 | mitmproxy | 19 | mitmproxy/tools/console/consoleaddons.py | Python | 28 | {
"docstring": "\n Possible components for console.edit.focus.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 4,
"vocab_size": 4
} | https://github.com/mitmproxy/mitmproxy.git |
|
4 | _check_status | def _check_status(self) -> bool:
job_status = self._job["async_status"]
percent = self._job["async_percent_completion"]
logger.info(f"{self}: is {percent} complete ({job_status})")
if self.elapsed_time > self.job_timeout:
logger.info(f"{self}: run more than maximum allowed time {self.job_timeout}.")
self._finish_time = pendulum.now()
self._failed = True
return True
elif job_status == Status.COMPLETED:
self._finish_time = pendulum.now() # TODO: is not actual running time, but interval between check_status calls
return True
elif job_status in [Status.FAILED, Status.SKIPPED]:
self._finish_time = pendulum.now()
self._failed = True
logger.info(f"{self}: has status {job_status} after {self.elapsed_time.in_seconds()} seconds.")
return True
return False
| a3aae8017a0a40ff2006e2567f71dccb04c997a5 | 15 | async_job.py | 243 | ๐ ๐ Source FB Marketing: performance and reliability fixes (#9805)
* Facebook Marketing performance improvement
* add comments and little refactoring
* fix integration tests with the new config
* improve job status handling, limit concurrency to 10
* fix campaign jobs, refactor manager
* big refactoring of async jobs, support random order of slices
* update source _read_incremental to hook new state logic
* fix issues with timeout
* remove debugging and clean up, improve retry logic
* merge changes from #8234
* fix call super _read_increment
* generalize batch execution, add use_batch flag
* improve coverage, do some refactoring of spec
* update test, remove overrides of source
* add split by AdSet
* add smaller insights
* fix end_date < start_date case
* add account_id to PK
* add notes
* fix new streams
* fix reversed incremental stream
* update spec.json for SAT
* upgrade CDK and bump version
Co-authored-by: Dmytro Rezchykov <[email protected]>
Co-authored-by: Eugene Kulak <[email protected]> | 532 | 0 | 245 | 119 | 54 | 3,743 | 78 | airbyte | 19 | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/async_job.py | Python | 22 | {
"docstring": "Perform status check\n\n :return: True if the job is completed, False - if the job is still running\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 18,
"vocab_size": 14
} | https://github.com/airbytehq/airbyte.git |
|
2 | _reset_layer_losses | def _reset_layer_losses(parent_layer):
losses_dict = {}
for layer in utils.list_all_layers_and_sublayers(parent_layer):
losses_dict[layer] = {
'losses': layer._losses[:],
'eager_losses': layer._eager_losses[:]
}
with utils.no_automatic_dependency_tracking_scope(layer):
layer._losses = []
layer._eager_losses = []
return losses_dict
| e61cbc52fd3b0170769c120e9b8dabc8c4205322 | 12 | save_impl.py | 113 | Support Keras saving/loading for ShardedVariables with arbitrary partitions.
PiperOrigin-RevId: 439837516 | 79,929 | 0 | 64 | 66 | 22 | 269,147 | 27 | keras | 9 | keras/saving/saved_model/save_impl.py | Python | 11 | {
"docstring": "Resets losses of layer and its sublayers, and returns original losses.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 10
} | https://github.com/keras-team/keras.git |
|
11 | list | def list(self, verbose=True):
self._check()
for tarinfo in self:
if verbose:
print(filemode(tarinfo.mode), end=' ')
print("%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid), end=' ')
if tarinfo.ischr() or tarinfo.isblk():
print("%10s" % ("%d,%d" \
% (tarinfo.devmajor, tarinfo.devminor)), end=' ')
else:
print("%10d" % tarinfo.size, end=' ')
print("%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6], end=' ')
print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
if verbose:
if tarinfo.issym():
print("->", tarinfo.linkname, end=' ')
if tarinfo.islnk():
print("link to", tarinfo.linkname, end=' ')
print()
| c69d55f7c82d5ae2cce542bcfb98d043ca4836a0 | 18 | tarfile.py | 341 | Vendor in pip 22.1.2 | 3,836 | 0 | 408 | 200 | 46 | 21,440 | 74 | pipenv | 26 | pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py | Python | 21 | {
"docstring": "Print a table of contents to sys.stdout. If `verbose' is False, only\n the names of the members are printed. If it is True, an `ls -l'-like\n output is produced.\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 29,
"vocab_size": 24
} | https://github.com/pypa/pipenv.git |
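The same method exists on the standard-library `tarfile` module (the code above is a vendored backport). A self-contained sketch that builds a tiny in-memory archive and prints its `ls -l`-like listing:

```python
import io
import tarfile

# Build a tiny archive in memory so the example needs no files on disk.
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as tf:
    data = b"hello\n"
    info = tarfile.TarInfo("hello.txt")
    info.size = len(data)
    tf.addfile(info, io.BytesIO(data))

buf.seek(0)
with tarfile.open(fileobj=buf, mode="r") as tf:
    tf.list(verbose=True)  # prints mode, owner, size, mtime and name to stdout
```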
|
6 | _find_all_or_none | def _find_all_or_none(qt_library_info, mandatory_dll_patterns, optional_dll_patterns=None):
optional_dll_patterns = optional_dll_patterns or []
# Resolve path to the the corresponding python package (actually, its parent directory). Used to preserve directory
# structure when DLLs are collected from the python package (e.g., PyPI wheels).
package_parent_path = pathlib.Path(qt_library_info.package_location).resolve().parent
# In PyQt5/PyQt6, the DLLs we are looking for are located in location['BinariesPath'], whereas in PySide2/PySide6,
# they are located in location['PrefixPath'].
dll_path = qt_library_info.location['BinariesPath' if qt_library_info.is_pyqt else 'PrefixPath']
dll_path = pathlib.Path(dll_path).resolve()
# Helper for processing single DLL pattern | 49abfa5498b1db83b8f1b2e859e461b1e8540c6f | 12 | qt.py | 105 | hookutils: qt: ensure ANGLE DLLs are collected from Anaconda Qt5
Anaconda's Qt5 ships ANGLE DLLs (`libEGL.dll` and `libGLESv2.dll`)
but does not seem to provide the `d3dcompiler_XY.dll`. Therefore,
we need to adjust the extra Qt DLL collection to consider the
latter an optional dependency whose absence does not preclude
the collection of the ANGLE DLL group.
Rework the `get_qt_binaries` hook utility function and its
`_find_all_or_none` helper to peform collection based on a list
of mandatory and a list of optional patterns, instead of a single
list and number of expected matches (since up until now, all
matches were always expected to be found). | 77,505 | 0 | 111 | 100 | 58 | 263,901 | 81 | pyinstaller | 13 | PyInstaller/utils/hooks/qt.py | Python | 15 | {
"docstring": "\n Try to find Qt DLLs from the specified mandatory pattern list. If all mandatory patterns resolve to DLLs, collect\n them all, as well as any DLLs from the optional pattern list. If a mandatory pattern fails to resolve to a DLL,\n return an empty list.\n\n This allows all-or-none collection of particular groups of Qt DLLs that may or may not be available.\n ",
"language": "en",
"n_whitespaces": 78,
"n_words": 62,
"vocab_size": 42
} | https://github.com/pyinstaller/pyinstaller.git |
|
1 | test_double_stamping | def test_double_stamping(self, subtests):
self.app.conf.task_always_eager = True
self.app.conf.task_store_eager_result = True
self.app.conf.result_extended = True
sig_1 = self.add.s(2, 2)
sig_1.stamp(stamp1="stamp1")
sig_1.stamp(stamp2="stamp2")
sig_1_res = sig_1.freeze()
sig_1.apply()
with subtests.test("sig_1_res is stamped with stamp1", stamp1=["stamp1"]):
assert sig_1_res._get_task_meta()["stamp1"] == ["stamp1"]
with subtests.test("sig_1_res is stamped with stamp2", stamp2=["stamp2"]):
assert sig_1_res._get_task_meta()["stamp2"] == ["stamp2"]
with subtests.test("sig_1_res is stamped twice", stamped_headers=["stamp2", "stamp1"]):
assert sig_1_res._get_task_meta()["stamped_headers"] == ["stamp2", "stamp1", "groups"]
| 1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc | 12 | test_canvas.py | 291 | Canvas Header Stamping (#7384)
* Strip down the header-stamping PR to the basics.
* Serialize groups.
* Add groups to result backend meta data.
* Fix spelling mistake.
* Revert changes to canvas.py
* Revert changes to app/base.py
* Add stamping implementation to canvas.py
* Send task to AMQP with groups.
* Successfully pass single group to result.
* _freeze_gid dict merge fixed
* First draft of the visitor API.
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* OptionsVisitor created
* Fixed canvas.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Added test for simple test for chord and fixed chord implementation
* Changed _IMMUTABLE_OPTIONS
* Fixed chord interface
* Fixed chord interface
* Fixed chord interface
* Fixed chord interface
* Fixed list order
* Fixed tests (stamp test and chord test), fixed order in groups
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Fixed lint and elements
* Changed implementation of stamp API and fix lint
* Added documentation to Stamping API. Added chord with groups test
* Implemented stamping inside replace and added test for an implementation
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Added test additonal tests for chord, improved coverage
* Added test additonal tests for chord, improved coverage
* Added test additonal tests for chord, improved coverage
* Splitted into subtests
* Group stamping rollback
* group.id is None fixed
* Added integration test
* Added integration test
* apply_async fixed
* Integration test and test_chord fixed
* Lint fixed
* chord freeze fixed
* Minor fixes.
* Chain apply_async fixed and tests fixed
* lint fixed
* Added integration test for chord
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* type -> isinstance
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Redo header stamping (#7341)
* _freeze_gid dict merge fixed
* OptionsVisitor created
* Fixed canvas.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Added test for simple test for chord and fixed chord implementation
* Changed _IMMUTABLE_OPTIONS
* Fixed chord interface
* Fixed chord interface
* Fixed chord interface
* Fixed chord interface
* Fixed list order
* Fixed tests (stamp test and chord test), fixed order in groups
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Fixed lint and elements
* Changed implementation of stamp API and fix lint
* Added documentation to Stamping API. Added chord with groups test
* Implemented stamping inside replace and added test for an implementation
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Added test additonal tests for chord, improved coverage
* Added test additonal tests for chord, improved coverage
* Added test additonal tests for chord, improved coverage
* Splitted into subtests
* Group stamping rollback
* group.id is None fixed
* Added integration test
* Added integration test
* apply_async fixed
* Integration test and test_chord fixed
* Lint fixed
* chord freeze fixed
* Minor fixes.
* Chain apply_async fixed and tests fixed
* lint fixed
* Added integration test for chord
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* type -> isinstance
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Omer Katz <[email protected]>
* Added stamping mechanism
* Manual stamping improved
* flake8 fixed
* Added subtests
* Add comma.
* Moved groups to stamps
* Fixed chord and added test for that
* Strip down the header-stamping PR to the basics.
* Serialize groups.
* Add groups to result backend meta data.
* Fix spelling mistake.
* Revert changes to canvas.py
* Revert changes to app/base.py
* Add stamping implementation to canvas.py
* Send task to AMQP with groups.
* Successfully pass single group to result.
* _freeze_gid dict merge fixed
* First draft of the visitor API.
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* OptionsVisitor created
* Fixed canvas.py
* Added test for simple test for chord and fixed chord implementation
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Changed _IMMUTABLE_OPTIONS
* Fixed chord interface
* Fixed chord interface
* Fixed chord interface
* Fixed chord interface
* Fixed list order
* Fixed tests (stamp test and chord test), fixed order in groups
* Fixed lint and elements
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Changed implementation of stamp API and fix lint
* Added documentation to Stamping API. Added chord with groups test
* Implemented stamping inside replace and added test for an implementation
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Added test additonal tests for chord, improved coverage
* Added test additonal tests for chord, improved coverage
* Added test additonal tests for chord, improved coverage
* Splitted into subtests
* Group stamping rollback
* group.id is None fixed
* Added integration test
* Added integration test
* apply_async fixed
* Integration test and test_chord fixed
* Lint fixed
* chord freeze fixed
* Minor fixes.
* Chain apply_async fixed and tests fixed
* lint fixed
* Added integration test for chord
* type -> isinstance
* Added stamping mechanism
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Manual stamping improved
* fail_ci_if_error uncommented
* flake8 fixed
* Added subtests
* Changes
* Add comma.
* Fixed chord and added test for that
* canvas.py fixed
* Test chord.py fixed
* Fixed stamped_headers
* collections import fixed
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* collections import fixed
* Update celery/backends/base.py
Co-authored-by: Omer Katz <[email protected]>
* ampq.py fixed
* Refrain from using deprecated import path.
* Fix test_complex_chain regression.
Whenever we stamp a group we need to freeze it first if it wasn't already frozen.
Somewhere along the line, the group id changed because we were freezing twice.
This commit places the stamping operation after preparing the chain's steps which fixes the problem somehow.
We don't know why yet.
* Fixed integration tests
* Fixed integration tests
* Fixed integration tests
* Fixed integration tests
* Fixed issues with maybe_list. Add documentation
* Fixed potential issue with integration tests
* Fixed issues with _regen
* Fixed issues with _regen
* Fixed test_generator issues
* Fixed _regen stamping
* Fixed _regen stamping
* Fixed TimeOut issue
* Fixed TimeOut issue
* Fixed TimeOut issue
* Update docs/userguide/canvas.rst
Co-authored-by: Omer Katz <[email protected]>
* Fixed Couchbase
* Better stamping intro
* New GroupVisitor example
* Adjust documentation.
Co-authored-by: Naomi Elstein <[email protected]>
Co-authored-by: Omer Katz <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Asif Saif Uddin <[email protected]>
Co-authored-by: Omer Katz <[email protected]> | 52,214 | 0 | 174 | 162 | 37 | 208,123 | 57 | celery | 20 | t/unit/tasks/test_canvas.py | Python | 15 | {
"docstring": "\n Test manual signature stamping with two different stamps.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 8,
"vocab_size": 8
} | https://github.com/celery/celery.git |
|
2 | arraylist_to_blobprotovector_str | def arraylist_to_blobprotovector_str(arraylist):
vec = caffe_pb2.BlobProtoVector()
vec.blobs.extend([array_to_blobproto(arr) for arr in arraylist])
return vec.SerializeToString()
| cc4d0564756ca067516f71718a3d135996525909 | 10 | io.py | 61 | Balanced joint maximum mean discrepancy for deep transfer learning | 12,051 | 0 | 24 | 36 | 12 | 60,260 | 12 | transferlearning | 10 | code/deep/BJMMD/caffe/python/caffe/io.py | Python | 4 | {
"docstring": "Converts a list of arrays to a serialized blobprotovec, which could be\n then passed to a network for processing.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 19,
"vocab_size": 16
} | https://github.com/jindongwang/transferlearning.git |
|
4 | cleanup | def cleanup(self):
cleanup_filename = set(self._uid2filename.values())
if Path(self._cache_folder / SETTING_FILENAME).exists():
with Path(self._cache_folder / SETTING_FILENAME).open(mode='r') as f:
uid2filename: Dict[str, str] = json_tricks.load(f)
cleanup_filename = cleanup_filename.difference(uid2filename.values())
for filename in cleanup_filename:
filepath = self._data_folder / filename
if filepath.exists():
os.remove(str(filepath))
| 9c19236902c2c66238c75f149cf9cefa411494c9 | 16 | storage.py | 178 | [Compression] update distillation utils (#5215) | 25,012 | 0 | 141 | 106 | 26 | 113,714 | 35 | nni | 24 | nni/contrib/distillation/storage.py | Python | 10 | {
"docstring": "\n Cleanup the saved files under `cache_folder`.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 6,
"vocab_size": 6
} | https://github.com/microsoft/nni.git |
|
2 | column_names | def column_names(self) -> Dict[str, List[str]]:
self._check_values_type()
return {k: dataset.column_names for k, dataset in self.items()}
| 1904d0c0a3a96330d9b870cdca3e9a3a137f2977 | 9 | dataset_dict.py | 62 | Add code examples for DatasetDict (#4245)
* ๐ add code examples for DatasetDict
* ๐ apply quentin review | 21,968 | 0 | 35 | 39 | 14 | 104,786 | 14 | datasets | 9 | src/datasets/dataset_dict.py | Python | 16 | {
"docstring": "Names of the columns in each split of the dataset.\n\n Example:\n\n ```py\n >>> from datasets import load_dataset\n >>> ds = load_dataset(\"rotten_tomatoes\")\n >>> ds.column_names\n {'test': ['text', 'label'],\n 'train': ['text', 'label'],\n 'validation': ['text', 'label']}\n ```\n ",
"language": "en",
"n_whitespaces": 105,
"n_words": 33,
"vocab_size": 26
} | https://github.com/huggingface/datasets.git |
|
2 | test_merge_asof_on_variations | def test_merge_asof_on_variations():
left = {"a": [1, 5, 10], "left_val": ["a", "b", "c"]}
left_index = [6, 8, 12]
right = {"a": [1, 2, 3, 6, 7], "right_val": ["d", "e", "f", "g", "h"]}
right_index = [6, 7, 8, 9, 15]
pandas_left, pandas_right = (
pandas.DataFrame(left, index=left_index),
pandas.DataFrame(right, index=right_index),
)
modin_left, modin_right = (
pd.DataFrame(left, index=left_index),
pd.DataFrame(right, index=right_index),
)
for on_arguments in [
{"on": "a"},
{"left_on": "a", "right_on": "a"},
{"left_on": "a", "right_index": True},
{"left_index": True, "right_on": "a"},
{"left_index": True, "right_index": True},
]:
pandas_merged = pandas.merge_asof(pandas_left, pandas_right, **on_arguments)
with warns_that_defaulting_to_pandas():
modin_merged = pd.merge_asof(modin_left, modin_right, **on_arguments)
df_equals(pandas_merged, modin_merged)
| be2716f393fddd2f669f26616f80e051fc7ceee6 | 13 | test_general.py | 367 | TEST-#3655: Check that Modin is defaulting to Pandas. (#3656)
Co-authored-by: Dmitry Chigarev <[email protected]>
Co-authored-by: Devin Petersohn <[email protected]>
Signed-off-by: mvashishtha <[email protected]> | 35,224 | 0 | 221 | 226 | 68 | 153,002 | 93 | modin | 19 | modin/pandas/test/test_general.py | Python | 24 | {
"docstring": "on=,left_on=,right_on=,right_index=,left_index= options match Pandas.",
"language": "en",
"n_whitespaces": 3,
"n_words": 4,
"vocab_size": 4
} | https://github.com/modin-project/modin.git |
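A minimal `pandas.merge_asof` call mirroring the `on="a"` case exercised in the test above (both frames must be sorted on the key; the default direction is "backward"):

```python
import pandas as pd

left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": ["d", "e", "f", "g", "h"]})

print(pd.merge_asof(left, right, on="a"))
#     a left_val right_val
# 0   1        a         d
# 1   5        b         f
# 2  10        c         h
```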
|
1 | train_and_predict_model | def train_and_predict_model(input_features, output_features, data_csv, output_directory):
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "fc_size": 14},
"training": {"epochs": 2},
}
model = LudwigModel(config, backend=LocalTestBackend())
model.train(
dataset=data_csv,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
output_directory=output_directory,
)
model.predict(dataset=data_csv, output_directory=output_directory)
return model
| 4fb8f63181f5153b4f6778c6ef8dad61022c4f3f | 11 | test_server.py | 153 | Use tempfile to automatically garbage collect data and modeling artifacts in ludwig integration tests. (#1642)
* Use tmpdir to automatically garbage collect data and modeling artifacts in ludwig integration tests. | 859 | 0 | 123 | 95 | 33 | 5,863 | 36 | ludwig | 16 | tests/integration_tests/test_server.py | Python | 17 | {
"docstring": "Helper method to avoid code repetition for training a model and using it for prediction.\n\n :param input_features: input schema\n :param output_features: output schema\n :param data_csv: path to data\n :param output_directory: model output directory\n :return: None\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 35,
"vocab_size": 27
} | https://github.com/ludwig-ai/ludwig.git |
|
3 | calc_second_derivative | def calc_second_derivative(self, x):
if x < self.x[0]:
return None
elif x > self.x[-1]:
return None
i = self.__search_index(x)
dx = x - self.x[i]
ddy = 2.0 * self.c[i] + 6.0 * self.d[i] * dx
return ddy
| def289b723e9216830c2a7b2577cb31b55710167 | 11 | cubic_spline_planner.py | 115 | enhance cubic spline path doc (#698)
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cubic spline path doc
* enhance cubic spline path doc
* enhance cubic spline path doc
* enhance cubic spline path doc
* enhance cubic spline path doc
* enhance cubic spline path doc
* enhance cubic spline path doc
* enhance cubic spline path doc
* enhance cubic spline path doc
* enhance cubic spline path doc
* enhance cubic spline path doc
* enhance cubic spline path doc
* enhance cubic spline path doc
* enhance cubic spline path doc | 2,948 | 0 | 107 | 78 | 25 | 19,360 | 36 | PythonRobotics | 9 | PathPlanning/CubicSpline/cubic_spline_planner.py | Python | 9 | {
"docstring": "\n Calc second derivative at given x.\n\n if x is outside the input x, return None\n\n Returns\n -------\n ddy : float\n second derivative for given x.\n ",
"language": "en",
"n_whitespaces": 79,
"n_words": 25,
"vocab_size": 21
} | https://github.com/AtsushiSakai/PythonRobotics.git |
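The closed form above evaluates `ddy = 2*c_i + 6*d_i*dx` on segment `i`. A quick cross-check with SciPy's `CubicSpline`, assuming SciPy is available; natural boundary conditions are chosen here to roughly match the planner's convention, and the knots are made up:

```python
import numpy as np
from scipy.interpolate import CubicSpline

x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([0.0, 1.0, 0.0, 1.0])
cs = CubicSpline(x, y, bc_type="natural")

xq = 1.5
print(cs(xq, 2))             # second derivative at xq
print(cs.derivative(2)(xq))  # same value via an explicit derivative spline
```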
|
5 | remove_edge | def remove_edge(self, u, v, key=None):
try:
d = self._adj[u][v]
except KeyError as err:
raise NetworkXError(f"The edge {u}-{v} is not in the graph.") from err
# remove the edge with specified data
if key is None:
d.popitem()
else:
try:
del d[key]
except KeyError as err:
msg = f"The edge {u}-{v} with key {key} is not in the graph."
raise NetworkXError(msg) from err
if len(d) == 0:
# remove the key entries if last edge
del self._succ[u][v]
del self._pred[v][u]
| c8fdab5d87235cbf5c2142531087fadfa653887a | 14 | multidigraph.py | 184 | Update multigraph docstrings to reflect `remove_edges_from` behavior. (#5699)
* Update MG docstring to reflect rm_edges_from behavior.
Also adds example.
* Update remove_edge docstring in MG and MDG.
* Fix MDG examples. | 42,123 | 0 | 259 | 103 | 48 | 176,827 | 77 | networkx | 15 | networkx/classes/multidigraph.py | Python | 16 | {
"docstring": "Remove an edge between u and v.\n\n Parameters\n ----------\n u, v : nodes\n Remove an edge between nodes u and v.\n key : hashable identifier, optional (default=None)\n Used to distinguish multiple edges between a pair of nodes.\n If None, remove a single edge between u and v. If there are\n multiple edges, removes the last edge added in terms of\n insertion order.\n\n Raises\n ------\n NetworkXError\n If there is not an edge between u and v, or\n if there is no edge with the specified key.\n\n See Also\n --------\n remove_edges_from : remove a collection of edges\n\n Examples\n --------\n >>> G = nx.MultiDiGraph()\n >>> nx.add_path(G, [0, 1, 2, 3])\n >>> G.remove_edge(0, 1)\n >>> e = (1, 2)\n >>> G.remove_edge(*e) # unpacks e from an edge tuple\n\n For multiple edges\n\n >>> G = nx.MultiDiGraph()\n >>> G.add_edges_from([(1, 2), (1, 2), (1, 2)]) # key_list returned\n [0, 1, 2]\n\n When ``key=None`` (the default), edges are removed in the opposite\n order that they were added:\n\n >>> G.remove_edge(1, 2)\n >>> G.edges(keys=True)\n OutMultiEdgeView([(1, 2, 0), (1, 2, 1)])\n\n For edges with keys\n\n >>> G = nx.MultiDiGraph()\n >>> G.add_edge(1, 2, key=\"first\")\n 'first'\n >>> G.add_edge(1, 2, key=\"second\")\n 'second'\n >>> G.remove_edge(1, 2, key=\"first\")\n >>> G.edges(keys=True)\n OutMultiEdgeView([(1, 2, 'second')])\n\n ",
"language": "en",
"n_whitespaces": 528,
"n_words": 197,
"vocab_size": 109
} | https://github.com/networkx/networkx.git |