Dataset column schema (name, type, observed min/max):

    complexity       int64          1 .. 139
    fun_name         stringlengths  1 .. 80
    code             stringlengths  101 .. 62.2k
    commit_id        stringlengths  40 .. 40
    ast_errors       stringlengths  0 .. 3.11k
    ast_levels       int64          6 .. 36
    file_name        stringlengths  5 .. 79
    n_ast_nodes      int64          17 .. 19.2k
    commit_message   stringlengths  3 .. 15.3k
    d_id             int64          12 .. 121k
    n_ast_errors     int64          0 .. 9
    n_whitespaces    int64          4 .. 10.8k
    token_counts     int64          5 .. 3.06k
    vocab_size       int64          4 .. 1.11k
    id               int64          20 .. 338k
    n_words          int64          4 .. 4.82k
    repo             stringlengths  3 .. 22
    n_identifiers    int64          2 .. 176
    path             stringlengths  7 .. 134
    language         stringclasses  1 value
    nloc             int64          1 .. 413
    documentation    dict
    url              stringlengths  31 .. 59
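Each record below is one Python function together with its repository, file path, commit, docstring, and size/AST statistics. As a rough, hypothetical orientation only (the dataset's published id is not given here, so "<dataset-id>" is a placeholder; this assumes the Hugging Face `datasets` library), a row could be loaded and inspected along these lines:

# Hypothetical usage sketch; replace "<dataset-id>" with the real dataset id.
from datasets import load_dataset

ds = load_dataset("<dataset-id>", split="train")

row = ds[0]
print(row["fun_name"], row["repo"], row["path"])    # function name, source repo, file path
print(row["nloc"], row["complexity"])               # size and complexity metrics
print(row["code"])                                  # the function's Python source
print(row["documentation"]["docstring"])            # extracted docstring and its stats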
2
test_id_in_matches
def test_id_in_matches(req_type):
    response = graphql_query(
        (
            f'{req_type} {{'
        )
    )
    assert sorted(set(response['data']['docs'][0].keys())) == sorted(
        {'id', 'text', 'matches'}
    )
    assert len(response['data']['docs'][0]['matches']) == 2
    for match in response['data']['docs'][0]['matches']:
        assert set(match.keys()) == {'id'}


@pytest.mark.parametrize('req_type', ['mutation', 'query'])
bb8ab48d2008b6165a8d08cd9942c07e938f8dd5
@pytest.mark.parametrize('req_type', ['mutation', 'query'])
16
test_graphql.py
203
feat(gateway): add graphql support (#4403)
2,085
1
100
96
27
11,718
33
jina
12
tests/integration/graphql/test_graphql.py
Python
21
{ "docstring": "docs(data: {text: \"abcd\"}) { \n id \n text\n matches {\n id\n }\n } \n }\n ", "language": "en", "n_whitespaces": 115, "n_words": 12, "vocab_size": 8 }
https://github.com/jina-ai/jina.git
6
collect_typelib_data
def collect_typelib_data(self):
    datas = []
    binaries = []
    hiddenimports = []

    logger.debug("Collecting module data for %s %s", self.name, self.version)

    # Module unavailable
    if not self.available:
        raise ValueError(f"Module {self.name} {self.version} is unavailable!")

    # Find shared libraries
    for lib in self.sharedlibs:
        lib_path = findSystemLibrary(lib)
        if lib_path:
            logger.debug('Collecting shared library %s at %s', lib, lib_path)
            binaries.append((lib_path, '.'))

    # Find and collect .typelib file. Run it through the `gir_library_path_fix` to fix the library path, if
    # necessary.
    typelib_entry = gir_library_path_fix(self.typelib)
    if typelib_entry:
        logger.debug('Collecting gir typelib at %s', typelib_entry[0])
        datas.append(typelib_entry)

    # Overrides for the module
    hiddenimports += collect_submodules('gi.overrides', lambda name: name.endswith('.' + self.name))

    # Module dependencies
    for dep in self.dependencies:
        dep_module, _ = dep.rsplit('-', 1)
        hiddenimports += [f'gi.repository.{dep_module}']

    return binaries, datas, hiddenimports


# The old function, provided for backwards compatibility in 3rd party hooks.
684bfac8adcf254fec5777f212c13eb62181f900
13
gi.py
288
hooks: refactor GObject introspection (gi) hooks The modules imported from gi.repository are marked as runtime modules by their corresponding pre-safe-import-module hooks. Therefore, their standard hooks are always loaded and executed, regardless of whether the modue is actually importable or not. In PyInstaller v5, this behavior triggers errors in hooks for GI modules that are not importable, because the new `isolated` framework propagates the errors instead of swallowing them. While these errors could be caught and demoted to warnings to match the old behavior, it would be better hooks checked whether module is importable before doing any processing at all. To that end, we introduce new class, `GiModuleInfo` that, as part of its initialization, allows us to: - perform availability check - obtain data previously returned by `get_gi_typelibs` - obtain data previously returned by `get_gi_libdir` using a single isolated import attempt (instead of one being performed in each of those steps). In addition, if passed `hook_api` as an optional argument, the `GiModuleInfo` can use hook configuration API to override the GI module version to be collected (which allows the standard use pattern to be removed from the hook itself). The old `get_gi_typelibs` and `get_gi_libdir` functions now internally use `GiModuleInfo` to provide backward compatible behavior to (potential) exetnal user. All `gi` hooks are ported to the `GiModuleInfo` and now become no-op if the module is not available. In addition, hooks are cleaned up/refactored so that all processing is performed either in the loading stage ("simple" hooks that do not require access to hook configuration API) or in the `hook()` function (hooks that require access to hook configuration API), but not in the mixture of the two.
77,453
0
361
159
92
263,828
129
pyinstaller
26
PyInstaller/utils/hooks/gi.py
Python
21
{ "docstring": "\n Return a tuple of (binaries, datas, hiddenimports) to be used by PyGObject related hooks.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 14 }
https://github.com/pyinstaller/pyinstaller.git
1
_generate_pdfjs_script
def _generate_pdfjs_script(filename):
    url = QUrl('qute://pdfjs/file')
    url_query = QUrlQuery()
    url_query.addQueryItem('filename', filename)
    url.setQuery(url_query)
    js_url = javascript.to_js(
        url.toString(QUrl.ComponentFormattingOption.FullyEncoded))  # type: ignore[arg-type]
    return jinja.js_environment.from_string().render(url=js_url)
0877fb0d78635692e481c8bde224fac5ad0dd430
12
pdfjs.py
113
Run scripts/dev/rewrite_enums.py
117,534
0
49
64
18
321,106
20
qutebrowser
18
qutebrowser/browser/pdfjs.py
Python
22
{ "docstring": "Generate the script that shows the pdf with pdf.js.\n\n Args:\n filename: The name of the file to open.\n \n document.addEventListener(\"DOMContentLoaded\", function() {\n if (typeof window.PDFJS !== 'undefined') {\n // v1.x\n window.PDFJS.verbosity = window.PDFJS.VERBOSITY_LEVELS.info;\n } else {\n // v2.x\n const options = window.PDFViewerApplicationOptions;\n options.set('verbosity', pdfjsLib.VerbosityLevel.INFOS);\n }\n\n const viewer = window.PDFView || window.PDFViewerApplication;\n viewer.open({{ url }});\n });\n ", "language": "en", "n_whitespaces": 195, "n_words": 54, "vocab_size": 45 }
https://github.com/qutebrowser/qutebrowser.git
3
_auto_format_str
def _auto_format_str(fmt, value):
    try:
        lbl = fmt % value
        # when used in `Axes.bar_label()`` this doesn't always raise an error
        # when the {}-style formatting should be used instead of %-style
        if lbl == fmt:
            raise TypeError
        return lbl
    except (TypeError, ValueError):
        return fmt.format(value)
f57f30e14de5dfb59dfc22522a602084ba8c8081
10
__init__.py
64
Add cbook function for str formatting.
23,611
0
106
38
37
109,500
44
matplotlib
7
lib/matplotlib/cbook/__init__.py
Python
8
{ "docstring": "\n Apply *value* to the format string *fmt*.\n\n This works both with unnamed %-style formatting and\n unnamed {}-style formatting. %-style formatting has priority.\n If *fmt* is %-style formattable that will be used. Otherwise,\n {}-formatting is applied. Strings without formatting placeholders\n are passed through as is.\n\n Examples\n --------\n >>> _auto_format_str('%.2f m', 0.2)\n '0.20 m'\n >>> _auto_format_str('{} m', 0.2)\n '0.2 m'\n >>> _auto_format_str('const', 0.2)\n 'const'\n >>> _auto_format_str('%d or {}', 0.2)\n '0 or {}'\n ", "language": "en", "n_whitespaces": 122, "n_words": 70, "vocab_size": 55 }
https://github.com/matplotlib/matplotlib.git
2
get_destination
def get_destination(self, obj):
    if obj.destination_id is not None:
        serializer = get_serializer_for_model(obj.destination, prefix=NESTED_SERIALIZER_PREFIX)
        context = {'request': self.context['request']}
        return serializer(obj.destination, context=context).data
    return None
e07dd3ddcb2b8453d5a72ccb984e456297fb8296
12
serializers.py
89
Define NESTED_SERIALIZER_PREFIX constant
78,093
0
75
55
19
265,414
21
netbox
11
netbox/dcim/api/serializers.py
Python
6
{ "docstring": "\n Return the appropriate serializer for the destination, if any.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 8 }
https://github.com/netbox-community/netbox.git
2
exports
def exports(self):
    result = {}
    r = self.get_distinfo_resource(EXPORTS_FILENAME)
    if r:
        result = self.read_exports()
    return result
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
10
database.py
53
upd; format
12,760
0
61
30
11
61,936
15
transferlearning
7
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py
Python
6
{ "docstring": "\n Return the information exported by this distribution.\n :return: A dictionary of exports, mapping an export category to a dict\n of :class:`ExportEntry` instances describing the individual\n export entries, and keyed by name.\n ", "language": "en", "n_whitespaces": 85, "n_words": 31, "vocab_size": 27 }
https://github.com/jindongwang/transferlearning.git
2
text
def text(self) -> str:
    if self.error is None:
        return str(self)
    return self.error.databaseText()
ee4d6e0396a6b570f4d5592a9c4c1a9fee1027b6
9
sql.py
48
sql: Add *all* primary sqlite result codes For three reasons: - There are only 31 of them, and we don't really expect any more to turn up (last happened in 2013, and we have a test for it happening) - It makes for nicer debug output - It always felt strange to only have a small subset in the enum
117,945
0
44
28
11
321,852
12
qutebrowser
5
qutebrowser/misc/sql.py
Python
8
{ "docstring": "Get a short text description of the error.\n\n This is a string suitable to show to the user as error message.\n ", "language": "en", "n_whitespaces": 35, "n_words": 21, "vocab_size": 18 }
https://github.com/qutebrowser/qutebrowser.git
3
query
def query(self, query, **kwargs) -> Result:
    try:
        if self.db_conn:
            result = self.db_conn.aql.execute(query, **kwargs)
            return result
        else:
            raise AirflowException(
                f"Failed to execute AQLQuery, error connecting to database: {self.database}"
            )
    except AQLQueryExecuteError as error:
        raise AirflowException(f"Failed to execute AQLQuery, error: {str(error)}")
c758c76ac336c054fd17d4b878378aa893b7a979
15
arangodb.py
109
Adding ArangoDB Provider (#22548) * Adding ArangoDB Provider
8,978
0
172
56
31
46,739
39
airflow
13
airflow/providers/arangodb/hooks/arangodb.py
Python
18
{ "docstring": "\n Function to create a arangodb session\n and execute the AQL query in the session.\n\n :param query: AQL query\n :return: Result\n ", "language": "en", "n_whitespaces": 56, "n_words": 20, "vocab_size": 17 }
https://github.com/apache/airflow.git
1
nr_of_successful_entries
def nr_of_successful_entries(self) -> int:
    return len(self.select_filled_orders(self.enter_side))
bcfa73d492e3c150f0b909df58eb2c59ce6a15a6
10
models.py
35
Add "nr_of_successfull_entries"
34,357
0
20
20
6
149,011
6
freqtrade
6
freqtrade/persistence/models.py
Python
6
{ "docstring": "\n Helper function to count the number of entry orders that have been filled.\n :return: int count of entry orders that have been filled for this trade.\n ", "language": "en", "n_whitespaces": 48, "n_words": 26, "vocab_size": 19 }
https://github.com/freqtrade/freqtrade.git
8
argtopk
def argtopk(a_plus_idx, k, axis, keepdims):
    assert keepdims is True
    axis = axis[0]

    if isinstance(a_plus_idx, list):
        a_plus_idx = list(flatten(a_plus_idx))
        a = np.concatenate([ai for ai, _ in a_plus_idx], axis)
        idx = np.concatenate(
            [np.broadcast_to(idxi, ai.shape) for ai, idxi in a_plus_idx], axis
        )
    else:
        a, idx = a_plus_idx

    if abs(k) >= a.shape[axis]:
        return a_plus_idx

    idx2 = np.argpartition(a, -k, axis=axis)
    k_slice = slice(-k, None) if k > 0 else slice(-k)
    idx2 = idx2[tuple(k_slice if i == axis else slice(None) for i in range(a.ndim))]
    return np.take_along_axis(a, idx2, axis), np.take_along_axis(idx, idx2, axis)
20e924618999febeac706b20212104fe4f3ea61d
14
chunk.py
282
Type annotations, part 1 (#8295)
36,460
0
168
189
58
155,766
85
dask
27
dask/array/chunk.py
Python
17
{ "docstring": "Chunk and combine function of argtopk\n\n Extract the indices of the k largest elements from a on the given axis.\n If k is negative, extract the indices of the -k smallest elements instead.\n Note that, unlike in the parent function, the returned elements\n are not sorted internally.\n ", "language": "en", "n_whitespaces": 62, "n_words": 47, "vocab_size": 35 }
https://github.com/dask/dask.git
7
split_filename
def split_filename(filename, project_name=None):
    result = None
    pyver = None
    filename = unquote(filename).replace(' ', '-')
    m = PYTHON_VERSION.search(filename)
    if m:
        pyver = m.group(1)
        filename = filename[:m.start()]
    if project_name and len(filename) > len(project_name) + 1:
        m = re.match(re.escape(project_name) + r'\b', filename)
        if m:
            n = m.end()
            result = filename[:n], filename[n + 1:], pyver
    if result is None:
        m = PROJECT_NAME_AND_VERSION.match(filename)
        if m:
            result = m.group(1), m.group(3), pyver
    return result

# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
14
util.py
270
upd; format
12,890
0
212
154
54
62,169
84
transferlearning
21
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/util.py
Python
18
{ "docstring": "\n Extract name, version, python version from a filename (no extension)\n\n Return name, version, pyver or None\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 14 }
https://github.com/jindongwang/transferlearning.git
3
numpy_pad_and_concatenate
def numpy_pad_and_concatenate(array1, array2, padding_index=-100):
    array1 = atleast_1d(array1)
    array2 = atleast_1d(array2)

    if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]:
        return np.concatenate((array1, array2), axis=0)

    # Let's figure out the new shape
    new_shape = (array1.shape[0] + array2.shape[0], max(array1.shape[1], array2.shape[1])) + array1.shape[2:]

    # Now let's fill the result tensor
    result = np.full_like(array1, padding_index, shape=new_shape)
    result[: array1.shape[0], : array1.shape[1]] = array1
    result[array1.shape[0] :, : array2.shape[1]] = array2
    return result
47412c7d434f6ddfc02a9b7ecd6182b86ae0a164
12
trainer_pt_utils.py
242
Ensure tensors are at least 1d for pad and concat (#17179) * Ensure tensors are at least 1d for pad and concat * Compatibility * Fix * Fix * Add test * Retrigger CI * Consistency with master * Retrigger CI
6,885
0
104
162
49
37,910
64
transformers
14
src/transformers/trainer_pt_utils.py
Python
10
{ "docstring": "Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
https://github.com/huggingface/transformers.git
2
update
def update(self) -> None:
    begin = datetime.now()
    delta = timedelta(days=2)
    end = begin + delta
    try:
        df_predictions = self._station.get_data(
            begin_date=begin.strftime("%Y%m%d %H:%M"),
            end_date=end.strftime("%Y%m%d %H:%M"),
            product="predictions",
            datum="MLLW",
            interval="hilo",
            units=self._unit_system,
            time_zone=self._timezone,
        )
        self.data = df_predictions.head()
        _LOGGER.debug("Data = %s", self.data)
        _LOGGER.debug(
            "Recent Tide data queried with start time set to %s",
            begin.strftime("%m-%d-%Y %H:%M"),
        )
    except ValueError as err:
        _LOGGER.error("Check NOAA Tides and Currents: %s", err.args)
        self.data = None
420733a064286cfe6fc5cf11483835d15ff83462
14
sensor.py
226
Improve entity type hints [n] (#77824)
104,467
0
329
134
50
305,683
64
core
30
homeassistant/components/noaa_tides/sensor.py
Python
24
{ "docstring": "Get the latest data from NOAA Tides and Currents API.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/home-assistant/core.git
1
source_url
def source_url(self):
    return self.metadata.source_url

download_url = source_url   # Backward compatibility
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
7
database.py
28
upd; format
12,751
0
29
12
10
61,926
10
transferlearning
4
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py
Python
2
{ "docstring": "\n The source archive download URL for this distribution.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/jindongwang/transferlearning.git
1
pcap_service_start
def pcap_service_start(askadmin=True):
    # type: (bool) -> bool
    return _pcap_service_control('sc start', askadmin=askadmin)
a2b7a28faff1db058dd22ce097a268e0ad5d1d33
8
__init__.py
32
[Hinty] Core typing: windows (#3684) * Core typing: windows Co-authored-by: Pierre <[email protected]>
52,782
0
20
17
11
209,792
11
scapy
3
scapy/arch/windows/__init__.py
Python
2
{ "docstring": "Starts the pcap adapter. Will ask for admin. Returns True if success", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/secdev/scapy.git
3
create_systemd_cgroup_v1
def create_systemd_cgroup_v1(self) -> str:
    self.cgroup_path = f'/sys/fs/cgroup/systemd/ansible-test-{self.label}'

    # Privileged mode is required to create the cgroup directories on some hosts, such as Fedora 36 and RHEL 9.0.
    # The mkdir command will fail with "Permission denied" otherwise.
    options = ['--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw', '--privileged']
    cmd = ['sh', '-c', f'>&2 echo {shlex.quote(self.MARKER)} && mkdir {shlex.quote(self.cgroup_path)}']

    try:
        run_utility_container(self.args, f'ansible-test-cgroup-create-{self.label}', cmd, options)
    except SubprocessError as ex:
        if error := self.extract_error(ex.stderr):
            raise ControlGroupError(self.args, f'Unable to create a v1 cgroup within the systemd hierarchy.\n'
                                               f'Reason: {error}') from ex  # cgroup create permission denied

        raise

    return self.cgroup_path
cda16cc5e9aa8703fb4e1ac0a0be6b631d9076cc
15
host_profiles.py
193
ansible-test - Improve container management. (#78550) See changelogs/fragments/ansible-test-container-management.yml for details.
79,634
0
251
84
75
268,734
89
ansible
18
test/lib/ansible_test/_internal/host_profiles.py
Python
13
{ "docstring": "Create a unique ansible-test cgroup in the v1 systemd hierarchy and return its path.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
https://github.com/ansible/ansible.git
1
close
def close(self):
    # XXX: Should have a connect too?
    # def connect(self):
    #
ab4803984bce4a4de7cc10910e7310c4babf557e
6
base.py
18
Start to add base class defs
53,945
0
34
6
10
215,394
13
salt
2
salt/transport/base.py
Python
1
{ "docstring": "\n Close the connection.\n \n # Connect to the server / broker.\n # ", "language": "en", "n_whitespaces": 39, "n_words": 11, "vocab_size": 9 }
https://github.com/saltstack/salt.git
10
polar
def polar(a, side='right', *, method='qdwh', eps=None, max_iterations=None):
  r
  a = jnp.asarray(a)
  if a.ndim != 2:
    raise ValueError("The input `a` must be a 2-D array.")

  if side not in ["right", "left"]:
    raise ValueError("The argument `side` must be either 'right' or 'left'.")

  m, n = a.shape
  if method == "qdwh":
    # TODO(phawkins): return info also if the user opts in?
    if m >= n and side == "right":
      unitary, posdef, _, _ = qdwh.qdwh(a, is_hermitian=False, eps=eps)
    elif m < n and side == "left":
      a = a.T.conj()
      unitary, posdef, _, _ = qdwh.qdwh(a, is_hermitian=False, eps=eps)
      posdef = posdef.T.conj()
      unitary = unitary.T.conj()
    else:
      raise NotImplementedError("method='qdwh' only supports mxn matrices "
                                "where m < n where side='right' and m >= n "
                                f"side='left', got {a.shape} with side={side}")
  elif method == "svd":
    u_svd, s_svd, vh_svd = lax_linalg.svd(a, full_matrices=False)
    unitary = u_svd @ vh_svd
    if side == "right":
      # a = u * p
      posdef = (vh_svd.T.conj() * s_svd[None, :]) @ vh_svd
    else:
      # a = p * u
      posdef = (u_svd * s_svd[None, :]) @ (u_svd.T.conj())
  else:
    raise ValueError(f"Unknown polar decomposition method {method}.")

  return unitary, posdef
7ba36fc1784a7a286aa13ab7c098f84ff64336f1
17
linalg.py
439
Change implementation of jax.scipy.linalg.polar() and jax._src.scipy.eigh to use the QDWH decomposition from jax._src.lax.qdwh. Remove jax._src.lax.polar. PiperOrigin-RevId: 448241206
26,871
0
334
258
103
120,507
181
jax
27
jax/_src/scipy/linalg.py
Python
81
{ "docstring": "Computes the polar decomposition.\n\n Given the :math:`m \\times n` matrix :math:`a`, returns the factors of the polar\n decomposition :math:`u` (also :math:`m \\times n`) and :math:`p` such that\n :math:`a = up` (if side is ``\"right\"``; :math:`p` is :math:`n \\times n`) or\n :math:`a = pu` (if side is ``\"left\"``; :math:`p` is :math:`m \\times m`),\n where :math:`p` is positive semidefinite. If :math:`a` is nonsingular,\n :math:`p` is positive definite and the\n decomposition is unique. :math:`u` has orthonormal columns unless\n :math:`n > m`, in which case it has orthonormal rows.\n\n Writing the SVD of :math:`a` as\n :math:`a = u_\\mathit{svd} \\cdot s_\\mathit{svd} \\cdot v^h_\\mathit{svd}`, we\n have :math:`u = u_\\mathit{svd} \\cdot v^h_\\mathit{svd}`. Thus the unitary\n factor :math:`u` can be constructed as the application of the sign function to\n the singular values of :math:`a`; or, if :math:`a` is Hermitian, the\n eigenvalues.\n\n Several methods exist to compute the polar decomposition. Currently two\n are supported:\n\n * ``method=\"svd\"``:\n\n Computes the SVD of :math:`a` and then forms\n :math:`u = u_\\mathit{svd} \\cdot v^h_\\mathit{svd}`.\n\n * ``method=\"qdwh\"``:\n\n Applies the `QDWH`_ (QR-based Dynamically Weighted Halley) algorithm.\n\n Args:\n a: The :math:`m \\times n` input matrix.\n side: Determines whether a right or left polar decomposition is computed.\n If ``side`` is ``\"right\"`` then :math:`a = up`. If ``side`` is ``\"left\"``\n then :math:`a = pu`. The default is ``\"right\"``.\n method: Determines the algorithm used, as described above.\n precision: :class:`~jax.lax.Precision` object specifying the matmul precision.\n eps: The final result will satisfy\n :math:`\\left|x_k - x_{k-1}\\right| < \\left|x_k\\right| (4\\epsilon)^{\\frac{1}{3}}`,\n where :math:`x_k` are the QDWH iterates. Ignored if ``method`` is not\n ``\"qdwh\"``.\n max_iterations: Iterations will terminate after this many steps even if the\n above is unsatisfied. Ignored if ``method`` is not ``\"qdwh\"``.\n\n Returns:\n A ``(unitary, posdef)`` tuple, where ``unitary`` is the unitary factor\n (:math:`m \\times n`), and ``posdef`` is the positive-semidefinite factor.\n ``posdef`` is either :math:`n \\times n` or :math:`m \\times m` depending on\n whether ``side`` is ``\"right\"`` or ``\"left\"``, respectively.\n\n .. _QDWH: https://epubs.siam.org/doi/abs/10.1137/090774999\n ", "language": "en", "n_whitespaces": 403, "n_words": 310, "vocab_size": 178 }
https://github.com/google/jax.git
1
aggregate
def aggregate(self, batch_outs, batch_start=None, batch_end=None):
    raise NotImplementedError("Must be implemented in subclasses.")
84afc5193d38057e2e2badf9c889ea87d80d8fbf
8
training_utils_v1.py
35
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,886
0
25
21
11
271,877
11
keras
6
keras/engine/training_utils_v1.py
Python
2
{ "docstring": "Aggregates batch-level results into total results.\n\n Args:\n batch_outs: A list of batch-level outputs.\n batch_start: The start index of this batch. Always `None` if `use_steps`\n is `True`.\n batch_end: The end index of this batch. Always `None` if `use_steps` is\n `True`.\n ", "language": "en", "n_whitespaces": 102, "n_words": 39, "vocab_size": 26 }
https://github.com/keras-team/keras.git
3
get_default
def get_default():
    # Since Engine is imported in django.template and since
    # DjangoTemplates is a wrapper around this Engine class,
    # local imports are required to avoid import loops.
    from django.template import engines
    from django.template.backends.django import DjangoTemplates

    for engine in engines.all():
        if isinstance(engine, DjangoTemplates):
            return engine.engine
    raise ImproperlyConfigured("No DjangoTemplates backend is configured.")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
engine.py
79
Refs #33476 -- Reformatted code with Black.
51,461
0
134
47
40
206,274
52
django
10
django/template/engine.py
Python
7
{ "docstring": "\n Return the first DjangoTemplates backend that's configured, or raise\n ImproperlyConfigured if none are configured.\n\n This is required for preserving historical APIs that rely on a\n globally available, implicitly configured engine such as:\n\n >>> from django.template import Context, Template\n >>> template = Template(\"Hello {{ name }}!\")\n >>> context = Context({'name': \"world\"})\n >>> template.render(context)\n 'Hello world!'\n ", "language": "en", "n_whitespaces": 125, "n_words": 54, "vocab_size": 50 }
https://github.com/django/django.git
1
test_get_by_name_3
def test_get_by_name_3():
    # no duplicate
    ret_op_class = get_by_name("SelectPercentile", tpot_obj.operators)
    # add a copy of TPOTSelectPercentile into operator list
    tpot_obj.operators.append(TPOTSelectPercentile)
    assert_raises(ValueError, get_by_name, "SelectPercentile", tpot_obj.operators)
388616b6247ca4ea8de4e2f340d6206aee523541
9
export_tests.py
62
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,409
0
41
35
21
181,621
23
tpot
9
tests/export_tests.py
Python
4
{ "docstring": "Assert that get_by_name raises ValueError with duplicate operators in operator dictionary.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/EpistasisLab/tpot.git
1
to_dask_dataframe
def to_dask_dataframe(self, columns=None, index=None, meta=None):
    from dask.dataframe import from_dask_array

    return from_dask_array(self, columns=columns, index=index, meta=meta)
cccb9d8d8e33a891396b1275c2448c352ef40c27
8
core.py
60
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
36,513
0
35
41
14
156,035
14
dask
8
dask/array/core.py
Python
3
{ "docstring": "Convert dask Array to dask Dataframe\n\n Parameters\n ----------\n columns: list or string\n list of column names if DataFrame, single string if Series\n index : dask.dataframe.Index, optional\n An optional *dask* Index to use for the output Series or DataFrame.\n\n The default output index depends on whether the array has any unknown\n chunks. If there are any unknown chunks, the output has ``None``\n for all the divisions (one per chunk). If all the chunks are known,\n a default index with known divsions is created.\n\n Specifying ``index`` can be useful if you're conforming a Dask Array\n to an existing dask Series or DataFrame, and you would like the\n indices to match.\n meta : object, optional\n An optional `meta` parameter can be passed for dask\n to specify the concrete dataframe type to use for partitions of\n the Dask dataframe. By default, pandas DataFrame is used.\n\n See Also\n --------\n dask.dataframe.from_dask_array\n ", "language": "en", "n_whitespaces": 340, "n_words": 145, "vocab_size": 94 }
https://github.com/dask/dask.git
1
condition_score_with_grad
def condition_score_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
    alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)

    eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
    eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, p_mean_var, **model_kwargs)

    out = p_mean_var.copy()
    out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
    out["mean"], _, _ = self.q_posterior_mean_variance(x_start=out["pred_xstart"], x_t=x, t=t)
    return out
f4d6e64cdc132ae868699a0ba442f4ab1d304a14
12
gaussian_diffusion.py
187
add disco_diffusion_cnclip_vitb16 module
9,905
0
102
124
32
49,781
46
PaddleHub
21
modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/gaussian_diffusion.py
Python
8
{ "docstring": "\n Compute what the p_mean_variance output would have been, should the\n model's score function be conditioned by cond_fn.\n\n See condition_mean() for details on cond_fn.\n\n Unlike condition_mean(), this instead uses the conditioning strategy\n from Song et al (2020).\n ", "language": "en", "n_whitespaces": 79, "n_words": 36, "vocab_size": 33 }
https://github.com/PaddlePaddle/PaddleHub.git
3
get_db_prep_value
def get_db_prep_value(self, value, connection=None, prepared=False):
    if value is not None and not isinstance(value, PickledObject):
        # We call force_str here explicitly, so that the encoded string
        # isn't rejected by the postgresql_psycopg2 backend. Alternatively,
        # we could have just registered PickledObject with the psycopg
        # marshaller (telling it to store it like it would a string), but
        # since both of these methods result in the same value being stored,
        # doing things this way is much easier.
        value = force_str(dbsafe_encode(value, self.compress, self.protocol, self.copy))
    return value
0ad4aa9d9fe3beb3fd20aaedd961b3c2c800efb1
13
fields.py
85
ref(django-3.2): Vendor django picklefield (#35727) * ref(django-3.2): Vendor django picklefield Django-picklefield hasn't been updated in 2 years, but we need it. We also need to upgrade to django 3.2 which means we need to update picklefield. * Remove reference to django-picklefield * Fix the module name to use underscore * style(lint): Auto commit lint changes * Remove versioning code Co-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com>
18,774
0
183
53
70
91,654
85
sentry
12
src/django_picklefield/fields.py
Python
4
{ "docstring": "\n Pickle and b64encode the object, optionally compressing it.\n\n The pickling protocol is specified explicitly (by default 2),\n rather than as -1 or HIGHEST_PROTOCOL, because we don't want the\n protocol to change over time. If it did, ``exact`` and ``in``\n lookups would likely fail, since pickle would now be generating\n a different string.\n\n ", "language": "en", "n_whitespaces": 102, "n_words": 52, "vocab_size": 48 }
https://github.com/getsentry/sentry.git
1
test_08_sql_create_predictor
def test_08_sql_create_predictor(self):
    resp = self.sql_via_http('show predictors', RESPONSE_TYPE.TABLE)
    self.assertTrue(len(resp['data']) == 0)
    self.sql_via_http(, RESPONSE_TYPE.OK)
    status = self.await_predictor('p_test_1', timeout=120)
    self.assertTrue(status == 'complete')

    resp = self.sql_via_http(, RESPONSE_TYPE.TABLE)
    sqft_index = resp['column_names'].index('sqft')
    rental_price_index = resp['column_names'].index('rental_price')
    self.assertTrue(len(resp['data']) == 1)
    self.assertTrue(resp['data'][0][sqft_index] == 1000)
    self.assertTrue(resp['data'][0][rental_price_index] > 0)

    resp = self.sql_via_http(, RESPONSE_TYPE.TABLE)
    rental_price_index = resp['column_names'].index('rental_price')
    self.assertTrue(len(resp['data']) == 5)
    # FIXME rental price is str instead of float
    # for row in resp['data']:
    #     self.assertTrue(row[rental_price_index] > 0)
ce99adc96da2e6f98f722f9e3733af00204b26f3
12
test_http.py
313
http test
25,244
0
188
180
41
114,643
66
mindsdb
15
tests/integration_tests/flows/test_http.py
Python
23
{ "docstring": "\n create predictor p_test_1\n from files (select sqft, location, rental_price from test_file limit 30)\n predict rental_price\n \n select * from mindsdb.p_test_1 where sqft = 1000\n \n select * from files.small_test_file ta join mindsdb.p_test_1\n ", "language": "en", "n_whitespaces": 109, "n_words": 30, "vocab_size": 23 }
https://github.com/mindsdb/mindsdb.git
2
paired_manhattan_distances
def paired_manhattan_distances(X, Y):
    X, Y = check_paired_arrays(X, Y)
    diff = X - Y
    if issparse(diff):
        diff.data = np.abs(diff.data)
        return np.squeeze(np.array(diff.sum(axis=1)))
    else:
        return np.abs(diff).sum(axis=-1)
912a71788eaca2a6b87aced510781cb88b38f14c
14
pairwise.py
125
DOC Update `paired_manhattan_distances` and make it pass numpydoc validation (#23900) Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
76,281
0
59
77
19
260,485
23
scikit-learn
13
sklearn/metrics/pairwise.py
Python
8
{ "docstring": "Compute the paired L1 distances between X and Y.\n\n Distances are calculated between (X[0], Y[0]), (X[1], Y[1]), ...,\n (X[n_samples], Y[n_samples]).\n\n Read more in the :ref:`User Guide <metrics>`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n An array-like where each row is a sample and each column is a feature.\n\n Y : array-like of shape (n_samples, n_features)\n An array-like where each row is a sample and each column is a feature.\n\n Returns\n -------\n distances : ndarray of shape (n_samples,)\n L1 paired distances between the row vectors of `X`\n and the row vectors of `Y`.\n\n Examples\n --------\n >>> from sklearn.metrics.pairwise import paired_manhattan_distances\n >>> import numpy as np\n >>> X = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])\n >>> Y = np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]])\n >>> paired_manhattan_distances(X, Y)\n array([1., 2., 1.])\n ", "language": "en", "n_whitespaces": 221, "n_words": 136, "vocab_size": 75 }
https://github.com/scikit-learn/scikit-learn.git
3
get_path
def get_path(cls, file_path):
    if is_fsspec_url(file_path) or is_url(file_path):
        return file_path
    else:
        return os.path.abspath(file_path)
c5107e5be29089720528c6c0ec4f96bc2a6a1eb3
11
file_dispatcher.py
54
FEAT-#4766: Support fsspec URLs in `read_csv` and `read_csv_glob` (#4898) Signed-off-by: Karthik Velayutham <[email protected]>
35,943
0
55
32
11
154,350
12
modin
8
modin/core/io/file_dispatcher.py
Python
5
{ "docstring": "\n Process `file_path` in accordance to it's type.\n\n Parameters\n ----------\n file_path : str, os.PathLike[str] object or file-like object\n The file, or a path to the file. Paths to S3 buckets are also\n acceptable.\n\n Returns\n -------\n str\n Updated or verified `file_path` parameter.\n\n Notes\n -----\n if `file_path` is a URL, parameter will be returned as is, otherwise\n absolute path will be returned.\n ", "language": "en", "n_whitespaces": 177, "n_words": 59, "vocab_size": 48 }
https://github.com/modin-project/modin.git
5
broken_barh
def broken_barh(self, xranges, yrange, **kwargs):
    # process the unit information
    if len(xranges):
        xdata = cbook._safe_first_non_none(xranges)
    else:
        xdata = None
    if len(yrange):
        ydata = cbook._safe_first_non_none(yrange)
    else:
        ydata = None
    self._process_unit_info(
        [("x", xdata), ("y", ydata)], kwargs, convert=False)
    xranges_conv = []
    for xr in xranges:
        if len(xr) != 2:
            raise ValueError('each range in xrange must be a sequence '
                             'with two elements (i.e. an Nx2 array)')
        # convert the absolute values, not the x and dx...
        x_conv = np.asarray(self.convert_xunits(xr[0]))
        x1 = self._convert_dx(xr[1], xr[0], x_conv, self.convert_xunits)
        xranges_conv.append((x_conv, x1))

    yrange_conv = self.convert_yunits(yrange)

    col = mcoll.BrokenBarHCollection(xranges_conv, yrange_conv, **kwargs)
    self.add_collection(col, autolim=True)
    self._request_autoscale_view()

    return col
a8c01a42c5bbe96fa6c536c72e6c26954c798908
13
_axes.py
293
BUG: modified safe_first_element and added tests Co-authored-by: Thomas A Caswell <[email protected]>
23,482
0
352
184
78
109,223
97
matplotlib
30
lib/matplotlib/axes/_axes.py
Python
24
{ "docstring": "\n Plot a horizontal sequence of rectangles.\n\n A rectangle is drawn for each element of *xranges*. All rectangles\n have the same vertical position and size defined by *yrange*.\n\n This is a convenience function for instantiating a\n `.BrokenBarHCollection`, adding it to the Axes and autoscaling the\n view.\n\n Parameters\n ----------\n xranges : sequence of tuples (*xmin*, *xwidth*)\n The x-positions and extends of the rectangles. For each tuple\n (*xmin*, *xwidth*) a rectangle is drawn from *xmin* to *xmin* +\n *xwidth*.\n yrange : (*ymin*, *yheight*)\n The y-position and extend for all the rectangles.\n\n Returns\n -------\n `~.collections.BrokenBarHCollection`\n\n Other Parameters\n ----------------\n data : indexable object, optional\n DATA_PARAMETER_PLACEHOLDER\n **kwargs : `.BrokenBarHCollection` properties\n\n Each *kwarg* can be either a single argument applying to all\n rectangles, e.g.::\n\n facecolors='black'\n\n or a sequence of arguments over which is cycled, e.g.::\n\n facecolors=('black', 'blue')\n\n would create interleaving black and blue rectangles.\n\n Supported keywords:\n\n %(BrokenBarHCollection:kwdoc)s\n ", "language": "en", "n_whitespaces": 418, "n_words": 140, "vocab_size": 98 }
https://github.com/matplotlib/matplotlib.git
1
test_custom_user_pk_not_named_id
def test_custom_user_pk_not_named_id(self):
    context = Context({"user": CustomIdUser()})
    template = Template(
        "{% load log %}{% get_admin_log 10 as admin_log for_user user %}"
    )
    # This template tag just logs.
    self.assertEqual(template.render(context), "")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
tests.py
70
Refs #33476 -- Reformatted code with Black.
51,831
0
82
37
27
207,004
29
django
9
tests/admin_changelist/tests.py
Python
6
{ "docstring": "\n {% get_admin_log %} works if the user model's primary key isn't named\n 'id'.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
https://github.com/django/django.git
5
dist_is_in_project
def dist_is_in_project(self, dist):
    # type: (pkg_resources.Distribution) -> bool
    from .environments import normalize_pipfile_path as _normalized

    prefixes = [
        _normalized(prefix)
        for prefix in self.base_paths["libdirs"].split(os.pathsep)
        if _normalized(prefix).startswith(_normalized(self.prefix.as_posix()))
    ]
    location = self.locate_dist(dist)
    if not location:
        return False
    location = _normalized(make_posix(location))
    return any(location.startswith(prefix) for prefix in prefixes)
9a3b3ce70621af6f9adaa9eeac9cf83fa149319c
15
environment.py
157
Issue 4993 Add standard pre commit hooks and apply linting. (#4994) * Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else.
3,056
0
149
95
34
19,673
42
pipenv
18
pipenv/environment.py
Python
12
{ "docstring": "Determine whether the supplied distribution is in the environment.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
https://github.com/pypa/pipenv.git
9
test_rate_limited
def test_rate_limited(indexer):
    if isinstance(indexer, RawSimpleIndexer):
        pytest.skip("mock indexer does not support rate limiting")

    org_strings = {1: {"a", "b", "c"}, 2: {"e", "f"}, 3: {"g"}}

    with override_options(
        {
            "sentry-metrics.writes-limiter.limits.releasehealth.per-org": [
                {"window_seconds": 10, "granularity_seconds": 10, "limit": 1}
            ],
        }
    ):
        results = indexer.bulk_record(use_case_id=use_case_id, org_strings=org_strings)

    assert len(results[1]) == 3
    assert len(results[2]) == 2
    assert len(results[3]) == 1
    assert results[3]["g"] is not None

    rate_limited_strings = set()
    for org_id in 1, 2, 3:
        for k, v in results[org_id].items():
            if v is None:
                rate_limited_strings.add((org_id, k))

    assert len(rate_limited_strings) == 3
    assert (3, "g") not in rate_limited_strings

    for org_id, string in rate_limited_strings:
        assert results.get_fetch_metadata()[org_id][string] == Metadata(
            id=None,
            fetch_type=FetchType.RATE_LIMITED,
            fetch_type_ext=FetchTypeExt(is_global=False),
        )

    org_strings = {1: {"x", "y", "z"}}

    # attempt to index even more strings, and assert that we can't get any indexed
    with override_options(
        {
            "sentry-metrics.writes-limiter.limits.releasehealth.per-org": [
                {"window_seconds": 10, "granularity_seconds": 10, "limit": 1}
            ],
        }
    ):
        results = indexer.bulk_record(use_case_id=use_case_id, org_strings=org_strings)

    assert results[1] == {"x": None, "y": None, "z": None}

    for letter in "xyz":
        assert results.get_fetch_metadata()[1][letter] == Metadata(
            id=None,
            fetch_type=FetchType.RATE_LIMITED,
            fetch_type_ext=FetchTypeExt(is_global=False),
        )

    org_strings = {1: rate_limited_strings}

    # assert that if we reconfigure limits, the quota resets
    with override_options(
        {
            "sentry-metrics.writes-limiter.limits.releasehealth.global": [
                {"window_seconds": 10, "granularity_seconds": 10, "limit": 2}
            ],
        }
    ):
        results = indexer.bulk_record(use_case_id=use_case_id, org_strings=org_strings)

    rate_limited_strings2 = set()
    for k, v in results[1].items():
        if v is None:
            rate_limited_strings2.add(k)

    assert len(rate_limited_strings2) == 1
    assert len(rate_limited_strings - rate_limited_strings2) == 2
7bbb85a0d95d23620228a02bb4401fc09658f5f1
15
test_all_indexers.py
701
ref(metrics): Split caching out of indexers, random test refactoring [sns-1606] (#37714)
19,098
0
626
431
116
94,501
216
sentry
31
tests/sentry/sentry_metrics/test_all_indexers.py
Python
60
{ "docstring": "\n Assert that rate limits per-org and globally are applied at all.\n\n Since we don't have control over ordering in sets/dicts, we have no\n control over which string gets rate-limited. That makes assertions\n quite awkward and imprecise.\n ", "language": "en", "n_whitespaces": 52, "n_words": 36, "vocab_size": 31 }
https://github.com/getsentry/sentry.git
2
key_aliases
def key_aliases(self) -> Iterable[str]:
    for alias in _get_key_aliases(self.key):
        yield _normalize_key(alias)
bd3a723d86f9c550b0324153975580b70509cb22
10
events.py
44
Move aliasing/normalisation logic into Key
44,978
0
35
26
10
185,332
10
textual
8
src/textual/events.py
Python
4
{ "docstring": "Get the aliases for the key, including the key itself", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 8 }
https://github.com/Textualize/textual.git
3
get_primary_key_column
def get_primary_key_column(self, cursor, table_name):
    for constraint in self.get_constraints(cursor, table_name).values():
        if constraint["primary_key"]:
            return constraint["columns"][0]
    return None
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
introspection.py
68
Refs #33476 -- Reformatted code with Black.
50,933
0
62
42
14
204,855
15
django
7
django/db/backends/base/introspection.py
Python
5
{ "docstring": "\n Return the name of the primary key column for the given table.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 10 }
https://github.com/django/django.git
2
cosine_similarity
def cosine_similarity(X, Y=None, dense_output=True):
    # to avoid recursive import
    X, Y = check_pairwise_arrays(X, Y)

    X_normalized = normalize(X, copy=True)
    if X is Y:
        Y_normalized = X_normalized
    else:
        Y_normalized = normalize(Y, copy=True)

    K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)

    return K
60c5d320e1478514e6bbe31fd0347692591cca9e
12
pairwise.py
110
DOC Ensure `cosine_similarity` passes numpydoc validation (#23947)
76,294
0
75
70
29
260,499
37
scikit-learn
12
sklearn/metrics/pairwise.py
Python
9
{ "docstring": "Compute cosine similarity between samples in X and Y.\n\n Cosine similarity, or the cosine kernel, computes similarity as the\n normalized dot product of X and Y:\n\n K(X, Y) = <X, Y> / (||X||*||Y||)\n\n On L2-normalized data, this function is equivalent to linear_kernel.\n\n Read more in the :ref:`User Guide <cosine_similarity>`.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples_X, n_features)\n Input data.\n\n Y : {ndarray, sparse matrix} of shape (n_samples_Y, n_features), \\\n default=None\n Input data. If ``None``, the output will be the pairwise\n similarities between all samples in ``X``.\n\n dense_output : bool, default=True\n Whether to return dense output even when the input is sparse. If\n ``False``, the output is sparse if both input arrays are sparse.\n\n .. versionadded:: 0.17\n parameter ``dense_output`` for dense output.\n\n Returns\n -------\n kernel matrix : ndarray of shape (n_samples_X, n_samples_Y)\n Returns the cosine similarity between samples in X and Y.\n ", "language": "en", "n_whitespaces": 260, "n_words": 144, "vocab_size": 95 }
https://github.com/scikit-learn/scikit-learn.git
2
_is_env_truthy
def _is_env_truthy(name):
    if name not in os.environ:
        return False
    return os.environ.get(name).lower() not in _false_values
9a3b3ce70621af6f9adaa9eeac9cf83fa149319c
11
environments.py
53
Issue 4993 Add standard pre commit hooks and apply linting. (#4994) * Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else.
3,067
0
30
32
11
19,689
14
pipenv
7
pipenv/environments.py
Python
4
{ "docstring": "An environment variable is truthy if it exists and isn't one of (0, false, no, off)", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 16 }
https://github.com/pypa/pipenv.git
4
min_weight_matching
def min_weight_matching(G, maxcardinality=False, weight="weight"):
    if len(G.edges) == 0:
        return max_weight_matching(G, maxcardinality, weight)
    G_edges = G.edges(data=weight, default=1)
    min_weight = min(w for _, _, w in G_edges)
    InvG = nx.Graph()
    edges = ((u, v, 1 / (1 + w - min_weight)) for u, v, w in G_edges)
    InvG.add_weighted_edges_from(edges, weight=weight)
    return max_weight_matching(InvG, maxcardinality, weight)


@not_implemented_for("multigraph")
@not_implemented_for("directed")
28b3014d68d2b4e40d3e02219770296a827bd55c
@not_implemented_for("multigraph") @not_implemented_for("directed")
12
matching.py
191
Update matching functions for error validation and speed (#4897) * First steps to update matching functions for #4644 Expand tests Change API to raise NetworkXError when matching involves nodes not in G Update is_*_matching to 100+ times faster. * improve matching_dict_to_set and docs for min_weight_matching * fix sphinx error
41,855
1
82
114
40
176,369
53
networkx
21
networkx/algorithms/matching.py
Python
9
{ "docstring": "Computing a minimum-weight maximal matching of G.\n\n Use reciprocal edge weights with the maximum-weight algorithm.\n\n A matching is a subset of edges in which no node occurs more than once.\n The weight of a matching is the sum of the weights of its edges.\n A maximal matching cannot add more edges and still be a matching.\n The cardinality of a matching is the number of matched edges.\n\n This method replaces the weights with their reciprocal and\n then runs :func:`max_weight_matching`.\n Read the documentation of max_weight_matching for more information.\n\n Parameters\n ----------\n G : NetworkX graph\n Undirected graph\n\n maxcardinality: bool, optional (default=False)\n If maxcardinality is True, compute the maximum-cardinality matching\n with minimum weight among all maximum-cardinality matchings.\n\n weight: string, optional (default='weight')\n Edge data key corresponding to the edge weight.\n If key not found, uses 1 as weight.\n\n Returns\n -------\n matching : set\n A minimal weight matching of the graph.\n ", "language": "en", "n_whitespaces": 233, "n_words": 146, "vocab_size": 92 }
https://github.com/networkx/networkx.git
6
is_datetime64_ns_dtype
def is_datetime64_ns_dtype(arr_or_dtype) -> bool:
    if arr_or_dtype is None:
        return False
    try:
        tipo = get_dtype(arr_or_dtype)
    except TypeError:
        if is_datetime64tz_dtype(arr_or_dtype):
            tipo = get_dtype(arr_or_dtype.dtype)
        else:
            return False
    return tipo == DT64NS_DTYPE or (
        isinstance(tipo, DatetimeTZDtype) and tipo._unit == "ns"
    )
67e8c4c3761ab1da4b0a341a472c0fe2ea393e8b
14
common.py
106
ENH: DTI/DTA.astype support non-nano (#47579) * ENH: DTI/DTA.astype support non-nano * whatsnew * GH ref * pyright fixup
40,074
0
112
63
29
167,667
37
pandas
12
pandas/core/dtypes/common.py
Python
47
{ "docstring": "\n Check whether the provided array or dtype is of the datetime64[ns] dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n bool\n Whether or not the array or dtype is of the datetime64[ns] dtype.\n\n Examples\n --------\n >>> is_datetime64_ns_dtype(str)\n False\n >>> is_datetime64_ns_dtype(int)\n False\n >>> is_datetime64_ns_dtype(np.datetime64) # no unit\n False\n >>> is_datetime64_ns_dtype(DatetimeTZDtype(\"ns\", \"US/Eastern\"))\n True\n >>> is_datetime64_ns_dtype(np.array(['a', 'b']))\n False\n >>> is_datetime64_ns_dtype(np.array([1, 2]))\n False\n >>> is_datetime64_ns_dtype(np.array([], dtype=\"datetime64\")) # no unit\n False\n >>> is_datetime64_ns_dtype(np.array([], dtype=\"datetime64[ps]\")) # wrong unit\n False\n >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype=\"datetime64[ns]\"))\n True\n ", "language": "en", "n_whitespaces": 188, "n_words": 86, "vocab_size": 49 }
https://github.com/pandas-dev/pandas.git
1
raise_on_deprecated
def raise_on_deprecated():
    with warnings.catch_warnings():
        warnings.filterwarnings('error', '.*', DeprecationWarning, module='sympy.*')
        yield
6d2bbf80752549276a968fd4af78231c569d55c5
11
runtests.py
54
runtests.py: Undo auto-formatting, re-add changes to blacklist for scipy, numpy
49,585
0
29
27
9
200,290
9
sympy
6
sympy/testing/runtests.py
Python
4
{ "docstring": "Context manager to make DeprecationWarning raise an error\n\n This is to catch SymPyDeprecationWarning from library code while running\n tests and doctests. It is important to use this context manager around\n each individual test/doctest in case some tests modify the warning\n filters.\n ", "language": "en", "n_whitespaces": 56, "n_words": 41, "vocab_size": 36 }
https://github.com/sympy/sympy.git
5
_update_mean_variance
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
    if X.shape[0] == 0:
        return mu, var

    # Compute (potentially weighted) mean and variance of new datapoints
    if sample_weight is not None:
        n_new = float(sample_weight.sum())
        if np.isclose(n_new, 0.0):
            return mu, var
        new_mu = np.average(X, axis=0, weights=sample_weight)
        new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight)
    else:
        n_new = X.shape[0]
        new_var = np.var(X, axis=0)
        new_mu = np.mean(X, axis=0)

    if n_past == 0:
        return new_mu, new_var

    n_total = float(n_past + n_new)

    # Combine mean of old and new data, taking into consideration
    # (weighted) number of observations
    total_mu = (n_new * new_mu + n_past * mu) / n_total

    # Combine variance of old and new data, taking into consideration
    # (weighted) number of observations. This is achieved by combining
    # the sum-of-squared-differences (ssd)
    old_ssd = n_past * var
    new_ssd = n_new * new_var
    total_ssd = old_ssd + new_ssd + (n_new * n_past / n_total) * (mu - new_mu) ** 2
    total_var = total_ssd / n_total

    return total_mu, total_var
2cce02414d4a7161f0d105450c196d94b1182220
13
naive_bayes.py
314
TST Add common tests for single class fitting induced by sample weights (#24140) Co-authored-by: johayon <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
77,021
0
402
204
81
261,829
162
scikit-learn
24
sklearn/naive_bayes.py
Python
22
{ "docstring": "Compute online update of Gaussian mean and variance.\n\n Given starting sample count, mean, and variance, a new set of\n points X, and optionally sample weights, return the updated mean and\n variance. (NB - each dimension (column) in X is treated as independent\n -- you get variance, not covariance).\n\n Can take scalar mean and variance, or vector mean and variance to\n simultaneously update a number of independent Gaussians.\n\n See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:\n\n http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf\n\n Parameters\n ----------\n n_past : int\n Number of samples represented in old mean and variance. If sample\n weights were given, this should contain the sum of sample\n weights represented in old mean and variance.\n\n mu : array-like of shape (number of Gaussians,)\n Means for Gaussians in original set.\n\n var : array-like of shape (number of Gaussians,)\n Variances for Gaussians in original set.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Weights applied to individual samples (1. for unweighted).\n\n Returns\n -------\n total_mu : array-like of shape (number of Gaussians,)\n Updated mean for each Gaussian over the combined set.\n\n total_var : array-like of shape (number of Gaussians,)\n Updated variance for each Gaussian over the combined set.\n ", "language": "en", "n_whitespaces": 412, "n_words": 191, "vocab_size": 105 }
https://github.com/scikit-learn/scikit-learn.git
6
serving_output
def serving_output(self, output):
    pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None
    hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
    attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
    cross_attns = (
        tf.convert_to_tensor(output.cross_attentions)
        if self.config.output_attentions and self.config.add_cross_attention
        else None
    )

    return TFBaseModelOutputWithPastAndCrossAttentions(
        last_hidden_state=output.hidden_states,
        past_key_values=pkv,
        hidden_states=hs,
        attentions=attns,
        cross_attentions=cross_attns,
    )


@add_start_docstrings(
    ,
    XGLM_START_DOCSTRING,
)
c72d7d91bf4899760725793421eff9da640c8527
@add_start_docstrings( """ The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, XGLM_START_DOCSTRING, )
11
modeling_tf_xglm.py
180
Add TF implementation of `XGLMModel` (#16543) * Add TFXGLM models * Add todo: self.supports_xla_generation = False Co-authored-by: Daniel Stancl <[email protected]> Co-authored-by: Daniel Stancl <[email protected]> Co-authored-by: Joao Gante <[email protected]> Co-authored-by: Daniel <[email protected]> Co-authored-by: Patrick von Platen <[email protected]>
6,060
1
187
113
32
33,107
47
transformers
22
src/transformers/models/xglm/modeling_tf_xglm.py
Python
16
{ "docstring": "\n The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ", "language": "en", "n_whitespaces": 30, "n_words": 20, "vocab_size": 19 }
https://github.com/huggingface/transformers.git
1
get_period_wise_query
def get_period_wise_query(bet_dates, trans_date, query_details):
    query_details += % {
        "trans_date": trans_date,
        "sd": bet_dates[0],
        "ed": bet_dates[1],
    }
    return query_details


@frappe.whitelist(allow_guest=True)
494bd9ef78313436f0424b918f200dab8fc7c20b
@frappe.whitelist(allow_guest=True)
10
trends.py
72
style: format code with black
13,987
1
11
35
16
65,688
18
erpnext
7
erpnext/controllers/trends.py
Python
9
{ "docstring": "SUM(IF(t1.%(trans_date)s BETWEEN '%(sd)s' AND '%(ed)s', t2.stock_qty, NULL)),\n\t\t\t\t\tSUM(IF(t1.%(trans_date)s BETWEEN '%(sd)s' AND '%(ed)s', t2.base_net_amount, NULL)),\n\t\t\t\t", "language": "en", "n_whitespaces": 12, "n_words": 14, "vocab_size": 8 }
https://github.com/frappe/erpnext.git
2
DeveloperAPI
def DeveloperAPI(obj):
    if not obj.__doc__:
        obj.__doc__ = ""
    obj.__doc__ += "\n DeveloperAPI: This API may change across minor Ray releases."
    return obj
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
9
annotations.py
47
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,859
0
44
24
21
132,900
22
ray
3
python/ray/util/annotations.py
Python
5
{ "docstring": "Annotation for documenting developer APIs.\n\n Developer APIs are lower-level methods explicitly exposed to advanced Ray\n users and library developers. Their interfaces may change across minor\n Ray releases.\n\n Examples:\n >>> @DeveloperAPI\n >>> def func(x):\n >>> return x\n ", "language": "en", "n_whitespaces": 76, "n_words": 36, "vocab_size": 33 }
https://github.com/ray-project/ray.git
2
_set_gradient_checkpointing
def _set_gradient_checkpointing(self, module, value=False):
    if isinstance(module, Data2VecVisionEncoder):
        module.gradient_checkpointing = value


DATA2VEC_VISION_START_DOCSTRING = r

DATA2VEC_VISION_INPUTS_DOCSTRING = r


@add_start_docstrings(
    "The bare Data2VecVision Model transformer outputting raw hidden-states without any specific head on top.",
    DATA2VEC_VISION_START_DOCSTRING,
)
# Copied from transformers.models.beit.modeling_beit.BeitModel with BEIT->DATA2VEC_VISION,Beit->Data2VecVision,True->False
8d3f952adb8c98cec2ea1f59bb7acfbc08232381
@add_start_docstrings( "The bare Data2VecVision Model transformer outputting raw hidden-states without any specific head on top.", DATA2VEC_VISION_START_DOCSTRING, )
9
modeling_data2vec_vision.py
65
[Data2Vec] Add data2vec vision (#16760) * save intermediate * add vision * add vision * save * finish models * finish models * continue * finish * up * up * up * tests all pass * clean up * up * up * fix bugs in beit * correct docs * finish * finish docs * make style * up * more fixes * fix type hint * make style * Apply suggestions from code review Co-authored-by: NielsRogge <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]> * Update tests/data2vec/test_modeling_data2vec_vision.py Co-authored-by: NielsRogge <[email protected]> * fix test Co-authored-by: NielsRogge <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]>
6,754
1
57
24
36
37,208
39
transformers
10
src/transformers/models/data2vec/modeling_data2vec_vision.py
Python
3
{ "docstring": "\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`Data2VecVisionConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`BeitFeatureExtractor`]. See\n [`BeitFeatureExtractor.__call__`] for details.\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n", "language": "en", "n_whitespaces": 379, "n_words": 190, "vocab_size": 113 }
https://github.com/huggingface/transformers.git
1
truncate
def truncate(self, size=None): # type: (Optional[int]) -> int return self._file.truncate(size)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
8
lazy_wheel.py
33
upd; format
12,321
0
31
19
10
60,887
10
transferlearning
4
.venv/lib/python3.8/site-packages/pip/_internal/network/lazy_wheel.py
Python
2
{ "docstring": "Resize the stream to the given size in bytes.\n\n If size is unspecified resize to the current position.\n The current stream position isn't changed.\n\n Return the new file size.\n ", "language": "en", "n_whitespaces": 57, "n_words": 29, "vocab_size": 22 }
https://github.com/jindongwang/transferlearning.git
1
test_crash_rate_alert_for_sessions_with_critical_warning_then_resolved
def test_crash_rate_alert_for_sessions_with_critical_warning_then_resolved(self): rule = self.crash_rate_alert_rule trigger = self.crash_rate_alert_critical_trigger trigger_warning = self.crash_rate_alert_warning_trigger action_critical = self.crash_rate_alert_critical_action action_warning = self.crash_rate_alert_warning_action # Send Critical Update update_value = (1 - trigger.alert_threshold / 100) + 0.05 self.send_crash_rate_alert_update( rule=rule, value=update_value, time_delta=timedelta(minutes=-10), subscription=rule.snuba_query.subscriptions.filter(project=self.project).get(), ) incident = self.assert_active_incident(rule) self.assert_actions_fired_for_incident( incident, [action_critical], [(75.0, IncidentStatus.CRITICAL)] ) self.assert_trigger_exists_with_status(incident, trigger, TriggerStatus.ACTIVE) # Send Warning Update update_value = (1 - trigger_warning.alert_threshold / 100) + 0.05 self.send_crash_rate_alert_update( rule=rule, value=update_value, time_delta=timedelta(minutes=-3), subscription=rule.snuba_query.subscriptions.filter(project=self.project).get(), ) incident = self.assert_active_incident(rule) self.assert_actions_resolved_for_incident( incident, [action_critical], [(85.0, IncidentStatus.WARNING)] ) self.assert_trigger_exists_with_status(incident, trigger_warning, TriggerStatus.ACTIVE) # Send update higher than warning threshold update_value = (1 - trigger_warning.alert_threshold / 100) - 0.05 self.send_crash_rate_alert_update( rule=rule, value=update_value, time_delta=timedelta(minutes=-1), subscription=rule.snuba_query.subscriptions.filter(project=self.project).get(), ) self.assert_actions_resolved_for_incident( incident, [action_warning], [(95.0, IncidentStatus.CLOSED)] ) self.assert_no_active_incident(rule)
146fba432a32568be7d0b884dae0c39a6c33a11f
14
test_subscription_processor.py
468
fix(metric_alerts): Make sure critical triggers resolve properly when no action is set on a warning trigger (#31883) ### Problem If we have an alert set up like: - Warning: 50. Action: None - Critical: 100. Action: Slack Then if we go from critical -> warning state the slack resolve action will fail to fire. ### Cause The reason this happens is related to a previous fix. For an alert like - Warning: 50. Action: Slack - Critical: 100. Action: Slack When going from critical -> warning the critical action would be marked as resolved. This would cause a slack notification with `Resolved` to be sent to the channel. This is misleading, because the alert is still active, just in the warning state. What we want here is to fire a warning notification instead. The initial fix for this was that when we resolved a critical trigger, we’d check and see whether there was an active warning trigger. If so, we’d send a warning trigger fire to our actions, rather than a critical trigger resolve. This works ok for many cases, but fails when the actions on the warning trigger are different to those on the critical trigger. ### Fix Substituting the warning trigger for the critical trigger causes us subtle bugs. So, instead of this, when triggering fires/resolves on our action handlers we will also pass along the incident state change that the trigger/resolve caused the incident to go into. So if a critical trigger resolves, we check what state it would have put the incident in. If there’s a warning trigger, then the state is warning. If no warning trigger, the state is closed. This state is then used to appropriately generate the messages that we send to users via our various actions. So now, If we have an alert set up like: - Warning: 50. Action: None - Critical: 100. Action: Slack If this goes from - critical -> warning OR critical -> resolved we will send `IncidentStatus.WARNING` to any actions related to the critical trigger. - warning -> resolved We do nothing since there are no actions on the warning trigger If we have an alert set up like: - Warning: 50. Action: Slack - Critical: 100. Action: Slack If this goes from: - critical -> warning: critical trigger, `IncidentStatus.Warning` - warning -> resolved: warning trigger, `IncidentStatus.Closed` - critical -> resolved: Since we de-dupe triggers to avoid spamming the user, we will select the warning trigger here, and send `IncidentStatus.closed` If we have an alert set up like: - Warning: 50. Action: Slack - Critical: 100. Action: Pagerduty If this goes from: - critical -> warning: critical trigger, `IncidentStatus.Warning` sent to Pagerduty. Nothing sent to Slack - warning -> resolved: warning trigger, `IncidentStatus.Closed` sent to Slack. Nothing sent to Pagerduty - critical -> resolved: Critical trigger, `IncidentStatus.Warning` sent to Pagerduty. Warning trigger, `IncidentStatus.Closed` sent to Slack. We don’t de-dupe here since the actions are different.
19,312
0
476
320
58
96,433
108
sentry
37
tests/sentry/incidents/test_subscription_processor.py
Python
41
{ "docstring": "\n Test that tests the behavior of going from Critical status to Warning status to Resolved\n for Crash Rate Alerts\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 17 }
https://github.com/getsentry/sentry.git
5
test_dangling_matrix
def test_dangling_matrix(self): G = self.G dangling = self.dangling_edges dangling_sum = sum(dangling.values()) M1 = nx.google_matrix(G, personalization=dangling) M2 = nx.google_matrix(G, personalization=dangling, dangling=dangling) for i in range(len(G)): for j in range(len(G)): if i == self.dangling_node_index and (j + 1) in dangling: assert M2[i, j] == pytest.approx( dangling[j + 1] / dangling_sum, abs=1e-4 ) else: assert M2[i, j] == pytest.approx(M1[i, j], abs=1e-4)
2a05ccdb07cff88e56661dee8a9271859354027f
17
test_pagerank.py
214
Remove redundant py2 numeric conversions (#5661) * Remove redundant float conversion * Remove redundant int conversion * Use integer division Co-authored-by: Miroslav Šedivý <[email protected]>
42,054
0
228
145
42
176,719
58
networkx
21
networkx/algorithms/link_analysis/tests/test_pagerank.py
Python
14
{ "docstring": "\n Tests that the google_matrix doesn't change except for the dangling\n nodes.\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 10 }
https://github.com/networkx/networkx.git
6
search_query
def search_query(self, term, config_read_column, *join): term.strip().lower() self.session.connection().connection.connection.create_function("lower", 1, lcase) q = list() authorterms = re.split("[, ]+", term) for authorterm in authorterms: q.append(Books.authors.any(func.lower(Authors.name).ilike("%" + authorterm + "%"))) query = self.generate_linked_query(config_read_column, Books) if len(join) == 6: query = query.outerjoin(join[0], join[1]).outerjoin(join[2]).outerjoin(join[3], join[4]).outerjoin(join[5]) if len(join) == 3: query = query.outerjoin(join[0], join[1]).outerjoin(join[2]) elif len(join) == 2: query = query.outerjoin(join[0], join[1]) elif len(join) == 1: query = query.outerjoin(join[0]) return query.filter(self.common_filters(True)).filter( or_(Books.tags.any(func.lower(Tags.name).ilike("%" + term + "%")), Books.series.any(func.lower(Series.name).ilike("%" + term + "%")), Books.authors.any(and_(*q)), Books.publishers.any(func.lower(Publishers.name).ilike("%" + term + "%")), func.lower(Books.title).ilike("%" + term + "%") )) # read search results from calibre-database and return it (function is used for feed and simple search
32a3c45ee0f7e13bd61075f32a4dcebc415585a1
17
db.py
567
Refactored load read status for web access and opds access Refactored and removed discover html page Bugfix show author Bugfix open dialog in author page Fix for #2341 (advanced search with linked read column and read column having a higher number than number of available custom columns)
40,821
0
331
353
64
172,886
103
calibre-web
40
cps/db.py
Python
39
{ "docstring": "if not config_read_column:\n query = (self.session.query(Books, ub.ArchivedBook.is_archived, ub.ReadBook).select_from(Books)\n .outerjoin(ub.ReadBook, and_(Books.id == ub.ReadBook.book_id,\n int(current_user.id) == ub.ReadBook.user_id)))\n else:\n try:\n read_column = cc_classes[config_read_column]\n query = (self.session.query(Books, ub.ArchivedBook.is_archived, read_column.value)\n .select_from(Books)\n .outerjoin(read_column, read_column.book == Books.id))\n except (KeyError, AttributeError, IndexError):\n log.error(\"Custom Column No.{} is not existing in calibre database\".format(config_read_column))\n # Skip linking read column\n query = self.session.query(Books, ub.ArchivedBook.is_archived, None)\n query = query.outerjoin(ub.ArchivedBook, and_(Books.id == ub.ArchivedBook.book_id,\n int(current_user.id) == ub.ArchivedBook.user_id))", "language": "en", "n_whitespaces": 353, "n_words": 62, "vocab_size": 45 }
https://github.com/janeczku/calibre-web.git
2
get_items
def get_items(filters): conditions = get_conditions(filters) match_conditions = frappe.build_match_conditions("Sales Invoice") if match_conditions: match_conditions = " and {0} ".format(match_conditions) items = frappe.db.sql( % (conditions, match_conditions), filters, as_dict=1, ) ======= `tabSales Invoice Item`.parent, `tabSales Invoice Item`.item_code
d99b4e29b95db9c3ae9a2852a9757977e4912096
======= `tabSales Invoice Item`.`as_dict=1) >>>>>>> 363752510fix:summary of outward supplies Updated
11
hsn_wise_summary_of_outward_supplies.py
158
fix: HSN-wise-summary of outward supplies Updated Report Report changes done in order to meet the specification as per govt guideline - [GUIDELINE](https://taxguru.in/goods-and-service-tax/12-points-note-filing-gstr-1-01st-2021-onwards.html) (cherry picked from commit 363752510ead7d3b86693d3659b2157753f2762d) # Conflicts: # erpnext/regional/report/hsn_wise_summary_of_outward_supplies/hsn_wise_summary_of_outward_supplies.py
14,805
5
20
53
27
68,515
33
erpnext
27
erpnext/regional/report/hsn_wise_summary_of_outward_supplies/hsn_wise_summary_of_outward_supplies.py
Python
38
{ "docstring": "\n\t\tselect\n\t\t\t`tabSales Invoice Item`.gst_hsn_code,\n\t\t\t`tabSales Invoice Item`.stock_uom,\n\t\t\tsum(`tabSales Invoice Item`.stock_qty) as stock_qty,\n\t\t\tsum(`tabSales Invoice Item`.base_net_amount) as base_net_amount,\n\t\t\tsum(`tabSales Invoice Item`.base_price_list_rate) as base_price_list_rate,\n\t\t\t`tabSales Invoice Item`.parent,\n\t\t\t`tabSales Invoice Item`.item_code,\n\t\t\t`tabGST HSN Code`.description,\n\t\t\tjson_extract(`tabSales Taxes and Charges`.item_wise_tax_detail,\n\t\t\tconcat('$.\"' , `tabSales Invoice Item`.item_code, '\"[0]')) * count(distinct `tabSales Taxes and Charges`.name) as tax_rate\n\t\tfrom\n\t\t\t`tabSales Invoice`,\n\t\t\t`tabSales Invoice Item`,\n\t\t\t`tabGST HSN Code`,\n\t\t\t`tabSales Taxes and Charges`\n\t\twhere\n\t\t\t`tabSales Invoice`.name = `tabSales Invoice Item`.parent\n\t\t\tand `tabSales Taxes and Charges`.parent = `tabSales Invoice`.name\n\t\t\tand `tabSales Invoice`.docstatus = 1\n\t\t\tand `tabSales Invoice Item`.gst_hsn_code is not NULL\n\t\t\tand `tabSales Invoice Item`.gst_hsn_code = `tabGST HSN Code`.name %s %s\n\t\tgroup by\n<<<<<<< HEAD\n\t\t\t`tabSales Invoice Item`.parent, `tabSales Invoice Item`.item_code\n\n\t\t % (conditions, match_conditions), filters, as_dict=1)\n>>>>>>> 363752510e (fix: HSN-wise-summary of outward supplies Updated Report)\n\n\treturn items\n\n", "language": "en", "n_whitespaces": 98, "n_words": 125, "vocab_size": 68 }
https://github.com/frappe/erpnext.git
2
intersection_over_box
def intersection_over_box(chips, boxes): M = chips.shape[0] N = boxes.shape[0] if M * N == 0: return np.zeros([M, N], dtype='float32') box_area = bbox_area(boxes) # B inter_x2y2 = np.minimum(np.expand_dims(chips, 1)[:, :, 2:], boxes[:, 2:]) # CxBX2 inter_x1y1 = np.maximum(np.expand_dims(chips, 1)[:, :, :2], boxes[:, :2]) # CxBx2 inter_wh = inter_x2y2 - inter_x1y1 inter_wh = np.clip(inter_wh, a_min=0, a_max=None) inter_area = inter_wh[:, :, 0] * inter_wh[:, :, 1] # CxB iob = inter_area / np.expand_dims(box_area, 0) return iob
da0157cf64aef2b7bf54a46e9cd1fbdea8b38f3e
11
chip_box_utils.py
259
update numpy 1.24 (#7552)
53,153
0
174
168
47
211,859
73
PaddleDetection
22
ppdet/data/crop_utils/chip_box_utils.py
Python
15
{ "docstring": "\n intersection area over box area\n :param chips: C\n :param boxes: B\n :return: iob, CxB\n ", "language": "en", "n_whitespaces": 32, "n_words": 14, "vocab_size": 12 }
https://github.com/PaddlePaddle/PaddleDetection.git
1
samefile
def samefile(f1, f2): s1 = os.stat(f1) s2 = os.stat(f2) return samestat(s1, s2) # Are two open files really referencing the same file? # (Not necessarily the same file descriptor!)
8198943edd73a363c266633e1aa5b2a9e9c9f526
8
genericpath.py
53
add python 3.10.4 for windows
54,828
0
39
31
25
217,515
29
XX-Net
8
python3.10.4/Lib/genericpath.py
Python
4
{ "docstring": "Test whether two pathnames reference the same actual file or directory\n\n This is determined by the device number and i-node number and\n raises an exception if an os.stat() call on either pathname fails.\n ", "language": "en", "n_whitespaces": 42, "n_words": 33, "vocab_size": 29 }
https://github.com/XX-net/XX-Net.git
2
line
def line(loc, strg): lastCR = strg.rfind("\n", 0, loc) nextCR = strg.find("\n", loc) if nextCR >= 0: return strg[lastCR + 1:nextCR] else: return strg[lastCR + 1:]
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
11
pyparsing.py
90
upd; format
13,281
0
54
54
19
63,386
25
transferlearning
7
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
Python
7
{ "docstring": "Returns the line of text containing loc within a string, counting newlines as line separators.\n ", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 14 }
https://github.com/jindongwang/transferlearning.git
2
_sort_rules
def _sort_rules(self) -> None: if not self._rules: return self._rules.sort(key=len, reverse=True)
5f5d69722ff6c28ce46ebc958eb9d44d36cbf75b
8
tree.py
49
feat(txcluster): Discard rules with all `*`s (#42076) Rules consisting of all `*`s aren't helpful and provide a worse user experience, so we want to get rid of them. All `*` rules are produced when the merge threshold is not high enough for the existing data, and they look like the following: ```json "rules": [ "/*/*/*/*/*/*/**", "/*/*/*/*/*/**", "/*/*/*/*/**", "/*/*/*/**", "/*/*/**", "/*/**" ] ``` This PR introduces a `RuleValidator` running when computing rules, so that invalid rules are never produced. There's also a small refactor to separate the concerns of each method. I've tried to build this in an easy-to-extend way since it's likely we will need to add new validation requirements.
18,542
0
42
29
10
89,462
10
sentry
7
src/sentry/ingest/transaction_clusterer/tree.py
Python
5
{ "docstring": "Sorts the rules by path length, descending (most specific rule first).", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/getsentry/sentry.git
7
_resize_worker_group
def _resize_worker_group(self, state_dict, max_retries=10): old_workers = self.worker_group.num_workers self.worker_group.reset() time.sleep(1) for i in range(max_retries): new_workers = self.worker_group.new_workers_size() if new_workers: self._last_resize = time.time() startup_success = self._start_workers(int(new_workers)) if not startup_success: logger.info( f"Worker startup failed. Retrying " f"{max_retries-i-1} more times." ) self.worker_group.reset() continue self.load_state_dict(state_dict, blocking=True) if self.use_local and new_workers == 1 and old_workers > 1: # Major hack. If we go from LocalDistributedRunner to a # standard TorchRunner we have to manually reset the # dummy actor handle global vars. # TODO(amog): Refactor LocalDistributedTorchRunner to # not use global variables for resource reservation. ray.util.sgd.torch.distributed_torch_runner._dummy_cuda_actor = None ray.util.sgd.torch.distributed_torch_runner._dummy_cpu_actor = None return else: delay = 2 ** i logger.warning("No new workers found. Retrying in %d sec." % delay) time.sleep(delay) raise RuntimeError("Exceeded max_retries for relaunching workers.")
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
18
torch_trainer.py
294
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,987
0
585
169
92
133,356
119
ray
33
python/ray/util/sgd/torch/torch_trainer.py
Python
26
{ "docstring": "Resizes the number of remote workers based on available resources.\n Total number of workers will never exceed `num_workers` amount.\n\n Args:\n state_dict (dict): The state dict to load to all workers.\n max_retries (int): How many times to attempt to resize workers\n before failing.\n ", "language": "en", "n_whitespaces": 100, "n_words": 42, "vocab_size": 35 }
https://github.com/ray-project/ray.git
60
readfortrancode
def readfortrancode(ffile, dowithline=show, istop=1): global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77 global beginpattern, quiet, verbose, dolowercase, include_paths if not istop: saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ beginpattern, quiet, verbose, dolowercase if ffile == []: return localdolowercase = dolowercase # cont: set to True when the content of the last line read # indicates statement continuation cont = False finalline = '' ll = '' includeline = re.compile( r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I) cont1 = re.compile(r'(?P<line>.*)&\s*\Z') cont2 = re.compile(r'(\s*&|)(?P<line>.*)') mline_mark = re.compile(r".*?)(?P<this>.*?)(?P<after>
d4e11c7a2eb64861275facb076d47ccd135fa28c
22
crackfortran.py
3,370
ENH: Support character string arrays TST: added test for issue #18684 ENH: f2py opens files with correct encoding, fixes #635 TST: added test for issue #6308 TST: added test for issue #4519 TST: added test for issue #3425 ENH: Implement user-defined hooks support for post-processing f2py data structure. Implement character BC hook. ENH: Add support for detecting utf-16 and utf-32 encodings.
38,652
0
155
1,187
57
160,531
78
numpy
119
numpy/f2py/crackfortran.py
Python
201
{ "docstring": "\n Read fortran codes from files and\n 1) Get rid of comments, line continuations, and empty lines; lower cases.\n 2) Call dowithline(line) on every line.\n 3) Recursively call itself when statement \\\"include '<filename>'\\\" is met.\n \")\n if istop:\n dowithline('', -1)\n ll, l1 = '', ''\n spacedigits = [' '] + [str(_m) for _m in range(10)]\n filepositiontext = ''\n fin = fileinput.FileInput(ffile, openhook=openhook)\n while True:\n try:\n l = fin.readline()\n except UnicodeDecodeError as msg:\n raise Exception(\n f'readfortrancode: reading {fin.filename()}#{fin.lineno()}'\n f' failed with\\n{msg}.\\nIt is likely that installing chardet'\n ' package will help f2py determine the input file encoding'\n ' correctly.')\n if not l:\n break\n if fin.isfirstline():\n filepositiontext = ''\n currentfilename = fin.filename()\n gotnextfile = 1\n l1 = l\n strictf77 = 0\n sourcecodeform = 'fix'\n ext = os.path.splitext(currentfilename)[1]\n if is_f_file(currentfilename) and \\\n not (_has_f90_header(l) or _has_fix_header(l)):\n strictf77 = 1\n elif is_free_format(currentfilename) and not _has_fix_header(l):\n sourcecodeform = 'free'\n if strictf77:\n beginpattern = beginpattern77\n else:\n beginpattern = beginpattern90\n outmess('\\tReading file %s (format:%s%s)\\n'\n % (repr(currentfilename), sourcecodeform,\n strictf77 and ',strict' or ''))\n\n l = l.expandtabs().replace('\\xa0', ' ')\n # Get rid of newline characters\n while not l == '':\n if l[-1] not in \"\\n\\r\\f\":\n break\n l = l[:-1]\n if not strictf77:\n (l, rl) = split_by_unquoted(l, '!')\n l += ' '\n if rl[:5].lower() == '!f2py': # f2py directive\n l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!')\n if l.strip() == '': # Skip empty line\n if sourcecodeform == 'free':\n # In free form, a statement continues in the next line\n # that is not a comment line [3.3.2.4^1], lines with\n # blanks are comment lines [3.3.2.3^1]. Hence, the\n # line continuation flag must retain its state.\n pass\n else:\n # In fixed form, statement continuation is determined\n # by a non-blank character at the 6-th position. Empty\n # line indicates a start of a new statement\n # [3.3.3.3^1]. 
Hence, the line continuation flag must\n # be reset.\n cont = False\n continue\n if sourcecodeform == 'fix':\n if l[0] in ['*', 'c', '!', 'C', '#']:\n if l[1:5].lower() == 'f2py': # f2py directive\n l = ' ' + l[5:]\n else: # Skip comment line\n cont = False\n continue\n elif strictf77:\n if len(l) > 72:\n l = l[:72]\n if not (l[0] in spacedigits):\n raise Exception('readfortrancode: Found non-(space,digit) char '\n 'in the first column.\\n\\tAre you sure that '\n 'this code is in fix form?\\n\\tline=%s' % repr(l))\n\n if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '):\n # Continuation of a previous line\n ll = ll + l[6:]\n finalline = ''\n origfinalline = ''\n else:\n if not strictf77:\n # F90 continuation\n r = cont1.match(l)\n if r:\n l = r.group('line') # Continuation follows ..\n if cont:\n ll = ll + cont2.match(l).group('line')\n finalline = ''\n origfinalline = ''\n else:\n # clean up line beginning from possible digits.\n l = ' ' + l[5:]\n if localdolowercase:\n finalline = ll.lower()\n else:\n finalline = ll\n origfinalline = ll\n ll = l\n cont = (r is not None)\n else:\n # clean up line beginning from possible digits.\n l = ' ' + l[5:]\n if localdolowercase:\n finalline = ll.lower()\n else:\n finalline = ll\n origfinalline = ll\n ll = l\n\n elif sourcecodeform == 'free':\n if not cont and ext == '.pyf' and mline_mark.match(l):\n l = l + '\\n'\n while True:\n lc = fin.readline()\n if not lc:\n errmess(\n 'Unexpected end of file when reading multiline\\n')\n break\n l = l + lc\n if mline_mark.match(lc):\n break\n l = l.rstrip()\n r = cont1.match(l)\n if r:\n l = r.group('line') # Continuation follows ..\n if cont:\n ll = ll + cont2.match(l).group('line')\n finalline = ''\n origfinalline = ''\n else:\n if localdolowercase:\n finalline = ll.lower()\n else:\n finalline = ll\n origfinalline = ll\n ll = l\n cont = (r is not None)\n else:\n raise ValueError(\n \"Flag sourcecodeform must be either 'fix' or 'free': %s\" % repr(sourcecodeform))\n filepositiontext = 'Line #%d in %s:\"%s\"\\n\\t' % (\n fin.filelineno() - 1, currentfilename, l1)\n m = includeline.match(origfinalline)\n if m:\n fn = m.group('name')\n if os.path.isfile(fn):\n readfortrancode(fn, dowithline=dowithline, istop=0)\n else:\n include_dirs = [\n os.path.dirname(currentfilename)] + include_paths\n foundfile = 0\n for inc_dir in include_dirs:\n fn1 = os.path.join(inc_dir, fn)\n if os.path.isfile(fn1):\n foundfile = 1\n readfortrancode(fn1, dowithline=dowithline, istop=0)\n break\n if not foundfile:\n outmess('readfortrancode: could not find include file %s in %s. Ignoring.\\n' % (\n repr(fn), os.pathsep.join(include_dirs)))\n else:\n dowithline(finalline)\n l1 = ll\n if localdolowercase:\n finalline = ll.lower()\n else:\n finalline = ll\n origfinalline = ll\n filepositiontext = 'Line #%d in %s:\"%s\"\\n\\t' % (\n fin.filelineno() - 1, currentfilename, l1)\n m = includeline.match(origfinalline)\n if m:\n fn = m.group('name')\n if os.path.isfile(fn):\n readfortrancode(fn, dowithline=dowithline, istop=0)\n else:\n include_dirs = [os.path.dirname(currentfilename)] + include_paths\n foundfile = 0\n for inc_dir in include_dirs:\n fn1 = os.path.join(inc_dir, fn)\n if os.path.isfile(fn1):\n foundfile = 1\n readfortrancode(fn1, dowithline=dowithline, istop=0)\n break\n if not foundfile:\n outmess('readfortrancode: could not find include file %s in %s. 
Ignoring.\\n' % (\n repr(fn), os.pathsep.join(include_dirs)))\n else:\n dowithline(finalline)\n filepositiontext = ''\n fin.close()\n if istop:\n dowithline('', 1)\n else:\n gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\\\n beginpattern, quiet, verbose, dolowercase = saveglobals\n\n# Crack line\nbeforethisafter = r'\\s*(?P<before>%s(?=\\s*(\\b(%s)\\b)))' + \\\n r'\\s*(?P<this>(\\b(%s)\\b))' + \\\n r'\\s*(?P<after>%s)\\s*\\Z'\n##\nfortrantypes = r'character|logical|integer|real|complex|double\\s*(precision\\s*(complex|)|complex)|type(?=\\s*\\([\\w\\s,=(*)]*\\))|byte'\ntypespattern = re.compile(\n beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type'\ntypespattern4implicit = re.compile(beforethisafter % (\n '', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I)\n#\nfunctionpattern = re.compile(beforethisafter % (\n r'([a-z]+[\\w\\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'\nsubroutinepattern = re.compile(beforethisafter % (\n r'[a-z\\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'\n# modulepattern=re.compile(beforethisafter%('[a-z\\s]*?','module','module','.*'),re.I),'begin'\n#\ngroupbegins77 = r'program|block\\s*data'\nbeginpattern77 = re.compile(\n beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'\ngroupbegins90 = groupbegins77 + \\\n r'|module(?!\\s*procedure)|python\\s*module|(abstract|)\\s*interface|' + \\\n r'type(?!\\s*\\()'\nbeginpattern90 = re.compile(\n beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'\ngroupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|'\n r'endinterface|endsubroutine|endfunction')\nendpattern = re.compile(\n beforethisafter % ('', groupends, groupends, r'.*'), re.I), 'end'\nendifs = r'end\\s*(if|do|where|select|while|forall|associate|block|' + \\\n r'critical|enum|team)'\nendifpattern = re.compile(\n beforethisafter % (r'[\\w]*?', endifs, endifs, r'[\\w\\s]*'), re.I), 'endif'\n#\nmoduleprocedures = r'module\\s*procedure'\nmoduleprocedurepattern = re.compile(\n beforethisafter % ('', moduleprocedures, moduleprocedures, r'.*'), re.I), \\\n 'moduleprocedure'\nimplicitpattern = re.compile(\n beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit'\ndimensionpattern = re.compile(beforethisafter % (\n '', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'\nexternalpattern = re.compile(\n beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external'\noptionalpattern = re.compile(\n beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional'\nrequiredpattern = re.compile(\n beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required'\npublicpattern = re.compile(\n beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public'\nprivatepattern = re.compile(\n beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private'\nintrinsicpattern = re.compile(\n beforethisafter % ('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrinsic'\nintentpattern = re.compile(beforethisafter % (\n '', 'intent|depend|note|check', 'intent|depend|note|check', r'\\s*\\(.*?\\).*'), re.I), 'intent'\nparameterpattern = re.compile(\n beforethisafter % ('', 'parameter', 'parameter', r'\\s*\\(.*'), re.I), 'parameter'\ndatapattern = re.compile(\n beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data'\ncallpattern = re.compile(\n beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call'\nentrypattern = re.compile(\n beforethisafter % 
('', 'entry', 'entry', '.*'), re.I), 'entry'\ncallfunpattern = re.compile(\n beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun'\ncommonpattern = re.compile(\n beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common'\nusepattern = re.compile(\n beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use'\ncontainspattern = re.compile(\n beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains'\nformatpattern = re.compile(\n beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format'\n# Non-fortran and f2py-specific statements\nf2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef',\n 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements'\nmultilinepattern = re.compile(\n r\"\\s*(?P<before>)\\s*\\Z\", re.S), 'multiline'\n##\n", "language": "en", "n_whitespaces": 4055, "n_words": 1168, "vocab_size": 439 }
https://github.com/numpy/numpy.git
2
_get_streams
def _get_streams(self): self.session.http.headers.update({ "User-Agent": useragents.CHROME, "Referer": self.url, }) re_m3u8 = re.compile( r, re.VERBOSE, ) hls_url = self.session.http.get(self.url, schema=validate.Schema( validate.transform(lambda text: next(reversed(list(re_m3u8.finditer(text))), None)), validate.any( None, validate.all( validate.get("string"), str, validate.any( "", validate.url(), ), ), validate.all( validate.get("obfuscated"), str, validate.parse_json(), validate.transform(lambda arr: unquote("".join(arr))), validate.url(), ), validate.all( validate.get("obfuscated_b64"), str, validate.parse_json(), validate.transform(lambda arr: unquote("".join(arr))), validate.transform(lambda b64: b64decode(b64).decode("utf-8")), validate.url(), ), ), )) if hls_url: return HLSStream.parse_variant_playlist(self.session, hls_url) __plugin__ = RTPPlay
f7cbfae2621d6dd2f9f1189cf98a59ba64ff9721
23
rtpplay.py
399
plugins: move and refactor validation schemas Refactor validation schemas of plugins where schemas are defined as class attributes and where no major changes are needed: - Move validation schemas from class attributes to schema definitions - Apply minor validation schema adjustments (eg. union_get) - Use `validate.none_or_all(...)` - Replace `validate.transform(pattern.search)` with `pattern` and fix schemas using `validate.regex(pattern)` where a regex has to match - Move pattern definitions from class attributes to schema definitions - Fix some patterns in regards to quotation matching - Fix minor style issues where it makes sense - use double quotes - use f-strings - add trailing commas - fix indentation
45,806
0
634
251
44
187,554
63
streamlink
38
src/streamlink/plugins/rtpplay.py
Python
48
{ "docstring": "\n hls\\s*:\\s*(?:\n (?P<q>[\"'])(?P<string>.*?)(?P=q)\n |\n decodeURIComponent\\s*\\((?P<obfuscated>\\[.*?])\\.join\\(\n |\n atob\\s*\\(\\s*decodeURIComponent\\s*\\((?P<obfuscated_b64>\\[.*?])\\.join\\(\n )\n ", "language": "en", "n_whitespaces": 144, "n_words": 7, "vocab_size": 6 }
https://github.com/streamlink/streamlink.git
1
fit
def fit(self, X, y, sample_weight=None): self._validate_params() _accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), self.solver) X, y = self._validate_data( X, y, accept_sparse=_accept_sparse, dtype=[np.float64, np.float32], multi_output=True, y_numeric=True, ) return super().fit(X, y, sample_weight=sample_weight)
b7721135a0d3df30e5a1a1c34156421b55fe67ca
11
_ridge.py
126
MAINT validate parameters in Ridge (#23563) Co-authored-by: jeremie du boisberranger <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
76,169
0
134
86
21
260,299
26
scikit-learn
20
sklearn/linear_model/_ridge.py
Python
12
{ "docstring": "Fit Ridge regression model.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : ndarray of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n sample_weight : float or ndarray of shape (n_samples,), default=None\n Individual weights for each sample. If given a float, every sample\n will have the same weight.\n\n Returns\n -------\n self : object\n Fitted estimator.\n ", "language": "en", "n_whitespaces": 178, "n_words": 60, "vocab_size": 50 }
https://github.com/scikit-learn/scikit-learn.git
2
_prepare_options
def _prepare_options(self) -> None: super()._prepare_options() self.options.restart_cmd[0] = 'apachectl' if not self.options.restart_cmd_alt: # pragma: no cover raise ValueError("OS option restart_cmd_alt must be set for Fedora.") self.options.restart_cmd_alt[0] = 'apachectl' self.options.conftest_cmd[0] = 'apachectl'
7d9e9a49005de7961e84d2a7c608db57dbab3046
10
override_fedora.py
104
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <[email protected]>
45,574
0
84
58
26
186,667
30
certbot
8
certbot-apache/certbot_apache/_internal/override_fedora.py
Python
12
{ "docstring": "\n Override the options dictionary initialization to keep using apachectl\n instead of httpd and so take advantages of this new bash script in newer versions\n of Fedora to restart httpd.\n ", "language": "en", "n_whitespaces": 58, "n_words": 29, "vocab_size": 26 }
https://github.com/certbot/certbot.git
4
johnson_lindenstrauss_min_dim
def johnson_lindenstrauss_min_dim(n_samples, *, eps=0.1): eps = np.asarray(eps) n_samples = np.asarray(n_samples) if np.any(eps <= 0.0) or np.any(eps >= 1): raise ValueError("The JL bound is defined for eps in ]0, 1[, got %r" % eps) if np.any(n_samples) <= 0: raise ValueError( "The JL bound is defined for n_samples greater than zero, got %r" % n_samples ) denominator = (eps**2 / 2) - (eps**3 / 3) return (4 * np.log(n_samples) / denominator).astype(np.int64)
1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe
12
random_projection.py
177
MNT Update black to stable version (#22474)
75,490
0
133
112
50
258,948
69
scikit-learn
11
sklearn/random_projection.py
Python
12
{ "docstring": "Find a 'safe' number of components to randomly project to.\n\n The distortion introduced by a random projection `p` only changes the\n distance between two points by a factor (1 +- eps) in an euclidean space\n with good probability. The projection `p` is an eps-embedding as defined\n by:\n\n (1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2\n\n Where u and v are any rows taken from a dataset of shape (n_samples,\n n_features), eps is in ]0, 1[ and p is a projection by a random Gaussian\n N(0, 1) matrix of shape (n_components, n_features) (or a sparse\n Achlioptas matrix).\n\n The minimum number of components to guarantee the eps-embedding is\n given by:\n\n n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)\n\n Note that the number of dimensions is independent of the original\n number of features but instead depends on the size of the dataset:\n the larger the dataset, the higher is the minimal dimensionality of\n an eps-embedding.\n\n Read more in the :ref:`User Guide <johnson_lindenstrauss>`.\n\n Parameters\n ----------\n n_samples : int or array-like of int\n Number of samples that should be a integer greater than 0. If an array\n is given, it will compute a safe number of components array-wise.\n\n eps : float or ndarray of shape (n_components,), dtype=float, \\\n default=0.1\n Maximum distortion rate in the range (0,1 ) as defined by the\n Johnson-Lindenstrauss lemma. If an array is given, it will compute a\n safe number of components array-wise.\n\n Returns\n -------\n n_components : int or ndarray of int\n The minimal number of components to guarantee with good probability\n an eps-embedding with n_samples.\n\n Examples\n --------\n >>> from sklearn.random_projection import johnson_lindenstrauss_min_dim\n >>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)\n 663\n\n >>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])\n array([ 663, 11841, 1112658])\n\n >>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1)\n array([ 7894, 9868, 11841])\n\n References\n ----------\n\n .. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma\n\n .. [2] Sanjoy Dasgupta and Anupam Gupta, 1999,\n \"An elementary proof of the Johnson-Lindenstrauss Lemma.\"\n http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654\n\n ", "language": "en", "n_whitespaces": 522, "n_words": 318, "vocab_size": 187 }
https://github.com/scikit-learn/scikit-learn.git
7
act
def act(self): obs = self.observation reply = {'text': INVALID, 'id': self.getID(), 'episode_done': False} if obs is None or obs['text'] == DO_NOT_RETRIEVE: return Message(reply) # construct the search query labels = obs.get('labels', obs.get('eval_labels', None)) search_query = self.construct_search_query(labels) if ( self.opt['min_num_search_words'] > 0 and len(search_query[0].split()) <= self.opt['min_num_search_words'] ): return Message(reply) # retrieve self.search_engine.set_search_queries(search_query) retrieved, _ = self.search_engine.retrieve_and_score(self.dummy) all_docs = [d.get_tokenization_str() for d in retrieved[0]] # batched # Find the right doc best_f1, best_doc, best_doc_idx = self.get_best_doc(all_docs, labels) if best_doc: assert best_doc_idx is not None reply['knowledge'] = f'{TOKEN_KNOWLEDGE}{best_doc}{TOKEN_END_KNOWLEDGE}' reply['f1_overlap'] = best_f1 reply['text'] = labels[0] reply['retrieved_docs'] = all_docs reply['gold_doc'] = all_docs[best_doc_idx] reply['search_query'] = search_query[0] return Message(reply)
7e453008fde751aff0cfd752662e19fe2adc7410
13
generate_lm_data.py
379
SeeKeR (#4447) * seeker * todo * readme updates; add test * small config changes * various updates * readme fix * model card * add arxiv link * surround spacy with try catch * more protected * more protection of imports * lint
47,131
0
343
219
74
194,950
102
ParlAI
31
projects/seeker/scripts/generate_lm_data.py
Python
25
{ "docstring": "\n Search for overlap with the observation label.\n\n Return the best fitting document. A document is valid if the f1 is above the\n threshold AND the f1 is less than 1.0 AND the target label is not in the\n document.\n ", "language": "en", "n_whitespaces": 75, "n_words": 39, "vocab_size": 27 }
https://github.com/facebookresearch/ParlAI.git
1
test_thread_with_bundled_aggregations_for_latest
def test_thread_with_bundled_aggregations_for_latest(self) -> None: self._send_relation(RelationTypes.THREAD, "m.room.test") channel = self._send_relation(RelationTypes.THREAD, "m.room.test") thread_2 = channel.json_body["event_id"] self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_2 )
75dff3dc980974960f55fa21fc8e672201f63045
9
test_relations.py
93
Include bundled aggregations for the latest event in a thread. (#12273) The `latest_event` field of the bundled aggregations for `m.thread` relations did not include bundled aggregations itself. This resulted in clients needing to immediately request the event from the server (and thus making it useless that the latest event itself was serialized instead of just including an event ID).
72,127
0
72
68
16
248,149
19
synapse
10
tests/rest/client/test_relations.py
Python
12
{ "docstring": "\n Bundled aggregations should get applied to the latest thread event.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/matrix-org/synapse.git
1
map
def map(self, arg, na_action=None) -> Series: new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index).__finalize__( self, method="map" )
521259299f7829da667ba39302ec77acedde9e5e
11
series.py
75
DOC: Improve doc summaries in series.rst (#45237)
39,401
0
55
48
16
163,193
16
pandas
11
pandas/core/series.py
Python
79
{ "docstring": "\n Map values of Series according to an input mapping or function.\n\n Used for substituting each value in a Series with another value,\n that may be derived from a function, a ``dict`` or\n a :class:`Series`.\n\n Parameters\n ----------\n arg : function, collections.abc.Mapping subclass or Series\n Mapping correspondence.\n na_action : {None, 'ignore'}, default None\n If 'ignore', propagate NaN values, without passing them to the\n mapping correspondence.\n\n Returns\n -------\n Series\n Same index as caller.\n\n See Also\n --------\n Series.apply : For applying more complex functions on a Series.\n DataFrame.apply : Apply a function row-/column-wise.\n DataFrame.applymap : Apply a function elementwise on a whole DataFrame.\n\n Notes\n -----\n When ``arg`` is a dictionary, values in Series that are not in the\n dictionary (as keys) are converted to ``NaN``. However, if the\n dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.\n provides a method for default values), then this default is used\n rather than ``NaN``.\n\n Examples\n --------\n >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])\n >>> s\n 0 cat\n 1 dog\n 2 NaN\n 3 rabbit\n dtype: object\n\n ``map`` accepts a ``dict`` or a ``Series``. Values that are not found\n in the ``dict`` are converted to ``NaN``, unless the dict has a default\n value (e.g. ``defaultdict``):\n\n >>> s.map({'cat': 'kitten', 'dog': 'puppy'})\n 0 kitten\n 1 puppy\n 2 NaN\n 3 NaN\n dtype: object\n\n It also accepts a function:\n\n >>> s.map('I am a {}'.format)\n 0 I am a cat\n 1 I am a dog\n 2 I am a nan\n 3 I am a rabbit\n dtype: object\n\n To avoid applying the function to missing values (and keep them as\n ``NaN``) ``na_action='ignore'`` can be used:\n\n >>> s.map('I am a {}'.format, na_action='ignore')\n 0 I am a cat\n 1 I am a dog\n 2 NaN\n 3 I am a rabbit\n dtype: object\n ", "language": "en", "n_whitespaces": 802, "n_words": 285, "vocab_size": 155 }
https://github.com/pandas-dev/pandas.git
8
_validate_encoder_architecture
def _validate_encoder_architecture(self): arch = self.config["enc_architecture"].lower() model = _MODEL_MAPPING.get(arch) if not model: raise FaceswapError(f"'{arch}' is not a valid choice for encoder architecture. Choose " f"one of {list(_MODEL_MAPPING.keys())}.") if get_backend() == "amd" and model.get("no_amd"): valid = [k for k, v in _MODEL_MAPPING.items() if not v.get('no_amd')] raise FaceswapError(f"'{arch}' is not compatible with the AMD backend. Choose one of " f"{valid}.") tf_ver = float(".".join(tf.__version__.split(".")[:2])) # pylint:disable=no-member tf_min = model.get("tf_min", 2.0) if get_backend() != "amd" and tf_ver < tf_min: raise FaceswapError(f"{arch}' is not compatible with your version of Tensorflow. The " f"minimum version required is {tf_min} whilst you have version " f"{tf_ver} installed.")
aa39234538a8f83e6aa2b60b8275a570e8876ac2
16
phaze_a.py
281
Update all Keras Imports to be conditional (#1214) * Remove custom keras importer * first round keras imports fix * launcher.py: Remove KerasFinder references * 2nd round keras imports update (lib and extract) * 3rd round keras imports update (train) * remove KerasFinder from tests * 4th round keras imports update (tests)
19,941
0
323
138
65
100,467
98
faceswap
23
plugins/train/model/phaze_a.py
Python
16
{ "docstring": " Validate that the requested architecture is a valid choice for the running system\n configuration.\n\n If the selection is not valid, an error is logged and system exits.\n ", "language": "en", "n_whitespaces": 49, "n_words": 27, "vocab_size": 22 }
https://github.com/deepfakes/faceswap.git
4
gallery_image_warning_filter
def gallery_image_warning_filter(record): msg = record.msg for gallery_dir in sphinx_gallery_conf['gallery_dirs']: if msg.startswith(f'image file not readable: {gallery_dir}'): return False if msg == 'Could not obtain image size. :scale: option is ignored.': return False return True logger = logging.getLogger('sphinx') logger.addFilter(gallery_image_warning_filter) mathmpl_fontsize = 11.0 mathmpl_srcset = ['2x'] # Monkey-patching gallery header to include search keywords gen_rst.EXAMPLE_HEADER = # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # This is the default encoding, but it doesn't hurt to be explicit source_encoding = "utf-8" # The toplevel toctree document (renamed to root_doc in Sphinx 4.0) root_doc = master_doc = 'users/index' # General substitutions. try: SHA = subprocess.check_output( ['git', 'describe', '--dirty']).decode('utf-8').strip() # Catch the case where git is not installed locally, and use the setuptools_scm # version number instead except (subprocess.CalledProcessError, FileNotFoundError): SHA = matplotlib.__version__ project = 'Matplotlib' copyright = ( '2002–2012 John Hunter, Darren Dale, Eric Firing, Michael Droettboom ' 'and the Matplotlib development team; ' f'2012–{sourceyear} The Matplotlib development team' ) # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. # # The short X.Y version. version = matplotlib.__version__ # The full version, including alpha/beta/rc tags. release = version # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. unused_docs = [] # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' default_role = 'obj' # Plot directive configuration # ---------------------------- # For speedup, decide which plot_formats to build based on build targets: # html only -> png # latex only -> pdf # all other cases, including html + latex -> png, pdf # For simplicity, we assume that the build targets appear in the command line. # We're falling back on using all formats in case that assumption fails. formats = {'html': ('png', 100), 'latex': ('pdf', 100)} plot_formats = [formats[target] for target in ['html', 'latex'] if target in sys.argv] or list(formats.values()) # GitHub extension github_project_url = "https://github.com/matplotlib/matplotlib/" # Options for HTML output # -----------------------
1374e34d2f5cb9c424fc0ae4a9495f3e562e4b06
14
conf.py
418
filter warnings
23,672
0
507
38
279
109,615
446
matplotlib
45
doc/conf.py
Python
8
{ "docstring": "\n.. DO NOT EDIT.\n.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.\n.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:\n.. \"{0}\"\n.. LINE NUMBERS ARE GIVEN BELOW.\n\n.. only:: html\n\n .. meta::\n :keywords: codex\n\n .. note::\n :class: sphx-glr-download-link-note\n\n Click :ref:`here <sphx_glr_download_{1}>`\n to download the full example code{2}\n\n.. rst-class:: sphx-glr-example-title\n\n.. _sphx_glr_{1}:\n\n", "language": "en", "n_whitespaces": 80, "n_words": 54, "vocab_size": 45 }
https://github.com/matplotlib/matplotlib.git
2
_create_default_prometheus_configs
def _create_default_prometheus_configs(self): prometheus_config_output_path = os.path.join( self.metrics_root, "prometheus", "prometheus.yml" ) # Copy default prometheus configurations if os.path.exists(prometheus_config_output_path): os.remove(prometheus_config_output_path) os.makedirs(os.path.dirname(prometheus_config_output_path), exist_ok=True) shutil.copy(PROMETHEUS_CONFIG_INPUT_PATH, prometheus_config_output_path)
42da4445e7a3cb358a1a02ae433a004e9fa836b5
10
metrics_head.py
105
Export default configurations for grafana and prometheus (#28286)
28,527
0
92
63
21
127,791
21
ray
15
dashboard/modules/metrics/metrics_head.py
Python
8
{ "docstring": "\n Creates the prometheus configurations that are by default provided by Ray.\n This will completely replace the `/tmp/ray/metrics/prometheus` folder.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 16 }
https://github.com/ray-project/ray.git
3
set_connectionstyle
def set_connectionstyle(self, connectionstyle=None, **kwargs): if connectionstyle is None: return ConnectionStyle.pprint_styles() self._connector = ( ConnectionStyle(connectionstyle, **kwargs) if isinstance(connectionstyle, str) else connectionstyle) self.stale = True
0dc472b4c7cdcc1e88228988fff17762c90f1cb9
11
patches.py
80
Harmonize docstrings for boxstyle/connectionstyle/arrowstyle. - Rely on `__init_subclass__` to avoid the need for the out-of-order `interpd.update`/`dedent_interpd`. - Use consistent wording for all setters, and add ACCEPTS list in all cases. - Move get_boxstyle right next to set_boxstyle (consistently with the other setters/getters). - Make the type check in the setters consistent in all cases (check for str, not for forcing inheritance from the private _Base). - Support `set_connectionstyle()` as equivalent to `set_connectionstyle(None)`, consistently with the other two setters.
23,577
0
84
51
21
109,425
23
matplotlib
10
lib/matplotlib/patches.py
Python
7
{ "docstring": "\n Set the connection style, possibly with further attributes.\n\n Attributes from the previous connection style are not reused.\n\n Without argument (or with ``connectionstyle=None``), the available box\n styles are returned as a human-readable string.\n\n Parameters\n ----------\n connectionstyle : str or `matplotlib.patches.ConnectionStyle`\n The style of the connection: either a `.ConnectionStyle` instance,\n or a string, which is the style name and optionally comma separated\n attributes (e.g. \"Arc,armA=30,rad=10\"). Such a string is used to\n construct a `.ConnectionStyle` object, as documented in that class.\n\n The following connection styles are available:\n\n %(ConnectionStyle:table_and_accepts)s\n\n **kwargs\n Additional attributes for the connection style. See the table above\n for supported parameters.\n\n Examples\n --------\n ::\n\n set_connectionstyle(\"Arc,armA=30,rad=10\")\n set_connectionstyle(\"arc\", armA=30, rad=10)\n ", "language": "en", "n_whitespaces": 301, "n_words": 106, "vocab_size": 80 }
https://github.com/matplotlib/matplotlib.git
2
formatTime
def formatTime(self, record, datefmt=None): ct = dt.datetime.fromtimestamp(record.created) if datefmt: s = ct.strftime(datefmt) else: # Format datetime object ct to microseconds t = ct.strftime("%Y-%m-%d %H:%M:%S") s = f"{t},{record.msecs:03}" return s
49fc2cf3733f20ac6cf8a7c61e42ef7aa5cf4b03
12
config.py
100
FEAT-#4371: Add logging to Modin (#4372) Co-authored-by: Devin Petersohn <[email protected]> Co-authored-by: Mahesh Vashishtha <[email protected]> Co-authored-by: Anatoly Myachev <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Naren Krishna <[email protected]>
35,586
0
108
51
23
153,755
29
modin
13
modin/logging/config.py
Python
8
{ "docstring": "\n Return the creation time of the specified LogRecord as formatted text.\n\n This custom logging formatter inherits from the logging module and\n records timestamps at the microsecond level of granularity.\n\n Parameters\n ----------\n record : LogRecord\n The specified LogRecord object.\n datefmt : str, default: None\n Used with time.ststrftime() to format time record.\n\n Returns\n -------\n datetime\n Datetime object containing microsecond timestamp.\n ", "language": "en", "n_whitespaces": 169, "n_words": 58, "vocab_size": 47 }
https://github.com/modin-project/modin.git
1
host_local_array_to_global_array
def host_local_array_to_global_array(local_inputs, global_mesh, pspecs): def _convert(arr, pspec): if isinstance(arr, array.ArrayImpl) and isinstance(arr.sharding, PmapSharding): arr = np.array(arr) local_sharding = MeshPspecSharding(global_mesh.local_mesh, pspec) arrays = [ device_put(arr[index], d) for d, index in local_sharding.devices_indices_map(arr.shape).items() ] global_aval = global_mesh._local_to_global( pxla._get_array_mapping(pspec), core.ShapedArray(arr.shape, arrays[0].dtype)) return array.ArrayImpl(global_aval, MeshPspecSharding(global_mesh, pspec), arrays, committed=True) flattened_inps, in_tree = tree_flatten(local_inputs) in_pspecs = flatten_axis_resources( 'input pspecs', in_tree, pspecs, tupled_args=True) out = tree_map(_convert, tuple(flattened_inps), in_pspecs) return tree_unflatten(in_tree, out)
4da72cf3988b4918f65b1401e46c40b7c4504963
15
pjit.py
262
Add `host_local_array_to_global_array` and `global_array_to_host_local_array` for enabling transition to jax.Array. Also support `FROM_GDA` for `jax.Array` as a backwards compatible change so that users can continue to use that until they transition to jax.Array. Its currently required because of usage like `in_axis_resources = (FROM_GDA, FROM_GDA, P('data'), None)` and changing this on users side will require input from users so we as JAX can just support it as a temporary thing since GDA and Array act similarly in pjit. PiperOrigin-RevId: 479035326
27,124
0
151
54
55
122,219
63
jax
41
jax/experimental/pjit.py
Python
7
{ "docstring": "Converts a host local value to a globally sharded `jax.Array`.\n\n You can use this function to transition to `jax.Array`. Using `jax.Array` with\n `pjit` has the same semantics of using GDA with pjit i.e. all `jax.Array`\n inputs to pjit should be globally shaped.\n\n If you are currently passing host local values to pjit, you can use this\n function to convert your host local values to global Arrays and then pass that\n to pjit.\n\n Example usage:\n\n ```\n global_inputs = jax.experimental.pjit.host_local_array_to_global_array(\n host_local_inputs, global_mesh, in_pspecs)\n\n with mesh:\n global_out = pjitted_fun(global_inputs)\n\n host_local_output = jax.experimental.pjit.global_array_to_host_local_array(\n global_out, mesh, out_pspecs)\n ```\n\n Args:\n local_inputs: A Pytree of host local values.\n global_mesh: The global mesh.\n pspecs: A Pytree of PartitionSpecs.\n ", "language": "en", "n_whitespaces": 142, "n_words": 110, "vocab_size": 76 }
https://github.com/google/jax.git
2
get_charsets
def get_charsets(self, failobj=None): return [part.get_content_charset(failobj) for part in self.walk()]
8198943edd73a363c266633e1aa5b2a9e9c9f526
9
message.py
44
add python 3.10.4 for windows
57,064
0
23
27
9
223,783
9
XX-Net
6
python3.10.4/Lib/email/message.py
Python
2
{ "docstring": "Return a list containing the charset(s) used in this message.\n\n The returned list of items describes the Content-Type headers'\n charset parameter for this message and all the subparts in its\n payload.\n\n Each item will either be a string (the value of the charset parameter\n in the Content-Type header of that part) or the value of the\n 'failobj' parameter (defaults to None), if the part does not have a\n main MIME type of \"text\", or the charset is not defined.\n\n The list will contain one string for each part of the message, plus\n one for the container message (i.e. self), so that a non-multipart\n message will still return a list of length 1.\n ", "language": "en", "n_whitespaces": 189, "n_words": 112, "vocab_size": 67 }
https://github.com/XX-net/XX-Net.git
4
get_all_lexers
def get_all_lexers(plugins=True): for item in LEXERS.values(): yield item[1:] if plugins: for lexer in find_plugin_lexers(): yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
12
__init__.py
79
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
4,117
0
53
49
16
22,022
19
pipenv
11
pipenv/patched/pip/_vendor/pygments/lexers/__init__.py
Python
6
{ "docstring": "Return a generator of tuples in the form ``(name, aliases,\n filenames, mimetypes)`` of all know lexers.\n\n If *plugins* is true (the default), plugin lexers supplied by entrypoints\n are also returned. Otherwise, only builtin ones are considered.\n ", "language": "en", "n_whitespaces": 49, "n_words": 36, "vocab_size": 34 }
https://github.com/pypa/pipenv.git
2
get_placement_groups
async def get_placement_groups(self) -> dict: reply = await self._client.get_all_placement_group_info( timeout=DEFAULT_RPC_TIMEOUT ) result = {} for message in reply.placement_group_table_data: data = self._message_to_dict( message=message, fields_to_decode=["placement_group_id"], ) data = filter_fields(data, PlacementGroupState) result[data["placement_group_id"]] = data return result
30ab5458a7e4ba2351d5e1beef8c8797b5946493
13
state_aggregator.py
112
[State Observability] Tasks and Objects API (#23912) This PR implements ray list tasks and ray list objects APIs. NOTE: You can ignore the merge conflict for now. It is because the first PR was reverted. There's a fix PR open now.
31,406
0
160
68
25
138,398
33
ray
16
dashboard/state_aggregator.py
Python
19
{ "docstring": "List all placement group information from the cluster.\n\n Returns:\n {pg_id -> pg_data_in_dict}\n pg_data_in_dict's schema is in PlacementGroupState\n ", "language": "en", "n_whitespaces": 53, "n_words": 17, "vocab_size": 17 }
https://github.com/ray-project/ray.git
1
get_prompt_template_names
def get_prompt_template_names(cls) -> List[str]:
    return list(cls.prompt_templates.keys())
9ebf164cfdfb320503b7161493420c1b0ec577a3
10
prompt_node.py
38
feat: Expand LLM support with PromptModel, PromptNode, and PromptTemplate (#3667) Co-authored-by: ZanSara <[email protected]>
75,224
0
20
22
6
258,354
6
haystack
7
haystack/nodes/prompt/prompt_node.py
Python
6
{ "docstring": "\n Returns the list of supported prompt template names.\n :return: List of supported prompt template names.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 10 }
https://github.com/deepset-ai/haystack.git
3
_handle_meta_tensor_data_access
def _handle_meta_tensor_data_access(self):
    try:
        yield
    except NotImplementedError as error:
        if "meta" not in str(error).lower():
            raise error

        # TODO: See https://github.com/pytorch/pytorch/issues/68592
        raise self._make_error_meta(NotImplementedError, "Comparing meta tensors is currently not supported.")
8d05174defd689cb1cb2346e0cde5b7fa572814a
13
_comparison.py
71
make meta tensor data access error message for expressive in assert_close (#68802) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/68802 Without this patch, the error message of comparing meta tensors looks like this after #68722 was merged: ```python >>> t = torch.empty((), device="meta") >>> assert_close(t, t) NotImplementedError: Could not run 'aten::abs.out' with arguments from the 'Meta' backend. [...] [...] The above exception was the direct cause of the following exception: [...] RuntimeError: Comparing TensorLikePair( id=(), actual=tensor(..., device='meta', size=()), expected=tensor(..., device='meta', size=()), rtol=1.3e-06, atol=1e-05, equal_nan=False, check_device=True, check_dtype=True, check_layout=True, check_stride=False, check_is_coalesced=True, ) resulted in the unexpected exception above. If you are a user and see this message during normal operation please file an issue at https://github.com/pytorch/pytorch/issues. If you are a developer and working on the comparison functions, please except the previous error and raise an expressive `ErrorMeta` instead. ``` Thus, we follow our own advice and turn it into an expected exception until #68592 is resolved: ```python >>> t = torch.empty((), device="meta") >>> assert_close(t, t) ValueError: Comparing meta tensors is currently not supported ``` Test Plan: Imported from OSS Reviewed By: ngimel Differential Revision: D33542999 Pulled By: mruberry fbshipit-source-id: 0fe1ddee15b5decdbd4c5dd84f03804ca7eac95b
21,545
0
108
38
26
102,547
28
pytorch
7
torch/testing/_comparison.py
Python
7
{ "docstring": "Turns a vanilla :class:`NotImplementedError` stemming from data access on a meta tensor into an expressive\n :class:`ErrorMeta`.\n\n Although it looks like meta tensors could be handled upfront, we need to do it lazily: there are use cases\n where a meta tensor wraps a data tensors and dispatches all operator calls to it. Thus, although the tensor is\n a meta tensor, it behaves like a regular one.\n ", "language": "en", "n_whitespaces": 100, "n_words": 65, "vocab_size": 49 }
https://github.com/pytorch/pytorch.git
1
exec_train_and_forecast
def exec_train_and_forecast(self, mock_handler, model_name, using):
    # create predictor
    create_sql = f
    ret = self.run_mindsdb_sql(sql=create_sql)
    assert ret.error_code is None, "train failed: " + model_name
    self.wait_training(model_name=f'{model_name}_forecaster')

    predict_sql = f
    ret = self.run_mindsdb_sql(sql=predict_sql)
    assert ret.error_code is None, "forecast failed: " + model_name
fca34e2db1ab32fb348abb3b6e9d5feef80b6d23
10
test_merlion_handler.py
125
Integration merlion issue2377 (#3435) * [issue2377] Merlion integrated, forecaster: default, sarima, prophet, mses, detector: default, isolation forest, windstats, prophet. * [issue2377] 1) Add ref to models; 2) Use url to fetch test data; 3) Set author. * [issue2377] 1) Replace print with mindsdb.log 2) Add lower and upper bound of merlin version in requirements. * [issue2377] 1) requirements.txt fixed. 2) test cases fixed. 3) Code refined. * [issue2377] Delete the redundant logic of expanding customized columns. * [issue2377] Some useless column processing code removed. * [issue2377] Truncate blank lines. * [issue2377] Fix * [issue2377] 1) Fix some bugs, 2) Modify testcase according to the modification of mindsdb (Add 'project' concept and use engine='xx' to specify handler) * [issue2377] 1) Using timeseries_settings.order_by/window/horizon in merlion 2) Fix importing problem caused by merging about mindsdb.utilities.log. * [issue2377] Fix the arg fetching problem causing by merging staging.
26,018
0
94
68
26
117,490
39
mindsdb
12
tests/unit/test_merlion_handler.py
Python
19
{ "docstring": "\n CREATE PREDICTOR mindsdb.{model_name}_forecaster\n FROM pg\n (select t, H1 from m4 where train = 1) \n PREDICT H1\n USING engine='merlion'{using}\n \n select p.t, p.H1 real, t.H1, t.H1__upper, t.H1__lower\n from mindsdb.{model_name}_forecaster t\n inner join pg.m4 p on t.t = p.t\n where p.train = 0\n ", "language": "en", "n_whitespaces": 244, "n_words": 40, "vocab_size": 34 }
https://github.com/mindsdb/mindsdb.git
10
load
def load(f, _dict=dict, decoder=None):
    if _ispath(f):
        with io.open(_getpath(f), encoding='utf-8') as ffile:
            return loads(ffile.read(), _dict, decoder)
    elif isinstance(f, list):
        from os import path as op
        from warnings import warn
        if not [path for path in f if op.exists(path)]:
            error_msg = "Load expects a list to contain filenames only."
            error_msg += linesep
            error_msg += ("The list needs to contain the path of at least one "
                          "existing file.")
            raise FNFError(error_msg)
        if decoder is None:
            decoder = TomlDecoder(_dict)
        d = decoder.get_empty_table()
        for l in f:  # noqa: E741
            if op.exists(l):
                d.update(load(l, _dict, decoder))
            else:
                warn("Non-existent filename in list with at least one valid "
                     "filename")
        return d
    else:
        try:
            return loads(f.read(), _dict, decoder)
        except AttributeError:
            raise TypeError("You can only load a file descriptor, filename or "
                            "list")


_groupname_re = re.compile(r'^[A-Za-z0-9_-]+$')
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
17
decoder.py
323
upd; format
13,519
0
421
181
88
63,872
127
transferlearning
34
.venv/lib/python3.8/site-packages/pip/_vendor/toml/decoder.py
Python
29
{ "docstring": "Parses named file or files as toml and returns a dictionary\n\n Args:\n f: Path to the file to open, array of files to read into single dict\n or a file descriptor\n _dict: (optional) Specifies the class of the returned toml dictionary\n decoder: The decoder to use\n\n Returns:\n Parsed toml file represented as a dictionary\n\n Raises:\n TypeError -- When f is invalid type\n TomlDecodeError: Error while decoding toml\n IOError / FileNotFoundError -- When an array with no valid (existing)\n (Python 2 / Python 3) file paths is passed\n ", "language": "en", "n_whitespaces": 174, "n_words": 87, "vocab_size": 62 }
https://github.com/jindongwang/transferlearning.git
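For reference, a minimal sketch of the three input types accepted by `load` in the record above, written against the public `toml` package; the file names are hypothetical:

import toml

config = toml.load("config.toml")                         # single path
merged = toml.load(["defaults.toml", "overrides.toml"])   # list of paths; later files override earlier ones
with open("config.toml", encoding="utf-8") as fh:         # open file descriptor
    same = toml.load(fh)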
4
map_stream_block_value
def map_stream_block_value(stream_block_value, block_def, block_path, **kwargs):
    mapped_value = []
    for child_block in stream_block_value:
        if not should_alter_block(child_block["type"], block_path):
            mapped_value.append(child_block)
        else:
            try:
                child_block_def = block_def.child_blocks[child_block["type"]]
            except KeyError:
                raise InvalidBlockDefError(
                    "No current block def named {}".format(child_block["type"])
                )
            mapped_child_value = map_block_value(
                child_block["value"],
                block_def=child_block_def,
                block_path=block_path[1:],
                **kwargs,
            )
            mapped_value.append({**child_block, "value": mapped_child_value})
    return mapped_value
ec6229c23600ebae8ec0d5db6846b095a9468151
19
utils.py
182
Add StreamField migration helpers from https://github.com/sandilsranasinghe/wagtail-streamfield-migration-toolkit/
17,006
0
262
111
41
80,110
46
wagtail
16
wagtail/blocks/migrations/utils.py
Python
20
{ "docstring": "\n Maps each child block in a StreamBlock value.\n\n Args:\n stream_block_value:\n The value of the StreamBlock, a list of child blocks\n block_def:\n The definition of the StreamBlock\n block_path:\n A '.' separated list of names of the blocks from the current block (not included) to\n the nested block of which the value will be passed to the operation.\n\n Returns\n mapped_value:\n The value of the StreamBlock after mapping all the children.\n ", "language": "en", "n_whitespaces": 164, "n_words": 68, "vocab_size": 41 }
https://github.com/wagtail/wagtail.git
2
test_invite_by_user_ratelimit
def test_invite_by_user_ratelimit(self) -> None:
    other_server = "otherserver"
    other_user = "@otheruser:" + other_server

    # create the room
    user_id = self.register_user("kermit", "test")
    tok = self.login("kermit", "test")
9e06e220649cc0139749c388a894bee0d65d5f4e
9
test_federation.py
72
Add type hints to more tests files. (#12240)
71,877
0
66
101
19
247,727
24
synapse
8
tests/handlers/test_federation.py
Python
27
{ "docstring": "Tests that invites from federation to a particular user are\n actually rate-limited.\n ", "language": "en", "n_whitespaces": 26, "n_words": 12, "vocab_size": 12 }
https://github.com/matrix-org/synapse.git
5
get_authenticated_entity
def get_authenticated_entity(self) -> Tuple[Optional[str], Optional[str]]:
    # Convert the requester into a string that we can log
    if isinstance(self._requester, str):
        return self._requester, None
    elif isinstance(self._requester, Requester):
        requester = self._requester.user.to_string()
        authenticated_entity = self._requester.authenticated_entity

        # If this is a request where the target user doesn't match the user who
        # authenticated (e.g. and admin is puppetting a user) then we return both.
        if requester != authenticated_entity:
            return requester, authenticated_entity

        return requester, None
    elif self._requester is not None:
        # This shouldn't happen, but we log it so we don't lose information
        # and can see that we're doing something wrong.
        return repr(self._requester), None  # type: ignore[unreachable]

    return None, None
3dd175b628bab5638165f20de9eade36a4e88147
13
site.py
156
`synapse.api.auth.Auth` cleanup: make permission-related methods use `Requester` instead of the `UserID` (#13024) Part of #13019 This changes all the permission-related methods to rely on the Requester instead of the UserID. This is a first step towards enabling scoped access tokens at some point, since I expect the Requester to have scope-related informations in it. It also changes methods which figure out the user/device/appservice out of the access token to return a Requester instead of something else. This avoids having store-related objects in the methods signatures.
72,895
0
273
97
70
249,401
105
synapse
13
synapse/http/site.py
Python
24
{ "docstring": "\n Get the \"authenticated\" entity of the request, which might be the user\n performing the action, or a user being puppeted by a server admin.\n\n Returns:\n A tuple:\n The first item is a string representing the user making the request.\n\n The second item is a string or None representing the user who\n authenticated when making this request. See\n Requester.authenticated_entity.\n ", "language": "en", "n_whitespaces": 158, "n_words": 58, "vocab_size": 38 }
https://github.com/matrix-org/synapse.git
1
_forward
def _forward(self, *args, **kwargs):
    raise NotImplementedError(
        f'`_forward` is not implemented in {self.__class__.__name__}')
9c1b26726eebe4a196d213249dc22e8017761fab
11
d2_wrapper.py
41
[Feature] Support training detection models in detectron2 (#8672) * [Feature]Support using mmengine to train detectron2 * update * del unnecessary comments * minor fix * minor fix * Support mask rcnn and retinanet * minor fix * minor fix * minor fix * minor fix * minor fix * minor fix * chinese doc * update * minor fix * minor fix * update docs
70,906
0
37
18
12
245,836
12
mmdetection
7
mmdet/models/detectors/d2_wrapper.py
Python
3
{ "docstring": "Network forward process.\n\n Usually includes backbone, neck and head forward without any post-\n processing.\n ", "language": "en", "n_whitespaces": 35, "n_words": 14, "vocab_size": 13 }
https://github.com/open-mmlab/mmdetection.git
3
us_indices
def us_indices() -> pd.DataFrame:
    url = (
        "https://www.wsj.com/market-data/stocks?id=%7B%22application%22%3A%22WSJ%22%2C%22instruments%22%3A%5B%7B"
        "%22symbol%22%3A%22INDEX%2FUS%2F%2FDJIA%22%2C%22name%22%3A%22DJIA%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F"
        "%2FCOMP%22%2C%22name%22%3A%22Nasdaq%20Composite%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F%2FSPX%22%2C%22name"
        "%22%3A%22S%26P%20500%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F%2FDWCF%22%2C%22name%22%3A%22DJ%20Total%20Stock"
        "%20Market%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F%2FRUT%22%2C%22name%22%3A%22Russell%202000%22%7D%2C%7B"
        "%22symbol%22%3A%22INDEX%2FUS%2F%2FNYA%22%2C%22name%22%3A%22NYSE%20Composite%22%7D%2C%7B%22symbol%22%3A%22INDEX"
        "%2FUS%2F%2FB400%22%2C%22name%22%3A%22Barron%27s%20400%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F%2FVIX%22%2C%22"
        "name%22%3A%22CBOE%20Volatility%22%7D%2C%7B%22symbol%22%3A%22FUTURE%2FUS%2F%2FDJIA%20FUTURES%22%2C%22name%22%3A%"
        "22DJIA%20Futures%22%7D%2C%7B%22symbol%22%3A%22FUTURE%2FUS%2F%2FS%26P%20500%20FUTURES%22%2C%22name%22%3A%22S%26P"
        "%20500%20Futures%22%5D%7D&type=mdc_quotes"
    )
    try:
        response = requests.get(
            url,
            headers={"User-Agent": get_user_agent()},
        )
    except requests.exceptions.RequestException:
        console.print("[red]Could not retrieve data from wsj.[/red]\n")
        return pd.DataFrame()

    data = response.json()

    name, last_price, net_change, percent_change = [], [], [], []
    for entry in data["data"]["instruments"]:
        name.append(entry["formattedName"])
        last_price.append(entry["lastPrice"])
        net_change.append(entry["priceChange"])
        percent_change.append(entry["percentChange"])

    indices = pd.DataFrame(
        {" ": name, "Price": last_price, "Chg": net_change, "%Chg": percent_change}
    )

    return indices


@log_start_end(log=logger)
4304a5c664700cf083f1432fa7523f051492754c
@log_start_end(log=logger)
15
wsj_model.py
304
Enhances error handling in economy menu (#2819) * Lots of bug fixes * Fixed issue
85,525
1
259
162
58
286,052
72
OpenBBTerminal
25
openbb_terminal/economy/wsj_model.py
Python
39
{ "docstring": "Get the top US indices\n\n Returns\n -------\n indices: pd.DataFrame\n Dataframe containing name, price, net change and percent change\n ", "language": "en", "n_whitespaces": 37, "n_words": 18, "vocab_size": 17 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
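A hedged usage sketch for the record above, assuming the module path given there is importable and a network connection is available; on a request failure the function returns an empty DataFrame, as the code shows:

from openbb_terminal.economy import wsj_model

indices = wsj_model.us_indices()
print(indices.head())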
1
async_activate
async def async_activate(self, **kwargs):
    await async_publish(
        self.hass,
        self._config[CONF_COMMAND_TOPIC],
        self._config[CONF_PAYLOAD_ON],
        self._config[CONF_QOS],
        self._config[CONF_RETAIN],
        self._config[CONF_ENCODING],
    )
635d7085cf42dfaf8e60d1e262f096827d56e6e1
10
scene.py
76
Move MQTT config schemas and client to separate modules (#71995) * Move MQTT config schemas and client to separate modules * Update integrations depending on MQTT
100,629
0
100
52
13
301,790
13
core
11
homeassistant/components/mqtt/scene.py
Python
9
{ "docstring": "Activate the scene.\n\n This method is a coroutine.\n ", "language": "en", "n_whitespaces": 22, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
4
state_updates
def state_updates(self):
    warnings.warn(
        "`Model.state_updates` will be removed in a future version. "
        "This property should not be used in TensorFlow 2.0, "
        "as `updates` are applied automatically.",
        stacklevel=2,
    )
    state_updates = []
    for layer in self.layers:
        if getattr(layer, "stateful", False):
            if hasattr(layer, "updates"):
                state_updates += layer.updates
    return state_updates
84afc5193d38057e2e2badf9c889ea87d80d8fbf
13
training.py
96
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,809
0
179
55
41
271,578
48
keras
10
keras/engine/training.py
Python
13
{ "docstring": "Deprecated, do NOT use!\n\n Returns the `updates` from all layers that are stateful.\n\n This is useful for separating training updates and\n state updates, e.g. when we need to update a layer's internal state\n during prediction.\n\n Returns:\n A list of update ops.\n ", "language": "en", "n_whitespaces": 94, "n_words": 41, "vocab_size": 39 }
https://github.com/keras-team/keras.git
1
test_get_image_disabled
async def test_get_image_disabled(hass, hass_ws_client):
    patch_key, entity_id, config_entry = _setup(CONFIG_ANDROIDTV_DEFAULT)
    config_entry.add_to_hass(hass)
    hass.config_entries.async_update_entry(
        config_entry, options={CONF_SCREENCAP: False}
    )

    with patchers.patch_connect(True)[patch_key], patchers.patch_shell(
        SHELL_RESPONSE_OFF
    )[patch_key]:
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()

    with patchers.patch_shell("11")[patch_key]:
        await async_update_entity(hass, entity_id)

    client = await hass_ws_client(hass)

    with patch(
        "androidtv.basetv.basetv_async.BaseTVAsync.adb_screencap", return_value=b"image"
    ) as screen_cap:
        await client.send_json(
            {"id": 5, "type": "media_player_thumbnail", "entity_id": entity_id}
        )
        await client.receive_json()
        assert not screen_cap.called
ea456893f94c7dc88b0cc28f92dadf240fbb1fe7
13
test_media_player.py
253
Review AndroidTV tests for media player entity (#71168)
98,635
0
169
147
44
299,731
55
core
28
tests/components/androidtv/test_media_player.py
Python
22
{ "docstring": "Test taking a screen capture with screencap option disabled.\n\n This is based on `test_get_image` in tests/components/media_player/test_init.py.\n ", "language": "en", "n_whitespaces": 22, "n_words": 16, "vocab_size": 16 }
https://github.com/home-assistant/core.git
1
test___virtual___fail
def test___virtual___fail(sentry_handler):
    with patch("salt.log.handlers.sentry_mod.HAS_RAVEN", False), patch(
        "salt.log.handlers.sentry_mod.__opts__", sentry_handler
    ):
        ret = salt.log.handlers.sentry_mod.__virtual__()
        assert ret[0] is False
        assert ret[1] == "Cannot find 'raven' python library"

    with patch("salt.log.handlers.sentry_mod.HAS_RAVEN", True), patch(
        "salt.log.handlers.sentry_mod.__opts__", {}
    ):
        ret = salt.log.handlers.sentry_mod.__virtual__()
        assert ret[0] is False
        assert ret[1] == "'sentry_handler' config is empty or not defined"
eb8bd12761fdad5abc682bf29c131231736f4616
13
test_sentry_mod.py
157
Test that sentry logger does not load grains/modules Reinforce sentry log handler not wasting time reloading grains/execution modules with unit tests.
54,121
0
103
91
31
215,721
48
salt
9
tests/pytests/unit/log/handlers/test_sentry_mod.py
Python
13
{ "docstring": "\n Test `__virtual__()` returns a reason for not loading.\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
https://github.com/saltstack/salt.git
1
install_multiple_bleak_catcher
def install_multiple_bleak_catcher() -> None:
    bleak.BleakScanner = HaBleakScannerWrapper  # type: ignore[misc, assignment]
    bleak.BleakClient = HaBleakClientWrapper  # type: ignore[misc, assignment]
1b144c0e4dd683e3b47668a89da5eb6da4ae5e08
7
usage.py
33
Update to bleak 0.18.0 (#79008)
86,936
0
29
17
13
287,748
18
core
6
homeassistant/components/bluetooth/usage.py
Python
4
{ "docstring": "Wrap the bleak classes to return the shared instance if multiple instances are detected.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
https://github.com/home-assistant/core.git
3
configure
def configure(self):  # type: () -> None
    # a target uses a single python version, but a controller may include additional versions for targets running on the controller
    python_versions = [self.python.version] + [target.python.version for target in self.targets if isinstance(target, ControllerConfig)]
    python_versions = sorted_versions(list(set(python_versions)))

    core_ci = self.wait_for_instance()
    pwd = self.wait_until_ready()

    display.info(f'Remote working directory: {pwd}', verbosity=1)

    bootstrapper = BootstrapRemote(
        controller=self.controller,
        platform=self.config.platform,
        platform_version=self.config.version,
        python_versions=python_versions,
        ssh_key=core_ci.ssh_key,
    )

    setup_sh = bootstrapper.get_script()
    shell = setup_sh.splitlines()[0][2:]

    ssh = self.get_origin_controller_connection()
    ssh.run([shell], data=setup_sh, capture=False)
5c2d830dea986a8c7bd8c286b86bdce326cd7eb1
12
host_profiles.py
245
ansible-test - Fix subprocess management. (#77641) * Run code-smell sanity tests in UTF-8 Mode. * Update subprocess use in sanity test programs. * Use raw_command instead of run_command with always=True set. * Add more capture=True usage. * Don't expose stdin to subprocesses. * Capture more output. Warn on retry. * Add more captures. * Capture coverage cli output. * Capture windows and network host checks. * Be explicit about interactive usage. * Use a shell for non-captured, non-interactive subprocesses. * Add integration test to assert no TTY. * Add unit test to assert no TTY. * Require blocking stdin/stdout/stderr. * Use subprocess.run in ansible-core sanity tests. * Remove unused arg. * Be explicit with subprocess.run check=False. * Add changelog. * Use a Python subprocess instead of a shell. * Use InternalError instead of Exception. * Require capture argument. * Check for invalid raw_command arguments. * Removed pointless communicate=True usage. * Relocate stdout w/o capture check. * Use threads instead of a subprocess for IO.
78,777
0
222
154
61
267,172
75
ansible
35
test/lib/ansible_test/_internal/host_profiles.py
Python
17
{ "docstring": "Perform in-band configuration. Executed before delegation for the controller and after delegation for targets.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 12 }
https://github.com/ansible/ansible.git
1
test_align_labels
def test_align_labels():
    fig, (ax3, ax1, ax2) = plt.subplots(3, 1, layout="constrained",
                                        figsize=(6.4, 8),
                                        gridspec_kw={"height_ratios": (1, 1, 0.7)})
    ax1.set_ylim(0, 1)
    ax1.set_ylabel("Label")
    ax2.set_ylim(-1.5, 1.5)
    ax2.set_ylabel("Label")
    ax3.set_ylim(0, 1)
    ax3.set_ylabel("Label")

    fig.align_ylabels(axs=(ax3, ax1, ax2))
    fig.draw_without_rendering()

    after_align = [ax1.yaxis.label.get_window_extent(),
                   ax2.yaxis.label.get_window_extent(),
                   ax3.yaxis.label.get_window_extent()]
    # ensure labels are approximately aligned
    np.testing.assert_allclose([after_align[0].x0, after_align[2].x0],
                               after_align[1].x0, rtol=0, atol=1e-05)
    # ensure labels do not go off the edge
    assert after_align[0].x0 >= 1
ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22
12
test_constrainedlayout.py
294
ENH: implement and use base layout_engine for more flexible layout.
22,617
0
317
200
51
107,162
58
matplotlib
25
lib/matplotlib/tests/test_constrainedlayout.py
Python
19
{ "docstring": "\n Tests for a bug in which constrained layout and align_ylabels on\n three unevenly sized subplots, one of whose y tick labels include\n negative numbers, drives the non-negative subplots' y labels off\n the edge of the plot\n ", "language": "en", "n_whitespaces": 52, "n_words": 36, "vocab_size": 31 }
https://github.com/matplotlib/matplotlib.git
1
test_broken_document_link
def test_broken_document_link(self):
    converter = ContentstateConverter(features=["document-link"])
    result = json.loads(
        converter.from_database_format(
        )
    )
    self.assertContentStateEqual(
        result,
        {
            "entityMap": {
                "0": {
                    "mutability": "MUTABLE",
                    "type": "DOCUMENT",
                    "data": {"id": 9999},
                }
            },
            "blocks": [
                {
                    "inlineStyleRanges": [],
                    "text": "a document link",
                    "depth": 0,
                    "type": "unstyled",
                    "key": "00000",
                    "entityRanges": [{"offset": 2, "length": 8, "key": 0}],
                },
            ],
        },
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
16
test_contentstate.py
203
Reformat with black
15,764
0
501
110
43
71,901
53
wagtail
10
wagtail/admin/tests/test_contentstate.py
Python
31
{ "docstring": "\n <p>a <a linktype=\"document\" id=\"9999\">document</a> link</p>\n ", "language": "en", "n_whitespaces": 28, "n_words": 5, "vocab_size": 5 }
https://github.com/wagtail/wagtail.git
2
finalize_variable_values
def finalize_variable_values(self, var_list):
    if self.use_ema:
        # If the optimizer uses EMA, then when finalizing, we replace the model
        # variable value with its moving average stored inside optimizer.
        self._overwrite_model_variables_with_average_value(var_list)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
9
optimizer.py
35
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,369
0
76
19
27
275,284
29
keras
5
keras/optimizers/optimizer_experimental/optimizer.py
Python
3
{ "docstring": "Set the final value of model's trainable variables.\n\n Sometimes there are some extra steps before ending the variable updates,\n such as overriding the model variables with its average value.\n\n Args:\n var_list: list of model variables.\n ", "language": "en", "n_whitespaces": 72, "n_words": 35, "vocab_size": 30 }
https://github.com/keras-team/keras.git
2
_get_free_vram
def _get_free_vram(self) -> List[float]:
    vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).free / (1024 * 1024)
            for handle in self._handles]
    self._log("debug", f"GPU VRAM free: {vram}")
    return vram
bdbbad4d310fb606b6f412aa81e9f57ccd994e97
11
nvidia.py
79
Refactor lib.gpu_stats (#1218) * inital gpu_stats refactor * Add dummy CPU Backend * Update Sphinx documentation
20,039
0
65
46
21
100,575
22
faceswap
11
lib/gpu_stats/nvidia.py
Python
14
{ "docstring": " Obtain the amount of VRAM that is available, in Megabytes, for each connected Nvidia\n GPU.\n\n Returns\n -------\n list\n List of `float`s containing the amount of VRAM available, in Megabytes, for each\n connected GPU as corresponding to the values in :attr:`_handles\n ", "language": "en", "n_whitespaces": 100, "n_words": 40, "vocab_size": 27 }
https://github.com/deepfakes/faceswap.git
1
test_from_is_negative
def test_from_is_negative(self) -> None:
    channel = self.make_request(
        "GET",
        self.url + "?from=-5",
        access_token=self.admin_user_tok,
    )

    self.assertEqual(400, channel.code, msg=channel.json_body)
    self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
2281427175e4c93a30c39607fb4ac23c2a1f399f
10
test_event_reports.py
97
Use literals in place of `HTTPStatus` constants in tests (#13488) * Use literals in place of `HTTPStatus` constants in tests * newsfile * code style * code style
72,812
0
86
60
18
249,309
18
synapse
13
tests/rest/admin/test_event_reports.py
Python
11
{ "docstring": "\n Testing that a negative from parameter returns a 400\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 8 }
https://github.com/matrix-org/synapse.git
3
execute
def execute():
    if not frappe.db.a_row_exists("Leave Ledger Entry"):
        return
    leave_application_list = get_denied_leave_application_list()
    if leave_application_list:
        delete_denied_leaves_from_leave_ledger_entry(leave_application_list)
494bd9ef78313436f0424b918f200dab8fc7c20b
9
remove_denied_leaves_from_leave_ledger.py
55
style: format code with black
14,264
0
8
29
13
66,639
14
erpnext
7
erpnext/patches/v12_0/remove_denied_leaves_from_leave_ledger.py
Python
6
{ "docstring": "Delete leave ledger entry created\n\tvia leave applications with status != Approved", "language": "en", "n_whitespaces": 10, "n_words": 12, "vocab_size": 11 }
https://github.com/frappe/erpnext.git
1
clear
def clear(self) -> None:
    assert self._current_run_info is not None
    assert self._current_run_info.end is not None
    self._current_run_info = None
f073f170402bd02e6d6c7597ce5d842a016e97be
8
run_history.py
47
Refactor tracking of the recorder run history (#70456) Co-authored-by: Erik Montnemery <[email protected]>
98,086
0
45
29
11
299,149
17
core
4
homeassistant/components/recorder/run_history.py
Python
8
{ "docstring": "Clear the current run after ending it.\n\n Must run in the recorder thread.\n ", "language": "en", "n_whitespaces": 27, "n_words": 13, "vocab_size": 11 }
https://github.com/home-assistant/core.git
1
test_github_tag_workflow_completed
def test_github_tag_workflow_completed(self) -> None:
    expected_topic = "circleci-webhook-test"
    expected_message = .strip()
    self.check_webhook("github_tag_workflow_completed", expected_topic, expected_message)
5346de91647f29ce9e24b520c8fb85a7fa0f72d5
9
tests.py
49
circleci: Rewrite integration to support the new webhook format. CircleCI has updated its webhook format[1] for CircleCI Cloud, Server version 3.x and 4.x. This commit rewrites the CircleCI integration to parse the new webhook structure. The tests have also been rewritten for the new format. With this commit, we support webhooks from projects that use GitHub, BitBucket and GitLab as VCS providers. The CircleCI integration doc has been updated to mention the same. The doc has also been updated with the latest instructions for configuring a webhook on the CircleCI interface, and the new output screenshots. References: [1]: https://circleci.com/docs/webhooks
17,961
0
33
27
12
85,163
13
zulip
6
zerver/webhooks/circleci/tests.py
Python
8
{ "docstring": "\nWorkflow [`sample`](https://app.circleci.com/pipelines/github/prah23/circleci-webhook-test/20/workflows/045c6271-78e2-4802-8a62-f4fa6d25d0c9) within Pipeline #20 has succeeded.\n\nTriggered on the latest tag on [0e6e66c14e6](https://github.com/prah23/circleci-webhook-test/commit/0e6e66c14e61fbcd95db716b0f30d67dbcce7814).\n", "language": "en", "n_whitespaces": 12, "n_words": 14, "vocab_size": 13 }
https://github.com/zulip/zulip.git
3
rebuild_cablepaths
def rebuild_cablepaths(instance, raw=False, **kwargs):
    if not raw:
        peer_termination = instance.get_peer_termination()
        # if peer_termination:
        #     rebuild_paths(peer_termination)
5667a9c456e0514a2d00d6475e7013748b4a7c1e
10
signals.py
43
Refactor CablePath.from_origin()
77,847
0
46
31
13
264,829
15
netbox
6
netbox/circuits/signals.py
Python
5
{ "docstring": "\n Rebuild any CablePaths which traverse the peer CircuitTermination.\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
https://github.com/netbox-community/netbox.git
3
tick_params
def tick_params(self, axis='both', **kwargs):
    _api.check_in_list(['x', 'y', 'z', 'both'], axis=axis)
    if axis in ['x', 'y', 'both']:
        super().tick_params(axis, **kwargs)
    if axis in ['z', 'both']:
        zkw = dict(kwargs)
        zkw.pop('top', None)
        zkw.pop('bottom', None)
        zkw.pop('labeltop', None)
        zkw.pop('labelbottom', None)
        self.zaxis.set_tick_params(**zkw)

# data limits, ticks, tick labels, and formatting
3b3fb2afbe3264ea3fa39e7e6e547410b402bfa0
11
axes3d.py
194
Tweak Axes3D docstrings that refer to 2D plotting methods. ... and minor edits to 2D Axes.margins docs as well.
22,810
0
150
109
34
107,554
42
matplotlib
12
lib/mpl_toolkits/mplot3d/axes3d.py
Python
11
{ "docstring": "\n Convenience method for changing the appearance of ticks and\n tick labels.\n\n See `.Axes.tick_params` for full documentation. Because this function\n applies to 3D Axes, *axis* can also be set to 'z', and setting *axis*\n to 'both' autoscales all three axes.\n\n Also, because of how Axes3D objects are drawn very differently\n from regular 2D axes, some of these settings may have\n ambiguous meaning. For simplicity, the 'z' axis will\n accept settings as if it was like the 'y' axis.\n\n .. note::\n Axes3D currently ignores some of these settings.\n ", "language": "en", "n_whitespaces": 176, "n_words": 86, "vocab_size": 72 }
https://github.com/matplotlib/matplotlib.git
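A minimal sketch of calling `tick_params` on a 3D Axes, exercising the extra 'z' option described in the record above:

import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.plot([0, 1], [0, 1], [0, 1])
# 'z' (or 'both') is accepted here in addition to the usual 'x'/'y'.
ax.tick_params(axis="z", labelsize=8, pad=4)
plt.show()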
1
get_fullname
def get_fullname(self, filesafe=False):
    return _get_name_and_version(self['Name'], self['Version'], filesafe)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
9
metadata.py
42
upd; format
12,863
0
21
25
7
62,078
7
transferlearning
4
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/metadata.py
Python
2
{ "docstring": "Return the distribution name with version.\n\n If filesafe is true, return a filename-escaped form.", "language": "en", "n_whitespaces": 20, "n_words": 14, "vocab_size": 14 }
https://github.com/jindongwang/transferlearning.git
10
eliminate_word
def eliminate_word(self, gen, by=None, _all=False, inverse=True):
    if by is None:
        by = self.group.identity
    if self.is_independent(gen) or gen == by:
        return self
    if gen == self:
        return by
    if gen**-1 == by:
        _all = False
    word = self
    l = len(gen)

    try:
        i = word.subword_index(gen)
        k = 1
    except ValueError:
        if not inverse:
            return word
        try:
            i = word.subword_index(gen**-1)
            k = -1
        except ValueError:
            return word

    word = word.subword(0, i)*by**k*word.subword(i+l, len(word)).eliminate_word(gen, by)
    if _all:
        return word.eliminate_word(gen, by, _all=True, inverse=inverse)
    else:
        return word
498015021131af4dbb07eb110e5badaba8250c7b
15
free_groups.py
276
Updated import locations
47,567
0
346
176
47
196,067
81
sympy
17
sympy/combinatorics/free_groups.py
Python
27
{ "docstring": "\n For an associative word `self`, a subword `gen`, and an associative\n word `by` (identity by default), return the associative word obtained by\n replacing each occurrence of `gen` in `self` by `by`. If `_all = True`,\n the occurrences of `gen` that may appear after the first substitution will\n also be replaced and so on until no occurrences are found. This might not\n always terminate (e.g. `(x).eliminate_word(x, x**2, _all=True)`).\n\n Examples\n ========\n\n >>> from sympy.combinatorics import free_group\n >>> f, x, y = free_group(\"x y\")\n >>> w = x**5*y*x**2*y**-4*x\n >>> w.eliminate_word( x, x**2 )\n x**10*y*x**4*y**-4*x**2\n >>> w.eliminate_word( x, y**-1 )\n y**-11\n >>> w.eliminate_word(x**5)\n y*x**2*y**-4*x\n >>> w.eliminate_word(x*y, y)\n x**4*y*x**2*y**-4*x\n\n See Also\n ========\n substituted_word\n\n ", "language": "en", "n_whitespaces": 270, "n_words": 108, "vocab_size": 82 }
https://github.com/sympy/sympy.git
1
__copy__
def __copy__(self):
    return PandasOnDaskDataframePartition(
        self._data,
        length=self._length_cache,
        width=self._width_cache,
        ip=self._ip_cache,
        call_queue=self.call_queue,
    )
4ec7f6347903f9133c65ebc5b6e0e15553b98577
9
partition.py
57
REFACTOR-#4530: Standardize access to physical data in partitions (#4563) Signed-off-by: Alexey Prutskov <[email protected]>
35,665
0
86
38
10
153,859
10
modin
11
modin/core/execution/dask/implementations/pandas_on_dask/partitioning/partition.py
Python
8
{ "docstring": "\n Create a copy of this partition.\n\n Returns\n -------\n PandasOnDaskDataframePartition\n A copy of this partition.\n ", "language": "en", "n_whitespaces": 61, "n_words": 14, "vocab_size": 10 }
https://github.com/modin-project/modin.git
1
Kumaraswamy
def Kumaraswamy(name, a, b): r return rv(name, KumaraswamyDistribution, (a, b)) #------------------------------------------------------------------------------- # Laplace distribution ---------------------------------------------------------
9ad8ab9fe58051cf11626ba6654852fcfec60147
8
crv_types.py
36
Documentation cleanup 5
48,108
0
18
24
15
196,690
15
sympy
6
sympy/stats/crv_types.py
Python
53
{ "docstring": "\n Create a Continuous Random Variable with a Kumaraswamy distribution.\n\n Explanation\n ===========\n\n The density of the Kumaraswamy distribution is given by\n\n .. math::\n f(x) := a b x^{a-1} (1-x^a)^{b-1}\n\n with :math:`x \\in [0,1]`.\n\n Parameters\n ==========\n\n a : Real number, `a > 0`, a shape\n b : Real number, `b > 0`, a shape\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import Kumaraswamy, density, cdf\n >>> from sympy import Symbol, pprint\n\n >>> a = Symbol(\"a\", positive=True)\n >>> b = Symbol(\"b\", positive=True)\n >>> z = Symbol(\"z\")\n\n >>> X = Kumaraswamy(\"x\", a, b)\n\n >>> D = density(X)(z)\n >>> pprint(D, use_unicode=False)\n b - 1\n a - 1 / a\\\n a*b*z *\\1 - z /\n\n >>> cdf(X)(z)\n Piecewise((0, z < 0), (1 - (1 - z**a)**b, z <= 1), (1, True))\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Kumaraswamy_distribution\n\n ", "language": "en", "n_whitespaces": 267, "n_words": 131, "vocab_size": 86 }
https://github.com/sympy/sympy.git
1
num_healthy_workers
def num_healthy_workers(self) -> int:
    return (
        int(bool(self._local_worker))
        + self.__worker_manager.num_healthy_actors()
    )
e707ce4fb3717e3c05118c57f503dfbd03552ca9
12
worker_set.py
48
[RLlib] Refactor `WorkerSet` on top of `FaultTolerantActorManager`. (#29938) Signed-off-by: Jun Gong <[email protected]>
30,792
0
42
28
10
136,001
10
ray
7
rllib/evaluation/worker_set.py
Python
5
{ "docstring": "Returns the number of healthy workers, including local and remote workers.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/ray-project/ray.git