Dataset columns (name: dtype, observed range):
complexity: int64, 1-139
fun_name: string, length 1-80
code: string, length 101-62.2k
commit_id: string, length 40-40
ast_errors: string, length 0-3.11k
ast_levels: int64, 6-36
file_name: string, length 5-79
n_ast_nodes: int64, 17-19.2k
commit_message: string, length 3-15.3k
d_id: int64, 12-121k
n_ast_errors: int64, 0-9
n_whitespaces: int64, 4-10.8k
token_counts: int64, 5-3.06k
vocab_size: int64, 4-1.11k
id: int64, 20-338k
n_words: int64, 4-4.82k
repo: string, length 3-22
n_identifiers: int64, 2-176
path: string, length 7-134
language: stringclasses, 1 value
nloc: int64, 1-413
documentation: dict
url: string, length 31-59
1
__reduce__
def __reduce__(self):
    (serialized, _) = self._serialization_helper()
    # There is no outer object ref when the actor handle is
    # deserialized out-of-band using pickle.
    return ActorHandle._deserialization_helper, (serialized, None)
f084546d41f0533c1e9e96a7249532d0eb4ff47d
8
actor.py
46
Fix out-of-band deserialization of actor handle (#27700) When we deserialize actor handle via pickle, we will register it with an outer object ref equaling to itself which is wrong. For out-of-band deserialization, there should be no outer object ref. Signed-off-by: Jiajun Yao <[email protected]>
28,292
0
62
27
24
126,889
27
ray
7
python/ray/actor.py
Python
3
{ "docstring": "This code path is used by pickling but not by Ray forking.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
https://github.com/ray-project/ray.git
2
resample
def resample(self, memo):
    if self.label in memo:
        self._sampled = memo[self.label]
    else:
        self._sampled = random.choice(self.op_names)
    return {self.label: self._sampled}
14d2966b9e91ae16dcc39de8f41017a75cec8ff9
12
sampling.py
79
Valuechoice oneshot lightning (#4602)
24,618
0
67
49
15
112,187
17
nni
8
nni/retiarii/oneshot/pytorch/supermodule/sampling.py
Python
6
{ "docstring": "Random choose one path if label is not found in memo.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/microsoft/nni.git
3
__getitem__
def __getitem__(self, key):
    use_func = key.startswith(self.prefix)
    if use_func:
        key = key[len(self.prefix) :]
    value = super().__getitem__(key)
    if use_func:
        return self.func(value)
    return value
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
datastructures.py
91
Refs #33476 -- Reformatted code with Black.
51,581
0
85
55
15
206,594
21
django
10
django/utils/datastructures.py
Python
8
{ "docstring": "\n Retrieve the real value after stripping the prefix string (if\n present). If the prefix is present, pass the value through self.func\n before returning, otherwise return the raw value.\n ", "language": "en", "n_whitespaces": 57, "n_words": 28, "vocab_size": 22 }
https://github.com/django/django.git
4
_read_until
def _read_until(s, start, term):
    if start == len(s):
        return "", start + 1
    for i in range(start, len(s)):
        if s[i] in term:
            return s[start:i], i
    return s[start : i + 1], i + 1
b3587b52b25077f68116b9852b041d33e7fc6601
12
cookies.py
104
make it black!
73,717
0
71
67
23
251,401
34
mitmproxy
7
mitmproxy/net/http/cookies.py
Python
7
{ "docstring": "\n Read until one of the characters in term is reached.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
https://github.com/mitmproxy/mitmproxy.git
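A minimal sketch of how the helper above behaves, assuming _read_until is in scope (it is a private utility in mitmproxy/net/http/cookies.py); the input string here is purely illustrative:

s = "foo=bar; baz"
value, pos = _read_until(s, 4, ";")
print(value, pos)  # 'bar' 7  -- stops just before the ';' terminator
value, pos = _read_until(s, 9, ";")
print(value, pos)  # 'baz' 12 -- no terminator found, consumes to the end of the string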
3
save_preview
def save_preview(self, *args) -> None:
    if not self._save_var.get() and not isinstance(args[0], tk.Event):
        return
    root_path = os.path.join(os.path.realpath(os.path.dirname(sys.argv[0])))
    now = datetime.now().strftime("%Y-%m-%d_%H.%M.%S")
    filename = os.path.join(root_path, f"preview_{now}.png")
    cv2.imwrite(filename, self.source)
    print("")
    logger.info("Saved preview to: '%s'", filename)
    self._save_var.set(False)
7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5
14
preview_tk.py
200
Training - Use custom preview pop-out
20,967
0
106
119
29
101,557
32
faceswap
27
lib/training/preview_tk.py
Python
18
{ "docstring": " Save out the full size preview to the faceswap folder on a save button press\n\n Parameters\n ----------\n args: tuple\n Tuple containing either the key press event (Ctrl+s shortcut) or the tk variable\n arguments (save button press)\n ", "language": "en", "n_whitespaces": 87, "n_words": 36, "vocab_size": 31 }
https://github.com/deepfakes/faceswap.git
1
subgridspec
def subgridspec(self, nrows, ncols, **kwargs):
    return GridSpecFromSubplotSpec(nrows, ncols, self, **kwargs)
9b6abd0b4933811e0a45c2535ab8fd107db65dd9
8
gridspec.py
37
DOC: improve grammar and consistency
24,026
0
24
25
9
110,284
10
matplotlib
6
lib/matplotlib/gridspec.py
Python
2
{ "docstring": "\n Create a GridSpec within this subplot.\n\n The created `.GridSpecFromSubplotSpec` will have this `SubplotSpec` as\n a parent.\n\n Parameters\n ----------\n nrows : int\n Number of rows in grid.\n\n ncols : int\n Number of columns in grid.\n\n Returns\n -------\n `.GridSpecFromSubplotSpec`\n\n Other Parameters\n ----------------\n **kwargs\n All other parameters are passed to `.GridSpecFromSubplotSpec`.\n\n See Also\n --------\n matplotlib.pyplot.subplots\n\n Examples\n --------\n Adding three subplots in the space occupied by a single subplot::\n\n fig = plt.figure()\n gs0 = fig.add_gridspec(3, 1)\n ax1 = fig.add_subplot(gs0[0])\n ax2 = fig.add_subplot(gs0[1])\n gssub = gs0[2].subgridspec(1, 3)\n for i in range(3):\n fig.add_subplot(gssub[0, i])\n ", "language": "en", "n_whitespaces": 343, "n_words": 88, "vocab_size": 70 }
https://github.com/matplotlib/matplotlib.git
2
random_brightness
def random_brightness(x, brightness_range, scale=True):
    if len(brightness_range) != 2:
        raise ValueError(
            "`brightness_range should be tuple or list of two floats. "
            "Received: %s" % (brightness_range,)
        )

    u = np.random.uniform(brightness_range[0], brightness_range[1])
    return apply_brightness_shift(x, u, scale)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
12
image.py
92
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,454
0
81
58
33
275,725
33
keras
11
keras/preprocessing/image.py
Python
8
{ "docstring": "Performs a random brightness shift.\n\n Deprecated: `tf.keras.preprocessing.image.random_brightness` does not operate\n on tensors and is not recommended for new code. Prefer\n `tf.keras.layers.RandomBrightness` which provides equivalent functionality as\n a preprocessing layer. For more information, see the tutorial for\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n Args:\n x: Input tensor. Must be 3D.\n brightness_range: Tuple of floats; brightness range.\n scale: Whether to rescale the image such that minimum and maximum values\n are 0 and 255 respectively. Default: True.\n\n Returns:\n Numpy image tensor.\n\n Raises:\n ValueError if `brightness_range` isn't a tuple.\n ", "language": "en", "n_whitespaces": 172, "n_words": 90, "vocab_size": 77 }
https://github.com/keras-team/keras.git
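A minimal usage sketch, assuming TensorFlow/Keras is installed; the public entry point is tf.keras.preprocessing.image.random_brightness, and the image array here is illustrative only:

import numpy as np
import tensorflow as tf

# A random 3D "image" tensor (height, width, channels), as the docstring above requires.
img = np.random.randint(0, 256, size=(32, 32, 3)).astype("float32")

# Shift brightness by a factor drawn uniformly from [0.5, 1.5].
shifted = tf.keras.preprocessing.image.random_brightness(img, (0.5, 1.5))
print(shifted.shape)  # (32, 32, 3)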
1
get_job_id
def get_job_id(self) -> str:
    job_id = self.worker.current_job_id
    assert not job_id.is_nil()
    return job_id.hex()
90cea203befa8f2e86e9c1c18bb3972296358e7b
8
runtime_context.py
49
Ray 2.0 API deprecation (#26116) Ray 2.0 API deprecation for: ray.remote(): placement_group ray.remote(): placement_group_bundle_index ray.remote(): placement_group_capture_child_tasks ray.get_dashboard_url() ray.get_resource_ids() ray.disconnect() ray.connect() ray.util.ActorGroup ray.util.ActorPool Add get_xx_id() to return hex (rather than object), and then deprecate the xx_id() (which returns Cython object): the xx here can be node, task etc. ray start: --plasma-store-socket-name ray start: --raylet-socket-name
27,932
0
40
28
12
125,635
12
ray
8
python/ray/runtime_context.py
Python
13
{ "docstring": "Get current job ID for this worker or driver.\n\n Job ID is the id of your Ray drivers that create tasks or actors.\n\n Returns:\n If called by a driver, this returns the job ID. If called in\n a task, return the job ID of the associated driver. The\n job ID will be hex format.\n ", "language": "en", "n_whitespaces": 116, "n_words": 54, "vocab_size": 38 }
https://github.com/ray-project/ray.git
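A hedged sketch of calling this through the runtime context, assuming a Ray version (2.0+) that includes the get_xx_id() accessors mentioned in the commit message:

import ray

ray.init()
# Per the docstring above, this returns the hex-encoded job ID of the current driver.
print(ray.get_runtime_context().get_job_id())
ray.shutdown()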
1
test_select_related
def test_select_related(self, expected_queries=4):
    request = self.get_request()
    self._create_importantpages_object()

    # force site query beforehand
    Site.find_for_request(request)

    # fetch settings and access foreign keys
    with self.assertNumQueries(expected_queries):
        settings = ImportantPages.for_request(request)
        settings.sign_up_page
        settings.general_terms_page
        settings.privacy_policy_page
d10f15e55806c6944827d801cd9c2d53f5da4186
10
test_model.py
94
Reformat with black
16,035
0
121
53
25
73,513
28
wagtail
15
wagtail/contrib/settings/tests/test_model.py
Python
9
{ "docstring": "The `select_related` attribute on setting models is `None` by default, so fetching foreign keys values requires additional queries", "language": "en", "n_whitespaces": 17, "n_words": 18, "vocab_size": 18 }
https://github.com/wagtail/wagtail.git
4
_check_input_folder
def _check_input_folder(self) -> bool:
    if not os.path.exists(self._args.input_dir):
        logger.error("Input location %s not found.", self._args.input_dir)
        sys.exit(1)
    if (os.path.isfile(self._args.input_dir) and
            os.path.splitext(self._args.input_dir)[1].lower() in _video_extensions):
        logger.info("Input Video: %s", self._args.input_dir)
        retval = True
    else:
        logger.info("Input Directory: %s", self._args.input_dir)
        retval = False
    return retval
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
15
fsmedia.py
186
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
20,808
0
153
113
28
101,393
37
faceswap
18
scripts/fsmedia.py
Python
19
{ "docstring": " Check whether the input is a folder or video.\n\n Returns\n -------\n bool\n ``True`` if the input is a video otherwise ``False``\n ", "language": "en", "n_whitespaces": 61, "n_words": 21, "vocab_size": 17 }
https://github.com/deepfakes/faceswap.git
1
test_empty
def test_empty(self):
    response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
    self.assertNotContains(response, "release_date__year=")
    self.assertNotContains(response, "release_date__month=")
    self.assertNotContains(response, "release_date__day=")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
tests.py
78
Refs #33476 -- Reformatted code with Black.
52,156
0
46
43
9
207,903
11
django
7
tests/admin_views/tests.py
Python
5
{ "docstring": "\n No date hierarchy links display with empty changelist.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/django/django.git
1
test_async_generate_path
async def test_async_generate_path(hass):
    path = webhook.async_generate_path("some_id")
    assert path == "/api/webhook/some_id"
7872f87dd74fb4e2b610bb589facc0f763f153ae
9
test_init.py
37
Allow registering a webhook as local only (#63516)
107,567
0
19
18
9
308,838
10
core
5
tests/components/webhook/test_init.py
Python
3
{ "docstring": "Test generating just the path component of the url correctly.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
https://github.com/home-assistant/core.git
1
test_not_logged_in_redirect_default_settings
def test_not_logged_in_redirect_default_settings(self):
    # Get dashboard with default LOGIN_URL setting
    with self.settings(LOGIN_URL="django.contrib.auth.views.login"):
        response = self.client.get(reverse("wagtailadmin_home"))

    # Check that the user was redirected to the login page and that next was set correctly
    # Note: The user will be redirected to 'django.contrib.auth.views.login' but
    # this must be the same URL as 'wagtailadmin_login'
    self.assertEqual(response.status_code, 302)
    self.assertRedirects(
        response,
        reverse("wagtailadmin_login") + "?next=" + reverse("wagtailadmin_home"),
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
13
test_account_management.py
110
Reformat with black
15,743
0
156
58
47
71,787
60
wagtail
11
wagtail/admin/tests/test_account_management.py
Python
8
{ "docstring": "\n This does the same as the above test but checks that it\n redirects to the correct place when the user has not set\n the LOGIN_URL setting correctly\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 23 }
https://github.com/wagtail/wagtail.git
8
send_beacon
def send_beacon():
    from sentry import options
    from sentry.models import Broadcast, Organization, Project, Team, User

    install_id = get_install_id()
    if should_skip_beacon(install_id):
        return

    end = timezone.now()
    events_24h = tsdb.get_sums(
        model=tsdb.models.internal, keys=["events.total"], start=end - timedelta(hours=24), end=end
    )["events.total"]

    # we need this to be explicitly configured and it defaults to None,
    # which is the same as False
    anonymous = options.get("beacon.anonymous") is not False

    payload = {
        "install_id": install_id,
        "version": sentry.get_version(),
        "docker": sentry.is_docker(),
        "python_version": platform.python_version(),
        "data": {
            "users": User.objects.count(),
            "projects": Project.objects.count(),
            "teams": Team.objects.count(),
            "organizations": Organization.objects.count(),
            "events.24h": events_24h,
        },
        "packages": get_all_package_versions(),
        "anonymous": anonymous,
    }

    if not anonymous:
        payload["admin_email"] = options.get("system.admin-email")

    # TODO(dcramer): relay the response 'notices' as admin broadcasts
    try:
        request = safe_urlopen(BEACON_URL, json=payload, timeout=5)
        response = safe_urlread(request)
    except Exception:
        logger.warning("beacon.failed", exc_info=True, extra={"install_id": install_id})
        return
    else:
        logger.info("beacon.sent", extra={"install_id": install_id})

    data = json.loads(response)

    if "version" in data:
        options.set("sentry:latest_version", data["version"]["stable"])

    if "notices" in data:
        upstream_ids = set()
        for notice in data["notices"]:
            upstream_ids.add(notice["id"])
            defaults = {
                "title": notice["title"],
                "link": notice.get("link"),
                "message": notice["message"],
            }
            # XXX(dcramer): we're missing a unique constraint on upstream_id
            # so we're using a lock to work around that. In the future we'd like
            # to have a data migration to clean up the duplicates and add the constraint
            lock = locks.get("broadcasts:{}".format(notice["id"]), duration=60, name="broadcasts")
            with lock.acquire():
                affected = Broadcast.objects.filter(upstream_id=notice["id"]).update(**defaults)
                if not affected:
                    Broadcast.objects.create(upstream_id=notice["id"], **defaults)

        Broadcast.objects.filter(upstream_id__isnull=False).exclude(
            upstream_id__in=upstream_ids
        ).update(is_active=False)


@instrumented_task(name="sentry.tasks.send_beacon_metric", queue="update")
1c949dfcf87384cdf976710ebf8fe3c536e26807
@instrumented_task(name="sentry.tasks.send_beacon_metric", queue="update")
19
beacon.py
772
feat(locks): Add locks count metrics tagged with name (#36002)
18,853
1
670
434
159
92,011
213
sentry
71
src/sentry/tasks/beacon.py
Python
56
{ "docstring": "\n Send a Beacon to a remote server operated by the Sentry team.\n\n See the documentation for more details.\n ", "language": "en", "n_whitespaces": 28, "n_words": 18, "vocab_size": 16 }
https://github.com/getsentry/sentry.git
1
add_to_apply_calls
def add_to_apply_calls(self, func, *args, length=None, width=None, **kwargs):
    return PandasOnUnidistDataframePartition(
        self._data,
        call_queue=self.call_queue + [[func, args, kwargs]],
        length=length,
        width=width,
    )
193505fdf0c984743397ba3df56262f30aee13a8
11
partition.py
76
FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059) Signed-off-by: Igoshev, Iaroslav <[email protected]>
36,279
0
83
54
18
155,188
18
modin
10
modin/core/execution/unidist/implementations/pandas_on_unidist/partitioning/partition.py
Python
7
{ "docstring": "\n Add a function to the call queue.\n\n Parameters\n ----------\n func : callable or unidist.ObjectRef\n Function to be added to the call queue.\n *args : iterable\n Additional positional arguments to be passed in `func`.\n length : unidist.ObjectRef or int, optional\n Length, or reference to length, of wrapped ``pandas.DataFrame``.\n width : unidist.ObjectRef or int, optional\n Width, or reference to width, of wrapped ``pandas.DataFrame``.\n **kwargs : dict\n Additional keyword arguments to be passed in `func`.\n\n Returns\n -------\n PandasOnUnidistDataframePartition\n A new ``PandasOnUnidistDataframePartition`` object.\n\n Notes\n -----\n It does not matter if `func` is callable or an ``unidist.ObjectRef``. Unidist will\n handle it correctly either way. The keyword arguments are sent as a dictionary.\n ", "language": "en", "n_whitespaces": 286, "n_words": 107, "vocab_size": 70 }
https://github.com/modin-project/modin.git
6
compose_all
def compose_all(graphs):
    R = None

    # add graph attributes, H attributes take precedent over G attributes
    for i, G in enumerate(graphs):
        if i == 0:
            # create new graph
            R = G.__class__()
        elif G.is_multigraph() != R.is_multigraph():
            raise nx.NetworkXError("All graphs must be graphs or multigraphs.")

        R.graph.update(G.graph)
        R.add_nodes_from(G.nodes(data=True))
        R.add_edges_from(
            G.edges(keys=True, data=True) if G.is_multigraph() else G.edges(data=True)
        )

    if R is None:
        raise ValueError("cannot apply compose_all to an empty list")

    return R
50ff08de69c6e9541cd6c029bede5dabf56cfe73
13
all.py
197
Make all.py generator friendly (#5984) * Make compose_all generator friendly * Make disjoint_union_all and intersection_all generator friendly * Refactor disjoint_union_all to yield relabeled graphs * Make union_all generator friendly * Fix intersection_all * Fix union_all signature * Allow passing an infinite rename generator to union_all * Copy over generalizations to binary.py * Clean up rename * Simplify first_label in disjoint_union_all * Simplify disjoint_union_all * Add missing R.graph.update in intersection_all
42,318
0
179
118
55
177,254
68
networkx
19
networkx/algorithms/operators/all.py
Python
15
{ "docstring": "Returns the composition of all graphs.\n\n Composition is the simple union of the node sets and edge sets.\n The node sets of the supplied graphs need not be disjoint.\n\n Parameters\n ----------\n graphs : iterable\n Iterable of NetworkX graphs\n\n Returns\n -------\n C : A graph with the same type as the first graph in list\n\n Raises\n ------\n ValueError\n If `graphs` is an empty list.\n\n Notes\n -----\n It is recommended that the supplied graphs be either all directed or all\n undirected.\n\n Graph, edge, and node attributes are propagated to the union graph.\n If a graph attribute is present in multiple graphs, then the value\n from the last graph in the list with that attribute is used.\n ", "language": "en", "n_whitespaces": 183, "n_words": 114, "vocab_size": 73 }
https://github.com/networkx/networkx.git
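A short usage sketch of the public API above, assuming NetworkX is installed; the example graphs are illustrative:

import networkx as nx

# Union of node and edge sets across several graphs; later graph attributes win.
G1 = nx.path_graph(3)          # nodes 0-1-2
G2 = nx.path_graph([2, 3, 4])  # nodes 2-3-4, overlapping at node 2
C = nx.compose_all([G1, G2])
print(sorted(C.nodes()), C.number_of_edges())  # [0, 1, 2, 3, 4] 4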
2
test_backfill_with_many_backward_extremities
def test_backfill_with_many_backward_extremities(self) -> None:
    # create the room
    user_id = self.register_user("kermit", "test")
    tok = self.login("kermit", "test")
    requester = create_requester(user_id)

    room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)

    ev1 = self.helper.send(room_id, "first message", tok=tok)

    # Create "many" backward extremities. The magic number we're trying to
    # create more than is 5 which corresponds to the number of backward
    # extremities we slice off in `_maybe_backfill_inner`
    for _ in range(0, 8):
        event_handler = self.hs.get_event_creation_handler()
        event, context = self.get_success(
            event_handler.create_event(
                requester,
                {
                    "type": "m.room.message",
                    "content": {
                        "msgtype": "m.text",
                        "body": "message connected to fake event",
                    },
                    "room_id": room_id,
                    "sender": user_id,
                },
                prev_event_ids=[
                    ev1["event_id"],
                    # We're creating a backward extremity each time thanks
                    # to this fake event
                    generate_fake_event_id(),
                ],
            )
        )

        self.get_success(
            event_handler.handle_new_client_event(requester, event, context)
        )

    current_depth = 1
    limit = 100
    with LoggingContext("receive_pdu"):
        # Make sure backfill still works
        d = run_in_background(
            self.hs.get_federation_handler().maybe_backfill,
            room_id,
            current_depth,
            limit,
        )
    self.get_success(d)
9e06e220649cc0139749c388a894bee0d65d5f4e
17
test_federation.py
335
Add type hints to more tests files. (#12240)
71,879
0
795
197
106
247,729
141
synapse
33
tests/handlers/test_federation.py
Python
47
{ "docstring": "\n Check that we can backfill with many backward extremities.\n The goal is to make sure that when we only use a portion\n of backwards extremities(the magic number is more than 5),\n no errors are thrown.\n\n Regression test, see #11027\n ", "language": "en", "n_whitespaces": 82, "n_words": 39, "vocab_size": 36 }
https://github.com/matrix-org/synapse.git
2
project_ids_with_team_membership
def project_ids_with_team_membership(self) -> FrozenSet[int]:
    teams = self._team_memberships.keys()
    if not teams:
        return frozenset()

    with sentry_sdk.start_span(op="get_project_access_in_teams") as span:
        projects = frozenset(
            Project.objects.filter(status=ProjectStatus.VISIBLE, teams__in=teams)
            .distinct()
            .values_list("id", flat=True)
        )
        span.set_data("Project Count", len(projects))
        span.set_data("Team Count", len(teams))

    return projects
b3ce25d7c3ce85a9b7195f97c6d3d76c764e1808
18
access.py
168
ref(access): Remove models from Access fields (#40940) Anticipating changes for Hybrid Cloud silo boundaries, change the public interface of the `Access` class to not expose any ORM models as dataclass fields. As a first step, replace all such objects with their raw IDs. (Credit to @corps for the underlying idea. Future steps: replace models as method parameters; replace raw IDs with API object representations.)
18,338
0
168
98
29
87,840
33
sentry
25
src/sentry/auth/access.py
Python
22
{ "docstring": "Return the IDs of projects to which the user has access via actual team membership.\n\n This represents the set of all projects for which `has_project_membership`\n returns true. Use that method where possible and use this property only when\n you need to iterate or query for all such teams.\n\n Compare to accessible_project_ids, which is equal to this property in the\n typical case but represents a superset of IDs in case of superuser access.\n ", "language": "en", "n_whitespaces": 114, "n_words": 72, "vocab_size": 52 }
https://github.com/getsentry/sentry.git
10
setdiff1d
def setdiff1d(ar1, ar2, assume_unique=False, *, size=None, fill_value=None):
    _check_arraylike("setdiff1d", ar1, ar2)
    if size is None:
        ar1 = core.concrete_or_error(None, ar1, "The error arose in setdiff1d()")
    else:
        size = core.concrete_or_error(operator.index, size, "The error arose in setdiff1d()")
    ar1 = asarray(ar1)
    fill_value = asarray(0 if fill_value is None else fill_value, dtype=ar1.dtype)
    if ar1.size == 0:
        return full_like(ar1, fill_value, shape=size or 0)
    if not assume_unique:
        ar1 = unique(ar1, size=size and ar1.size)
    mask = in1d(ar1, ar2, invert=True)
    if size is None:
        return ar1[mask]
    else:
        if not (assume_unique or size is None):
            # Set mask to zero at locations corresponding to unique() padding.
            n_unique = ar1.size + 1 - (ar1 == ar1[0]).sum()
            mask = where(arange(ar1.size) < n_unique, mask, False)
        return where(arange(size) < mask.sum(), ar1[where(mask, size=size)], fill_value)


@_wraps(np.union1d, lax_description=_dedent(), extra_params=_dedent())
466bea1662ea13d1c5334f4db414bec20deb1fb4
@_wraps(np.union1d, lax_description=_dedent(""" Because the size of the output of ``union1d`` is data-dependent, the function is not typically compatible with JIT. The JAX version adds the optional ``size`` argument which must be specified statically for ``jnp.union1d`` to be used within some of JAX's transformations."""), extra_params=_dedent(""" size : int, optional If specified, the first ``size`` elements of the result will be returned. If there are fewer elements than ``size`` indicates, the return value will be padded with ``fill_value``. fill_value : array_like, optional When ``size`` is specified and there are fewer than the indicated number of elements, the remaining elements will be filled with ``fill_value``, which defaults to the minimum value of the union."""))
17
setops.py
374
lax_numpy: refactor set operations into separate private submodule
26,702
1
167
223
80
119,872
121
jax
29
jax/_src/numpy/setops.py
Python
20
{ "docstring": "\n Because the size of the output of ``union1d`` is data-dependent, the function is not\n typically compatible with JIT. The JAX version adds the optional ``size`` argument which\n must be specified statically for ``jnp.union1d`` to be used within some of JAX's\n transformations.\n size : int, optional\n If specified, the first ``size`` elements of the result will be returned. If there are\n fewer elements than ``size`` indicates, the return value will be padded with ``fill_value``.\n fill_value : array_like, optional\n When ``size`` is specified and there are fewer than the indicated number of elements, the\n remaining elements will be filled with ``fill_value``, which defaults to the minimum\n value of the union.", "language": "en", "n_whitespaces": 161, "n_words": 108, "vocab_size": 65 }
https://github.com/google/jax.git
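A brief sketch of the size/fill_value behavior described above, assuming a JAX version that includes these keyword arguments; the example arrays are illustrative:

import jax.numpy as jnp

# With a static `size`, the result is padded with `fill_value` so the output
# shape does not depend on the data (which is what makes it usable under jit).
out = jnp.setdiff1d(jnp.array([1, 2, 3, 4]), jnp.array([2, 4]), size=4, fill_value=0)
print(out)  # [1 3 0 0]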
4
select_related
def select_related(self, *fields):
    self._not_support_combined_queries("select_related")
    if self._fields is not None:
        raise TypeError(
            "Cannot call select_related() after .values() or .values_list()"
        )

    obj = self._chain()
    if fields == (None,):
        obj.query.select_related = False
    elif fields:
        obj.query.add_select_related(fields)
    else:
        obj.query.select_related = True
    return obj
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
query.py
122
Refs #33476 -- Reformatted code with Black.
51,212
0
164
72
33
205,781
38
django
10
django/db/models/query.py
Python
14
{ "docstring": "\n Return a new QuerySet instance that will select related objects.\n\n If fields are specified, they must be ForeignKey fields and only those\n related objects are included in the selection.\n\n If select_related(None) is called, clear the list.\n ", "language": "en", "n_whitespaces": 72, "n_words": 36, "vocab_size": 31 }
https://github.com/django/django.git
1
update_mask
def update_mask(self, padding_mask, dataset_batch):
    original_batch_size = self.get_real_batch_size(dataset_batch)
    missing_count = self.padded_batch_size - original_batch_size
    mask = backend.concatenate(
        [tf.ones(original_batch_size), tf.zeros(missing_count)], axis=0
    )
    return backend.concatenate([padding_mask, mask], axis=0)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
11
partial_batch_padding_handler.py
101
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,781
0
76
66
20
271,475
23
keras
15
keras/engine/partial_batch_padding_handler.py
Python
7
{ "docstring": "Calculate and cache the amount of padding required for a batch.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/keras-team/keras.git
11
release_zoom
def release_zoom(self, event):
    if self._zoom_info is None:
        return

    # We don't check the event button here, so that zooms can be cancelled
    # by (pressing and) releasing another mouse button.
    self.canvas.mpl_disconnect(self._zoom_info.cid)
    self.remove_rubberband()

    start_x, start_y = self._zoom_info.start_xy
    key = event.key
    # Force the key on colorbars to ignore the zoom-cancel on the
    # short-axis side
    if self._zoom_info.cbar == "horizontal":
        key = "x"
    elif self._zoom_info.cbar == "vertical":
        key = "y"
    # Ignore single clicks: 5 pixels is a threshold that allows the user to
    # "cancel" a zoom action by zooming by less than 5 pixels.
    if ((abs(event.x - start_x) < 5 and key != "y") or
            (abs(event.y - start_y) < 5 and key != "x")):
        self.canvas.draw_idle()
        self._zoom_info = None
        return

    for i, ax in enumerate(self._zoom_info.axes):
        # Detect whether this Axes is twinned with an earlier Axes in the
        # list of zoomed Axes, to avoid double zooming.
        twinx = any(ax.get_shared_x_axes().joined(ax, prev)
                    for prev in self._zoom_info.axes[:i])
        twiny = any(ax.get_shared_y_axes().joined(ax, prev)
                    for prev in self._zoom_info.axes[:i])
        ax._set_view_from_bbox(
            (start_x, start_y, event.x, event.y),
            self._zoom_info.direction, key, twinx, twiny)

    self.canvas.draw_idle()
    self._zoom_info = None
    self.push_current()
f156db08eee54d285ab0fb4e031e48d078ba6aa3
14
backend_bases.py
392
DOC: More cleanup axes -> Axes
22,787
0
521
240
118
107,500
176
matplotlib
31
lib/matplotlib/backend_bases.py
Python
27
{ "docstring": "Callback for mouse button release in zoom to rect mode.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/matplotlib/matplotlib.git
1
set_text_props
def set_text_props(self, **kwargs):
    self._text._internal_update(kwargs)
    self.stale = True
d69be2554cf6d1ac711bf433b1d6f176e3290d4f
8
table.py
38
Clarify error message for bad keyword arguments. `plot([], [], foo=42)` previously emitted ``` 'Line2D' object has no property 'foo' ``` which refers to the Matplotlib-specific concept of "properties". It now instead emits ``` Line2D.set() got an unexpected keyword argument 'foo' ``` which is modeled after the standard error message for unknown keyword arguments. (To maximize backcompat, the implementation goes through a new _internal_update, which does *not* error when the same prop is passed under different aliases. This could be changed later, but is not the goal of this PR.)
22,849
0
28
22
7
107,638
7
matplotlib
6
lib/matplotlib/table.py
Python
3
{ "docstring": "\n Update the text properties.\n\n Valid keyword arguments are:\n\n %(Text:kwdoc)s\n ", "language": "en", "n_whitespaces": 38, "n_words": 9, "vocab_size": 9 }
https://github.com/matplotlib/matplotlib.git
8
get_quantifier
def get_quantifier(ch, input_iter):
    if ch in "*?+":
        try:
            ch2, escaped = next(input_iter)
        except StopIteration:
            ch2 = None
        if ch2 == "?":
            ch2 = None
        if ch == "+":
            return 1, ch2
        return 0, ch2

    quant = []
    while ch != "}":
        ch, escaped = next(input_iter)
        quant.append(ch)
    quant = quant[:-1]
    values = "".join(quant).split(",")

    # Consume the trailing '?', if necessary.
    try:
        ch, escaped = next(input_iter)
    except StopIteration:
        ch = None
    if ch == "?":
        ch = None
    return int(values[0]), ch
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
regex_helper.py
225
Refs #33476 -- Reformatted code with Black.
51,659
0
226
128
39
206,725
79
django
13
django/utils/regex_helper.py
Python
24
{ "docstring": "\n Parse a quantifier from the input, where \"ch\" is the first character in the\n quantifier.\n\n Return the minimum number of occurrences permitted by the quantifier and\n either None or the next character from the input_iter if the next character\n is not part of the quantifier.\n ", "language": "en", "n_whitespaces": 64, "n_words": 45, "vocab_size": 29 }
https://github.com/django/django.git
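A hedged walk-through of the helper above, assuming it is in scope (it is a private utility in django.utils.regex_helper); input_iter yields (char, escaped) pairs for the characters that follow ch:

rest = iter([("2", False), (",", False), ("3", False), ("}", False), ("x", False)])
print(get_quantifier("{", rest))  # (2, 'x'): at least 2 occurrences; 'x' was read but is not part of the quantifier

rest = iter([("?", False), ("a", False)])
print(get_quantifier("*", rest))  # (0, None): '*?' permits zero occurrences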
2
placement_map
def placement_map(self) -> Mapping[tuple[int, int], list[WidgetPlacement]]:
    if self._map is None:
        self._map = self._build_placements(self._placements)
        return self._map
    return self._map
8d3b0f22eca8babf95470acf39fc626a1b0be1b4
11
_spatial_map.py
74
first iteration of spatial map
45,286
0
60
48
13
186,002
17
textual
10
src/textual/_spatial_map.py
Python
10
{ "docstring": "A mapping of block coordinate on to widget placement.\n\n Returns:\n Mapping[tuple[int, int], list[WidgetPlacement]]: Mapping.\n ", "language": "en", "n_whitespaces": 39, "n_words": 14, "vocab_size": 14 }
https://github.com/Textualize/textual.git
2
_get_tightbbox_for_layout_only
def _get_tightbbox_for_layout_only(obj, *args, **kwargs):
    try:
        return obj.get_tightbbox(*args, **{**kwargs, "for_layout_only": True})
    except TypeError:
        return obj.get_tightbbox(*args, **kwargs)
9b6abd0b4933811e0a45c2535ab8fd107db65dd9
13
artist.py
76
DOC: improve grammar and consistency
23,986
0
38
46
13
110,244
15
matplotlib
6
lib/matplotlib/artist.py
Python
5
{ "docstring": "\n Matplotlib's `.Axes.get_tightbbox` and `.Axis.get_tightbbox` support a\n *for_layout_only* kwarg; this helper tries to use the kwarg but skips it\n when encountering third-party subclasses that do not support it.\n ", "language": "en", "n_whitespaces": 40, "n_words": 27, "vocab_size": 26 }
https://github.com/matplotlib/matplotlib.git
4
_worker_health_check
def _worker_health_check(self) -> List[int]:
    logger.info("Health checking all workers ...")
    remote_results = self.__worker_manager.foreach_actor(
        lambda w: w.sample_with_count(),
        healthy_only=False,
    )

    return [
        r.actor_id for r in remote_results
        if not r.ok and isinstance(r.get(), RayError)
    ]
e707ce4fb3717e3c05118c57f503dfbd03552ca9
12
worker_set.py
105
[RLlib] Refactor `WorkerSet` on top of `FaultTolerantActorManager`. (#29938) Signed-off-by: Jun Gong <[email protected]>
30,789
0
128
65
30
135,997
31
ray
18
rllib/evaluation/worker_set.py
Python
17
{ "docstring": "Performs a health-check on each remote worker.\n\n Returns:\n List of indices (into `self._remote_workers` list) of faulty workers.\n Note that index=1 is the 0th item in `self._remote_workers`.\n ", "language": "en", "n_whitespaces": 62, "n_words": 26, "vocab_size": 25 }
https://github.com/ray-project/ray.git
2
get_running_loop
def get_running_loop():
    # NOTE: this function is implemented in C (see _asynciomodule.c)
    loop = _get_running_loop()
    if loop is None:
        raise RuntimeError('no running event loop')
    return loop
8198943edd73a363c266633e1aa5b2a9e9c9f526
10
events.py
43
add python 3.10.4 for windows
55,997
0
48
22
23
220,442
26
XX-Net
4
python3.10.4/Lib/asyncio/events.py
Python
5
{ "docstring": "Return the running event loop. Raise a RuntimeError if there is none.\n\n This function is thread-specific.\n ", "language": "en", "n_whitespaces": 23, "n_words": 16, "vocab_size": 15 }
https://github.com/XX-net/XX-Net.git
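A small, self-contained sketch of the documented behavior, using the public asyncio entry point:

import asyncio

async def main():
    loop = asyncio.get_running_loop()  # fine: called while the loop is running
    print(type(loop).__name__)

asyncio.run(main())

# Called outside a running event loop, asyncio.get_running_loop() raises RuntimeError,
# as the docstring above states.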
4
_get_feature_index
def _get_feature_index(fx, feature_names=None):
    if isinstance(fx, str):
        if feature_names is None:
            raise ValueError(
                f"Cannot plot partial dependence for feature {fx!r} since "
                "the list of feature names was not provided, neither as "
                "column names of a pandas data-frame nor via the feature_names "
                "parameter."
            )
        try:
            return feature_names.index(fx)
        except ValueError as e:
            raise ValueError(f"Feature {fx!r} not in feature_names") from e
    return fx
c1cfc4d4f36f9c00413e20d0ef85bed208a502ca
14
_pd_utils.py
107
ENH Extend PDP for nominal categorical features (#18298) Co-authored-by: Jérémie du Boisberranger <[email protected]> Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]> Closes https://github.com/scikit-learn/scikit-learn/issues/14969
76,930
0
196
56
50
261,669
62
scikit-learn
8
sklearn/inspection/_pd_utils.py
Python
14
{ "docstring": "Get feature index.\n\n Parameters\n ----------\n fx : int or str\n Feature index or name.\n\n feature_names : list of str, default=None\n All feature names from which to search the indices.\n\n Returns\n -------\n idx : int\n Feature index.\n ", "language": "en", "n_whitespaces": 81, "n_words": 36, "vocab_size": 29 }
https://github.com/scikit-learn/scikit-learn.git
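A hedged sketch of the helper above, assuming it is in scope (it is private and lives in sklearn/inspection/_pd_utils.py in releases that contain this change); the feature names are illustrative:

feature_names = ["age", "income", "tenure"]
print(_get_feature_index("income", feature_names))  # 1
print(_get_feature_index(2, feature_names))         # 2 (integer indices pass through unchanged)
# _get_feature_index("income") with feature_names=None raises ValueError, per the code above.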
1
GeneralizedMultivariateLogGamma
def GeneralizedMultivariateLogGamma(syms, delta, v, lamda, mu):
    return multivariate_rv(GeneralizedMultivariateLogGammaDistribution,
                           syms, delta, v, lamda, mu)
9ad8ab9fe58051cf11626ba6654852fcfec60147
7
joint_rv_types.py
39
Documentation cleanup 5
48,117
0
46
29
10
196,699
13
sympy
8
sympy/stats/joint_rv_types.py
Python
3
{ "docstring": "\n Creates a joint random variable with generalized multivariate log gamma\n distribution.\n\n The joint pdf can be found at [1].\n\n Parameters\n ==========\n\n syms: list/tuple/set of symbols for identifying each component\n delta: A constant in range $[0, 1]$\n v: Positive real number\n lamda: List of positive real numbers\n mu: List of positive real numbers\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import density\n >>> from sympy.stats.joint_rv_types import GeneralizedMultivariateLogGamma\n >>> from sympy import symbols, S\n >>> v = 1\n >>> l, mu = [1, 1, 1], [1, 1, 1]\n >>> d = S.Half\n >>> y = symbols('y_1:4', positive=True)\n >>> Gd = GeneralizedMultivariateLogGamma('G', d, v, l, mu)\n >>> density(Gd)(y[0], y[1], y[2])\n Sum(exp((n + 1)*(y_1 + y_2 + y_3) - exp(y_1) - exp(y_2) -\n exp(y_3))/(2**n*gamma(n + 1)**3), (n, 0, oo))/2\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Generalized_multivariate_log-gamma_distribution\n .. [2] https://www.researchgate.net/publication/234137346_On_a_multivariate_log-gamma_distribution_and_the_use_of_the_distribution_in_the_Bayesian_analysis\n\n Note\n ====\n\n If the GeneralizedMultivariateLogGamma is too long to type use,\n ``from sympy.stats.joint_rv_types import GeneralizedMultivariateLogGamma as GMVLG``\n If you want to pass the matrix omega instead of the constant delta, then use,\n ``GeneralizedMultivariateLogGammaOmega``.\n\n ", "language": "en", "n_whitespaces": 279, "n_words": 167, "vocab_size": 122 }
https://github.com/sympy/sympy.git
4
test_dqn_compilation_integer_rewards
def test_dqn_compilation_integer_rewards(self):
    num_iterations = 1
    config = (
        dqn.dqn.DQNConfig()
        .rollouts(num_rollout_workers=2)
        .training(num_steps_sampled_before_learning_starts=0)
    )

    for _ in framework_iterator(config, with_eager_tracing=True):
        # Double-dueling DQN.
        print("Double-dueling")
        plain_config = deepcopy(config)
        trainer = dqn.DQN(config=plain_config, env="Taxi-v3")
        for i in range(num_iterations):
            results = trainer.train()
            check_train_results(results)
            print(results)

        check_compute_single_action(trainer)
        trainer.stop()

        # Rainbow.
        print("Rainbow")
        rainbow_config = deepcopy(config).training(
            num_atoms=10, noisy=True, double_q=True, dueling=True, n_step=5
        )
        trainer = dqn.DQN(config=rainbow_config, env="Taxi-v3")
        for i in range(num_iterations):
            results = trainer.train()
            check_train_results(results)
            print(results)

        check_compute_single_action(trainer)
        trainer.stop()
7ba37885c67844d2eb18a63dcf3b7ac7f66ce89f
14
test_dqn.py
300
Cast rewards as tf.float32 to fix error in DQN in tf2 (#28384) * Cast rewards as tf.float32 to fix error in DQN in tf2 Signed-off-by: mgerstgrasser <[email protected]> * Add test case for DQN with integer rewards Signed-off-by: mgerstgrasser <[email protected]> Signed-off-by: mgerstgrasser <[email protected]>
28,479
0
404
181
43
127,648
66
ray
32
rllib/algorithms/dqn/tests/test_dqn.py
Python
28
{ "docstring": "Test whether DQN can be built on all frameworks.\n Unlike the previous test, this uses an environment with integer rewards\n in order to test that type conversions are working correctly.", "language": "en", "n_whitespaces": 43, "n_words": 30, "vocab_size": 30 }
https://github.com/ray-project/ray.git
2
_estimator_has
def _estimator_has(attr):
    return lambda self: (
        hasattr(self.estimators_[0], attr)
        if hasattr(self, "estimators_")
        else hasattr(self.base_estimator, attr)
    )
a794c58692a1f3e7a85a42d8c7f7ddd5fcf18baa
12
_bagging.py
62
MNT Replace if_delegate_has_method with available_if in ensemble and semi_supervised (#20545) Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
75,632
0
45
39
14
259,191
15
scikit-learn
6
sklearn/ensemble/_bagging.py
Python
6
{ "docstring": "Check if we can delegate a method to the underlying estimator.\n\n First, we check the first fitted estimator if available, otherwise we\n check the base estimator.\n ", "language": "en", "n_whitespaces": 35, "n_words": 26, "vocab_size": 19 }
https://github.com/scikit-learn/scikit-learn.git
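A minimal sketch of how this factory is meant to be combined with available_if, assuming _estimator_has as defined above is in scope; the Wrapper class and estimator choices are purely illustrative, not part of scikit-learn:

from sklearn.utils.metaestimators import available_if
from sklearn.linear_model import LogisticRegression, LinearRegression

class Wrapper:
    # Hypothetical meta-estimator: exposes predict_proba only when the
    # underlying estimator actually provides it.
    def __init__(self, base_estimator):
        self.base_estimator = base_estimator

    @available_if(_estimator_has("predict_proba"))
    def predict_proba(self, X):
        return self.base_estimator.predict_proba(X)

print(hasattr(Wrapper(LogisticRegression()), "predict_proba"))  # True
print(hasattr(Wrapper(LinearRegression()), "predict_proba"))    # False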
1
test_project_config_dynamic_sampling_is_none
def test_project_config_dynamic_sampling_is_none(default_project):
    default_project.update_option("sentry:dynamic_sampling", None)

    with Feature({"organizations:server-side-sampling": True}):
        cfg = get_project_config(default_project)

    cfg = cfg.to_dict()
    dynamic_sampling = get_path(cfg, "config", "dynamicSampling")

    assert dynamic_sampling is None


@pytest.mark.django_db
e0e2c4ff4248042abda3cc93024930dada416af8
@pytest.mark.django_db
12
test_config.py
103
feat(dynamic-sampling): Handles updating ProjectConfig with uniform DS rule for v2 [TET-465] (#40268) This PR forces your uniform rule by your plan or respect old logic. If both feature flags are enabled dynamic-sampling-basic flag takes the highest precedence. Original PR https://github.com/getsentry/sentry/pull/40180 was reverted via https://github.com/getsentry/sentry/pull/40266 due to issue of removing incorrect line. Co-authored-by: Joris Bayer <[email protected]>
18,216
1
47
51
19
87,078
23
sentry
12
tests/sentry/relay/test_config.py
Python
7
{ "docstring": "\n Tests test check inc-237 that dynamic sampling is None,\n so it's pass when we have fix and fails when we dont\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 19 }
https://github.com/getsentry/sentry.git
3
get_all_distribution_names
def get_all_distribution_names(url=None):
    if url is None:
        url = DEFAULT_INDEX
    client = ServerProxy(url, timeout=3.0)
    try:
        return client.list_packages()
    finally:
        client('close')()
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
12
locators.py
75
upd; format
12,837
0
54
44
16
62,028
18
transferlearning
7
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/locators.py
Python
8
{ "docstring": "\n Return all distribution names known by an index.\n :param url: The URL of the index.\n :return: A list of all known distribution names.\n ", "language": "en", "n_whitespaces": 36, "n_words": 23, "vocab_size": 18 }
https://github.com/jindongwang/transferlearning.git
1
convert_tokens_to_string
def convert_tokens_to_string(self, tokens):
    out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
    return out_string
c1c79b06550b587b2a975016ef9d18b53258025b
13
tokenization_nllb.py
52
NLLB tokenizer (#18126) * NLLB tokenizer * Apply suggestions from code review - Thanks Stefan! Co-authored-by: Stefan Schweter <[email protected]> * Final touches * Style :) * Update docs/source/en/model_doc/nllb.mdx Co-authored-by: Stefan Schweter <[email protected]> * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * PR reviews * Auto models Co-authored-by: Stefan Schweter <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]>
5,877
0
31
29
9
32,198
10
transformers
8
src/transformers/models/nllb/tokenization_nllb.py
Python
3
{ "docstring": "Converts a sequence of tokens (strings for sub-words) in a single string.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
https://github.com/huggingface/transformers.git
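The method simply joins SentencePiece pieces and converts the "▁" marker back to spaces. A dependency-free sketch of that behavior (SPIECE_UNDERLINE is the "▁" constant defined in the tokenizer module; the tokens here are illustrative):

SPIECE_UNDERLINE = "▁"
tokens = ["▁Hello", "▁world", "!"]
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
print(out_string)  # "Hello world!"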
1
start
def start(self, workflow_state, user=None):
    task_state = self.get_task_state_class()(workflow_state=workflow_state)
    task_state.status = TaskState.STATUS_IN_PROGRESS
    task_state.page_revision = workflow_state.page.get_latest_revision()
    task_state.task = self
    task_state.save()
    task_submitted.send(
        sender=task_state.specific.__class__,
        instance=task_state.specific,
        user=user,
    )
    return task_state
d10f15e55806c6944827d801cd9c2d53f5da4186
10
__init__.py
122
Reformat with black
16,102
0
120
77
20
73,775
24
wagtail
20
wagtail/core/models/__init__.py
Python
12
{ "docstring": "Start this task on the provided workflow state by creating an instance of TaskState", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
https://github.com/wagtail/wagtail.git
1
test_parse_qsd
def test_parse_qsd(self):
    assert validate(parse_qsd(), 'foo=bar&foo=baz') == {"foo": "baz"}

    with self.assertRaises(ValueError) as cm:
        validate(parse_qsd(), 123)
    assert_validationerror(cm.exception, )
3d44da082b3ba202b9d0557bfd8ce747a1d7960c
11
test_api_validate.py
84
plugin.api.validate: implement ValidationError - Implement `ValidationError` - Inherit from `ValueError` to preserve backwards compatiblity - Allow collecting multiple errors (AnySchema) - Keep an error stack of parent `ValidationError`s or other exceptions - Format error stack when converting error to string - Raise `ValidationError` instead of `ValueError` - Add error contexts where it makes sense - Add schema names to error instances - Add and update tests
45,710
0
47
46
15
187,150
16
streamlink
9
tests/test_api_validate.py
Python
8
{ "docstring": "\n ValidationError:\n Unable to parse query string: 'int' object has no attribute 'decode' (123)\n ", "language": "en", "n_whitespaces": 45, "n_words": 13, "vocab_size": 13 }
https://github.com/streamlink/streamlink.git
1
upgrade
def upgrade():
    # install pg_trgm
    op.execute("CREATE EXTENSION IF NOT EXISTS pg_trgm;")

    with op.get_context().autocommit_block():
        op.execute(
        )
0f0430b003e710f2e59a7e575dd2fac183b9ca28
11
2022_10_31_161719_41e5ed9e1034_.py
54
Allow filtering of Work Queues via the API based on name prefix (#7394)
11,915
0
49
27
15
59,614
15
prefect
5
src/prefect/orion/database/migrations/versions/postgresql/2022_10_31_161719_41e5ed9e1034_.py
Python
10
{ "docstring": "\n CREATE INDEX CONCURRENTLY\n trgm_ix_work_queue_name\n ON work_queue USING gin (name gin_trgm_ops);\n ", "language": "en", "n_whitespaces": 55, "n_words": 10, "vocab_size": 10 }
https://github.com/PrefectHQ/prefect.git
5
add_trusted_host
def add_trusted_host(self, host, source=None, suppress_logging=False):
    # type: (str, Optional[str], bool) -> None
    if not suppress_logging:
        msg = f'adding trusted host: {host!r}'
        if source is not None:
            msg += f' (from {source})'
        logger.info(msg)

    host_port = parse_netloc(host)
    if host_port not in self.pip_trusted_origins:
        self.pip_trusted_origins.append(host_port)

    self.mount(
        build_url_from_netloc(host) + '/',
        self._trusted_host_adapter
    )
    if not host_port[1]:
        # Mount wildcard ports for the same host.
        self.mount(
            build_url_from_netloc(host) + ':',
            self._trusted_host_adapter
        )
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
12
session.py
172
upd; format
12,338
0
264
99
49
60,907
64
transferlearning
15
.venv/lib/python3.8/site-packages/pip/_internal/network/session.py
Python
18
{ "docstring": "\n :param host: It is okay to provide a host that has previously been\n added.\n :param source: An optional source string, for logging where the host\n string came from.\n ", "language": "en", "n_whitespaces": 72, "n_words": 28, "vocab_size": 26 }
https://github.com/jindongwang/transferlearning.git
1
test_paginate_pages
def test_paginate_pages(user_api_client, page, page_type):
    page.is_published = True
    data_02 = {
        "slug": "test02-url",
        "title": "Test page",
        "content": dummy_editorjs("Content for page 1"),
        "is_published": True,
        "page_type": page_type,
    }
    data_03 = {
        "slug": "test03-url",
        "title": "Test page",
        "content": dummy_editorjs("Content for page 1"),
        "is_published": True,
        "page_type": page_type,
    }
    Page.objects.create(**data_02)
    Page.objects.create(**data_03)

    query = 
    response = user_api_client.post_graphql(query)
    content = get_graphql_content(response)
    pages_data = content["data"]["pages"]
    assert len(pages_data["edges"]) == 2
85681143d185863ae1a54af03f0690d11af04563
11
test_page_update.py
221
Update files structure of page tests (#9182)
4,929
0
166
122
39
26,031
60
saleor
18
saleor/graphql/page/tests/mutations/test_page_update.py
Python
34
{ "docstring": "\n query PagesQuery {\n pages(first: 2) {\n edges {\n node {\n id\n title\n }\n }\n }\n }\n ", "language": "en", "n_whitespaces": 174, "n_words": 16, "vocab_size": 10 }
https://github.com/saleor/saleor.git
10
prettify_exc
def prettify_exc(error):
    errors = []
    for exc in KNOWN_EXCEPTIONS:
        search_string = exc.match_string if exc.match_string else exc.exception_name
        split_string = (
            exc.show_from_string if exc.show_from_string else exc.exception_name
        )
        if search_string in error:
            # for known exceptions with no display rules and no prefix
            # we should simply show nothing
            if not exc.show_from_string and not exc.prefix:
                errors.append("")
                continue
            elif exc.prefix and exc.prefix in error:
                _, error, info = error.rpartition(exc.prefix)
            else:
                _, error, info = error.rpartition(split_string)
            errors.append(f"{error} {info}")
    if not errors:
        return f"{vistir.misc.decode_for_output(error)}"
    return "\n".join(errors)
9a3b3ce70621af6f9adaa9eeac9cf83fa149319c
16
exceptions.py
231
Issue 4993 Add standard pre commit hooks and apply linting. (#4994) * Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else.
3,069
0
267
126
51
19,700
80
pipenv
19
pipenv/exceptions.py
Python
19
{ "docstring": "Catch known errors and prettify them instead of showing the\n entire traceback, for better UX", "language": "en", "n_whitespaces": 17, "n_words": 15, "vocab_size": 15 }
https://github.com/pypa/pipenv.git
1
log_deletion
def log_deletion(self, request, obj, object_repr):
    from django.contrib.admin.models import DELETION, LogEntry

    return LogEntry.objects.log_action(
        user_id=request.user.pk,
        content_type_id=get_content_type_for_model(obj).pk,
        object_id=obj.pk,
        object_repr=object_repr,
        action_flag=DELETION,
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
options.py
90
Refs #33476 -- Reformatted code with Black.
50,382
0
101
63
18
203,451
18
django
20
django/contrib/admin/options.py
Python
9
{ "docstring": "\n Log that an object will be deleted. Note that this method must be\n called before the deletion.\n\n The default implementation creates an admin LogEntry object.\n ", "language": "en", "n_whitespaces": 54, "n_words": 25, "vocab_size": 22 }
https://github.com/django/django.git
139
_parse_query
def _parse_query(self, sql): mindsdb_sql_struct = parse_sql(sql, dialect='mindsdb') # is it query to 'predictors'? if ( isinstance(mindsdb_sql_struct.from_table, Identifier) and mindsdb_sql_struct.from_table.parts[-1].lower() == 'predictors' and ( self.database == 'mindsdb' or mindsdb_sql_struct.from_table.parts[0].lower() == 'mindsdb' ) ): dn = self.datahub.get(self.mindsdb_database_name) data, columns = dn.get_predictors(mindsdb_sql_struct) table_name = ('mindsdb', 'predictors', 'predictors') data = [{(key, key): value for key, value in row.items()} for row in data] data = [{table_name: x} for x in data] self.columns_list = [ (table_name + (column_name, column_name)) for column_name in columns ] columns = [(column_name, column_name) for column_name in columns] self.fetched_data = { 'values': data, 'columns': {table_name: columns}, 'tables': [table_name] } return # is it query to 'commands'? if ( isinstance(mindsdb_sql_struct.from_table, Identifier) and mindsdb_sql_struct.from_table.parts[-1].lower() == 'commands' and ( self.database == 'mindsdb' or mindsdb_sql_struct.from_table.parts[0].lower() == 'mindsdb' ) ): self.fetched_data = { 'values': [], 'columns': {('mindsdb', 'commands', 'commands'): [('command', 'command')]}, 'tables': [('mindsdb', 'commands', 'commands')] } self.columns_list = [('mindsdb', 'commands', 'commands', 'command', 'command')] return # is it query to 'datasources'? if ( isinstance(mindsdb_sql_struct.from_table, Identifier) and mindsdb_sql_struct.from_table.parts[-1].lower() == 'datasources' and ( self.database == 'mindsdb' or mindsdb_sql_struct.from_table.parts[0].lower() == 'mindsdb' ) ): dn = self.datahub.get(self.mindsdb_database_name) data, columns = dn.get_datasources(mindsdb_sql_struct) table_name = ('mindsdb', 'datasources', 'datasources') data = [{(key, key): value for key, value in row.items()} for row in data] data = [{table_name: x} for x in data] self.columns_list = [ (table_name + (column_name, column_name)) for column_name in columns ] columns = [(column_name, column_name) for column_name in columns] self.fetched_data = { 'values': data, 'columns': {table_name: columns}, 'tables': [table_name] } return integrations_names = self.datahub.get_datasources_names() integrations_names.append('information_schema') integrations_names.append('file') all_tables = get_all_tables(mindsdb_sql_struct) predictor_metadata = {} predictors = db.session.query(db.Predictor).filter_by(company_id=self.session.company_id) for model_name in set(all_tables): for p in predictors: if p.name == model_name: if isinstance(p.data, dict) and 'error' not in p.data: ts_settings = p.learn_args.get('timeseries_settings', {}) if ts_settings.get('is_timeseries') is True: window = ts_settings.get('window') order_by = ts_settings.get('order_by')[0] group_by = ts_settings.get('group_by') if isinstance(group_by, list): group_by = ts_settings.get('group_by')[0] predictor_metadata[model_name] = { 'timeseries': True, 'window': window, 'order_by_column': order_by, 'group_by_column': group_by } else: predictor_metadata[model_name] = { 'timeseries': False } self.model_types.update(p.data.get('dtypes', {})) plan = plan_query( mindsdb_sql_struct, integrations=integrations_names, predictor_namespace=self.mindsdb_database_name, predictor_metadata=predictor_metadata, default_namespace=self.database ) steps_data = [] for step in plan.steps: data = [] if type(step) == GetPredictorColumns: predictor_name = step.predictor.parts[-1] dn = self.datahub.get(self.mindsdb_database_name) columns = 
dn.get_table_columns(predictor_name) columns = [ (column_name, column_name) for column_name in columns ] data = { 'values': [], 'columns': { (self.mindsdb_database_name, predictor_name, predictor_name): columns }, 'tables': [(self.mindsdb_database_name, predictor_name, predictor_name)] } elif type(step) == FetchDataframeStep: data = self._fetch_dataframe_step(step) elif type(step) == UnionStep: raise Exception('Union step is not implemented') # TODO add union support # left_data = steps_data[step.left.step_num] # right_data = steps_data[step.right.step_num] # data = left_data + right_data elif type(step) == MapReduceStep: if step.reduce != 'union': raise Exception(f'Unknown MapReduceStep type: {step.reduce}') step_data = steps_data[step.values.step_num] values = [] step_data_values = step_data['values'] for row in step_data_values: for row_data in row.values(): for name, value in row_data.items(): if name[0] != '__mindsdb_row_id': values.append(value) data = { 'values': [], 'columns': {}, 'tables': [] } substep = step.step if type(substep) == FetchDataframeStep: query = substep.query markQueryVar(query.where) for value in values: replaceQueryVar(query.where, value) sub_data = self._fetch_dataframe_step(substep) if len(data['columns']) == 0: data['columns'] = sub_data['columns'] if len(data['tables']) == 0: data['tables'] = sub_data['tables'] data['values'].extend(sub_data['values']) elif type(substep) == MultipleSteps: data = self._multiple_steps_reduce(substep, values) else: raise Exception(f'Unknown step type: {step.step}') elif type(step) == ApplyPredictorRowStep: predictor = '.'.join(step.predictor.parts) dn = self.datahub.get(self.mindsdb_database_name) where_data = step.row_dict data = dn.select( table=predictor, columns=None, where_data=where_data, integration_name=self.session.integration, integration_type=self.session.integration_type ) data = [{(key, key): value for key, value in row.items()} for row in data] table_name = get_preditor_alias(step, self.database) values = [{table_name: x} for x in data] columns = {table_name: []} if len(data) > 0: row = data[0] columns[table_name] = list(row.keys()) # TODO else data = { 'values': values, 'columns': columns, 'tables': [table_name] } elif type(step) == ApplyPredictorStep or type(step) == ApplyTimeseriesPredictorStep: dn = self.datahub.get(self.mindsdb_database_name) predictor = '.'.join(step.predictor.parts) where_data = [] for row in steps_data[step.dataframe.step_num]['values']: new_row = {} for table_name in row: keys_intersection = set(new_row) & set(row[table_name]) if len(keys_intersection) > 0: raise Exception( f'The predictor got two identical keys from different datasources: {keys_intersection}' ) new_row.update(row[table_name]) where_data.append(new_row) where_data = [{key[1]: value for key, value in row.items()} for row in where_data] is_timeseries = predictor_metadata[predictor]['timeseries'] _mdb_make_predictions = None if is_timeseries: if 'LATEST' in self.raw: _mdb_make_predictions = False else: _mdb_make_predictions = True for row in where_data: if '__mdb_make_predictions' not in row: row['__mdb_make_predictions'] = _mdb_make_predictions for row in where_data: for key in row: if isinstance(row[key], datetime.date): row[key] = str(row[key]) data = dn.select( table=predictor, columns=None, where_data=where_data, integration_name=self.session.integration, integration_type=self.session.integration_type ) data = [{(key, key): value for key, value in row.items()} for row in data] table_name = get_preditor_alias(step, 
self.database) values = [{table_name: x} for x in data] columns = {table_name: []} if len(data) > 0: row = data[0] columns[table_name] = list(row.keys()) # TODO else data = { 'values': values, 'columns': columns, 'tables': [table_name] } elif type(step) == JoinStep: left_data = steps_data[step.left.step_num] right_data = steps_data[step.right.step_num] # FIXME https://github.com/mindsdb/mindsdb_sql/issues/136 if True in [type(step) == ApplyTimeseriesPredictorStep for step in plan.steps]: right_data = steps_data[step.left.step_num] left_data = steps_data[step.right.step_num] if step.query.condition is not None: raise Exception('At this moment supported only JOIN without condition') if step.query.join_type.upper() not in ('LEFT JOIN', 'JOIN'): raise Exception('At this moment supported only JOIN and LEFT JOIN') if ( len(left_data['tables']) != 1 or len(right_data['tables']) != 1 or left_data['tables'][0] == right_data['tables'][0] ): raise Exception('At this moment supported only JOIN of two different tables') data = { 'values': [], 'columns': {}, 'tables': list(set(left_data['tables'] + right_data['tables'])) } for data_part in [left_data, right_data]: for table_name in data_part['columns']: if table_name not in data['columns']: data['columns'][table_name] = data_part['columns'][table_name] else: data['columns'][table_name].extend(data_part['columns'][table_name]) for table_name in data['columns']: data['columns'][table_name] = list(set(data['columns'][table_name])) left_key = left_data['tables'][0] right_key = right_data['tables'][0] left_columns_map = {} left_columns_map_reverse = {} for i, column_name in enumerate(left_data['columns'][left_key]): left_columns_map[f'a{i}'] = column_name left_columns_map_reverse[column_name] = f'a{i}' right_columns_map = {} right_columns_map_reverse = {} for i, column_name in enumerate(right_data['columns'][right_key]): right_columns_map[f'b{i}'] = column_name right_columns_map_reverse[column_name] = f'b{i}' left_df_data = [] for row in left_data['values']: row = row[left_key] left_df_data.append({left_columns_map_reverse[key]: value for key, value in row.items()}) right_df_data = [] for row in right_data['values']: row = row[right_key] right_df_data.append({right_columns_map_reverse[key]: value for key, value in row.items()}) df_a = pd.DataFrame(left_df_data) df_b = pd.DataFrame(right_df_data) a_name = f'a{round(time.time()*1000)}' b_name = f'b{round(time.time()*1000)}' con = duckdb.connect(database=':memory:') con.register(a_name, df_a) con.register(b_name, df_b) resp_df = con.execute(f).fetchdf() con.unregister(a_name) con.unregister(b_name) con.close() resp_df = resp_df.where(pd.notnull(resp_df), None) resp_dict = resp_df.to_dict(orient='records') for row in resp_dict: new_row = {left_key: {}, right_key: {}} for key, value in row.items(): if key.startswith('a'): new_row[left_key][left_columns_map[key]] = value else: new_row[right_key][right_columns_map[key]] = value data['values'].append(new_row) elif type(step) == FilterStep: raise Exception('FilterStep is not implemented') # elif type(step) == ApplyTimeseriesPredictorStep: # raise Exception('ApplyTimeseriesPredictorStep is not implemented') elif type(step) == ProjectStep: step_data = steps_data[step.dataframe.step_num] columns_list = [] for column_full_name in step.columns: table_name = None if type(column_full_name) == Star: for table_name, table_columns_list in step_data['columns'].items(): for column in table_columns_list: columns_list.append(table_name + column) elif type(column_full_name) == 
Identifier: column_name_parts = column_full_name.parts column_alias = None if column_full_name.alias is None else '.'.join(column_full_name.alias.parts) if len(column_name_parts) > 2: raise Exception(f'Column name must contain no more than 2 parts. Got name: {".".join(column_full_name)}') elif len(column_name_parts) == 1: column_name = column_name_parts[0] appropriate_table = None if len(step_data['tables']) == 1: appropriate_table = step_data['tables'][0] else: for table_name, table_columns in step_data['columns'].items(): if (column_name, column_name) in table_columns: if appropriate_table is not None: raise Exception('Found multiple appropriate tables for column {column_name}') else: appropriate_table = table_name if appropriate_table is None: # it is probably constaint # FIXME https://github.com/mindsdb/mindsdb_sql/issues/133 # column_name = column_name.strip("'") # name_or_alias = column_alias or column_name # column_alias = name_or_alias # for row in step_data['values']: # for table in row: # row[table][(column_name, name_or_alias)] = row[table][(column_name, column_name)] # appropriate_table = step_data['tables'][0] columns_list.append(appropriate_table + (column_alias, column_alias)) else: columns_list.append(appropriate_table + (column_name, column_alias or column_name)) # column_name elif len(column_name_parts) == 2: table_name_or_alias = column_name_parts[0] column_name = column_name_parts[1] appropriate_table = None for table_name, table_columns in step_data['columns'].items(): checkig_table_name_or_alias = table_name[2] or table_name[1] if table_name_or_alias == checkig_table_name_or_alias: for table_column_name in table_columns: if ( table_column_name[1] == column_name or table_column_name[1] is None and table_column_name[0] == column_name ): break else: raise Exception(f'Can not find column "{column_name}" in table "{table_name}"') appropriate_table = table_name break if appropriate_table is None: raise Exception(f'Can not find approproate table for column {column_name}') columns_to_copy = None for column in step_data['columns'][appropriate_table]: if column[0] == column_name and (column[1] is None or column[1] == column_name): columns_to_copy = column break else: raise Exception(f'Can not find approproate column in data: {(column_name, column_alias)}') for row in step_data['values']: row[appropriate_table][(column_name, column_alias)] = row[appropriate_table][columns_to_copy] columns_list.append(appropriate_table + (column_name, column_alias)) else: raise Exception('Undefined column name') else: raise Exception(f'Unexpected column name type: {column_full_name}') self.columns_list = columns_list data = step_data else: raise Exception(F'Unknown planner step: {step}') steps_data.append(data) if self.outer_query is not None: data = [] # +++ result = [] for row in steps_data[-1]: data_row = {} for column_record in self.columns_list: table_name = column_record[:3] column_name = column_record[3] data_row[column_record[4] or column_record[3]] = row[table_name][column_name] result.append(data_row) # --- data = self._make_list_result_view(result) df = pd.DataFrame(data) result = query_df(df, self.outer_query) try: self.columns_list = [ ('', '', '', x, x) for x in result.columns ] except Exception: self.columns_list = [ ('', '', '', result.name, result.name) ] # +++ make list result view new_result = [] for row in result.to_dict(orient='records'): data_row = [] for column_record in self.columns_list: column_name = column_record[4] or column_record[3] 
data_row.append(row.get(column_name)) new_result.append(data_row) result = new_result # --- self.fetched_data = result else: self.fetched_data = steps_data[-1] if hasattr(self, 'columns_list') is False: self.columns_list = [] for row in self.fetched_data: for table_key in row: for column_name in row[table_key]: if (table_key + (column_name, column_name)) not in self.columns_list: self.columns_list.append((table_key + (column_name, column_name))) # if there was no 'ProjectStep', then get columns list from last step: if self.columns_list is None: self.columns_list = [] for table_name in self.fetched_data['columns']: self.columns_list.extend([ table_name + column for column in self.fetched_data['columns'][table_name] ]) self.columns_list = [x for x in self.columns_list if x[3] != '__mindsdb_row_id']
bc3b53170ee1bc542682366ed93bc362cf26e152
30
sql_query.py
5,091
fix selecting from files
25,033
0
9,407
3,055
531
113,817
1,533
mindsdb
176
mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
Python
413
{ "docstring": "\n SELECT * FROM {a_name} as ta full join {b_name} as tb\n ON ta.{left_columns_map_reverse[('__mindsdb_row_id', '__mindsdb_row_id')]}\n = tb.{right_columns_map_reverse[('__mindsdb_row_id', '__mindsdb_row_id')]}\n ", "language": "en", "n_whitespaces": 91, "n_words": 17, "vocab_size": 15 }
https://github.com/mindsdb/mindsdb.git
1
test_payment_refund_or_void_refund_called_txn_exist
def test_payment_refund_or_void_refund_called_txn_exist(refund_mock, payment): # given payment.charge_status = ChargeStatus.FULLY_CHARGED payment.save(update_fields=["charge_status"]) assert payment.can_refund() is True payment.captured_amount = payment.total payment.save(update_fields=["captured_amount"]) txn = payment.transactions.create( is_success=True, action_required=False, kind=TransactionKind.REFUND_ONGOING, amount=payment.captured_amount / 2, currency=payment.currency, token="test", gateway_response={}, ) # when gateway.payment_refund_or_void( payment, get_plugins_manager(), None, transaction_id=txn.token ) # then assert refund_mock.called_once() @patch("saleor.payment.gateway.refund")
0881beec1ac02dfa97525c5173687defb356d85c
@patch("saleor.payment.gateway.refund")
11
test_gateway.py
202
Fix payment flow (#9504) * Do not capture payment again when it should be refunded or voided * Do not create order when then is ongoing refund
5,046
1
140
120
37
26,687
43
saleor
29
saleor/payment/tests/test_gateway.py
Python
19
{ "docstring": "Ensure that the refund method is called when the refund process\n is already ongoing but not covered full payment captured amount.", "language": "en", "n_whitespaces": 23, "n_words": 21, "vocab_size": 18 }
https://github.com/saleor/saleor.git
6
get_item_price
def get_item_price(args, item_code, ignore_party=False): args["item_code"] = item_code conditions = conditions += "and ifnull(batch_no, '') in ('', %(batch_no)s)" if not ignore_party: if args.get("customer"): conditions += " and customer=%(customer)s" elif args.get("supplier"): conditions += " and supplier=%(supplier)s" else: conditions += "and (customer is null or customer = '') and (supplier is null or supplier = '')" if args.get("transaction_date"): conditions += if args.get("posting_date"): conditions += return frappe.db.sql( .format( conditions=conditions ), args, )
494bd9ef78313436f0424b918f200dab8fc7c20b
12
get_item_details.py
181
style: format code with black
14,635
0
50
96
43
67,838
68
erpnext
10
erpnext/stock/get_item_details.py
Python
27
{ "docstring": "\n\tGet name, price_list_rate from Item Price based on conditions\n\t Check if the desired qty is within the increment of the packing list.\n\t:param args: dict (or frappe._dict) with mandatory fields price_list, uom\n\t optional fields transaction_date, customer, supplier\n\t:param item_code: str, Item Doctype field item_code\n\twhere item_code=%(item_code)s\n\t\tand price_list=%(price_list)s\n\t\tand ifnull(uom, '') in ('', %(uom)s) and %(transaction_date)s between\n\t\t\tifnull(valid_from, '2000-01-01') and ifnull(valid_upto, '2500-12-31') and %(posting_date)s between\n\t\t\tifnull(valid_from, '2000-01-01') and ifnull(valid_upto, '2500-12-31') select name, price_list_rate, uom\n\t\tfrom `tabItem Price` {conditions}\n\t\torder by valid_from desc, batch_no desc, uom desc ", "language": "en", "n_whitespaces": 91, "n_words": 86, "vocab_size": 66 }
https://github.com/frappe/erpnext.git
3
make_compound_path_from_polys
def make_compound_path_from_polys(cls, XY): # for each poly: 1 for the MOVETO, (numsides-1) for the LINETO, 1 for # the CLOSEPOLY; the vert for the closepoly is ignored but we still # need it to keep the codes aligned with the vertices numpolys, numsides, two = XY.shape if two != 2: raise ValueError("The third dimension of 'XY' must be 2") stride = numsides + 1 nverts = numpolys * stride verts = np.zeros((nverts, 2)) codes = np.full(nverts, cls.LINETO, dtype=cls.code_type) codes[0::stride] = cls.MOVETO codes[numsides::stride] = cls.CLOSEPOLY for i in range(numsides): verts[i::stride] = XY[:, i] return cls(verts, codes)
b26040884de54145ae857a37a6737e110b2607c4
10
path.py
183
Cleanup make_compound_path_from_poly doc, example.
23,762
0
215
115
70
109,822
95
matplotlib
22
lib/matplotlib/path.py
Python
13
{ "docstring": "\n Make a compound `Path` object to draw a number of polygons with equal\n numbers of sides.\n\n .. plot:: gallery/misc/histogram_path.py\n\n Parameters\n ----------\n XY : (numpolys, numsides, 2) array\n ", "language": "en", "n_whitespaces": 77, "n_words": 27, "vocab_size": 25 }
https://github.com/matplotlib/matplotlib.git
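Illustrative note (not part of the dataset record above): the record documents matplotlib's Path.make_compound_path_from_polys, which packs an array of equally-sided polygons into one compound Path. A minimal usage sketch; the triangle coordinates and styling below are invented for illustration.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.patches import PathPatch

# XY has shape (numpolys, numsides, 2): here, two triangles.
XY = np.array([
    [[0.0, 0.0], [1.0, 0.0], [0.5, 1.0]],
    [[2.0, 0.0], [3.0, 0.0], [2.5, 1.0]],
])
compound = Path.make_compound_path_from_polys(XY)

fig, ax = plt.subplots()
ax.add_patch(PathPatch(compound, facecolor="lightblue", edgecolor="black"))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 1.5)
plt.show()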
4
_ip_int_from_string
def _ip_int_from_string(cls, ip_str): if not ip_str: raise AddressValueError('Address cannot be empty') octets = ip_str.split('.') if len(octets) != 4: raise AddressValueError("Expected 4 octets in %r" % ip_str) try: return int.from_bytes(map(cls._parse_octet, octets), 'big') except ValueError as exc: raise AddressValueError("%s in %r" % (exc, ip_str)) from None
8198943edd73a363c266633e1aa5b2a9e9c9f526
12
ipaddress.py
131
add python 3.10.4 for windows
55,382
0
130
76
37
218,550
44
XX-Net
13
python3.10.4/Lib/ipaddress.py
Python
10
{ "docstring": "Turn the given IP string into an integer for comparison.\n\n Args:\n ip_str: A string, the IP ip_str.\n\n Returns:\n The IP ip_str as an integer.\n\n Raises:\n AddressValueError: if ip_str isn't a valid IPv4 Address.\n\n ", "language": "en", "n_whitespaces": 94, "n_words": 33, "vocab_size": 28 }
https://github.com/XX-net/XX-Net.git
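Illustrative note (not part of the record above): _ip_int_from_string is a private helper of the stdlib ipaddress module, but the same conversion and error behaviour are reachable through the public API. A small sketch; the address values are arbitrary.

import ipaddress

addr = ipaddress.IPv4Address("192.168.0.1")
print(int(addr))  # 3232235521 -- the integer form the parser produces for comparisons

try:
    ipaddress.IPv4Address("1.2.3")  # only three octets
except ipaddress.AddressValueError as exc:
    print(exc)  # message mentions "Expected 4 octets", as in the helper above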
2
user_can_authenticate
def user_can_authenticate(self, user): is_active = getattr(user, "is_active", None) return is_active or is_active is None
9c19aff7c7561e3a82978a272ecdaad40dda5c00
9
backends.py
40
Refs #33476 -- Reformatted code with Black.
50,472
0
35
24
12
203,605
14
django
5
django/contrib/auth/backends.py
Python
3
{ "docstring": "\n Reject users with is_active=False. Custom user models that don't have\n that attribute are allowed.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
https://github.com/django/django.git
20
get_tokens_unprocessed
def get_tokens_unprocessed(self, text=None, context=None): tokendefs = self._tokens if not context: ctx = LexerContext(text, 0) statetokens = tokendefs['root'] else: ctx = context statetokens = tokendefs[ctx.stack[-1]] text = ctx.text while 1: for rexmatch, action, new_state in statetokens: m = rexmatch(text, ctx.pos, ctx.end) if m: if action is not None: if type(action) is _TokenType: yield ctx.pos, action, m.group() ctx.pos = m.end() else: yield from action(self, m, ctx) if not new_state: # altered the state stack? statetokens = tokendefs[ctx.stack[-1]] # CAUTION: callback must set ctx.pos! if new_state is not None: # state transition if isinstance(new_state, tuple): for state in new_state: if state == '#pop': if len(ctx.stack) > 1: ctx.stack.pop() elif state == '#push': ctx.stack.append(ctx.stack[-1]) else: ctx.stack.append(state) elif isinstance(new_state, int): # see RegexLexer for why this check is made if abs(new_state) >= len(ctx.stack): del ctx.state[1:] else: del ctx.stack[new_state:] elif new_state == '#push': ctx.stack.append(ctx.stack[-1]) else: assert False, "wrong state def: %r" % new_state statetokens = tokendefs[ctx.stack[-1]] break else: try: if ctx.pos >= ctx.end: break if text[ctx.pos] == '\n': # at EOL, reset state to "root" ctx.stack = ['root'] statetokens = tokendefs['root'] yield ctx.pos, Text, '\n' ctx.pos += 1 continue yield ctx.pos, Error, text[ctx.pos] ctx.pos += 1 except IndexError: break
f3166e673fe8d40277b804d35d77dcdb760fc3b3
24
lexer.py
609
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,369
0
1,512
373
108
20,440
193
pipenv
30
pipenv/patched/notpip/_vendor/pygments/lexer.py
Python
56
{ "docstring": "\n Split ``text`` into (tokentype, text) pairs.\n If ``context`` is given, use this lexer context instead.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
https://github.com/pypa/pipenv.git
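Illustrative note (not part of the record above): the get_tokens_unprocessed loop shown there comes from the pygments copy vendored inside pipenv; the same generator interface exists on any upstream pygments lexer. A hedged sketch using the standalone pygments package; the sample source string is arbitrary.

from pygments.lexers import PythonLexer  # a RegexLexer subclass

lexer = PythonLexer()
for index, token_type, value in lexer.get_tokens_unprocessed("x = 1\n"):
    print(index, token_type, repr(value))
# yields (position, tokentype, text) triples, e.g. (0, Token.Name, 'x')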
6
update_runtime_cls
def update_runtime_cls(args, copy=False) -> 'Namespace': _args = deepcopy(args) if copy else args gateway_runtime_dict = { GatewayProtocolType.GRPC: 'GRPCGatewayRuntime', GatewayProtocolType.WEBSOCKET: 'WebSocketGatewayRuntime', GatewayProtocolType.HTTP: 'HTTPGatewayRuntime', } if _args.runtime_cls == 'WorkerRuntime' and is_valid_huburi(_args.uses): _hub_args = deepcopy(_args) _hub_args.uri = _args.uses _hub_args.no_usage = True _args.uses = HubIO(_hub_args).pull() if hasattr(_args, 'protocol'): _args.runtime_cls = gateway_runtime_dict[_args.protocol] if _args.pea_role == PeaRoleType.HEAD: _args.runtime_cls = 'HeadRuntime' return _args
933415bfa1f9eb89f935037014dfed816eb9815d
12
helper.py
204
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
1,737
0
142
122
40
9,864
55
jina
23
jina/peapods/peas/helper.py
Python
23
{ "docstring": "Get runtime_cls as a string from args\n\n :param args: pea/pod namespace args\n :param copy: True if args shouldn't be modified in-place\n :return: runtime class as a string\n ", "language": "en", "n_whitespaces": 39, "n_words": 27, "vocab_size": 21 }
https://github.com/jina-ai/jina.git
4
set_thetalim
def set_thetalim(self, *args, **kwargs): r orig_lim = self.get_xlim() # in radians if 'thetamin' in kwargs: kwargs['xmin'] = np.deg2rad(kwargs.pop('thetamin')) if 'thetamax' in kwargs: kwargs['xmax'] = np.deg2rad(kwargs.pop('thetamax')) new_min, new_max = self.set_xlim(*args, **kwargs) # Parsing all permutations of *args, **kwargs is tricky; it is simpler # to let set_xlim() do it and then validate the limits. if abs(new_max - new_min) > 2 * np.pi: self.set_xlim(orig_lim) # un-accept the change raise ValueError("The angle range must be less than a full circle") return tuple(np.rad2deg((new_min, new_max)))
162bd59f50a9c59a51574aee5dc9e932133bb971
13
polar.py
200
DOC: More capitalization of Axes In line with #18726. Triggered by #22242.
22,680
0
188
115
65
107,297
80
matplotlib
17
lib/matplotlib/projections/polar.py
Python
26
{ "docstring": "\n Set the minimum and maximum theta values.\n\n Can take the following signatures:\n\n - ``set_thetalim(minval, maxval)``: Set the limits in radians.\n - ``set_thetalim(thetamin=minval, thetamax=maxval)``: Set the limits\n in degrees.\n\n where minval and maxval are the minimum and maximum limits. Values are\n wrapped in to the range :math:`[0, 2\\pi]` (in radians), so for example\n it is possible to do ``set_thetalim(-np.pi / 2, np.pi / 2)`` to have\n an axis symmetric around 0. A ValueError is raised if the absolute\n angle difference is larger than a full circle.\n ", "language": "en", "n_whitespaces": 165, "n_words": 85, "vocab_size": 63 }
https://github.com/matplotlib/matplotlib.git
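Illustrative note (not part of the record above): set_thetalim accepts either positional limits in radians or thetamin/thetamax keywords in degrees, and rejects ranges wider than a full circle. A short sketch on a polar Axes; the plotted data are made up.

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots(subplot_kw={"projection": "polar"})
theta = np.linspace(0, np.pi / 2, 50)
ax.plot(theta, np.sin(theta))

ax.set_thetalim(thetamin=0, thetamax=90)        # keyword form, degrees
# equivalent radian form: ax.set_thetalim(0, np.pi / 2)
# ax.set_thetalim(0, 3 * np.pi) would raise ValueError (more than a full circle)
plt.show()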
1
get_tables
def get_tables(self) -> Response: result = {} result['data_frame'] = pd.DataFrame([self.connection_data.get('collection')]) df = result.data_frame result.data_frame = df.rename(columns={df.columns[0]: 'table_name'}) return result
eb78c9016971609746d063b135e76dc01898d821
13
solr_handler.py
103
Added Solr Handler #2740
25,838
0
61
60
14
116,815
19
mindsdb
12
mindsdb/integrations/handlers/solr_handler/solr_handler.py
Python
9
{ "docstring": "\n Get a list with all of the tables in Solr\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/mindsdb/mindsdb.git
10
attach
def attach(self, filename=None, content=None, mimetype=None): if isinstance(filename, MIMEBase): if content is not None or mimetype is not None: raise ValueError( "content and mimetype must not be given when a MIMEBase " "instance is provided." ) self.attachments.append(filename) elif content is None: raise ValueError("content must be provided.") else: mimetype = ( mimetype or mimetypes.guess_type(filename)[0] or DEFAULT_ATTACHMENT_MIME_TYPE ) basetype, subtype = mimetype.split("/", 1) if basetype == "text": if isinstance(content, bytes): try: content = content.decode() except UnicodeDecodeError: # If mimetype suggests the file is text but it's # actually binary, read() raises a UnicodeDecodeError. mimetype = DEFAULT_ATTACHMENT_MIME_TYPE self.attachments.append((filename, content, mimetype))
9c19aff7c7561e3a82978a272ecdaad40dda5c00
17
message.py
221
Refs #33476 -- Reformatted code with Black.
50,791
0
470
134
67
204,574
96
django
19
django/core/mail/message.py
Python
24
{ "docstring": "\n Attach a file with the given filename and content. The filename can\n be omitted and the mimetype is guessed, if not provided.\n\n If the first parameter is a MIMEBase subclass, insert it directly\n into the resulting message attachments.\n\n For a text/* mimetype (guessed or specified), when a bytes object is\n specified as content, decode it as UTF-8. If that fails, set the\n mimetype to DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content.\n ", "language": "en", "n_whitespaces": 127, "n_words": 70, "vocab_size": 50 }
https://github.com/django/django.git
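Illustrative note (not part of the record above): the attach method there is django.core.mail.EmailMessage.attach. A hedged usage sketch, assuming a configured Django project; the addresses, filenames, and file contents are placeholders.

from email.mime.image import MIMEImage
from django.core.mail import EmailMessage

msg = EmailMessage("Report", "See attachments.", "[email protected]", ["[email protected]"])

# text/* content given as bytes is decoded as UTF-8, per the docstring above
msg.attach("notes.txt", b"plain text body", "text/plain")

# a prepared MIMEBase instance is appended directly (content/mimetype must be omitted)
msg.attach(MIMEImage(b"\x89PNG...", _subtype="png"))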
2
safestring_in_template_exception
def safestring_in_template_exception(request): template = Template('{% extends "<script>alert(1);</script>" %}') try: template.render(Context()) except Exception: return technical_500_response(request, *sys.exc_info())
c5c7a15b09368a58340d3a65ba9d1f1441e92dc8
13
views.py
67
Fixed #33461 -- Escaped template errors in the technical 500 debug page.
50,242
0
41
37
15
203,167
15
django
10
tests/view_tests/views.py
Python
6
{ "docstring": "\n Trigger an exception in the template machinery which causes a SafeString\n to be inserted as args[0] of the Exception.\n ", "language": "en", "n_whitespaces": 29, "n_words": 19, "vocab_size": 18 }
https://github.com/django/django.git
1
test_fillna_frame
def test_fillna_frame(self): super().test_fillna_frame() unhashable = pytest.mark.xfail(reason="Unhashable")
24652cf178c12562585639cba39c46d62b95f107
9
test_json.py
47
TST: Convert skip -> xfail (#46427)
39,706
0
19
13
6
165,750
6
pandas
8
pandas/tests/extension/json/test_json.py
Python
2
{ "docstring": "We treat dictionaries as a mapping in fillna, not a scalar.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
https://github.com/pandas-dev/pandas.git
1
test_import_missing_required
def test_import_missing_required(self): # Set one of our CustomFields to required CustomField.objects.filter(name='text').update(required=True) form_data = { 'name': 'Site 1', 'slug': 'site-1', } form = SiteImportForm(data=form_data) self.assertFalse(form.is_valid()) self.assertIn('cf_text', form.errors)
23c0ca456f2753dc35aa2717f69cdc62b405a233
11
test_customfields.py
115
#4347: Rename NetBoxModelCSVForm to NetBoxModelImportForm
78,301
0
104
63
25
266,134
26
netbox
16
netbox/extras/tests/test_customfields.py
Python
9
{ "docstring": "\n Attempt to import an object missing a required custom field.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/netbox-community/netbox.git
1
disable_constraint_checking
def disable_constraint_checking(self): with self.cursor() as cursor: cursor.execute("SET foreign_key_checks=0") return True
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
base.py
46
Refs #33476 -- Reformatted code with Black.
50,989
0
42
23
10
204,952
10
django
4
django/db/backends/mysql/base.py
Python
4
{ "docstring": "\n Disable foreign key checks, primarily for use in adding rows with\n forward references. Always return True to indicate constraint checks\n need to be re-enabled.\n ", "language": "en", "n_whitespaces": 53, "n_words": 24, "vocab_size": 23 }
https://github.com/django/django.git
2
_set_mouse_bindings
def _set_mouse_bindings(self) -> None: logger.debug("Binding mouse events") if system() == "Linux": self._canvas.tag_bind(self._canvas.image_id, "<Button-4>", self._on_bound_zoom) self._canvas.tag_bind(self._canvas.image_id, "<Button-5>", self._on_bound_zoom) else: self._canvas.tag_bind(self._canvas.image_id, "<MouseWheel>", self._on_bound_zoom) self._canvas.tag_bind(self._canvas.image_id, "<Button-1>", self._on_mouse_click) self._canvas.tag_bind(self._canvas.image_id, "<B1-Motion>", self._on_mouse_drag) logger.debug("Bound mouse events")
7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5
12
preview_tk.py
198
Training - Use custom preview pop-out
20,985
0
112
119
22
101,575
30
faceswap
11
lib/training/preview_tk.py
Python
15
{ "docstring": " Set the mouse bindings for interacting with the preview image\n\n Mousewheel: Zoom in and out\n Mouse click: Move image\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 17 }
https://github.com/deepfakes/faceswap.git
3
sections
def sections(self) -> List[str]: return sorted(set(plugin.split(".")[0] for plugin in self._config.config.sections() if plugin.split(".")[0] != "writer"))
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
15
preview.py
87
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
20,856
0
53
51
14
101,443
14
faceswap
10
tools/preview/preview.py
Python
4
{ "docstring": " list: The sorted section names that exist within the convert Configuration options. ", "language": "en", "n_whitespaces": 13, "n_words": 12, "vocab_size": 12 }
https://github.com/deepfakes/faceswap.git
1
test_parse_dockerfile_label
def test_parse_dockerfile_label(self): mock_dockerfile = assert "airbyte/source-salesforce" == parse_dockerfile_repository_label(mock_dockerfile)
fa22d32e146dff62a4be881c12e2f611e556abca
8
build_report.py
29
Filter daily build report to only GA & Beta connectors (#12684)
711
0
30
15
8
5,043
8
airbyte
4
tools/bin/build_report.py
Python
7
{ "docstring": "\nENTRYPOINT [\"python\", \"/airbyte/integration_code/main.py\"]\n\nLABEL io.airbyte.version=1.0.8\nLABEL io.airbyte.name=airbyte/source-salesforce", "language": "en", "n_whitespaces": 4, "n_words": 7, "vocab_size": 6 }
https://github.com/airbytehq/airbyte.git
6
_fetch_next_result
def _fetch_next_result(self) -> Optional[List[Dict]]: while True: results = ray.get(self._backend_executor_actor.get_next_results.remote()) if results is None: return None first_result = results[0] result_type = first_result.type if result_type is TrainingResultType.REPORT: result_data = [self._backend.decode_data(r.data) for r in results] return result_data elif result_type is TrainingResultType.CHECKPOINT: self._checkpoint_manager._process_checkpoint( results, decode_checkpoint_fn=self._backend.decode_data ) # Iterate until next REPORT call or training has finished. else: raise TrainBackendError( f"Unexpected result type: " f"{result_type}. " f"Expected one of " f"{[type in TrainingResultType]}" )
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
17
trainer.py
198
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,641
0
375
115
54
132,035
69
ray
26
python/ray/train/trainer.py
Python
31
{ "docstring": "Fetch next results produced by ``train.report()`` from each worker.\n\n Assumes ``start_training`` has already been called.\n\n Returns:\n A list of dictionaries of values passed to ``train.report()`` from\n each worker. Each item corresponds to an intermediate result\n a single worker. If there are no more items to fetch,\n returns None.\n ", "language": "en", "n_whitespaces": 125, "n_words": 48, "vocab_size": 40 }
https://github.com/ray-project/ray.git
3
requires
def requires(self, extras=()): dm = self._dep_map deps = [] deps.extend(dm.get(None, ())) for ext in extras: try: deps.extend(dm[safe_extra(ext)]) except KeyError: raise UnknownExtra( "%s has no such extra feature %r" % (self, ext) ) return deps
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
14
__init__.py
113
upd; format
13,115
0
162
69
32
63,049
34
transferlearning
12
.venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py
Python
13
{ "docstring": "List of Requirements needed for this distro if `extras` are used", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/jindongwang/transferlearning.git
1
upgrade
def upgrade(): with op.get_context().autocommit_block(): op.execute( ) op.execute( ) op.execute( ) op.execute( )
b5b3d808bf059294a7adf17156e4ccdb5a3799da
11
2022_06_04_133535_d60c18774a5d_add_indexes_for_partial_name_matches.py
77
Add index migrations
11,462
0
118
39
6
56,199
12
prefect
5
src/prefect/orion/database/migrations/versions/postgresql/2022_06_04_133535_d60c18774a5d_add_indexes_for_partial_name_matches.py
Python
30
{ "docstring": "\n CREATE INDEX CONCURRENTLY \n trgm_ix_flow_name \n ON flow USING gin (name gin_trgm_ops);\n \n CREATE INDEX CONCURRENTLY \n trgm_ix_flow_run_name \n ON flow_run USING gin (name gin_trgm_ops);\n \n CREATE INDEX CONCURRENTLY \n trgm_ix_task_run_name \n ON task_run USING gin (name gin_trgm_ops);\n \n CREATE INDEX CONCURRENTLY \n trgm_ix_deployment_name \n ON deployment USING gin (name gin_trgm_ops);\n ", "language": "en", "n_whitespaces": 228, "n_words": 40, "vocab_size": 16 }
https://github.com/PrefectHQ/prefect.git
9
_in_unstable_openblas_configuration
def _in_unstable_openblas_configuration(): # Import libraries which might load OpenBLAS. import numpy # noqa import scipy # noqa modules_info = threadpool_info() open_blas_used = any(info["internal_api"] == "openblas" for info in modules_info) if not open_blas_used: return False # OpenBLAS 0.3.16 fixed unstability for arm64, see: # https://github.com/xianyi/OpenBLAS/blob/1b6db3dbba672b4f8af935bd43a1ff6cff4d20b7/Changelog.txt#L56-L58 # noqa openblas_arm64_stable_version = parse_version("0.3.16") for info in modules_info: if info["internal_api"] != "openblas": continue openblas_version = info.get("version") openblas_architecture = info.get("architecture") if openblas_version is None or openblas_architecture is None: # Cannot be sure that OpenBLAS is good enough. Assume unstable: return True if ( openblas_architecture == "neoversen1" and parse_version(openblas_version) < openblas_arm64_stable_version ): # See discussions in https://github.com/numpy/numpy/issues/19411 return True return False
6a16763c008446953ed9380e084ed70a285a9f7e
12
__init__.py
186
ENH Introduce `PairwiseDistancesReduction` and `PairwiseDistancesArgKmin` (feature branch) (#22134) * MAINT Introduce Pairwise Distances Reductions private submodule (#22064) * MAINT Introduce FastEuclideanPairwiseArgKmin (#22065) * fixup! Merge branch 'main' into pairwise-distances-argkmin Remove duplicated Bunch * MAINT Plug `PairwiseDistancesArgKmin` as a back-end (#22288) * Forward pairwise_dist_chunk_size in the configuration * Flip finalized results for PairwiseDistancesArgKmin The previous would have made the code more complex by introducing some boilerplate for the interface plugs. Having it this way actually simplifies the code. This also removes the haversine branch for test_pairwise_distances_argkmin * Plug PairwiseDistancesArgKmin as a back-end * Adapt test accordingly * Add whats_new entry * Change input validation order for kneighbors * Remove duplicated test_neighbors_distance_metric_deprecation * Adapt the documentation * Add mahalanobis case to test fixtures * Correct whats_new entry * CLN Remove unneeded private metric attribute This was needed when 'fast_sqeuclidean' and 'fast_euclidean' were present to choose the best implementation based on the user specification. Those metric have been removed since then, making this attribute useless. * TST Assert FutureWarning instead of DeprecationWarning in test_neighbors_metrics * MAINT Add use_pairwise_dist_activate to scikit-learn config * TST Add a test for the 'brute' backends' results' consistency Co-authored-by: Thomas J. Fan <[email protected]> * fixup! MAINT Add use_pairwise_dist_activate to scikit-learn config * fixup! fixup! MAINT Add use_pairwise_dist_activate to scikit-learn config * TST Filter FutureWarning for WMinkowskiDistance * MAINT pin numpydoc in arm for now (#22292) * fixup! TST Filter FutureWarning for WMinkowskiDistance * Revert keywords arguments removal for the GEMM trick for 'euclidean' * MAINT pin max numpydoc for now (#22286) * Add 'haversine' to CDIST_PAIRWISE_DISTANCES_REDUCTION_COMMON_METRICS * fixup! Add 'haversine' to CDIST_PAIRWISE_DISTANCES_REDUCTION_COMMON_METRICS * Apply suggestions from code review * MAINT Document some config parameters for maintenance Also rename one of them. * FIX Support and test one of 'sqeuclidean' specification Co-authored-by: Olivier Grisel <[email protected]> * FIX Various typos fix and correct haversine 'haversine' is not supported by cdist. * Directly use get_config * CLN Apply comments from review * Motivate swapped returned values * TST Remove mahalanobis from test fixtures * MNT Add comment regaduction functions' signatures * TST Complete test for `pairwise_distance_{argmin,argmin_min}` (#22371) * DOC Add sub-pull requests to the whats_new entry * DOC place comment inside functions * DOC move up whatsnew entry Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Christian Lorentzen <[email protected]> Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
75,509
0
268
100
69
258,990
104
scikit-learn
13
sklearn/utils/__init__.py
Python
21
{ "docstring": "Return True if in an unstable configuration for OpenBLAS", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/scikit-learn/scikit-learn.git
1
test_result_list_set_empty_value_display_in_model_admin
def test_result_list_set_empty_value_display_in_model_admin(self): new_child = Child.objects.create(name="name", parent=None) request = self.factory.get("/child/") request.user = self.superuser m = EmptyValueChildAdmin(Child, admin.site) cl = m.get_changelist_instance(request) cl.formset = None template = Template( "{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}" ) context = Context({"cl": cl, "opts": Child._meta}) table_output = template.render(context) link = reverse("admin:admin_changelist_child_change", args=(new_child.id,)) row_html = build_tbody_html( new_child.id, link, '<td class="field-age_display">&amp;dagger;</td>' '<td class="field-age">-empty-</td>', ) self.assertNotEqual( table_output.find(row_html), -1, "Failed to find expected row element: %s" % table_output, )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
tests.py
235
Refs #33476 -- Reformatted code with Black.
51,813
0
273
140
57
206,964
73
django
35
tests/admin_changelist/tests.py
Python
24
{ "docstring": "\n Empty value display can be set in ModelAdmin or individual fields.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
https://github.com/django/django.git
5
get_stacktrace_processing_task
def get_stacktrace_processing_task(infos, processors): by_processor = {} to_lookup = {} # by_stacktrace_info requires stable sorting as it is used in # StacktraceProcessingTask.iter_processable_stacktraces. This is important # to guarantee reproducible symbolicator requests. by_stacktrace_info = {} for info in infos: processable_frames = get_processable_frames(info, processors) for processable_frame in processable_frames: processable_frame.processor.preprocess_frame(processable_frame) by_processor.setdefault(processable_frame.processor, []).append(processable_frame) by_stacktrace_info.setdefault(processable_frame.stacktrace_info, []).append( processable_frame ) if processable_frame.cache_key is not None: to_lookup[processable_frame.cache_key] = processable_frame frame_cache = lookup_frame_cache(to_lookup) for cache_key, processable_frame in to_lookup.items(): processable_frame.cache_value = frame_cache.get(cache_key) return StacktraceProcessingTask( processable_stacktraces=by_stacktrace_info, processors=by_processor )
286bf2ae7ecfdd6698d8fb1cd4753f107159d4d2
14
processing.py
211
ref: use dict instead of OrderedDict since sentry is >python3.6 (#39695) partially automated (especially the fixtures) also via `\(([^]+), (.*)\),$` -> `\1: \2,`
18,122
0
225
132
54
86,534
76
sentry
23
src/sentry/stacktraces/processing.py
Python
20
{ "docstring": "Returns a list of all tasks for the processors. This can skip over\n processors that seem to not handle any frames.\n ", "language": "en", "n_whitespaces": 28, "n_words": 21, "vocab_size": 21 }
https://github.com/getsentry/sentry.git
3
dup_chebyshevu
def dup_chebyshevu(n, K): if n < 1: return [K.one] m2, m1 = [K.one], [K(2), K.zero] for i in range(2, n+1): m2, m1 = m1, dup_sub(dup_mul_ground(dup_lshift(m1, 1, K), K(2), K), m2, K) return m1 @public
3d30d00c37371f142e6a0e9dc5058752d8c9d401
@public
15
orthopolys.py
129
Restore domain elements in dup_* functions
49,373
1
62
84
27
199,717
34
sympy
13
sympy/polys/orthopolys.py
Python
7
{ "docstring": "Low-level implementation of Chebyshev polynomials of the second kind.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
https://github.com/sympy/sympy.git
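Illustrative note (not part of the record above): dup_chebyshevu builds dense coefficient lists for Chebyshev polynomials of the second kind over a domain K. A hedged sketch, assuming the helper is importable from sympy.polys.orthopolys as the record's path suggests (this is version-dependent).

from sympy.polys.domains import ZZ
from sympy.polys.orthopolys import dup_chebyshevu  # assumed import path per the record

# Dense coefficients, highest degree first: U_3(x) = 8*x**3 - 4*x
print(dup_chebyshevu(3, ZZ))  # expected [8, 0, -4, 0]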
1
test_media_from_actions_form
def test_media_from_actions_form(self): response = self.client.get(reverse("admin:admin_views_subscriber_changelist")) media_path = MediaActionForm.Media.js[0] self.assertIsInstance(response.context["action_form"], MediaActionForm) self.assertIn("media", response.context) self.assertIn(media_path, response.context["media"]._js) self.assertContains(response, media_path)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
test_actions.py
125
Refs #33476 -- Reformatted code with Black.
51,978
0
65
75
15
207,487
16
django
15
tests/admin_views/test_actions.py
Python
7
{ "docstring": "\n The action form's media is included in the changelist view's media.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
https://github.com/django/django.git
3
create_cgroup
def create_cgroup(self, name): if isinstance(name, str): name = name.encode() node = Node(name, parent=self) if node in self.children: raise RuntimeError('Node {} already exists under {}'.format(name, self.path)) fp = os.path.join(self.full_path, name) os.mkdir(fp) self.children.append(node) return node
68aa01936c37f9b03468cc83e3f32766ae0ba5cb
12
nodes.py
135
Add cgroupspy to _vendor folder (#22206) This is just importing existing cgroupspy library without code modifications. In the next step we will modify the cgroupspy code to work from the new location, then we will fix it to implement Python 3.10 compatibility and finally we will change airflow to use the vendored package instead of the original package. This is part of the effort needed to implement Python 3.10 compatibility: #22050
8,836
0
111
84
28
46,269
33
airflow
19
airflow/_vendor/cgroupspy/nodes.py
Python
10
{ "docstring": "\n Create a cgroup by name and attach it under this node.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
https://github.com/apache/airflow.git
2
_get_all_child_nodes
def _get_all_child_nodes(self) -> Set["DAGNode"]: scanner = _PyObjScanner() children = set() for n in scanner.find_nodes( [ self._bound_args, self._bound_kwargs, self._bound_other_args_to_resolve, ] ): children.add(n) return children
54a71e6c4f430f210685e3b1bccef8051dd1260c
9
dag_node.py
85
[Ray DAG] Add `execute()` interface to take user inputs with ENTRY_POINT tag. (#22196) ## Diff Summary Current implementation of DAGNode pre-bind inputs and the signature of `def execute(self)` doesn't take user input yet. This PR extends the interface to take user input, mark DAG entrypoint methods as first stop of all user requests in a DAG. It's needed to unblock next step serve pipeline implementation to serve user requests. Closes #22196 #22197 Notable changes: - Added a `DAG_ENTRY_POINT` flag in ray dag API to annotate DAG entrypoint functions. Function or class method only. All marked functions will receive identical input from user as first layer of DAG. - Changed implementations of ClassNode and FunctionNode accordingly to handle different execution for a node marked as entrypoint or not. - Added a `kwargs_to_resolve` kwarg in the interface of `DAGNode` to handle args that sub-classes need to use to resolve it's implementation without exposing all terms to parent class level. - This is particularly important for ClassMethodNode binding, so we can have implementations to track method name, parent ClassNode as well as previous class method call without existiting - Changed implementation of `_copy()` to handle execution of `kwargs_to_resolve`. - Changed implementation of `_apply_and_replace_all_child_nodes()` to fetch DAGNode type in `kwargs_to_resolve`. - Added pretty printed lines for `kwargs_to_resolve`
33,256
0
143
52
21
144,569
23
ray
13
python/ray/experimental/dag/dag_node.py
Python
19
{ "docstring": "Return the set of nodes referenced by the args, kwargs, and\n args_to_resolve in current node, even they're deeply nested.\n\n Examples:\n f.remote(a, [b]) -> set(a, b)\n f.remote(a, [b], key={\"nested\": [c]}) -> set(a, b, c)\n ", "language": "en", "n_whitespaces": 76, "n_words": 33, "vocab_size": 29 }
https://github.com/ray-project/ray.git
1
get_feature_names_out
def get_feature_names_out(self, input_features=None): input_features = _check_feature_names_in(self, input_features) return self._encoder.get_feature_names_out(input_features)
279388d9ed2ea83194dd45a2d78161be30b43aa7
8
_discretization.py
44
DOC Improve get_feature_names_out docstrings (#22718) Co-authored-by: Thomas J. Fan <[email protected]>
75,576
0
30
27
9
259,117
9
scikit-learn
5
sklearn/preprocessing/_discretization.py
Python
3
{ "docstring": "Get output feature names.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n ", "language": "en", "n_whitespaces": 219, "n_words": 74, "vocab_size": 51 }
https://github.com/scikit-learn/scikit-learn.git
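Illustrative note (not part of the record above): that get_feature_names_out comes from KBinsDiscretizer, which delegates to its internal one-hot encoder. A hedged sketch; the sample data and feature names are invented, and the exact output strings depend on the scikit-learn version.

import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

X = np.array([[0.0, 1.0], [5.0, 6.0], [9.0, 2.0]])
est = KBinsDiscretizer(n_bins=2, encode="onehot", strategy="uniform").fit(X)

# one name per one-hot output column, derived from the given input feature names
print(est.get_feature_names_out(["height", "width"]))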
1
test_orders_without_channel
def test_orders_without_channel(staff_api_client, permission_manage_orders, orders): query = staff_api_client.user.user_permissions.add(permission_manage_orders) response = staff_api_client.post_graphql(query) edges = get_graphql_content(response)["data"]["orders"]["edges"] assert len(edges) == Order.objects.non_draft().count()
9effd5aec81acbdd2a1076c1d72bbee1afcc65a1
11
test_order.py
105
restructure order app tests (#11226)
5,253
0
32
62
15
29,670
17
saleor
17
saleor/graphql/order/tests/queries/test_order.py
Python
16
{ "docstring": "\n query OrdersQuery {\n orders(first: 10) {\n edges {\n node {\n id\n }\n }\n }\n }\n ", "language": "en", "n_whitespaces": 110, "n_words": 15, "vocab_size": 9 }
https://github.com/saleor/saleor.git
1
test_basic_add_GET
def test_basic_add_GET(self): add_url = reverse("admin_custom_urls:admin_custom_urls_action_add") self.assertTrue(add_url.endswith("/!add/")) response = self.client.get(add_url) self.assertIsInstance(response, TemplateResponse) self.assertEqual(response.status_code, 200)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
tests.py
88
Refs #33476 -- Reformatted code with Black.
51,855
0
55
51
12
207,064
13
django
13
tests/admin_custom_urls/tests.py
Python
6
{ "docstring": "\n Ensure GET on the add_view works.\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
https://github.com/django/django.git
2
shutdown
def shutdown(self): for deployment_state in self._deployment_states.values(): deployment_state.delete() # TODO(jiaodong): This might not be 100% safe since we deleted # everything without ensuring all shutdown goals are completed # yet. Need to address in follow-up PRs. self._kv_store.delete(CHECKPOINT_KEY) # TODO(jiaodong): Need to add some logic to prevent new replicas # from being created once shutdown signal is sent.
48adb6f7bb335b28fb0fb0d1190bd6c5dfc8ddfa
9
deployment_state.py
57
[serve] Introduce DeploymentStatus, poll for statuses instead of using async goals (#22121)
33,278
0
123
30
46
144,664
56
ray
8
python/ray/serve/deployment_state.py
Python
4
{ "docstring": "\n Shutdown all running replicas by notifying the controller, and leave\n it to the controller event loop to take actions afterwards.\n\n Once shutdown signal is received, it will also prevent any new\n deployments or replicas from being created.\n\n One can send multiple shutdown signals but won't effectively make any\n difference compare to calling it once.\n ", "language": "en", "n_whitespaces": 104, "n_words": 54, "vocab_size": 46 }
https://github.com/ray-project/ray.git
2
from_spmatrix
def from_spmatrix(cls, data, index=None, columns=None) -> DataFrame: from pandas._libs.sparse import IntIndex from pandas import DataFrame data = data.tocsc() index, columns = cls._prep_index(data, index, columns) n_rows, n_columns = data.shape # We need to make sure indices are sorted, as we create # IntIndex with no input validation (i.e. check_integrity=False ). # Indices may already be sorted in scipy in which case this adds # a small overhead. data.sort_indices() indices = data.indices indptr = data.indptr array_data = data.data dtype = SparseDtype(array_data.dtype, 0) arrays = [] for i in range(n_columns): sl = slice(indptr[i], indptr[i + 1]) idx = IntIndex(n_rows, indices[sl], check_integrity=False) arr = SparseArray._simple_new(array_data[sl], idx, dtype) arrays.append(arr) return DataFrame._from_arrays( arrays, columns=columns, index=index, verify_integrity=False )
f65417656ba8c59438d832b6e2a431f78d40c21c
12
accessor.py
261
TYP: more return annotations in core/ (#47618) * TYP: more return annotations in core/ * from __future__ import annotations * more __future__
40,093
0
299
171
91
167,731
111
pandas
34
pandas/core/arrays/sparse/accessor.py
Python
49
{ "docstring": "\n Create a new DataFrame from a scipy sparse matrix.\n\n .. versionadded:: 0.25.0\n\n Parameters\n ----------\n data : scipy.sparse.spmatrix\n Must be convertible to csc format.\n index, columns : Index, optional\n Row and column labels to use for the resulting DataFrame.\n Defaults to a RangeIndex.\n\n Returns\n -------\n DataFrame\n Each column of the DataFrame is stored as a\n :class:`arrays.SparseArray`.\n\n Examples\n --------\n >>> import scipy.sparse\n >>> mat = scipy.sparse.eye(3)\n >>> pd.DataFrame.sparse.from_spmatrix(mat)\n 0 1 2\n 0 1.0 0.0 0.0\n 1 0.0 1.0 0.0\n 2 0.0 0.0 1.0\n ", "language": "en", "n_whitespaces": 290, "n_words": 81, "vocab_size": 59 }
https://github.com/pandas-dev/pandas.git
3
has_no_leverage
def has_no_leverage(self) -> bool:
    return ((self.leverage == 1.0 or self.leverage is None) and not self.is_short)
b58e811b1486ae62e835cbea3e40cf88128243a0
11
trade_model.py
48
Move trade/order Models to their own class
34,502
0
29
31
15
149,719
15
freqtrade
5
freqtrade/persistence/trade_model.py
Python
3
{ "docstring": "Returns true if this is a non-leverage, non-short trade", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/freqtrade/freqtrade.git
1
test_limited_filter
def test_limited_filter(self):
    response = self.client.get(reverse("admin:admin_views_thing_changelist"))
    self.assertContains(
        response,
        '<div id="changelist-filter">',
        msg_prefix="Expected filter not found in changelist view",
    )
    self.assertNotContains(
        response,
        '<a href="?color__id__exact=3">Blue</a>',
        msg_prefix="Changelist filter not correctly limited by limit_choices_to",
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
tests.py
80
Refs #33476 -- Reformatted code with Black.
52,009
0
137
45
25
207,583
29
django
9
tests/admin_views/tests.py
Python
12
{ "docstring": "Ensure admin changelist filters do not contain objects excluded via limit_choices_to.\n This also tests relation-spanning filters (e.g. 'color__value').\n ", "language": "en", "n_whitespaces": 32, "n_words": 18, "vocab_size": 17 }
https://github.com/django/django.git
3
new_locator
def new_locator(self, nx, ny, nx1=None, ny1=None):
    return AxesLocator(
        self, nx, ny,
        nx1 if nx1 is not None else nx + 1,
        ny1 if ny1 is not None else ny + 1)
df6f95703b60348e01603f98a439b133da2938a0
9
axes_divider.py
67
Improve mpl_toolkit documentation
23,789
0
78
49
21
109,880
31
matplotlib
7
lib/mpl_toolkits/axes_grid1/axes_divider.py
Python
5
{ "docstring": "\n Return a new `.AxesLocator` for the specified cell.\n\n Parameters\n ----------\n nx, nx1 : int\n Integers specifying the column-position of the\n cell. When *nx1* is None, a single *nx*-th column is\n specified. Otherwise, location of columns spanning between *nx*\n to *nx1* (but excluding *nx1*-th column) is specified.\n ny, ny1 : int\n Same as *nx* and *nx1*, but for row positions.\n ", "language": "en", "n_whitespaces": 157, "n_words": 59, "vocab_size": 46 }
https://github.com/matplotlib/matplotlib.git
5
random_port
def random_port() -> Optional[int]:
    import threading
    import multiprocessing
    from contextlib import closing
    import socket
933415bfa1f9eb89f935037014dfed816eb9815d
6
helper.py
38
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
1,713
0
29
131
11
9,827
14
jina
8
jina/helper.py
Python
28
{ "docstring": "\n Get a random available port number from '49153' to '65535'.\n\n :return: A random port.\n ", "language": "en", "n_whitespaces": 24, "n_words": 14, "vocab_size": 13 }
https://github.com/jina-ai/jina.git
1
local_devices_fixture
def local_devices_fixture():
    return json.loads(load_fixture("awair/local_devices.json"))


@pytest.fixture(name="gen1_data", scope="session")
ebbff7b60e43f17d65ead811d314602b9daddfc4
@pytest.fixture(name="gen1_data", scope="session")
10
conftest.py
54
Add Awair Local API support (#75535)
102,572
1
11
15
6
303,763
6
core
8
tests/components/awair/conftest.py
Python
2
{ "docstring": "Fixture representing devices returned by Awair local API.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
1
in_test_phase
def in_test_phase(x, alt, training=None):
    return in_train_phase(alt, x, training=training)


# NN OPERATIONS
@keras_export("keras.backend.relu")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.backend.relu") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
8
backend.py
65
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,162
1
16
23
14
269,534
14
keras
12
keras/backend.py
Python
2
{ "docstring": "Selects `x` in test phase, and `alt` otherwise.\n\n Note that `alt` should have the *same shape* as `x`.\n\n Args:\n x: What to return in test phase\n (tensor or callable that returns a tensor).\n alt: What to return otherwise\n (tensor or callable that returns a tensor).\n training: Optional scalar tensor\n (or Python boolean, or Python integer)\n specifying the learning phase.\n\n Returns:\n Either `x` or `alt` based on `K.learning_phase`.\n ", "language": "en", "n_whitespaces": 151, "n_words": 67, "vocab_size": 47 }
https://github.com/keras-team/keras.git
1
parse
def parse(self, stream, *args, **kwargs):
    self._parse(stream, False, None, *args, **kwargs)
    return self.tree.getDocument()
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
8
html5parser.py
58
upd; format
12,985
0
33
38
11
62,541
12
transferlearning
8
.venv/lib/python3.8/site-packages/pip/_vendor/html5lib/html5parser.py
Python
3
{ "docstring": "Parse a HTML document into a well-formed tree\n\n :arg stream: a file-like object or string containing the HTML to be parsed\n\n The optional encoding parameter must be a string that indicates\n the encoding. If specified, that encoding will be used,\n regardless of any BOM or later declaration (such as in a meta\n element).\n\n :arg scripting: treat noscript elements as if JavaScript was turned on\n\n :returns: parsed tree\n\n Example:\n\n >>> from html5lib.html5parser import HTMLParser\n >>> parser = HTMLParser()\n >>> parser.parse('<html><body><p>This is a doc</p></body></html>')\n <Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0>\n\n ", "language": "en", "n_whitespaces": 194, "n_words": 86, "vocab_size": 67 }
https://github.com/jindongwang/transferlearning.git
4
numeric_assortativity_coefficient
def numeric_assortativity_coefficient(G, attribute, nodes=None):
    if nodes is None:
        nodes = G.nodes
    vals = {G.nodes[n][attribute] for n in nodes}
    mapping = {d: i for i, d, in enumerate(vals)}
    M = attribute_mixing_matrix(G, attribute, nodes, mapping)
    return _numeric_ac(M, mapping)
34d9d630bb02426d297d3e20fedb7da8c3ced03a
10
correlation.py
111
MAINT: Cleanup assortativity module, remove unused variables (#5301) Remove unused variables, sort imports, raise errors instead of accepting invalid arguments silently Co-authored-by: Dan Schult <[email protected]>
41,834
0
61
75
28
176,320
36
networkx
13
networkx/algorithms/assortativity/correlation.py
Python
7
{ "docstring": "Compute assortativity for numerical node attributes.\n\n Assortativity measures the similarity of connections\n in the graph with respect to the given numeric attribute.\n\n Parameters\n ----------\n G : NetworkX graph\n\n attribute : string\n Node attribute key.\n\n nodes: list or iterable (optional)\n Compute numeric assortativity only for attributes of nodes in\n container. The default is all nodes.\n\n Returns\n -------\n r: float\n Assortativity of graph for given attribute\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> G.add_nodes_from([0, 1], size=2)\n >>> G.add_nodes_from([2, 3], size=3)\n >>> G.add_edges_from([(0, 1), (2, 3)])\n >>> print(nx.numeric_assortativity_coefficient(G, \"size\"))\n 1.0\n\n Notes\n -----\n This computes Eq. (21) in Ref. [1]_ , which is the Pearson correlation\n coefficient of the specified (scalar valued) attribute across edges.\n\n References\n ----------\n .. [1] M. E. J. Newman, Mixing patterns in networks\n Physical Review E, 67 026126, 2003\n ", "language": "en", "n_whitespaces": 244, "n_words": 129, "vocab_size": 99 }
https://github.com/networkx/networkx.git
3
pretty_duration
def pretty_duration(hours):
    seconds = int(3600 * hours)
    days, seconds = divmod(seconds, 86400)
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    if days > 0:
        return "%dd %dh %dm" % (days, hours, minutes)
    if hours > 0:
        return "%dh %dm" % (hours, minutes)
    return "%dm" % minutes
73a368c24246b081cdb98923ca3180937d436c3b
9
helpers.py
123
Refactor history_stats to minimize database access (part 2) (#70255)
95,828
0
126
76
30
296,854
48
core
7
homeassistant/components/history_stats/helpers.py
Python
10
{ "docstring": "Format a duration in days, hours, minutes, seconds.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
2
_normalize_entries
def _normalize_entries(entries, separators=None):
    norm_files = {}
    for entry in entries:
        norm_files[normalize_file(entry.path, separators=separators)] = entry
    return norm_files
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
12
util.py
57
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,206
0
35
36
13
130,281
16
ray
7
python/ray/_private/thirdparty/pathspec/util.py
Python
5
{ "docstring": "\n Normalizes the entry paths to use the POSIX path separator.\n\n *entries* (:class:`~collections.abc.Iterable` of :class:`.TreeEntry`)\n contains the entries to be normalized.\n\n *separators* (:class:`~collections.abc.Collection` of :class:`str`; or\n :data:`None`) optionally contains the path separators to normalize.\n See :func:`normalize_file` for more information.\n\n Returns a :class:`dict` mapping the each normalized file path (:class:`str`)\n to the entry (:class:`.TreeEntry`)\n ", "language": "en", "n_whitespaces": 80, "n_words": 52, "vocab_size": 39 }
https://github.com/ray-project/ray.git
1
_connect_picklable
def _connect_picklable(self, signal, func):
    cid = self.connect(signal, func)
    self._pickled_cids.add(cid)
    return cid


# Keep a reference to sys.is_finalizing, as sys may have been cleared out
# at that point.
4caa5ba85c11cc499eb1a45372eb249173e6b1fd
8
__init__.py
50
Simplify the internal API to connect picklable callbacks. The API may be made public in the future, but right now the point is only to make its internal use more ergonomic as it can be helpful in some other internal places too (to be updated in a separate patch).
22,893
0
62
30
26
107,727
28
matplotlib
8
lib/matplotlib/cbook/__init__.py
Python
4
{ "docstring": "\n Like `.connect`, but the callback is kept when pickling/unpickling.\n\n Currently internal-use only.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
https://github.com/matplotlib/matplotlib.git
1
_get_win_folder_from_registry
def _get_win_folder_from_registry(csidl_name):
    import winreg as _winreg

    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]
    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
    return dir
47b5c8af57f0f927d50fe94f3474b09a54292553
10
appdirs.py
99
Standardize cache directory
7,372
0
91
56
29
40,277
32
seaborn
11
seaborn/external/appdirs.py
Python
13
{ "docstring": "This is a fallback technique at best. I'm not sure if using the\n registry for this guarantees us the correct answer for all CSIDL_*\n names.\n ", "language": "en", "n_whitespaces": 34, "n_words": 25, "vocab_size": 23 }
https://github.com/mwaskom/seaborn.git
4
getPrimeNumbers
def getPrimeNumbers(N):
    # precondition
    assert isinstance(N, int) and (N > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, N + 1):
        if isPrime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


# -----------------------------------------
f0af0c43340763724f139fa68aa1e5a9ffe458b4
11
primelib.py
102
refactor: clean code Signed-off-by: slowy07 <[email protected]>
4,442
0
115
60
54
22,751
68
Python
10
primelib/primelib.py
Python
8
{ "docstring": "\n input: positive integer 'N' > 2\n returns a list of prime numbers from 2 up to N (inclusive)\n This function is more efficient as function 'sieveEr(...)'\n ", "language": "en", "n_whitespaces": 39, "n_words": 26, "vocab_size": 24 }
https://github.com/geekcomputers/Python.git
1
onBootstrapBinary
def onBootstrapBinary(self, filename):
    # Virtual method, pylint: disable=no-self-use,unused-argument
    return None
68e62e62d6d1c92209dcc1c7b60c3b5e09bcf308
6
PluginBase.py
19
Plugins: Add support for upx compression of onefile tempdir Linux * This also avoids the padding done for Windows signing tool to be applied to other platforms, not needed there. * For UPX not preserving payload outside of Windows, we need to apply it before the payload gets attached on Linux with temp file mode. * Also do not do it AppImage, it hates that to happen, probably that is also a payload attachment problem.
42,707
0
31
10
10
178,479
10
Nuitka
3
nuitka/plugins/PluginBase.py
Python
2
{ "docstring": "Called after successfully creating a bootstrap binary, but without payload.\n\n Args:\n filename: the created bootstrap binary, will be modified later\n\n Returns:\n None\n ", "language": "en", "n_whitespaces": 65, "n_words": 22, "vocab_size": 20 }
https://github.com/Nuitka/Nuitka.git
1
FDistribution
def FDistribution(name, d1, d2):
    return rv(name, FDistributionDistribution, (d1, d2))

#-------------------------------------------------------------------------------
# Fisher Z distribution --------------------------------------------------------
9ad8ab9fe58051cf11626ba6654852fcfec60147
8
crv_types.py
36
Documentation cleanup 5
48,098
0
19
24
16
196,680
16
sympy
6
sympy/stats/crv_types.py
Python
58
{ "docstring": "\n Create a continuous random variable with a F distribution.\n\n Explanation\n ===========\n\n The density of the F distribution is given by\n\n .. math::\n f(x) := \\frac{\\sqrt{\\frac{(d_1 x)^{d_1} d_2^{d_2}}\n {(d_1 x + d_2)^{d_1 + d_2}}}}\n {x \\mathrm{B} \\left(\\frac{d_1}{2}, \\frac{d_2}{2}\\right)}\n\n with :math:`x > 0`.\n\n Parameters\n ==========\n\n d1 : `d_1 > 0`, where `d_1` is the degrees of freedom (`n_1 - 1`)\n d2 : `d_2 > 0`, where `d_2` is the degrees of freedom (`n_2 - 1`)\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import FDistribution, density\n >>> from sympy import Symbol, pprint\n\n >>> d1 = Symbol(\"d1\", positive=True)\n >>> d2 = Symbol(\"d2\", positive=True)\n >>> z = Symbol(\"z\")\n\n >>> X = FDistribution(\"x\", d1, d2)\n\n >>> D = density(X)(z)\n >>> pprint(D, use_unicode=False)\n d2\n -- ______________________________\n 2 / d1 -d1 - d2\n d2 *\\/ (d1*z) *(d1*z + d2)\n --------------------------------------\n /d1 d2\\\n z*B|--, --|\n \\2 2 /\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/F-distribution\n .. [2] http://mathworld.wolfram.com/F-Distribution.html\n\n ", "language": "en", "n_whitespaces": 374, "n_words": 148, "vocab_size": 100 }
https://github.com/sympy/sympy.git
13
read_lines_to_outerboundary
def read_lines_to_outerboundary(self):
    next_boundary = b"--" + self.outerboundary
    last_boundary = next_boundary + b"--"
    delim = b""
    last_line_lfend = True
    _read = 0
    while 1:
        if self.limit is not None and 0 <= self.limit <= _read:
            break
        line = self.fp.readline(1<<16) # bytes
        self.bytes_read += len(line)
        _read += len(line)
        if not line:
            self.done = -1
            break
        if delim == b"\r":
            line = delim + line
            delim = b""
        if line.startswith(b"--") and last_line_lfend:
            strippedline = line.rstrip()
            if strippedline == next_boundary:
                break
            if strippedline == last_boundary:
                self.done = 1
                break
        odelim = delim
        if line.endswith(b"\r\n"):
            delim = b"\r\n"
            line = line[:-2]
            last_line_lfend = True
        elif line.endswith(b"\n"):
            delim = b"\n"
            line = line[:-1]
            last_line_lfend = True
        elif line.endswith(b"\r"):
            # We may interrupt \r\n sequences if they span the 2**16
            # byte boundary
            delim = b"\r"
            line = line[:-1]
            last_line_lfend = False
        else:
            delim = b""
            last_line_lfend = False
        self.__write(odelim + line)
8198943edd73a363c266633e1aa5b2a9e9c9f526
14
cgi.py
393
add python 3.10.4 for windows
56,312
0
709
230
69
221,275
145
XX-Net
21
python3.10.4/Lib/cgi.py
Python
42
{ "docstring": "Internal: read lines until outerboundary.\n Data is read as bytes: boundaries and line ends must be converted\n to bytes for comparisons.\n ", "language": "en", "n_whitespaces": 42, "n_words": 21, "vocab_size": 20 }
https://github.com/XX-net/XX-Net.git
3
suggested_unit_of_measurement
def suggested_unit_of_measurement(self) -> str | None:
    if hasattr(self, "_attr_suggested_unit_of_measurement"):
        return self._attr_suggested_unit_of_measurement
    if hasattr(self, "entity_description"):
        return self.entity_description.suggested_unit_of_measurement
    return None
6979cd95b0fe85c3ee8eca3dbc9881b8d05591e8
9
__init__.py
65
Add suggested_unit_of_measurement attribute to sensors (#80638) * Add suggested_unit_of_measurement attribute to sensors * Lazy calculation of initial entity options * Add type alias for entity options * Small tweak * Add tests * Store suggested_unit_of_measurement in its own option key * Adapt to renaming of IMPERIAL_SYSTEM * Fix rebase mistakes * Apply suggestions from code review Co-authored-by: epenet <[email protected]> Co-authored-by: epenet <[email protected]>
88,882
0
68
38
14
289,746
18
core
6
homeassistant/components/sensor/__init__.py
Python
22
{ "docstring": "Return the unit which should be used for the sensor's state.\n\n This can be used by integrations to override automatic unit conversion rules,\n for example to make a temperature sensor display in °C even if the configured\n unit system prefers °F.\n\n For sensors without a `unique_id`, this takes precedence over legacy\n temperature conversion rules only.\n\n For sensors with a `unique_id`, this is applied only if the unit is not set by the user,\n and takes precedence over automatic device-class conversion rules.\n\n Note:\n suggested_unit_of_measurement is stored in the entity registry the first time\n the entity is seen, and then never updated.\n ", "language": "en", "n_whitespaces": 185, "n_words": 100, "vocab_size": 65 }
https://github.com/home-assistant/core.git
2
manual_download_instructions
def manual_download_instructions(self):
    *processed, _ = self.config.name.split(".")
    return (
        None
        if processed
        else
    )
eab78694e17f10c200bceb60c6f21a2f70eadf68
10
medical_dialog.py
55
Add configs with processed data to medical_dialog dataset (#4127) * Add configs with processed data in medical_dialog dataset * Update metadata JSON * Update dataset card * Rename dummy data dirs * Fix script
21,934
0
67
28
13
104,693
13
datasets
7
datasets/medical_dialog/medical_dialog.py
Python
25
{ "docstring": "\\\n \\n For English:\\nYou need to go to https://drive.google.com/drive/folders/1g29ssimdZ6JzTST6Y8g6h-ogUNReBtJD?usp=sharing,\\\n and manually download the dataset from Google Drive. Once it is completed,\n a file named Medical-Dialogue-Dataset-English-<timestamp-info>.zip will appear in your Downloads folder(\n or whichever folder your browser chooses to save files to). Unzip the folder to obtain\n a folder named \"Medical-Dialogue-Dataset-English\" several text files.\n\n Now, you can specify the path to this folder for the data_dir argument in the\n datasets.load_dataset(...) option.\n The <path/to/folder> can e.g. be \"/Downloads/Medical-Dialogue-Dataset-English\".\n The data can then be loaded using the below command:\\\n `datasets.load_dataset(\"medical_dialog\", name=\"en\", data_dir=\"/Downloads/Medical-Dialogue-Dataset-English\")`.\n\n \\n For Chinese:\\nFollow the above process. Change the 'name' to 'zh'.The download link is https://drive.google.com/drive/folders/1r09_i8nJ9c1nliXVGXwSqRYqklcHd9e2\n\n **NOTE**\n - A caution while downloading from drive. It is better to download single files since creating a zip might not include files <500 MB. This has been observed mutiple times.\n - After downloading the files and adding them to the appropriate folder, the path of the folder can be given as input tu the data_dir path.\n ", "language": "en", "n_whitespaces": 213, "n_words": 161, "vocab_size": 112 }
https://github.com/huggingface/datasets.git
5
set_bbox_to_anchor
def set_bbox_to_anchor(self, bbox, transform=None):
    if bbox is None or isinstance(bbox, BboxBase):
        self._bbox_to_anchor = bbox
    else:
        try:
            l = len(bbox)
        except TypeError as err:
            raise ValueError(f"Invalid bbox: {bbox}") from err
        if l == 2:
            bbox = [bbox[0], bbox[1], 0, 0]
        self._bbox_to_anchor = Bbox.from_bounds(*bbox)
    self._bbox_to_anchor_transform = transform
    self.stale = True
075ff0952896f44d7d0b0b3318f0978ae53f84d7
15
offsetbox.py
153
Small style fixes.
23,014
0
183
95
38
108,010
48
matplotlib
16
lib/matplotlib/offsetbox.py
Python
13
{ "docstring": "\n Set the bbox that the box is anchored to.\n\n *bbox* can be a Bbox instance, a list of [left, bottom, width,\n height], or a list of [left, bottom] where the width and\n height will be assumed to be zero. The bbox will be\n transformed to display coordinate by the given transform.\n ", "language": "en", "n_whitespaces": 94, "n_words": 51, "vocab_size": 37 }
https://github.com/matplotlib/matplotlib.git
1
test_mark_checked_unexpected_exception
def test_mark_checked_unexpected_exception(self, mock_patch_already_checked, mock_delete_pod):
    k = KubernetesPodOperator(
        namespace="default",
        image="ubuntu:16.04",
        name="test",
        task_id="task",
        is_delete_operator_pod=False,
    )
    self.await_pod_mock.side_effect = AirflowException("oops")
    context = create_context(k)
    with pytest.raises(AirflowException):
        k.execute(context=context)
    mock_patch_already_checked.assert_called_once()
    mock_delete_pod.assert_not_called()
c3d883a971a8e4e65ccc774891928daaaa0f4442
10
test_kubernetes_pod.py
133
KubernetesPodOperator should patch "already checked" always (#22734) When not configured to delete pods, at end of task execution the current behavior is to patch the pod as "already checked", but only if pod not successful. We should also patch when successful so it isn't "reattached" to after a task clear.
9,243
0
146
77
22
47,750
24
airflow
21
tests/providers/cncf/kubernetes/operators/test_kubernetes_pod.py
Python
14
{ "docstring": "If we aren't deleting pods and have an exception, mark it so we don't reattach to it", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 15 }
https://github.com/apache/airflow.git
1
is_immutable_rev_checkout
def is_immutable_rev_checkout(self, url, dest):
    # type: (str, str) -> bool
    return False
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
6
versioncontrol.py
21
upd; format
12,563
0
33
12
12
61,420
12
transferlearning
4
.venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py
Python
2
{ "docstring": "\n Return true if the commit hash checked out at dest matches\n the revision in url.\n\n Always return False, if the VCS does not support immutable commit\n hashes.\n\n This method does not check if there are local uncommitted changes\n in dest after checkout, as pip currently has no use case for that.\n ", "language": "en", "n_whitespaces": 101, "n_words": 51, "vocab_size": 42 }
https://github.com/jindongwang/transferlearning.git
1
unauthorized_update_message_text
def unauthorized_update_message_text(update_message_text):
    update_message_text["message"]["from"]["id"] = 1234
    update_message_text["message"]["chat"]["id"] = 1234
    return update_message_text


@pytest.fixture
d7375f1a9c4a69858a65a56bd524f5a78ecab23c
@pytest.fixture
10
conftest.py
69
Refactor telegram_bot polling/webhooks platforms and add tests (#66433) Co-authored-by: Pär Berge <[email protected]>
94,390
1
22
32
9
295,372
11
core
4
tests/components/telegram_bot/conftest.py
Python
4
{ "docstring": "Fixture for mocking an incoming update of type message/text that is not in our `allowed_chat_ids`.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 15 }
https://github.com/home-assistant/core.git
15
installed
def installed(name, updates=None):
    if isinstance(updates, str):
        updates = [updates]

    if not updates:
        updates = name

    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    wua = salt.utils.win_update.WindowsUpdateAgent()

    # Search for updates
    install_list = wua.search(updates)

    # No updates found
    if install_list.count() == 0:
        ret["comment"] = "No updates found"
        return ret

    # List of updates to download
    download = salt.utils.win_update.Updates()
    for item in install_list.updates:
        if not salt.utils.data.is_true(item.IsDownloaded):
            download.updates.Add(item)

    # List of updates to install
    install = salt.utils.win_update.Updates()
    installed_updates = []
    for item in install_list.updates:
        if not salt.utils.data.is_true(item.IsInstalled):
            install.updates.Add(item)
        else:
            installed_updates.extend("KB" + kb for kb in item.KBArticleIDs)

    if install.count() == 0:
        ret["comment"] = "Updates already installed: "
        ret["comment"] += "\n - ".join(installed_updates)
        return ret

    # Return comment of changes if test.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Updates will be installed:"
        for update in install.updates:
            ret["comment"] += "\n"
            ret["comment"] += ": ".join([update.Identity.UpdateID, update.Title])
        return ret

    # Download updates
    wua.download(download)

    # Install updates
    wua.install(install)

    # Refresh windows update info
    wua.refresh()
    post_info = wua.updates().list()

    # Verify the installation
    for item in install.list():
        if not salt.utils.data.is_true(post_info[item]["Installed"]):
            ret["changes"]["failed"] = {
                item: {
                    "Title": post_info[item]["Title"],
                    "KBs": post_info[item]["KBs"],
                }
            }
            ret["result"] = False
        else:
            ret["changes"]["installed"] = {
                item: {
                    "Title": post_info[item]["Title"],
                    "NeedsReboot": post_info[item]["NeedsReboot"],
                    "KBs": post_info[item]["KBs"],
                }
            }

    if ret["changes"].get("failed", False):
        ret["comment"] = "Updates failed"
    else:
        ret["comment"] = "Updates installed successfully"

    return ret
52c922760e8447f0c9efd23b12481ba1a7509dcd
17
win_wua.py
772
Remove 40 character limit to update Title
54,320
0
699
441
114
216,011
215
salt
37
salt/states/win_wua.py
Python
59
{ "docstring": "\n Ensure Microsoft Updates are installed. Updates will be downloaded if\n needed.\n\n Args:\n\n name (str):\n The identifier of a single update to install.\n\n updates (list):\n A list of identifiers for updates to be installed. Overrides\n ``name``. Default is None.\n\n .. note:: Identifiers can be the GUID, the KB number, or any part of the\n Title of the Microsoft update. GUIDs and KBs are the preferred method\n to ensure you're installing the correct update.\n\n .. warning:: Using a partial KB number or a partial Title could result in\n more than one update being installed.\n\n Returns:\n dict: A dictionary containing the results of the update\n\n CLI Example:\n\n .. code-block:: yaml\n\n # using a GUID\n install_update:\n wua.installed:\n - name: 28cf1b09-2b1a-458c-9bd1-971d1b26b211\n\n # using a KB\n install_update:\n wua.installed:\n - name: KB3194343\n\n # using the full Title\n install_update:\n wua.installed:\n - name: Security Update for Adobe Flash Player for Windows 10 Version 1607 (for x64-based Systems) (KB3194343)\n\n # Install multiple updates\n install_updates:\n wua.installed:\n - updates:\n - KB3194343\n - 28cf1b09-2b1a-458c-9bd1-971d1b26b211\n ", "language": "en", "n_whitespaces": 423, "n_words": 161, "vocab_size": 101 }
https://github.com/saltstack/salt.git
5
unquote_header_value
def unquote_header_value(value, is_filename=False):
    if value and value[0] == value[-1] == '"':
        # this is not the real unquoting, but fixing this so that the
        # RFC is met will result in bugs with internet explorer and
        # probably some other browsers as well.  IE for example is
        # uploading files with "C:\foo\bar.txt" as filename
        value = value[1:-1]

        # if this is a filename and the starting characters look like
        # a UNC path, then just return the value without quotes.  Using the
        # replace sequence below on a UNC path has the effect of turning
        # the leading double slash into a single slash and then
        # _fix_ie_filename() doesn't work correctly.  See #458.
        if not is_filename or value[:2] != "\\\\":
            return value.replace("\\\\", "\\").replace('\\"', '"')
    return value
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
14
utils.py
127
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
4,230
0
226
66
87
22,160
127
pipenv
4
pipenv/patched/pip/_vendor/requests/utils.py
Python
13
{ "docstring": "Unquotes a header value. (Reversal of :func:`quote_header_value`).\n This does not use the real unquoting but what browsers are actually\n using for quoting.\n\n :param value: the header value to unquote.\n :rtype: str\n ", "language": "en", "n_whitespaces": 47, "n_words": 31, "vocab_size": 29 }
https://github.com/pypa/pipenv.git
7
create_ray_logs_for_failed_test
def create_ray_logs_for_failed_test(rep):
    # We temporarily restrict to Linux until we have artifact dirs
    # for Windows and Mac
    if platform.system() != "Linux":
        return

    # Only archive failed tests after the "call" phase of the test
    if rep.when != "call" or not rep.failed:
        return

    # Get dir to write zipped logs to
    archive_dir = os.environ.get("RAY_TEST_FAILURE_LOGS_ARCHIVE_DIR")

    if not archive_dir:
        return

    if not os.path.exists(archive_dir):
        os.makedirs(archive_dir)

    # Get logs dir from the latest ray session
    tmp_dir = gettempdir()
    logs_dir = os.path.join(tmp_dir, "ray", "session_latest", "logs")

    if not os.path.exists(logs_dir):
        return

    # Write zipped logs to logs archive dir
    test_name = rep.nodeid.replace(os.sep, "::")
    output_file = os.path.join(archive_dir, f"{test_name}_{time.time():.4f}")
    shutil.make_archive(output_file, "zip", logs_dir)
fc1cd89020ca1362951a9af301547b4890fb93ae
12
conftest.py
249
[ci] Add short failing test summary for pytests (#24104) It is sometimes hard to find all failing tests in buildkite output logs - even filtering for "FAILED" is cumbersome as the output can be overloaded. This PR adds a small utility to add a short summary log in a separate output section at the end of the buildkite job. The only shared directory between the Buildkite host machine and the test docker container is `/tmp/artifacts:/artifact-mount`. Thus, we write the summary file to this directory, and delete it before actually uploading it as an artifact in the `post-commands` hook.
31,492
0
192
131
69
138,636
103
ray
25
python/ray/tests/conftest.py
Python
17
{ "docstring": "Creates artifact zip of /tmp/ray/session_latest/logs for failed tests", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/ray-project/ray.git