Column           Type     Range / length
complexity       int64    1 – 139
fun_name         string   length 1 – 80
code             string   length 101 – 62.2k
commit_id        string   length 40 – 40
ast_errors       string   length 0 – 3.11k
ast_levels       int64    6 – 36
file_name        string   length 5 – 79
n_ast_nodes      int64    17 – 19.2k
commit_message   string   length 3 – 15.3k
d_id             int64    12 – 121k
n_ast_errors     int64    0 – 9
n_whitespaces    int64    4 – 10.8k
token_counts     int64    5 – 3.06k
vocab_size       int64    4 – 1.11k
id               int64    20 – 338k
n_words          int64    4 – 4.82k
repo             string   length 3 – 22
n_identifiers    int64    2 – 176
path             string   length 7 – 134
language         string   1 class
nloc             int64    1 – 413
documentation    dict
url              string   length 31 – 59
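A minimal sketch of how records with this schema could be loaded and inspected, assuming the rows are stored as JSON Lines and the Hugging Face `datasets` library is installed; the file name "code_functions.jsonl" is a placeholder, not the actual dataset location.

# Sketch only: "code_functions.jsonl" is a hypothetical path standing in for the real data files.
from datasets import load_dataset

ds = load_dataset("json", data_files="code_functions.jsonl", split="train")

# Each record carries the columns described in the table above.
for row in ds.select(range(3)):
    print(row["repo"], row["path"], row["fun_name"])
    print("complexity:", row["complexity"], "| nloc:", row["nloc"], "| tokens:", row["token_counts"])
    # The documentation field is a dict; "docstring" holds the extracted docstring text.
    print(row["documentation"]["docstring"][:80])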
8
send_robust
def send_robust(self, sender, **named):
    responses = []
    if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
        return responses

    # Call each receiver with whatever arguments it can accept.
    # Return a list of tuple pairs [(receiver, response), ... ].
    for receiver in self._live_receivers(sender):
        try:
            response = receiver(signal=self, sender=sender, **named)
        except Exception as err:
            if "pytest" in sys.modules:
                if _receivers_that_raise is _all or receiver in _receivers_that_raise:
                    raise
            logging.error("signal.failure", extra={"receiver": repr(receiver)}, exc_info=True)
            responses.append((receiver, err))
        else:
            responses.append((receiver, response))
    return responses


buffer_incr_complete = BetterSignal(providing_args=["model", "columns", "extra", "result"])
pending_delete = BetterSignal(providing_args=["instance", "actor"])
event_processed = BetterSignal(providing_args=["project", "event"])

# This signal should eventually be removed as we should not send
# transactions through post processing
transaction_processed = BetterSignal(providing_args=["project", "event"])

# DEPRECATED
event_received = BetterSignal(providing_args=["ip", "project"])
event_accepted = BetterSignal(providing_args=["ip", "data", "project"])

# Organization Onboarding Signals
project_created = BetterSignal(providing_args=["project", "user", "default_rules"])
first_event_pending = BetterSignal(providing_args=["project", "user"])
first_event_received = BetterSignal(providing_args=["project", "event"])
first_transaction_received = BetterSignal(providing_args=["project", "event"])
first_profile_received = BetterSignal(providing_args=["project"])
first_replay_received = BetterSignal(providing_args=["project"])
member_invited = BetterSignal(providing_args=["member", "user"])
member_joined = BetterSignal(providing_args=["member", "organization"])
issue_tracker_used = BetterSignal(providing_args=["plugin", "project", "user"])
plugin_enabled = BetterSignal(providing_args=["plugin", "project", "user"])
email_verified = BetterSignal(providing_args=["email"])
mocks_loaded = BetterSignal(providing_args=["project"])
user_feedback_received = BetterSignal(providing_args=["project"])
advanced_search = BetterSignal(providing_args=["project"])
advanced_search_feature_gated = BetterSignal(providing_args=["organization", "user"])
save_search_created = BetterSignal(providing_args=["project", "user"])
inbound_filter_toggled = BetterSignal(providing_args=["project"])
sso_enabled = BetterSignal(providing_args=["organization", "user", "provider"])
data_scrubber_enabled = BetterSignal(providing_args=["organization"])
alert_rule_created = BetterSignal(
    providing_args=[
        "project",
        "rule",
        "user",
        "rule_type",
        "is_api_token",
        "duplicate_rule",
        "wizard_v3",
    ]
)
alert_rule_edited = BetterSignal(
    providing_args=["project", "rule", "user", "rule_type", "is_api_token"]
)
repo_linked = BetterSignal(providing_args=["repo", "user"])
release_created = BetterSignal(providing_args=["release"])
deploy_created = BetterSignal(providing_args=["deploy"])
ownership_rule_created = BetterSignal(providing_args=["project"])

# issues
issue_assigned = BetterSignal(providing_args=["project", "group", "user"])
issue_deleted = BetterSignal(providing_args=["group", "user", "delete_type"])
issue_resolved = BetterSignal(
    providing_args=["organization_id", "project", "group", "user", "resolution_type"]
)
issue_unresolved = BetterSignal(providing_args=["project", "user", "group", "transition_type"])
issue_ignored = BetterSignal(providing_args=["project", "user", "group_list", "activity_data"])
issue_unignored = BetterSignal(providing_args=["project", "user", "group", "transition_type"])
issue_mark_reviewed = BetterSignal(providing_args=["project", "user", "group"])

# comments
comment_created = BetterSignal(providing_args=["project", "user", "group", "activity_data"])
comment_updated = BetterSignal(providing_args=["project", "user", "group", "activity_data"])
comment_deleted = BetterSignal(providing_args=["project", "user", "group", "activity_data"])

inbox_in = BetterSignal(providing_args=["project", "user", "group", "reason"])
inbox_out = BetterSignal(
    providing_args=["project", "user", "group", "action", "inbox_date_added", "referrer"]
)

terms_accepted = BetterSignal(providing_args=["organization", "user", "ip_address"])
team_created = BetterSignal(providing_args=["organization", "user", "team"])
integration_added = BetterSignal(providing_args=["integration", "organization", "user"])
integration_issue_created = BetterSignal(providing_args=["integration", "organization", "user"])
integration_issue_linked = BetterSignal(providing_args=["integration", "organization", "user"])
monitor_failed = BetterSignal(providing_args=["monitor"])

# experiments
join_request_created = BetterSignal(providing_args=["member"])
join_request_link_viewed = BetterSignal(providing_args=["organization"])
user_signup = BetterSignal(providing_args=["user", "source"])
102ca3bf902d7af977b0528f89fbd23415825fe3
17
signals.py
1,481
ref(hybrid-cloud): Enforce silo isolation on many model tests (#40946) Greatly increase the number of stable hybrid cloud model tests, adding more hybrid cloud service interfaces.
18,372
0
563
128
190
88,296
352
sentry
79
src/sentry/signals.py
Python
16
{ "docstring": "\n A reimplementation of send_robust which logs failures, thus recovering stacktraces.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/getsentry/sentry.git
2
convert_exception_to_response
def convert_exception_to_response(get_response):
    if asyncio.iscoroutinefunction(get_response):
9c19aff7c7561e3a82978a272ecdaad40dda5c00
7
exception.py
26
Refs #33476 -- Reformatted code with Black.
50,777
0
10
35
4
204,538
4
django
4
django/core/handlers/exception.py
Python
9
{ "docstring": "\n Wrap the given get_response callable in exception-to-response conversion.\n\n All exceptions will be converted. All known 4xx exceptions (Http404,\n PermissionDenied, MultiPartParserError, SuspiciousOperation) will be\n converted to the appropriate response, and all other exceptions will be\n converted to 500 responses.\n\n This decorator is automatically applied to all middleware to ensure that\n no middleware leaks an exception and that the next middleware in the stack\n can rely on getting a response instead of an exception.\n ", "language": "en", "n_whitespaces": 100, "n_words": 72, "vocab_size": 51 }
https://github.com/django/django.git
2
convert_python_to_json
def convert_python_to_json(self, par_data_dic, par_json_file=""):
    if par_json_file:
        with open(par_json_file, "w") as outfile:
            return json.dump(par_data_dic, outfile)
    else:
        return json.dump(par_data_dic)
f0af0c43340763724f139fa68aa1e5a9ffe458b4
13
JsonParser.py
75
refactor: clean code Signed-off-by: slowy07 <[email protected]>
4,347
0
75
43
16
22,466
17
Python
8
JsonParser.py
Python
6
{ "docstring": "\n this function converts dictionary of data to json string and store it in json file if\n json file pass provided if not it only returns the json string\n args:\n par_data_dic: dictionary of data\n par_json_file: the output json file\n return: json string\n ", "language": "en", "n_whitespaces": 101, "n_words": 41, "vocab_size": 26 }
https://github.com/geekcomputers/Python.git
2
_maybe_broadcast_to_outputs
def _maybe_broadcast_to_outputs(self, outputs, objects):
    if not self._should_broadcast(objects):
        return objects

    # When there is more than one Model output, this is needed to keep
    # each Metric / Loss separate. When there is only one Model output,
    # the user-supplied object should be used.
    should_copy_objects = len(tf.nest.flatten(outputs)) > 1
84afc5193d38057e2e2badf9c889ea87d80d8fbf
12
compile_utils.py
62
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,680
0
101
54
39
271,043
48
keras
10
keras/engine/compile_utils.py
Python
6
{ "docstring": "Determines if losses / metrics should be applied to all outputs.\n\n NOTE: This method should only be called for Metrics / Losses, not for\n y_true / sample_weight.\n\n Args:\n outputs: Model predictions.\n objects: Arbitrary nested structure (e.g. of losses or metrics)\n\n Returns:\n Arbitrary nested structure of objects, maybe copied to each output.\n\n Applies a Loss / Metric to all outputs.\n ", "language": "en", "n_whitespaces": 128, "n_words": 59, "vocab_size": 44 }
https://github.com/keras-team/keras.git
1
test_post_save_add_redirect
def test_post_save_add_redirect(self):
    post_data = {"name": "John Doe"}
    self.assertEqual(Person.objects.count(), 0)
    response = self.client.post(
        reverse("admin_custom_urls:admin_custom_urls_person_add"), post_data
    )
    persons = Person.objects.all()
    self.assertEqual(len(persons), 1)
    redirect_url = reverse(
        "admin_custom_urls:admin_custom_urls_person_history", args=[persons[0].pk]
    )
    self.assertRedirects(response, redirect_url)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
tests.py
146
Refs #33476 -- Reformatted code with Black.
51,853
0
120
87
23
207,061
28
django
18
tests/admin_custom_urls/tests.py
Python
12
{ "docstring": "\n ModelAdmin.response_post_save_add() controls the redirection after\n the 'Save' button has been pressed when adding a new object.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 15 }
https://github.com/django/django.git
2
prepare_metadata
def prepare_metadata(self):
    # type: () -> None
    assert self.source_dir
    with indent_log():
        self.metadata_directory = self._generate_metadata()

    # Act on the newly generated metadata, based on the name and version.
    if not self.name:
        self._set_requirement()
    else:
        self.warn_on_mismatching_name()

    self.assert_source_matches_version()
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
10
req_install.py
88
upd; format
12,377
0
123
47
31
60,993
34
transferlearning
10
.venv/lib/python3.8/site-packages/pip/_internal/req/req_install.py
Python
9
{ "docstring": "Ensure that project metadata is available.\n\n Under PEP 517, call the backend hook to prepare the metadata.\n Under legacy processing, call setup.py egg-info.\n ", "language": "en", "n_whitespaces": 44, "n_words": 23, "vocab_size": 20 }
https://github.com/jindongwang/transferlearning.git
5
set_terminator
def set_terminator(self, term):
    if isinstance(term, str) and self.use_encoding:
        term = bytes(term, self.encoding)
    elif isinstance(term, int) and term < 0:
        raise ValueError('the number of received bytes must be positive')
    self.terminator = term
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
asynchat.py
84
add python 3.10.4 for windows
55,951
0
81
52
26
220,267
31
XX-Net
11
python3.10.4/Lib/asynchat.py
Python
6
{ "docstring": "Set the input delimiter.\n\n Can be a fixed string of any length, an integer, or None.\n ", "language": "en", "n_whitespaces": 30, "n_words": 16, "vocab_size": 16 }
https://github.com/XX-net/XX-Net.git
2
_object2proto
def _object2proto(self) -> RunClassMethodSMPCAction_PB:
    return RunClassMethodSMPCAction_PB(
        path=self.path,
        _self=sy.serialize(self._self),
        args=list(map(lambda x: sy.serialize(x), self.args)),
        kwargs={k: sy.serialize(v) for k, v in self.kwargs.items()},
        id_at_location=sy.serialize(self.id_at_location),
        seed_id_locations=self.seed_id_locations,
        address=sy.serialize(self.address),
        msg_id=sy.serialize(self.id),
    )
5dad7d57c63189553eaa303ece930c639c1b6196
15
run_class_method_smpc_action.py
173
modified action creation context to sharetensor level created global location for context retrieval during multiplication
91
0
133
115
24
612
24
PySyft
20
packages/syft/src/syft/core/node/common/action/run_class_method_smpc_action.py
Python
25
{ "docstring": "Returns a protobuf serialization of self.\n\n As a requirement of all objects which inherit from Serializable,\n this method transforms the current object into the corresponding\n Protobuf object so that it can be further serialized.\n\n :return: returns a protobuf object\n :rtype: RunClassMethodSMPCAction_PB\n\n .. note::\n This method is purely an internal method. Please use sy.serialize(object) or one of\n the other public serialization methods if you wish to serialize an\n object.\n ", "language": "en", "n_whitespaces": 150, "n_words": 68, "vocab_size": 56 }
https://github.com/OpenMined/PySyft.git
17
pytest_collection_modifyitems
def pytest_collection_modifyitems(session, config, items): not_services = set(config.getoption("--not-service")) if config.getoption("--all-services"): skip_exclude_services(not_services, items) if config.getoption("--only-service") or config.getoption("--only-services"): warnings.warn( "`--only-service` cannot be used with `--all-services`. " "`--only-service` will be ignored." ) return only_services = set(config.getoption("--only-services")) if only_services: for item in items: item_services = {mark.args[0] for mark in item.iter_markers(name="service")} if not item_services: item.add_marker(pytest.mark.skip("Only running tests for services.")) skip_exclude_services(not_services, items) if config.getoption("--service"): warnings.warn( "`--service` cannot be used with `--only-services`. " "`--service` will be ignored." ) return only_service = set(config.getoption("--only-service")) if only_service: only_running_blurb = f"Only running tests for service(s): {', '.join(repr(s) for s in only_service)}." for item in items: item_services = {mark.args[0] for mark in item.iter_markers(name="service")} not_in_only_services = only_services.difference(item_services) if not_in_only_services: item.add_marker(pytest.mark.skip(only_running_blurb)) if config.getoption("--service"): warnings.warn( "`--service` cannot be used with `--only-service`. " "`--service` will be ignored." ) return run_services = set(config.getoption("--service")) for item in items: item_services = {mark.args[0] for mark in item.iter_markers(name="service")} missing_services = item_services.difference(run_services) if missing_services: item.add_marker( pytest.mark.skip( f"Requires service(s): {', '.join(repr(s) for s in missing_services)}. " "Use '--service NAME' to include." ) ) @pytest.fixture(scope="session")
38a7efafd45dbe64b292593461330a2de6499bb1
@pytest.fixture(scope="session")
20
conftest.py
558
Cleanup pytest service CLI options
10,960
1
615
284
76
53,964
160
prefect
31
tests/conftest.py
Python
48
{ "docstring": "\n Update tests to skip in accordance with service requests\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
https://github.com/PrefectHQ/prefect.git
1
test_approve_task_and_workflow
def test_approve_task_and_workflow(self): # Unset WAGTAIL_FINISH_WORKFLOW_ACTION - default action should be to publish del settings.WAGTAIL_FINISH_WORKFLOW_ACTION # Connect a mock signal handler to page_published signal mock_handler = mock.MagicMock() page_published.connect(mock_handler) # Post self.client.post( reverse( "wagtailadmin_pages:workflow_action", args=( self.page.id, "approve", self.page.current_workflow_task_state.id, ), ), {"comment": "my comment"}, ) # Check that the workflow was approved workflow_state = WorkflowState.objects.get( page=self.page, requested_by=self.submitter ) self.assertEqual(workflow_state.status, workflow_state.STATUS_APPROVED) # Check that the task was approved task_state = workflow_state.current_task_state self.assertEqual(task_state.status, task_state.STATUS_APPROVED) # Check that the comment was added to the task state correctly self.assertEqual(task_state.comment, "my comment") page = Page.objects.get(id=self.page.id) # Page must be live self.assertTrue(page.live, "Approving moderation failed to set live=True") # Page should now have no unpublished changes self.assertFalse( page.has_unpublished_changes, "Approving moderation failed to set has_unpublished_changes=False", ) # Check that the page_published signal was fired self.assertEqual(mock_handler.call_count, 1) mock_call = mock_handler.mock_calls[0][2] self.assertEqual(mock_call["sender"], self.page.specific_class) self.assertEqual(mock_call["instance"], self.page) self.assertIsInstance(mock_call["instance"], self.page.specific_class)
d10f15e55806c6944827d801cd9c2d53f5da4186
14
test_workflows.py
365
Reformat with black
15,869
0
513
223
89
72,276
135
wagtail
38
wagtail/admin/tests/test_workflows.py
Python
33
{ "docstring": "\n This posts to the approve task view and checks that the page was approved and published\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 14 }
https://github.com/wagtail/wagtail.git
3
delete_accounting_dimension
def delete_accounting_dimension(doc):
    doclist = get_doctypes_with_dimensions()
    frappe.db.sql(
        % ("%s", ", ".join(["%s"] * len(doclist))),  # nosec
        tuple([doc.fieldname] + doclist),
    )
    frappe.db.sql(
        % ("%s", ", ".join(["%s"] * len(doclist))),  # nosec
        tuple([doc.fieldname] + doclist),
    )
    budget_against_property = frappe.get_doc("Property Setter", "Budget-budget_against-options")
    value_list = budget_against_property.value.split("\n")[3:]
    if doc.document_type in value_list:
        value_list.remove(doc.document_type)
        budget_against_property.value = "\nCost Center\nProject\n" + "\n".join(value_list)
        budget_against_property.save()
    for doctype in doclist:
        frappe.clear_cache(doctype=doctype)


@frappe.whitelist()
494bd9ef78313436f0424b918f200dab8fc7c20b
@frappe.whitelist()
14
accounting_dimension.py
285
style: format code with black
13,719
1
40
155
39
64,771
57
erpnext
22
erpnext/accounts/doctype/accounting_dimension/accounting_dimension.py
Python
26
{ "docstring": "\n\t\tDELETE FROM `tabCustom Field`\n\t\tWHERE fieldname = %s\n\t\tAND dt IN (%s)\n\t\tDELETE FROM `tabProperty Setter`\n\t\tWHERE field_name = %s\n\t\tAND doc_type IN (%s)", "language": "en", "n_whitespaces": 18, "n_words": 24, "vocab_size": 16 }
https://github.com/frappe/erpnext.git
1
_dispatch_run_batch
def _dispatch_run_batch(self, **kwargs):
    return self._dispatch_run_general(self.run_batch, **kwargs)
738e008020f146ff9820c290311782f515749c48
8
base.py
35
Add `run_batch` method to all nodes and `Pipeline` to allow batch querying (#2481) * Add run_batch methods for batch querying * Update Documentation & Code Style * Fix mypy * Update Documentation & Code Style * Fix mypy * Fix linter * Fix tests * Update Documentation & Code Style * Fix tests * Update Documentation & Code Style * Fix mypy * Fix rest api test * Update Documentation & Code Style * Add Doc strings * Update Documentation & Code Style * Add batch_size as attribute to nodes supporting batching * Adapt error messages * Adapt type of filters in retrievers * Revert change about truncation_warning in summarizer * Unify multiple_doc_lists tests * Use smaller models in extractor tests * Add return types to JoinAnswers and RouteDocuments * Adapt return statements in reader's run_batch method * Allow list of filters * Adapt error messages * Update Documentation & Code Style * Fix tests * Fix mypy * Adapt print_questions * Remove disabling warning about too many public methods * Add flag for pylint to disable warning about too many public methods in pipelines/base.py and document_stores/base.py * Add type check * Update Documentation & Code Style * Adapt tutorial 11 * Update Documentation & Code Style * Add query_batch method for DCDocStore * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
75,061
0
20
21
6
257,301
6
haystack
5
haystack/nodes/base.py
Python
2
{ "docstring": "\n The Pipelines call this method when run_batch() is executed. This method in turn executes the\n _dispatch_run_general() method with the correct run method.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 19 }
https://github.com/deepset-ai/haystack.git
1
test_local_user_not_exists
def test_local_user_not_exists(self):
    user = "@unknown:" + self.hs.config.server.server_name
    self._test_status(
        users=[user],
        expected_statuses={
            user: {
                "exists": False,
            },
        },
        expected_failures=[],
    )
250104d357c17a1c87fa46af35bbf3612f4ef171
13
test_account.py
80
Implement account status endpoints (MSC3720) (#12001) See matrix-org/matrix-doc#3720 Co-authored-by: Sean Quah <[email protected]>
71,189
0
139
49
17
246,381
18
synapse
11
tests/rest/client/test_account.py
Python
11
{ "docstring": "Tests that the account status endpoints correctly reports that a user doesn't\n exist.\n ", "language": "en", "n_whitespaces": 27, "n_words": 13, "vocab_size": 12 }
https://github.com/matrix-org/synapse.git
3
add
def add(self, level, message, extra_tags=""):
    if not message:
        return
    # Check that the message level is not less than the recording level.
    level = int(level)
    if level < self.level:
        return
    # Add the message.
    self.added_new = True
    message = Message(level, message, extra_tags=extra_tags)
    self._queued_messages.append(message)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
9
base.py
96
Refs #33476 -- Reformatted code with Black.
50,670
0
128
58
31
204,175
43
django
10
django/contrib/messages/storage/base.py
Python
9
{ "docstring": "\n Queue a message to be stored.\n\n The message is only queued if it contained something and its level is\n not less than the recording level (``self.level``).\n ", "language": "en", "n_whitespaces": 55, "n_words": 26, "vocab_size": 23 }
https://github.com/django/django.git
14
read_shp
def read_shp(path, simplify=True, geom_attrs=True, strict=True): msg = ( "read_shp is deprecated and will be removed in 3.0." "See https://networkx.org/documentation/latest/auto_examples/index.html#geospatial." ) warnings.warn(msg, DeprecationWarning, stacklevel=2) try: from osgeo import ogr except ImportError as err: raise ImportError("read_shp requires OGR: http://www.gdal.org/") from err if not isinstance(path, str): return net = nx.DiGraph() shp = ogr.Open(path) if shp is None: raise RuntimeError(f"Unable to open {path}") for lyr in shp: fields = [x.GetName() for x in lyr.schema] for f in lyr: g = f.geometry() if g is None: if strict: raise nx.NetworkXError("Bad data: feature missing geometry") else: continue flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields] attributes = dict(zip(fields, flddata)) attributes["ShpName"] = lyr.GetName() # Note: Using layer level geometry type if g.GetGeometryType() == ogr.wkbPoint: net.add_node((g.GetPoint_2D(0)), **attributes) elif g.GetGeometryType() in (ogr.wkbLineString, ogr.wkbMultiLineString): for edge in edges_from_line(g, attributes, simplify, geom_attrs): e1, e2, attr = edge net.add_edge(e1, e2) net[e1][e2].update(attr) else: if strict: raise nx.NetworkXError( f"GeometryType {g.GetGeometryType()} not supported" ) return net
cc1db275efc709cb964ce88abbfa877798d58c10
20
nx_shp.py
463
Minor improvements from general code readthrough (#5414) * Add deprecated directive to reversed docstring. * Add missing dep directives to shpfiles. * Remove defn of INF sentinel. * typo. * str -> comment in forloop. * STY: appropriate casing for var name.
41,911
0
573
280
109
176,450
150
networkx
50
networkx/readwrite/nx_shp.py
Python
41
{ "docstring": "Generates a networkx.DiGraph from shapefiles.\n\n .. deprecated:: 2.6\n\n read_shp is deprecated and will be removed in NetworkX 3.0.\n See https://networkx.org/documentation/latest/auto_examples/index.html#geospatial.\n\n Point geometries are\n translated into nodes, lines into edges. Coordinate tuples are used as\n keys. Attributes are preserved, line geometries are simplified into start\n and end coordinates. Accepts a single shapefile or directory of many\n shapefiles.\n\n \"The Esri Shapefile or simply a shapefile is a popular geospatial vector\n data format for geographic information systems software [1]_.\"\n\n Parameters\n ----------\n path : file or string\n File, directory, or filename to read.\n\n simplify: bool\n If True, simplify line geometries to start and end coordinates.\n If False, and line feature geometry has multiple segments, the\n non-geometric attributes for that feature will be repeated for each\n edge comprising that feature.\n\n geom_attrs: bool\n If True, include the Wkb, Wkt and Json geometry attributes with\n each edge.\n\n NOTE: if these attributes are available, write_shp will use them\n to write the geometry. If nodes store the underlying coordinates for\n the edge geometry as well (as they do when they are read via\n this method) and they change, your geomety will be out of sync.\n\n strict: bool\n If True, raise NetworkXError when feature geometry is missing or\n GeometryType is not supported.\n If False, silently ignore missing or unsupported geometry in features.\n\n Returns\n -------\n G : NetworkX graph\n\n Raises\n ------\n ImportError\n If ogr module is not available.\n\n RuntimeError\n If file cannot be open or read.\n\n NetworkXError\n If strict=True and feature is missing geometry or GeometryType is\n not supported.\n\n Examples\n --------\n >>> G = nx.read_shp(\"test.shp\") # doctest: +SKIP\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Shapefile\n ", "language": "en", "n_whitespaces": 486, "n_words": 262, "vocab_size": 163 }
https://github.com/networkx/networkx.git
1
__copy__
def __copy__(self):
    # Shallow copy.
    return self.__constructor__(
        self.gpu_manager, self.key, self._length_cache, self._width_cache
    )
2bb9a1fab7b0092974853e616dfd5e7ed98f085d
8
partition.py
43
REFACTOR-#5363: introduce partition constructor; move `add_to_apply_calls` impl in base class (#5354) Signed-off-by: Myachev <[email protected]>
36,353
0
51
27
12
155,358
12
modin
7
modin/core/execution/ray/implementations/cudf_on_ray/partitioning/partition.py
Python
4
{ "docstring": "\n Create a copy of this object.\n\n Returns\n -------\n cuDFOnRayDataframePartition\n A copy of this object.\n ", "language": "en", "n_whitespaces": 61, "n_words": 14, "vocab_size": 10 }
https://github.com/modin-project/modin.git
2
disconnect
def disconnect(self):
    if self.is_connected is False:
        return
    self.connection.close()
    self.is_connected = False
    return self.is_connected
fc9776d9b342f873cbb3f36fd39955b9e1ea6f76
8
sqlite_handler.py
52
added connection_args and connection_args_example dicts
25,459
0
59
30
10
115,431
13
mindsdb
5
mindsdb/integrations/handlers/sqlite_handler/sqlite_handler.py
Python
6
{ "docstring": "\r\n Close any existing connections.\r\n ", "language": "en", "n_whitespaces": 19, "n_words": 4, "vocab_size": 4 }
https://github.com/mindsdb/mindsdb.git
2
for_each_smith
def for_each_smith(self, fn, selector=None, row=None, col=None) -> "FigureWidget":
    for obj in self.select_smiths(selector=selector, row=row, col=col):
        fn(obj)
    return self
c95b4fa4388f29e50b6966e45c94c5980013a01d
9
_figurewidget.py
73
type annotations for chainable Figure methods
68,409
0
49
48
17
240,298
17
plotly.py
8
packages/python/plotly/plotly/graph_objs/_figurewidget.py
Python
32
{ "docstring": "\n Apply a function to all smith objects that satisfy the\n specified selection criteria\n\n Parameters\n ----------\n fn:\n Function that inputs a single smith object.\n selector: dict, function, or None (default None)\n Dict to use as selection criteria.\n smith objects will be selected if they contain\n properties corresponding to all of the dictionary's keys, with\n values that exactly match the supplied values. If None\n (the default), all smith objects are selected. If a\n function, it must be a function accepting a single argument and\n returning a boolean. The function will be called on each\n smith and those for which the function returned True will\n be in the selection.\n row, col: int or None (default None)\n Subplot row and column index of smith objects to select.\n To select smith objects by row and column, the Figure\n must have been created using plotly.subplots.make_subplots.\n If None (the default), all smith objects are selected.\n Returns\n -------\n self\n Returns the FigureWidget object that the method was called on\n ", "language": "en", "n_whitespaces": 404, "n_words": 161, "vocab_size": 96 }
https://github.com/plotly/plotly.py.git
3
is_homepage
def is_homepage(self) -> bool:
    return self.is_top_level and self.is_index and self.file.url in ('.', './', 'index.html')

previous_page: Optional[Page]
next_page: Optional[Page]
parent: Optional[Section]
children: None = None
is_section: bool = False
is_page: bool = True
is_link: bool = False
32359f3e93f5ca7778b9f7c3d6d92f49a629c84c
9
pages.py
143
Relative links end with slash even for homepage links (#3022) Fixes #3015
57,470
0
99
30
27
225,565
36
mkdocs
17
mkdocs/structure/pages.py
Python
3
{ "docstring": "Evaluates to `True` for the homepage of the site and `False` for all other pages.The [page][mkdocs.structure.pages.Page] object for the previous page or `None`.\n The value will be `None` if the current page is the first item in the site navigation\n or if the current page is not included in the navigation at all.The [page][mkdocs.structure.pages.Page] object for the next page or `None`.\n The value will be `None` if the current page is the last item in the site navigation\n or if the current page is not included in the navigation at all.The immediate parent of the page in the site navigation. `None` if the\n page is at the top level.Pages do not contain children and the attribute is always `None`.Indicates that the navigation object is a \"section\" object. Always `False` for page objects.Indicates that the navigation object is a \"page\" object. Always `True` for page objects.Indicates that the navigation object is a \"link\" object. Always `False` for page objects.", "language": "en", "n_whitespaces": 172, "n_words": 158, "vocab_size": 57 }
https://github.com/mkdocs/mkdocs.git
1
test_solo_route_send_immediate_msg_with_reply
def test_solo_route_send_immediate_msg_with_reply() -> None:
    node = MockNode()
    destination = SpecificLocation()
    server = VirtualServerConnection(node=node)
    connection = VirtualClientConnection(server=server)
    h_solo = SoloRoute(destination=destination, connection=connection)
    msg = construct_dummy_message(SignedImmediateSyftMessageWithReply)
    ret = h_solo.send_immediate_msg_with_reply(msg=msg)
    assert isinstance(
        ret,
        SignedImmediateSyftMessageWithoutReply,
    )
dadbb7dfe1062f94e2d95e7670d0a7a4602460c4
9
route_test.py
114
fixed signature of send_immediate_msg_with_reply - changing default timeouts
187
0
75
69
25
1,393
31
PySyft
18
packages/syft/tests/syft/core/io/route_test.py
Python
13
{ "docstring": "Test SoloRoute.send_immediate_msg_with_reply method works.", "language": "en", "n_whitespaces": 3, "n_words": 4, "vocab_size": 4 }
https://github.com/OpenMined/PySyft.git
11
validate_parameter_constraints
def validate_parameter_constraints(parameter_constraints, params, caller_name): if len(set(parameter_constraints) - set(params)) != 0: raise ValueError( f"The parameter constraints {list(parameter_constraints)}" " contain unexpected parameters" f" {set(parameter_constraints) - set(params)}" ) for param_name, param_val in params.items(): # We allow parameters to not have a constraint so that third party estimators # can inherit from sklearn estimators without having to necessarily use the # validation tools. if param_name not in parameter_constraints: continue constraints = parameter_constraints[param_name] if constraints == "no_validation": continue constraints = [make_constraint(constraint) for constraint in constraints] for constraint in constraints: if constraint.is_satisfied_by(param_val): # this constraint is satisfied, no need to check further. break else: # No constraint is satisfied, raise with an informative message. # Ignore constraints that we don't want to expose in the error message, # i.e. options that are for internal purpose or not officially supported. constraints = [ constraint for constraint in constraints if not constraint.hidden ] if len(constraints) == 1: constraints_str = f"{constraints[0]}" else: constraints_str = ( f"{', '.join([str(c) for c in constraints[:-1]])} or" f" {constraints[-1]}" ) raise ValueError( f"The {param_name!r} parameter of {caller_name} must be" f" {constraints_str}. Got {param_val!r} instead." )
91f02270a8f49e3e52882dc0fa634eff4d138fc8
25
_param_validation.py
330
MAINT Add one-sided set differences for clarity in param validation (#23772) Co-authored-by: Jérémie du Boisberranger <[email protected]>
76,307
0
590
137
117
260,513
181
scikit-learn
20
sklearn/utils/_param_validation.py
Python
32
{ "docstring": "Validate types and values of given parameters.\n\n Parameters\n ----------\n parameter_constraints : dict or {\"no_validation\"}\n If \"no_validation\", validation is skipped for this parameter.\n\n If a dict, it must be a dictionary `param_name: list of constraints`.\n A parameter is valid if it satisfies one of the constraints from the list.\n Constraints can be:\n - an Interval object, representing a continuous or discrete range of numbers\n - the string \"array-like\"\n - the string \"sparse matrix\"\n - the string \"random_state\"\n - callable\n - None, meaning that None is a valid value for the parameter\n - any type, meaning that any instance of this type is valid\n - a StrOptions object, representing a set of strings\n - the string \"boolean\"\n - the string \"verbose\"\n\n params : dict\n A dictionary `param_name: param_value`. The parameters to validate against the\n constraints.\n\n caller_name : str\n The name of the estimator or function or method that called this function.\n ", "language": "en", "n_whitespaces": 286, "n_words": 149, "vocab_size": 89 }
https://github.com/scikit-learn/scikit-learn.git
3
notify_clients
async def notify_clients(cls) -> None:
    while not cls.STOP:
        await asyncio.sleep(cls.UPDATE_INTERVALS)
        if cls.EVENT_QUEUE:
            await cls.broadcast_estimations()
b1dfc9a172440e9c9736566f326ba339ff559604
12
event_queue.py
61
Release new queue beta (#1969) * queue-refactor-backend (#1489) * queue-refactor-backend - create a template for the new design * queue-refactor-backend - clean after the old queue * queue-refactor-backend - add basic test to websocket endpoint * queue-refactor-backend - small fix * queue-refactor-backend - debugs&fixes&finalizations - test the flow with postman * queue-refactor-backend - tweaks on websocket closing * queue-refactor-backend - cleanup * queue-refactor-backend - cleanup & tweaks * queue-refactor-backend - cleanup & tweaks * queue-refactor-backend - cleanup & tweaks - correct the exception handling * queue-refactor-backend - add websockets dependency * queue-refactor-backend - reformat * queue-refactor-backend - add single event test * queue-refactor-backend - tweaks - remove outdated tests * queue-refactor-backend - reformat * queue-refactor-backend - reformat * queue-refactor-backend - reformat * queue-refactor-backend - add Queue configurations to Blocks.launch() - add live_queue_update to send estimations whenever a job gets fetched from the Queue * queue-refactor-backend - add Queue configurations to Blocks.launch() - add live_queue_update to send estimations whenever a job gets fetched from the Queue * queue-refactor-backend - tweaks * queue-refactor-backend - make SLEEP_WHEN_FREE shorter Co-authored-by: Ali Abid <[email protected]> * Add estimation parameters to queue (#1889) * - tweaks on Estimation * version * Revert "version" This reverts commit bd1f4d7bfe3658a4967b93126859a62a511a70e2. * some fix and tweaks * implement queue frontend (#1950) * implement queue frontend * fix types * fix ws endpoint in build mode * cleanup * Queue tweaks (#1909) * tweaks on estimation payload * Queue keep ws connections open (#1910) * 1. keep ws connections open after the event process is completed 2. do not send estimations periodically if live queue updates is open * fix calculation * 1. tweaks on event_queue * fix issue - create new ws for each request * format * fix * fix tests * fix tests * tets * test * changes * changes * changes * change' * wtf * changes * changes * file perms * Release queue beta v1 (#1971) * - release the new queue * - bypass the issue in the tests - rewrite the lost part in the codebase * - add concurrent queue example (#1978) * rank_eta calc * Queue fixes (#1981) * change * format * - comment out queue tests as they dont work well * - reformat * Update gradio/event_queue.py Co-authored-by: Ömer Faruk Özdemir <[email protected]> * changes * changes * change * weird fix Co-authored-by: Ömer Faruk Özdemir <[email protected]> * release-queue-v3 (#1988) * Fix frontend queuing to target secure WSS (#1996) * change * format * changes * queue-concurrency-tweaks (#2002) 1. make gather_data and broadcast_estimation sequential instead of concurrent because they were deleting elements at the same time and raising expections which was lowering the performance * Update Queue API, documentation (#2026) * changes * changes * fixes * changes * change * fix Co-authored-by: Ömer Faruk Özdemir <[email protected]> Co-authored-by: pngwn <[email protected]>
43,215
0
65
34
13
180,689
14
gradio
8
gradio/event_queue.py
Python
8
{ "docstring": "\n Notify clients about events statuses in the queue periodically.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
https://github.com/gradio-app/gradio.git
4
async_purge_expired_orphaned_devices
def async_purge_expired_orphaned_devices(self) -> None:
    now_time = time.time()
    for deleted_device in list(self.deleted_devices.values()):
        if deleted_device.orphaned_timestamp is None:
            continue
        if (
            deleted_device.orphaned_timestamp + ORPHANED_DEVICE_KEEP_SECONDS
            < now_time
        ):
            del self.deleted_devices[deleted_device.id]
56278a4421209a475558600d220906e35df80fe0
11
device_registry.py
93
Simplify device registry (#77715) * Simplify device registry * Fix test fixture * Update homeassistant/helpers/device_registry.py Co-authored-by: epenet <[email protected]> * Update device_registry.py * Remove dead code Co-authored-by: epenet <[email protected]>
104,308
0
140
57
22
305,522
26
core
11
homeassistant/helpers/device_registry.py
Python
15
{ "docstring": "Purge expired orphaned devices from the registry.\n\n We need to purge these periodically to avoid the database\n growing without bound.\n ", "language": "en", "n_whitespaces": 41, "n_words": 20, "vocab_size": 18 }
https://github.com/home-assistant/core.git
1
test_https_bad_referer
def test_https_bad_referer(self):
    req = self._get_POST_request_with_token()
    req._is_secure_override = True
    req.META["HTTP_HOST"] = "www.example.com"
    req.META["HTTP_REFERER"] = "https://www.evil.org/somepage"
    req.META["SERVER_PORT"] = "443"
    mw = CsrfViewMiddleware(post_form_view)
    response = mw.process_view(req, post_form_view, (), {})
    self.assertContains(
        response,
        "Referer checking failed - https://www.evil.org/somepage does not "
        "match any trusted origins.",
        status_code=403,
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
9
tests.py
138
Refs #33476 -- Reformatted code with Black.
50,106
0
156
78
36
202,398
42
django
13
tests/csrf_tests/tests.py
Python
14
{ "docstring": "\n A POST HTTPS request with a bad referer is rejected\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/django/django.git
4
call
def call(cls, map_func, reduce_func=None, **call_kwds):
    if isinstance(map_func, str):
58bbcc37477866d19c8b092a0e1974a4f0baa586
7
groupby.py
36
REFACTOR-#2656: Update modin to fit algebra (code only) (#3717) Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Vasily Litvinov <[email protected]> Co-authored-by: Alexey Prutskov <[email protected]> Co-authored-by: Devin Petersohn <[email protected]> Signed-off-by: Rehan Durrani <[email protected]>
35,226
0
22
107
8
153,042
8
modin
7
modin/core/dataframe/algebra/groupby.py
Python
14
{ "docstring": "\n Build template GroupBy aggregation function.\n\n Resulted function is applied in parallel via TreeReduce algorithm.\n\n Parameters\n ----------\n map_func : str, dict or callable(pandas.DataFrameGroupBy) -> pandas.DataFrame\n If `str` this parameter will be treated as a function name to register,\n so `map_func` and `reduce_func` will be grabbed from `groupby_reduce_functions`.\n If dict or callable then this will be treated as a function to apply to the `GroupByObject`\n at the map phase.\n reduce_func : str, dict or callable(pandas.DataFrameGroupBy) -> pandas.DataFrame, optional\n Function to apply to the `GroupByObject` at the reduce phase. If not specified\n will be set the same as 'map_func'.\n **call_kwds : kwargs\n Kwargs that will be passed to the returned function.\n\n Returns\n -------\n callable\n Function that takes query compiler and executes GroupBy aggregation\n with TreeReduce algorithm.\n ", "language": "en", "n_whitespaces": 300, "n_words": 123, "vocab_size": 74 }
https://github.com/modin-project/modin.git
9
_evaluate
def _evaluate(model, args, val_dataset, *, padder_mode, num_flow_updates=None, batch_size=None, header=None):
    batch_size = batch_size or args.batch_size
    device = torch.device(args.device)
    model.eval()
    if args.distributed:
        sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, drop_last=True)
    else:
        sampler = torch.utils.data.SequentialSampler(val_dataset)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        sampler=sampler,
        batch_size=batch_size,
        pin_memory=True,
        num_workers=args.num_workers,
    )
    num_flow_updates = num_flow_updates or args.num_flow_updates
3aa2a93d1e2df140866e1937767b83dc27dcf89e
13
train.py
190
RAFT training reference Improvement (#5590) * Change optical flow train.py function name from validate to evaluate so it is similar to other references * Add --device as parameter and enable to run in non distributed mode * Format with ufmt * Fix unneccessary param and bug * Enable saving the optimizer and scheduler on the checkpoint * Fix bug when evaluate before resume and save or load model without ddp * Fix case where --train-dataset is None Co-authored-by: Nicolas Hug <[email protected]>
46,885
0
119
256
33
192,382
43
vision
23
references/optical_flow/train.py
Python
35
{ "docstring": "Helper function to compute various metrics (epe, etc.) for a model on a given dataset.\n\n We process as many samples as possible with ddp, and process the rest on a single worker.\n ", "language": "en", "n_whitespaces": 38, "n_words": 32, "vocab_size": 27 }
https://github.com/pytorch/vision.git
4
_create_standalone_pip
def _create_standalone_pip() -> Generator[str, None, None]:
    source = pathlib.Path(pip_location).resolve().parent

    # Return the current instance if `source` is not a directory. We can't build
    # a zip from this, and it likely means the instance is already standalone.
    if not source.is_dir():
        yield str(source)
        return

    with TempDirectory(kind="standalone-pip") as tmp_dir:
        pip_zip = os.path.join(tmp_dir.path, "__env_pip__.zip")
        kwargs = {}
        if sys.version_info >= (3, 8):
            kwargs["strict_timestamps"] = False
        with zipfile.ZipFile(pip_zip, "w", **kwargs) as zf:
            for child in source.rglob("*"):
                zf.write(child, child.relative_to(source.parent).as_posix())
        yield os.path.join(pip_zip, "pip")
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
18
build_env.py
244
Vendor in pip 22.1.2
3,739
0
181
142
63
21,259
77
pipenv
28
pipenv/patched/notpip/_internal/build_env.py
Python
19
{ "docstring": "Create a \"standalone pip\" zip file.\n\n The zip file's content is identical to the currently-running pip.\n It will be used to install requirements into the build environment.\n ", "language": "en", "n_whitespaces": 36, "n_words": 27, "vocab_size": 24 }
https://github.com/pypa/pipenv.git
1
_get_ray_cr_with_overrides
def _get_ray_cr_with_overrides() -> dict:
    cr = _get_basic_ray_cr()
    cr["spec"]["workerGroupSpecs"][0]["rayStartParams"]["memory"] = "300000000"
    # num-gpus rayStartParam with no gpus in container limits
    cr["spec"]["workerGroupSpecs"][0]["rayStartParams"]["num-gpus"] = "100"
    # num-gpus rayStartParam overriding gpus in container limits
    cr["spec"]["workerGroupSpecs"][1]["rayStartParams"]["num-gpus"] = "100"
    cr["spec"]["workerGroupSpecs"][0]["rayStartParams"]["num-cpus"] = "100"
    return cr
7d3ceb222c8af98a5c101b1c28ab37ffcb0a3793
12
test_autoscaling_config.py
169
[kuberay][autoscaler] Improve CPU, GPU, and memory detection. (#26219) This PR improves the autoscaler's resource detection logic
27,527
0
65
86
24
124,152
38
ray
4
python/ray/tests/kuberay/test_autoscaling_config.py
Python
8
{ "docstring": "CR with memory, cpu, and gpu overrides from rayStartParams.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/ray-project/ray.git
1
test_from_percentage
def test_from_percentage(percentage, expected_result):
    assert _from_fan_percentage(percentage) == expected_result
b52a8ba37a5e5e05b80beddff06b116371941d86
8
test_util.py
26
Bump pytradfri to 8.0.1 and fix fan preset mode "Auto" bug (#63920) * Move util functions * Fix errors * Revert changes * Fix tests * Use self.async_set_percentage() * Fix calculation functions and associated tests * Handle case of 0 * Update tests/components/tradfri/test_util.py Co-authored-by: Martin Hjelmare <[email protected]> * Update tests/components/tradfri/test_util.py Co-authored-by: Martin Hjelmare <[email protected]> * Update tests/components/tradfri/test_util.py Co-authored-by: Martin Hjelmare <[email protected]> * Handle case of 0 * Update homeassistant/components/tradfri/fan.py Co-authored-by: Martin Hjelmare <[email protected]> Co-authored-by: Martin Hjelmare <[email protected]>
108,183
0
13
15
7
309,483
7
core
4
tests/components/tradfri/test_util.py
Python
2
{ "docstring": "Test that we can convert percentage value to fan speed.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/home-assistant/core.git
1
set_3d_properties
def set_3d_properties(self, z=0, zdir='z'):
    self._z = z
    self._dir_vec = get_dir_vector(zdir)
    self.stale = True
df6f95703b60348e01603f98a439b133da2938a0
8
art3d.py
54
Improve mpl_toolkit documentation
23,827
0
41
32
11
109,920
13
matplotlib
8
lib/mpl_toolkits/mplot3d/art3d.py
Python
4
{ "docstring": "\n Set the *z* position and direction of the text.\n\n Parameters\n ----------\n z : float\n The z-position in 3D space.\n zdir : {'x', 'y', 'z', 3-tuple}\n The direction of the text. Default: 'z'.\n See `.get_dir_vector` for a description of the values.\n ", "language": "en", "n_whitespaces": 116, "n_words": 40, "vocab_size": 31 }
https://github.com/matplotlib/matplotlib.git
2
test_nav_missing_page
def test_nav_missing_page(self):
    nav_cfg = [
        {'Home': 'index.md'},
    ]
    expected = dedent(
    )
    cfg = load_config(nav=nav_cfg, site_url='http://example.com/')
    fs = [
        File('index.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls']),
        File('page_not_in_nav.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
    ]
    files = Files(fs)
    site_navigation = get_navigation(files, cfg)
    self.assertEqual(str(site_navigation).strip(), expected)
    self.assertEqual(len(site_navigation.items), 1)
    self.assertEqual(len(site_navigation.pages), 1)
    for file in files:
        self.assertIsInstance(file.page, Page)
372384d8102ddb4be6360f44d1bfddb8b45435a4
11
nav_tests.py
240
Some manual changes ahead of formatting code with Black
57,224
0
193
146
37
224,171
47
mkdocs
25
mkdocs/tests/structure/nav_tests.py
Python
21
{ "docstring": "\n Page(title='Home', url='/')\n ", "language": "en", "n_whitespaces": 25, "n_words": 2, "vocab_size": 2 }
https://github.com/mkdocs/mkdocs.git
1
test_large_remote_call
def test_large_remote_call(call_ray_start_shared):
    with ray_start_client_server_for_address(call_ray_start_shared) as ray:
297341e107daee1ea3aff991ae8ea8c90993c683
10
test_client.py
29
[Test][Client] Only start ray once in client tests (#28835) It looks like we're frequently starting and shutting down Ray in this test because `ray_start_client_server` isn't connecting to the Ray created by `ray_start_regular_shared`, and is instead starting a new Ray head process every time it launches. Ray client tests are failing frequently with: ``` [2022-10-06 07:31:46,253 E 13235 13751] core_worker_process.cc:277: The core worker has already been shutdown. This happens when the language frontend accesses the Ray's worker after it is shutdown. The process will exit ``` Which is probably caused by having multiple ray clusters running simultaneous, with some shutting down asynchronously. This refactor forces all of the tests in the module to use the same Ray cluster. Also fixes two other sources of potential flakiness: * Joins the thread in test_client_thread_safe (seems like this has a bad interaction when the client server is cleaned up) * Calls ray.get in `test_stdout_log_stream`, to make sure that the remote function is done running before we try searching for its output Should also have the happy side effect of speeding up test_client. Ran the `Small & Client` tests (regular and external redis) twice each, no flakes, and windows version of test_client.
30,161
0
12
157
6
133,953
6
ray
4
python/ray/tests/test_client.py
Python
19
{ "docstring": "\n Test remote calls with large (multiple chunk) arguments\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
https://github.com/ray-project/ray.git
2
legend
def legend(self, *args, **kwargs):
    handles, labels, extra_args, kwargs = mlegend._parse_legend_args(
        [self], *args, **kwargs)
    if len(extra_args):
        raise TypeError('legend only accepts two non-keyword arguments')
    self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
    self.legend_._remove_method = self._remove_legend
    return self.legend_
7c67cadfd903a26b3d56dd2c95faf2365367fd1a
10
_axes.py
116
Highlight information about avoiding labels in legend
23,529
0
131
75
26
109,334
33
matplotlib
15
lib/matplotlib/axes/_axes.py
Python
10
{ "docstring": "\n Place a legend on the Axes.\n\n Call signatures::\n\n legend()\n legend(handles, labels)\n legend(handles=handles)\n legend(labels)\n\n The call signatures correspond to the following different ways to use\n this method:\n\n **1. Automatic detection of elements to be shown in the legend**\n\n The elements to be added to the legend are automatically determined,\n when you do not pass in any extra arguments.\n\n In this case, the labels are taken from the artist. You can specify\n them either at artist creation or by calling the\n :meth:`~.Artist.set_label` method on the artist::\n\n ax.plot([1, 2, 3], label='Inline label')\n ax.legend()\n\n or::\n\n line, = ax.plot([1, 2, 3])\n line.set_label('Label via method')\n ax.legend()\n\n .. note::\n Specific artists can be excluded from the automatic legend element\n selection by using a label starting with an underscore, \"_\".\n A string starting with an underscore is the default label for all\n artists, so calling `.Axes.legend` without any arguments and\n without setting the labels manually will result in no legend being\n drawn.\n\n\n **2. Explicitly listing the artists and labels in the legend**\n\n For full control of which artists have a legend entry, it is possible\n to pass an iterable of legend artists followed by an iterable of\n legend labels respectively::\n\n ax.legend([line1, line2, line3], ['label1', 'label2', 'label3'])\n\n\n **3. Explicitly listing the artists in the legend**\n\n This is similar to 2, but the labels are taken from the artists'\n label properties. Example::\n\n line1, = ax.plot([1, 2, 3], label='label1')\n line2, = ax.plot([1, 2, 3], label='label2')\n ax.legend(handles=[line1, line2])\n\n\n **4. Labeling existing plot elements**\n\n .. admonition:: Discouraged\n\n This call signature is discouraged, because the relation between\n plot elements and labels is only implicit by their order and can\n easily be mixed up.\n\n To make a legend for all artists on an Axes, call this function with\n an iterable of strings, one for each legend item. For example::\n\n ax.plot([1, 2, 3])\n ax.plot([5, 6, 7])\n ax.legend(['First line', 'Second line'])\n\n\n Parameters\n ----------\n handles : sequence of `.Artist`, optional\n A list of Artists (lines, patches) to be added to the legend.\n Use this together with *labels*, if you need full control on what\n is shown in the legend and the automatic mechanism described above\n is not sufficient.\n\n The length of handles and labels should be the same in this\n case. If they are not, they are truncated to the smaller length.\n\n labels : list of str, optional\n A list of labels to show next to the artists.\n Use this together with *handles*, if you need full control on what\n is shown in the legend and the automatic mechanism described above\n is not sufficient.\n\n Returns\n -------\n `~matplotlib.legend.Legend`\n\n Other Parameters\n ----------------\n %(_legend_kw_doc)s\n\n See Also\n --------\n .Figure.legend\n\n Notes\n -----\n Some artists are not supported by this function. See\n :doc:`/tutorials/intermediate/legend_guide` for details.\n\n Examples\n --------\n .. plot:: gallery/text_labels_and_annotations/legend.py\n ", "language": "en", "n_whitespaces": 1141, "n_words": 446, "vocab_size": 237 }
https://github.com/matplotlib/matplotlib.git
8
_disallow_batch_hooks_in_ps_strategy
def _disallow_batch_hooks_in_ps_strategy(self):
    # pylint: disable=protected-access
    strategy = tf.distribute.get_strategy()
    if strategy._should_use_with_coordinator:
        unsupported_callbacks = []
        for cb in self.callbacks:
            # These Callbacks can accept RemoteValues directly.
            if getattr(cb, "_supports_tf_logs", False):
                continue
            if (
                cb._implements_train_batch_hooks()
                or cb._implements_test_batch_hooks()
                or cb._implements_predict_batch_hooks()
            ):
                unsupported_callbacks.append(cb)
        if unsupported_callbacks:
            raise ValueError(
                "Batch-level `Callback`s are not supported with "
                "`ParameterServerStrategy`. Found unsupported "
                f"callbacks: {unsupported_callbacks}"
            )
    # pylint: enable=protected-access


@keras_export("keras.callbacks.Callback")
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.callbacks.Callback")
14
callbacks.py
156
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,365
1
369
80
51
269,969
60
keras
17
keras/callbacks.py
Python
19
{ "docstring": "Error out if batch-level callbacks are passed with PSStrategy.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/keras-team/keras.git
15
compatible_abstract_dep
def compatible_abstract_dep(self, other): from .requirements import Requirement if len(self.candidates) == 1 and next(iter(self.candidates)).editable: return self elif len(other.candidates) == 1 and next(iter(other.candidates)).editable: return other new_specifiers = self.specifiers & other.specifiers markers = set(self.markers) if self.markers else set() if other.markers: markers.add(other.markers) new_markers = None if markers: new_markers = Marker(" or ".join(str(m) for m in sorted(markers))) new_ireq = copy.deepcopy(self.requirement.ireq) new_ireq.req.specifier = new_specifiers new_ireq.req.marker = new_markers new_requirement = Requirement.from_line(format_requirement(new_ireq)) compatible_versions = self.compatible_versions(other) if isinstance(compatible_versions, AbstractDependency): return compatible_versions candidates = [ c for c in self.candidates if parse(version_from_ireq(c)) in compatible_versions ] dep_dict = {} candidate_strings = [format_requirement(c) for c in candidates] for c in candidate_strings: if c in self.dep_dict: dep_dict[c] = self.dep_dict.get(c) return AbstractDependency( name=self.name, specifiers=new_specifiers, markers=new_markers, candidates=candidates, requirement=new_requirement, parent=self.parent, dep_dict=dep_dict, finder=self.finder, )
8a4d2eb130fd173466310f59df607ea59bfc44a5
15
dependencies.py
444
Vendor in latest requirements lib and pip-shims in order to drop packaging and resolve differences in sourcing it.
3,973
0
473
285
76
21,648
117
pipenv
44
pipenv/vendor/requirementslib/models/dependencies.py
Python
40
{ "docstring": "Merge this abstract dependency with another one.\n\n Return the result of the merge as a new abstract dependency.\n\n :param other: An abstract dependency to merge with\n :type other: :class:`~requirementslib.models.dependency.AbstractDependency`\n :return: A new, combined abstract dependency\n :rtype: :class:`~requirementslib.models.dependency.AbstractDependency`\n ", "language": "en", "n_whitespaces": 79, "n_words": 37, "vocab_size": 27 }
https://github.com/pypa/pipenv.git
2
shade_normals
def shade_normals(self, normals, fraction=1.):
    intensity = normals.dot(self.direction)

    # Apply contrast stretch
    imin, imax = intensity.min(), intensity.max()
    intensity *= fraction

    # Rescale to 0-1, keeping range before contrast stretch
    # If constant slope, keep relative scaling (i.e. flat should be 0.5,
    # fully occluded 0, etc.)
    if (imax - imin) > 1e-6:
        # Strictly speaking, this is incorrect. Negative values should be
        # clipped to 0 because they're fully occluded. However, rescaling
        # in this manner is consistent with the previous implementation and
        # visually appears better than a "hard" clip.
        intensity -= imin
        intensity /= (imax - imin)
    intensity = np.clip(intensity, 0, 1)

    return intensity
f16da868d016363c4cd734b2abd6535230b094df
10
colors.py
124
[Doc] Fix ndarray-links for arguments
24,318
0
248
78
79
110,839
105
matplotlib
13
lib/matplotlib/colors.py
Python
9
{ "docstring": "\n Calculate the illumination intensity for the normal vectors of a\n surface using the defined azimuth and elevation for the light source.\n\n Imagine an artificial sun placed at infinity in some azimuth and\n elevation position illuminating our surface. The parts of the surface\n that slope toward the sun should brighten while those sides facing away\n should become darker.\n\n Parameters\n ----------\n fraction : number, optional\n Increases or decreases the contrast of the hillshade. Values\n greater than one will cause intermediate values to move closer to\n full illumination or shadow (and clipping any values that move\n beyond 0 or 1). Note that this is not visually or mathematically\n the same as vertical exaggeration.\n\n Returns\n -------\n `~numpy.ndarray`\n A 2D array of illumination values between 0-1, where 0 is\n completely in shadow and 1 is completely illuminated.\n ", "language": "en", "n_whitespaces": 302, "n_words": 132, "vocab_size": 96 }
https://github.com/matplotlib/matplotlib.git
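A short usage sketch for the hillshading helper above. It drives shade_normals indirectly through the public LightSource.hillshade API on a synthetic surface; the elevation model and light angles are illustrative, not taken from this record.

import numpy as np
from matplotlib.colors import LightSource

# A smooth synthetic bump standing in for a digital elevation model.
x, y = np.meshgrid(np.linspace(-2, 2, 200), np.linspace(-2, 2, 200))
z = np.exp(-(x ** 2 + y ** 2))

ls = LightSource(azdeg=315, altdeg=45)
intensity = ls.hillshade(z, vert_exag=1.0, fraction=1.0)  # calls shade_normals internally
print(intensity.min(), intensity.max())  # rescaled illumination values in [0, 1]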
4
get_loading_pipeline
def get_loading_pipeline(pipeline):
    loading_pipeline_cfg = []
    for cfg in pipeline:
        obj_cls = PIPELINES.get(cfg['type'])
        # TODO:use more elegant way to distinguish loading modules
        if obj_cls is not None and obj_cls in (LoadImageFromFile,
                                               LoadAnnotations,
                                               LoadPanopticAnnotations):
            loading_pipeline_cfg.append(cfg)
    assert len(loading_pipeline_cfg) == 2, \
        'The data pipeline in your config file must include ' \
        'loading image and annotations related pipeline.'
    return loading_pipeline_cfg


@HOOKS.register_module()
301d4a2d4cfe1cdb62608e2892924be3e67e3098
@HOOKS.register_module()
12
utils.py
113
[Feature] Support visualization for Panoptic Segmentation (#7041) * First commit of v2 * split the functions * Support to show panoptic result * temp * Support to show gt * support show gt * fix lint * Support to browse datasets * Fix unit tests * Fix findContours * fix comments * Fix pre-commit * fix lint * Add the type of an argument
70,172
1
210
62
50
243,970
58
mmdetection
14
mmdet/datasets/utils.py
Python
12
{ "docstring": "Only keep loading image and annotations related configuration.\n\n Args:\n pipeline (list[dict]): Data pipeline configs.\n\n Returns:\n list[dict]: The new pipeline list with only keep\n loading image and annotations related configuration.\n\n Examples:\n >>> pipelines = [\n ... dict(type='LoadImageFromFile'),\n ... dict(type='LoadAnnotations', with_bbox=True),\n ... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n ... dict(type='RandomFlip', flip_ratio=0.5),\n ... dict(type='Normalize', **img_norm_cfg),\n ... dict(type='Pad', size_divisor=32),\n ... dict(type='DefaultFormatBundle'),\n ... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])\n ... ]\n >>> expected_pipelines = [\n ... dict(type='LoadImageFromFile'),\n ... dict(type='LoadAnnotations', with_bbox=True)\n ... ]\n >>> assert expected_pipelines ==\\\n ... get_loading_pipeline(pipelines)\n ", "language": "en", "n_whitespaces": 271, "n_words": 79, "vocab_size": 50 }
https://github.com/open-mmlab/mmdetection.git
1
test_cancel_during_response_hook
def test_cancel_during_response_hook(tctx):
    playbook, cff = start_h2_client(tctx)
    flow = Placeholder(HTTPFlow)
    server = Placeholder(Server)
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"]
            ).serialize(),
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply()
        << http.HttpRequestHook(flow)
        >> reply()
        << OpenConnection(server)
        >> reply(None)
        << SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        >> DataReceived(server, b"HTTP/1.1 204 No Content\r\n\r\n")
        << http.HttpResponseHeadersHook(flow)
        << CloseConnection(server)
        >> reply(to=-2)
        << http.HttpResponseHook(flow)
        >> DataReceived(
            tctx.client, cff.build_rst_stream_frame(1, ErrorCodes.CANCEL).serialize()
        )
        >> reply(to=-2)
    )
b3587b52b25077f68116b9852b041d33e7fc6601
30
test_http2.py
266
make it black!
73,878
0
267
175
43
251,888
64
mitmproxy
29
test/mitmproxy/proxy/layers/http/test_http2.py
Python
29
{ "docstring": "\n Test that we properly handle the case of the following event sequence:\n - we receive a server response\n - we trigger the response hook\n - the client cancels the stream\n - the response hook completes\n\n Given that we have already triggered the response hook, we don't want to trigger the error hook.\n ", "language": "en", "n_whitespaces": 90, "n_words": 52, "vocab_size": 32 }
https://github.com/mitmproxy/mitmproxy.git
2
__getitem__
def __getitem__(self, name):
    if name in MEDIA_TYPES:
        return Media(**{str(name): getattr(self, "_" + name)})
    raise KeyError('Unknown media type "%s"' % name)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
16
widgets.py
69
Refs #33476 -- Reformatted code with Black.
51,333
0
52
40
20
206,035
20
django
8
django/forms/widgets.py
Python
4
{ "docstring": "Return a Media object that only contains media of the given type.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/django/django.git
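A minimal sketch of how the lookup above is typically used. The settings.configure() call is only there so the snippet runs outside a Django project, and the asset names are made up.

import django
from django.conf import settings

settings.configure()  # bare-bones settings so the forms machinery can be used standalone
django.setup()

from django.forms.widgets import Media

media = Media(css={"all": ["base.css"]}, js=["app.js"])
css_only = media["css"]   # a new Media object holding only the CSS assets
js_only = media["js"]     # likewise for the JavaScript assets
# media["audio"] would raise KeyError('Unknown media type "audio"')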
58
payload
def payload(self, place=None, parameter=None, value=None, newValue=None, where=None): if conf.direct: return self.payloadDirect(newValue) retVal = "" if kb.forceWhere: where = kb.forceWhere elif where is None and isTechniqueAvailable(getTechnique()): where = getTechniqueData().where if kb.injection.place is not None: place = kb.injection.place if kb.injection.parameter is not None: parameter = kb.injection.parameter paramString = conf.parameters[place] paramDict = conf.paramDict[place] origValue = getUnicode(paramDict[parameter]) newValue = getUnicode(newValue) if newValue else newValue base64Encoding = re.sub(r" \(.+", "", parameter) in conf.base64Parameter if place == PLACE.URI or BOUNDED_INJECTION_MARKER in origValue: paramString = origValue if place == PLACE.URI: origValue = origValue.split(kb.customInjectionMark)[0] else: origValue = filterNone(re.search(_, origValue.split(BOUNDED_INJECTION_MARKER)[0]) for _ in (r"\w+\Z", r"[^\"'><]+\Z", r"[^ ]+\Z"))[0].group(0) origValue = origValue[origValue.rfind('/') + 1:] for char in ('?', '=', ':', ',', '&'): if char in origValue: origValue = origValue[origValue.rfind(char) + 1:] elif place == PLACE.CUSTOM_POST: paramString = origValue origValue = origValue.split(kb.customInjectionMark)[0] if kb.postHint in (POST_HINT.SOAP, POST_HINT.XML): origValue = re.split(r"['\">]", origValue)[-1] elif kb.postHint in (POST_HINT.JSON, POST_HINT.JSON_LIKE): origValue = extractRegexResult(r":\s*['\"]?(?P<result>\w+\Z)", origValue) or extractRegexResult(r'(?s)[\s:]*(?P<result>[^"\[,]+\Z)', origValue) else: _ = extractRegexResult(r"(?s)(?P<result>[^\s<>{}();'\"&]+\Z)", origValue) or "" origValue = _.split('=', 1)[1] if '=' in _ else "" elif place == PLACE.CUSTOM_HEADER: paramString = origValue origValue = origValue[origValue.find(',') + 1:] origValue = origValue.split(kb.customInjectionMark)[0] match = re.search(r"([^;]+)=(?P<value>[^;]*);?\Z", origValue) if match: origValue = match.group("value") elif ',' in paramString: header = paramString.split(',')[0] if header.upper() == HTTP_HEADER.AUTHORIZATION.upper(): origValue = origValue.split(' ')[-1].split(':')[-1] origValue = origValue or "" if value is None: if where == PAYLOAD.WHERE.ORIGINAL: value = origValue elif where == PAYLOAD.WHERE.NEGATIVE: if conf.invalidLogical: match = re.search(r"\A[^ ]+", newValue) newValue = newValue[len(match.group() if match else ""):] _ = randomInt(2) value = "%s%s AND %s LIKE %s" % (origValue, match.group() if match else "", _, _ + 1) elif conf.invalidBignum: value = randomInt(6) elif conf.invalidString: value = randomStr(6) else: if newValue.startswith("-"): value = "" else: value = "-%s" % randomInt() elif where == PAYLOAD.WHERE.REPLACE: value = "" else: value = origValue newValue = "%s%s" % (value, newValue) newValue = self.cleanupPayload(newValue, origValue) or "" if base64Encoding: _newValue = newValue _origValue = origValue if newValue: newValue = newValue.replace(BOUNDARY_BACKSLASH_MARKER, '\\') newValue = self.adjustLateValues(newValue) # TODO: support for POST_HINT newValue = "%s%s%s" % (BOUNDED_BASE64_MARKER, newValue, BOUNDED_BASE64_MARKER) if parameter in kb.base64Originals: origValue = kb.base64Originals[parameter] else: origValue = encodeBase64(origValue, binary=False, encoding=conf.encoding or UNICODE_ENCODING) if place in (PLACE.URI, PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER): _ = "%s%s" % (origValue, kb.customInjectionMark) if kb.postHint == POST_HINT.JSON and not isNumber(newValue) and '"%s"' % _ not in paramString: newValue = '"%s"' % 
self.addPayloadDelimiters(newValue) elif kb.postHint == POST_HINT.JSON_LIKE and not isNumber(newValue) and re.search(r"['\"]%s['\"]" % re.escape(_), paramString) is None: newValue = "'%s'" % self.addPayloadDelimiters(newValue) else: newValue = self.addPayloadDelimiters(newValue) if newValue: newValue = newValue.replace(kb.customInjectionMark, REPLACEMENT_MARKER) retVal = paramString.replace(_, newValue) retVal = retVal.replace(kb.customInjectionMark, "").replace(REPLACEMENT_MARKER, kb.customInjectionMark) elif BOUNDED_INJECTION_MARKER in paramDict[parameter]: if base64Encoding: retVal = paramString.replace("%s%s" % (_origValue, BOUNDED_INJECTION_MARKER), _newValue) match = re.search(r"(%s)=([^&]*)" % re.sub(r" \(.+", "", parameter), retVal) if match: retVal = retVal.replace(match.group(0), "%s=%s" % (match.group(1), encodeBase64(match.group(2), binary=False, encoding=conf.encoding or UNICODE_ENCODING))) else: retVal = paramString.replace("%s%s" % (origValue, BOUNDED_INJECTION_MARKER), self.addPayloadDelimiters(newValue)) elif place in (PLACE.USER_AGENT, PLACE.REFERER, PLACE.HOST): retVal = paramString.replace(origValue, self.addPayloadDelimiters(newValue)) else:
b1881129b6201ffd7d2ea01754ca2c76479776a3
22
agent.py
1,733
Bug for JSON-like bug (#5013)
27,346
0
1,802
1,255
188
123,418
474
sqlmap
83
lib/core/agent.py
Python
118
{ "docstring": "\n This method replaces the affected parameter with the SQL\n injection statement to request\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 12 }
https://github.com/sqlmapproject/sqlmap.git
2
is_alive
def is_alive(self) -> bool:
    return any(thread.is_alive() for thread in self._threads)
326110f09d45dbdce2e490fa1ae4b1208e5efe2c
9
multithreading.py
39
bugfix - timelapse image loader multithreading.py - typing + docs
20,745
0
24
23
10
101,327
10
faceswap
6
lib/multithreading.py
Python
9
{ "docstring": " Check if any threads are still alive\n\n Returns\n -------\n bool\n ``True`` if any threads are alive. ``False`` if no threads are alive\n ", "language": "en", "n_whitespaces": 62, "n_words": 22, "vocab_size": 14 }
https://github.com/deepfakes/faceswap.git
2
expand_block
def expand_block(self, block):
    w = list(struct.unpack(">16L", block)) + [0] * 64
    for i in range(16, 80):
        w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
    return w
f0af0c43340763724f139fa68aa1e5a9ffe458b4
16
sha1.py
122
refactor: clean code Signed-off-by: slowy07 <[email protected]>
4,456
0
75
80
27
22,785
36
Python
10
sha1.py
Python
5
{ "docstring": "\n Takes a bytestring-block of length 64, unpacks it to a list of integers and returns a\n list of 80 integers pafter some bit operations\n ", "language": "en", "n_whitespaces": 46, "n_words": 24, "vocab_size": 18 }
https://github.com/geekcomputers/Python.git
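A self-contained sketch of the same message-schedule expansion, with the rotate helper inlined so it runs without the surrounding SHA1Hash class; the sample block is arbitrary.

import struct

def rotate(n, b):
    # 32-bit left rotation used by SHA-1's message schedule
    return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

block = bytes(range(64))  # any 64-byte chunk of the padded message
w = list(struct.unpack(">16L", block)) + [0] * 64
for i in range(16, 80):
    w[i] = rotate(w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16], 1)

print(len(w))      # 80 schedule words
print(hex(w[16]))  # first derived word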
1
test_redact_relation_annotation
def test_redact_relation_annotation(self) -> None: channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") self.assertEqual(200, channel.code, channel.json_body) to_redact_event_id = channel.json_body["event_id"] channel = self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token ) self.assertEqual(200, channel.code, channel.json_body) unredacted_event_id = channel.json_body["event_id"] # Both relations should exist. event_ids, relations = self._make_relation_requests() self.assertCountEqual(event_ids, [to_redact_event_id, unredacted_event_id]) self.assertEquals( relations["m.annotation"], {"chunk": [{"type": "m.reaction", "key": "a", "count": 2}]}, ) # Both relations appear in the aggregation. chunk = self._get_aggregations() self.assertEqual(chunk, [{"type": "m.reaction", "key": "a", "count": 2}]) # Redact one of the reactions. self._redact(to_redact_event_id) # The unredacted relation should still exist. event_ids, relations = self._make_relation_requests() self.assertEquals(event_ids, [unredacted_event_id]) self.assertEquals( relations["m.annotation"], {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]}, ) # The unredacted aggregation should still exist. chunk = self._get_aggregations() self.assertEqual(chunk, [{"type": "m.reaction", "key": "a", "count": 1}])
f63bedef07360216a8de71dc38f00f1aea503903
13
test_relations.py
435
Invalidate caches when an event with a relation is redacted. (#12121) The caches for the target of the relation must be cleared so that the bundled aggregations are re-calculated after the redaction is processed.
71,672
0
352
250
59
247,440
115
synapse
21
tests/rest/client/test_relations.py
Python
33
{ "docstring": "\n Test that annotations of an event are properly handled after the\n annotation is redacted.\n\n The redacted relation should not be included in bundled aggregations or\n the response to relations.\n ", "language": "en", "n_whitespaces": 65, "n_words": 29, "vocab_size": 28 }
https://github.com/matrix-org/synapse.git
1
test_collision
def test_collision():
    content = 
    config = parser.parse(content)

    assert config["example"]["nested"]["path"] == "first one"
    assert config["example"]["nested.path"] == "uh oh"
    assert config["reference_to_nested_path"] == "uh oh"
    assert config["example"]["nested"]["more_nested"]["value"] == "found it!"
    assert config["reference_to_nested_nested_value"] == "found it!"
aa9251872136235be81f5d5f6d97eec00928a992
11
test_yaml_parser.py
134
Low-code connectors: configurable source from yaml (#13038) * checkout from alex/cac * checkout from alex/cac * checkout from alex/cac * checkout from alex/cac * Add missing tests * Add missing files * Add missing tests * add missing file * missing file * missing file * sengrid low code connector * rename * doc * doc * remove broken test * rename * jinja dependency * Add comment * comment * comment * pyjq dependency * update import * rename file * delete unused file * Revert "delete unused file" This reverts commit 758e939367775ddbefcd52c6e1e832976d3ba9fe. * fix * rename * abstract property * delete unused field * delete unused field * rename * pass kwargs directly * isort * Revert "isort" This reverts commit 4a792239440bc9950813ccc6ed368641ce2a96e4. * isort * update state * fix imports * update * update dependency * remove dead code * remove dead code * format * rename file * decoder * Use decoder * Update comment * dict_state is actually backed by a dict * Add a comment * update state takes kwargs * move state out of offset paginator * fix * update jq parameter order * fix * pass config * update * update * remove incremental mixin * delete comment * start workin on yaml parser * fix test * progress * refer and overwrite partials * factory tests pass * fix * reset * Assert http_method is an enum value * fix auth * read lists works * fix test * comment * implement all streams * build connection checker * update comments * update comments * remove no_state * rename package * checkout from alex/cac * Add missing tests * Add missing files * missing file * rename * jinja dependency * Add comment * comment * comment * Revert "delete unused file" This reverts commit 758e939367775ddbefcd52c6e1e832976d3ba9fe. * delete unused field * delete unused field * rename * pass kwargs directly * isort * Revert "isort" This reverts commit 4a792239440bc9950813ccc6ed368641ce2a96e4. * format * decoder * better error handling * remove nostate * isort * remove print * move test * delete duplicates * update * delete dead code * Update mapping type to [str, Any] * add comment * Add comment * pass parameters through kwargs * pass parameters through kwargs * fix test * update interface * update interface to pass source in interface * update interface to pass source in interface * rename to stream_slicer * Allow passing a string or an enum * Define StateType enum * unit tests pass * update dict state * update * can read * fix test * fix from yaml update * elif * convert state_type if not of type type * convert state_type if not of type type * Add a test * Low code connectors: string interpolation with jinja (#12852) * checkout from alex/cac * Add missing tests * Add missing files * missing file * rename * jinja dependency * Add comment * comment * comment * Revert "delete unused file" This reverts commit 758e939367775ddbefcd52c6e1e832976d3ba9fe. * delete unused field * delete unused field * rename * pass kwargs directly * isort * Revert "isort" This reverts commit 4a792239440bc9950813ccc6ed368641ce2a96e4. 
* format * decoder * better error handling * remove nostate * isort * delete dead code * Update mapping type to [str, Any] * add comment * Add comment * pass parameters through kwargs * move test to right module * Add missing test * Use authbase instead of deprecated class * leverage generator * remove sendgrid specific code * update * update * delete comment * remove sendgrid specific file * remove unused file * Delete dead code * rename methods * rename to declarative * rename the classes too * select streams to check * nit * rename method * rename class * {} is faster than dict() * Update airbyte-cdk/python/airbyte_cdk/sources/declarative/parsers/yaml_parser.py Co-authored-by: Sherif A. Nada <[email protected]> * more precise exception * rename class * add comment * Try to install packages to build jq * isort * only automake * Revert "only automake" This reverts commit c8fe154ffcbec3fa05289a7b6f087e5711622c87. * remove git * rename file * create components in kwargs * Use tuple of strings * parser doesn't need to be stored * move file and delete duplicates * Revert "Use tuple of strings" This reverts commit ab5a7afd08ef9a781f067389bc93c19240964e41. * raise error if streams to check are not in the catalog * Revert "Revert "Use tuple of strings"" This reverts commit 7c9fb8eb3390cb36ea53fccce4a39c618eba9327. * traverse tree * rename to options * move docstring * Update airbyte-cdk/python/airbyte_cdk/sources/declarative/checks/check_stream.py Co-authored-by: Sherif A. Nada <[email protected]> * fix tests and format * format * update * better error message * Add jq dependency * Use request header provider * rename * rename field * remove get_context method * rename * add a comment * format Co-authored-by: Sherif A. Nada <[email protected]>
795
0
53
68
19
5,647
32
airbyte
5
airbyte-cdk/python/unit_tests/sources/declarative/parsers/test_yaml_parser.py
Python
19
{ "docstring": "\nexample:\n nested:\n path: \"first one\"\n more_nested:\n value: \"found it!\"\n nested.path: \"uh oh\"\nreference_to_nested_path:\n ref: \"*ref(example.nested.path)\"\nreference_to_nested_nested_value:\n ref: \"*ref(example.nested.more_nested.value)\"\n ", "language": "en", "n_whitespaces": 36, "n_words": 18, "vocab_size": 17 }
https://github.com/airbytehq/airbyte.git
1
do_get
def do_get(self, arg):
    ray.worker.global_worker.debugger_get_breakpoint = self._breakpoint_uuid
    self.__restore()
    self.handle.connection.close()
    return Pdb.do_continue(self, arg)
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
9
rpdb.py
69
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,957
0
46
42
11
133,204
11
ray
14
python/ray/util/rpdb.py
Python
5
{ "docstring": "get\n Skip to where the current task returns to.\n ", "language": "en", "n_whitespaces": 23, "n_words": 9, "vocab_size": 9 }
https://github.com/ray-project/ray.git
2
trace
def trace(log_dir, create_perfetto_link=False, create_perfetto_trace=False):
    start_trace(log_dir, create_perfetto_link, create_perfetto_trace)
    try:
        yield
    finally:
        stop_trace()
260f1d8b843483df46cf397ae5a1afc0abc9c64f
10
profiler.py
51
Add option to generate perfetto trace without generating link
27,075
0
21
30
11
121,807
11
jax
6
jax/_src/profiler.py
Python
6
{ "docstring": "Context manager to take a profiler trace.\n\n The trace will capture CPU, GPU, and/or TPU activity, including Python\n functions and JAX on-device operations.\n\n The resulting trace can be viewed with TensorBoard. Note that TensorBoard\n doesn't need to be running when collecting the trace.\n\n Only once trace may be collected a time. A RuntimeError will be raised if a\n trace is started while another trace is running.\n\n Args:\n log_dir: The directory to save the profiler trace to (usually the\n TensorBoard log directory).\n create_perfetto_link: A boolean which, if true, creates and prints link to\n the Perfetto trace viewer UI (https://ui.perfetto.dev). The program will\n block until the link is opened and Perfetto loads the trace.\n create_perfetto_trace: A boolean which, if true, additionally dumps a\n ``perfetto_trace.json.gz`` file that is compatible for upload with the\n Perfetto trace viewer UI (https://ui.perfetto.dev). The file will also be\n generated if ``create_perfetto_link`` is true. This could be useful if you\n want to generate a Perfetto-compatible trace without blocking the\n processs.\n ", "language": "en", "n_whitespaces": 218, "n_words": 161, "vocab_size": 97 }
https://github.com/google/jax.git
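A hedged usage sketch for the context manager above, assuming the public jax.profiler API; the log directory and workload are placeholders.

import jax.numpy as jnp
from jax import profiler

with profiler.trace("/tmp/jax-trace"):     # inspect later with TensorBoard
    x = jnp.arange(1024.0)
    y = (x @ x).block_until_ready()        # force the work to finish inside the trace
print(y)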
1
get_weights_path
def get_weights_path(url):
    url = parse_url(url)
    path, _ = get_path(url, WEIGHTS_HOME)
    return path
b29aa30f650b06bea79698adfb3392cc96feb583
8
download.py
41
given inference model path, download model automatically (#6403) * given inference model path, download model automatically * encapsulate auto download model as a function * given note that default model dir is download link * change attr model download path to https://bj.bcebos.com/v1/paddledet/models/pipeline/PPLCNet_x1_0_person_attribute_945_infer.zip
53,022
0
24
24
11
211,057
12
PaddleDetection
7
deploy/pipeline/download.py
Python
4
{ "docstring": "Get weights path from WEIGHTS_HOME, if not exists,\n download it from url.\n ", "language": "en", "n_whitespaces": 18, "n_words": 12, "vocab_size": 11 }
https://github.com/PaddlePaddle/PaddleDetection.git
11
find_batch_size
def find_batch_size(tensors):
    if isinstance(tensors, (list, tuple)):
        for t in tensors:
            result = find_batch_size(t)
            if result is not None:
                return result
    elif isinstance(tensors, Mapping):
        for key, value in tensors.items():
            result = find_batch_size(value)
            if result is not None:
                return result
    elif isinstance(tensors, torch.Tensor):
        return tensors.shape[0] if len(tensors.shape) >= 1 else None
    elif isinstance(tensors, np.ndarray):
        return tensors.shape[0] if len(tensors.shape) >= 1 else None
18df440709f1b19d1c5617c0d987c5ff8fd0915d
13
trainer_pt_utils.py
192
Replace dict/BatchEncoding instance checks by Mapping (#17014) * Replace dict/BatchEncoding instance checks by Mapping * Typo
6,828
0
177
126
31
37,529
60
transformers
17
src/transformers/trainer_pt_utils.py
Python
15
{ "docstring": "\n Find the first dimension of a tensor in a nested list/tuple/dict of tensors.\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 11 }
https://github.com/huggingface/transformers.git
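A small sketch of the recursive lookup above on a nested batch. It assumes find_batch_size is importable from transformers.trainer_pt_utils, which matches the path in this record; the batch contents are made up.

import torch
from transformers.trainer_pt_utils import find_batch_size

batch = {
    "input_ids": torch.zeros(8, 128, dtype=torch.long),
    "labels": [torch.zeros(8), torch.ones(8)],
}
print(find_batch_size(batch))   # 8, taken from the first tensor encountered
print(find_batch_size([]))      # None when no tensor is found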
1
test_spam_checker_shadow_ban
def test_spam_checker_shadow_ban(self) -> None:
    user_id = self.get_success(self.handler.register_user(localpart="user"))

    # Get an access token.
    token = "testtok"
    self.get_success(
        self.store.add_access_token_to_user(
            user_id=user_id, token=token, device_id=None, valid_until_ms=None
        )
    )

    # Ensure the user was marked as shadow-banned.
    request = Mock(args={})
    request.args[b"access_token"] = [token.encode("ascii")]
    request.requestHeaders.getRawHeaders = mock_getRawHeaders()
    auth = Auth(self.hs)
    requester = self.get_success(auth.get_user_by_req(request))

    self.assertTrue(requester.shadow_banned)
652d1669c5a103b1c20478770c4aaf18849c09a3
12
test_register.py
197
Add missing type hints to tests.handlers. (#14680) And do not allow untyped defs in tests.handlers.
73,413
0
175
118
39
250,390
47
synapse
26
tests/handlers/test_register.py
Python
15
{ "docstring": "A spam checker can choose to shadow-ban a user, which allows registration to succeed.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
https://github.com/matrix-org/synapse.git
1
test_global_selection_header_updates_environment_with_browser_navigation_buttons
def test_global_selection_header_updates_environment_with_browser_navigation_buttons(self): with self.feature("organizations:global-views"): self.create_issues() self.issues_list.visit_issue_list(self.org.slug) self.issues_list.wait_until_loaded() assert "environment=" not in self.browser.current_url assert self.issue_details.global_selection.get_selected_environment() == "All Env" self.browser.click('[data-test-id="page-filter-environment-selector"]') self.browser.click('[data-test-id="environment-prod"]') self.issues_list.wait_until_loaded() assert "environment=prod" in self.browser.current_url assert self.issue_details.global_selection.get_selected_environment() == "prod" # clear environment prod self.browser.click('[data-test-id="page-filter-environment-selector"]') self.browser.click('[data-test-id="environment-prod"] [role="checkbox"]') self.browser.click('[data-test-id="page-filter-environment-selector"]') self.issues_list.wait_until_loaded() assert "environment=" not in self.browser.current_url assert self.issue_details.global_selection.get_selected_environment() == "All Env" self.browser.back() self.issues_list.wait_until_loaded() assert "environment=prod" in self.browser.current_url assert self.issue_details.global_selection.get_selected_environment() == "prod" self.browser.back() self.issues_list.wait_until_loaded() assert "environment=" not in self.browser.current_url assert self.issue_details.global_selection.get_selected_environment() == "All Env" self.browser.forward() self.issues_list.wait_until_loaded() assert "environment=prod" in self.browser.current_url assert self.issue_details.global_selection.get_selected_environment() == "prod" self.browser.forward() self.issues_list.wait_until_loaded() assert "environment=" not in self.browser.current_url assert self.issue_details.global_selection.get_selected_environment() == "All Env"
5c0c82919f14654cc18405384654fa792d12e31a
12
test_organization_global_selection_header.py
527
ref(page-filters): Remove mentions of global header in acceptance tests (#35075)
18,685
0
504
295
28
90,724
91
sentry
17
tests/acceptance/test_organization_global_selection_header.py
Python
50
{ "docstring": "\n Global Selection Header should:\n 1) load project from URL if it exists\n 2) clear the current environment if the user clicks clear\n 3) reload the environment from URL if it exists on browser navigation\n \n set up workflow:\n 1) environment=All environments\n 2) environment=prod\n 3) environment=All environments\n \n navigate back through history to the beginning\n 1) environment=All Env -> environment=prod\n 2) environment=prod -> environment=All Env\n \n navigate forward through history to the end\n 1) environment=All Env -> environment=prod\n 2) environment=prod -> environment=All Env\n ", "language": "en", "n_whitespaces": 261, "n_words": 79, "vocab_size": 40 }
https://github.com/getsentry/sentry.git
2
plot_colored_circles
def plot_colored_circles(ax, prng, nb_samples=15):
    for sty_dict, j in zip(plt.rcParams['axes.prop_cycle'], range(nb_samples)):
        ax.add_patch(plt.Circle(prng.normal(scale=3, size=2),
                                radius=1.0, color=sty_dict['color']))
    ax.grid(visible=True)

    # Add title for enabling grid
    plt.title('ax.grid(True)', family='monospace', fontsize='small')

    ax.set_xlim([-4, 8])
    ax.set_ylim([-5, 6])
    ax.set_aspect('equal', adjustable='box')  # to plot circles as circles
    return ax
fd5cf5c88123653b656a8f908aa611e5a20dc923
14
style_sheets_reference.py
198
[DOC]: adding a grid to the style sheet reference. (#24020) * add divider and annotation. Co-authored-by: Kinza Raza <[email protected]> * fix flake8 * change from annotate to title * Update examples/style_sheets/style_sheets_reference.py Co-authored-by: Jody Klymak <[email protected]> * Update examples/style_sheets/style_sheets_reference.py Co-authored-by: Jody Klymak <[email protected]> Co-authored-by: Kinza Raza <[email protected]> Co-authored-by: Jody Klymak <[email protected]>
23,704
0
104
123
35
109,681
38
matplotlib
26
examples/style_sheets/style_sheets_reference.py
Python
10
{ "docstring": "\n Plot circle patches.\n\n NB: draws a fixed amount of samples, rather than using the length of\n the color cycle, because different styles may have different numbers\n of colors.\n ", "language": "en", "n_whitespaces": 44, "n_words": 28, "vocab_size": 24 }
https://github.com/matplotlib/matplotlib.git
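A sketch of how the helper above is exercised in the style-sheet gallery; the style name is illustrative, the seed is arbitrary, and the function is assumed to be in scope.

import matplotlib.pyplot as plt
import numpy as np

prng = np.random.RandomState(96917002)   # fixed seed so every style shows the same layout
with plt.style.context("bmh"):           # any installed style sheet name works here
    fig, ax = plt.subplots(figsize=(3, 3))
    plot_colored_circles(ax, prng)
fig.savefig("style_demo.png")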
2
first_time_user
def first_time_user() -> bool:
    if USER_ENV_FILE.stat().st_size == 0:
        set_key("OPENBB_PREVIOUS_USE", "True", True)
        return True
    return False
19660ff3ee39458bd78e729b97fca4a4e552c160
10
keys_model.py
54
Updated check for new user (#3302) * Updated check for new user * Update docstring * Fixed import error Co-authored-by: James Maslek <[email protected]>
85,800
0
38
30
14
286,413
15
OpenBBTerminal
6
openbb_terminal/keys_model.py
Python
13
{ "docstring": "Whether a user is a first time user. A first time user is someone with an empty .env file.\n If this is true, it also adds an env variable to make sure this does not run again.\n\n Returns\n ----------\n bool\n Whether or not the user is a first time user\n ", "language": "en", "n_whitespaces": 72, "n_words": 50, "vocab_size": 34 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
test_cardinality_limiter
def test_cardinality_limiter(caplog, settings): settings.SENTRY_METRICS_INDEXER_DEBUG_LOG_SAMPLE_RATE = 1.0 outer_message = _construct_outer_message( [ (counter_payload, []), (distribution_payload, []), (set_payload, []), ] ) batch = IndexerBatch(UseCaseKey.PERFORMANCE, outer_message) keys_to_remove = list(batch.parsed_payloads_by_offset)[:2] # the messages come in a certain order, and Python dictionaries preserve # their insertion order. So we can hardcode offsets here. assert keys_to_remove == [ PartitionIdxOffset(partition_idx=0, offset=0), PartitionIdxOffset(partition_idx=0, offset=1), ] batch.filter_messages(keys_to_remove) assert batch.extract_strings() == { 1: { "environment", "errored", "production", # Note, we only extracted one MRI, of the one metric that we didn't # drop "s:sessions/error@none", "session.status", }, } snuba_payloads = batch.reconstruct_messages( { 1: { "environment": 1, "errored": 2, "production": 3, "s:sessions/error@none": 4, "session.status": 5, }, }, { 1: { "environment": Metadata(id=1, fetch_type=FetchType.CACHE_HIT), "errored": Metadata(id=2, fetch_type=FetchType.CACHE_HIT), "production": Metadata(id=3, fetch_type=FetchType.CACHE_HIT), "s:sessions/error@none": Metadata(id=4, fetch_type=FetchType.CACHE_HIT), "session.status": Metadata(id=5, fetch_type=FetchType.CACHE_HIT), } }, ) assert _deconstruct_messages(snuba_payloads) == [ ( { "mapping_meta": { "c": { "1": "environment", "2": "errored", "3": "production", "4": "s:sessions/error@none", "5": "session.status", }, }, "metric_id": 4, "org_id": 1, "project_id": 3, "retention_days": 90, "tags": {"1": 3, "5": 2}, "timestamp": ts, "type": "s", "use_case_id": "performance", "value": [3], }, [ ("mapping_sources", b"c"), ("metric_type", "s"), ], ) ]
c48fda09e252018a4d2b831bb84e1c68a739c085
16
test_batch.py
567
feat(metrics): Add cardinality limiter to indexer [sns-1651] (#38428) Reopen of https://github.com/getsentry/sentry/pull/38302 to avoid notification spam See #38257 and https://www.notion.so/sentry/Metrics-Dimensionality-Limiting-df010a6a6d4e467ca3c5c19230db862b#4966fb9c07fc4394b720ad161c99a096. This is just the glue code and configuration options for using the cardinality limiter in the indexer. The actual implementation is TBD. This is safe to merge because the stub implementation does not actually limit anything at all, so it should be fast enough to do synchronously for now ## rollout plan - [x] https://github.com/getsentry/sentry/pull/38446 - [x] set options to nothing in prod - [ ] merge + deploy this PR - [ ] check prod metrics: redis should not be used - [ ] https://github.com/getsentry/sentry/pull/38445 - [ ] check prod metrics: redis should still not be used - [ ] run qe tests? - [ ] get a redis cluster and configure it - [ ] run use_quota on a separate thread - [ ] set a quota - [ ] get rid of writes limiter? - [ ] stop indexing tag values Co-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com> Co-authored-by: Nikhar Saxena <[email protected]>
18,023
0
1,008
344
115
85,650
177
sentry
30
tests/sentry/sentry_metrics/test_batch.py
Python
73
{ "docstring": "\n Test functionality of the indexer batch related to cardinality-limiting. More concretely, assert that `IndexerBatch.filter_messages`:\n\n 1. removes the messages from the outgoing batch\n 2. prevents strings from filtered messages from being extracted & indexed\n 3. does not crash when strings from filtered messages are not passed into reconstruct_messages\n 4. still extracts strings that exist both in filtered and unfiltered messages (eg \"environment\")\n ", "language": "en", "n_whitespaces": 80, "n_words": 61, "vocab_size": 46 }
https://github.com/getsentry/sentry.git
1
linear
def linear(x):
    return x


@keras_export("keras.activations.serialize")
@tf.__internal__.dispatch.add_dispatch_support
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.activations.serialize") @tf.__internal__.dispatch.add_dispatch_support
7
activations.py
39
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,014
1
10
8
6
269,300
6
keras
7
keras/activations.py
Python
2
{ "docstring": "Linear activation function (pass-through).\n\n For example:\n\n >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)\n >>> b = tf.keras.activations.linear(a)\n >>> b.numpy()\n array([-3., -1., 0., 1., 3.], dtype=float32)\n\n Args:\n x: Input tensor.\n\n Returns:\n The input, unmodified.\n ", "language": "en", "n_whitespaces": 75, "n_words": 34, "vocab_size": 30 }
https://github.com/keras-team/keras.git
12
build_args
def build_args(self, category, command=None, generate=False): logger.debug("Build cli arguments: (category: %s, command: %s, generate: %s)", category, command, generate) command = self.command if not command else command script = f"{category}.py" pathexecscript = os.path.join(self.pathscript, script) args = [sys.executable] if generate else [sys.executable, "-u"] args.extend([pathexecscript, command]) cli_opts = get_config().cli_opts for cliopt in cli_opts.gen_cli_arguments(command): args.extend(cliopt) if command == "train" and not generate: self._get_training_session_info(cliopt) if not generate: args.append("-gui") # Indicate to Faceswap that we are running the GUI if generate: # Delimit args with spaces args = [f'"{arg}"' if " " in arg and not arg.startswith(("[", "(")) and not arg.endswith(("]", ")")) else arg for arg in args] logger.debug("Built cli arguments: (%s)", args) return args
dab823a3eb7a5257cb1e0818ee10ed234d3de97f
17
wrapper.py
309
Typing - lib.gui.display_command
21,327
0
328
183
70
101,950
108
faceswap
26
lib/gui/wrapper.py
Python
21
{ "docstring": " Build the faceswap command and arguments list.\n\n If training, pass the model folder and name to the training\n :class:`lib.gui.analysis.Session` for the GUI.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 18 }
https://github.com/deepfakes/faceswap.git
4
length
def length(self):
    if self._length_cache is None:
        if self.axis == 0:
            self._length_cache = sum(
                o.length() for o in self.list_of_partitions_to_combine
            )
        else:
            self._length_cache = self.list_of_partitions_to_combine[0].length()
    return self._length_cache

_width_cache = None
8d1004fdbdaa05700613c8e6287641a732acf606
15
virtual_partition.py
100
FIX-#3675: Expand virtual partitioning utility (#3886) Co-authored-by: mvashishtha <[email protected]> Co-authored-by: jeffreykennethli <[email protected]> Co-authored-by: Anatoly Myachev <[email protected]> Co-authored-by: Vasily Litvinov <[email protected]> Co-authored-by: Alexey Prutskov <[email protected]> Co-authored-by: Mahesh Vashishtha <[email protected]> Co-authored-by: Naren Krishna <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Dmitry Chigarev <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Doris Lee <[email protected]> Co-authored-by: Aditya Parameswaran <[email protected]> Co-authored-by: Rehan Sohail Durrani <[email protected]> Co-authored-by: Susmit Vengurlekar <[email protected]> Signed-off-by: Devin Petersohn <[email protected]>
35,286
0
138
58
22
153,185
28
modin
8
modin/core/execution/ray/implementations/pandas_on_ray/partitioning/virtual_partition.py
Python
9
{ "docstring": "\n Get the length of this partition.\n\n Returns\n -------\n int\n The length of the partition.\n ", "language": "en", "n_whitespaces": 61, "n_words": 14, "vocab_size": 10 }
https://github.com/modin-project/modin.git
1
test_pipenv_respects_package_index_restrictions
def test_pipenv_respects_package_index_restrictions(PipenvInstance): with PipenvInstance() as p: with open(p.pipfile_path, 'w') as f: contents = .strip().format(url=p.pypi, requirement='{version="*", index="local"}') f.write(contents) c = p.pipenv('lock') assert c.returncode == 0 assert 'requests' in p.lockfile['default'] assert 'idna' in p.lockfile['default'] assert 'certifi' in p.lockfile['default'] assert 'urllib3' in p.lockfile['default'] assert 'chardet' in p.lockfile['default'] # this is the newest version we have in our private pypi (but pypi.org has 2.27.1 at present) expected_result = {'hashes': ['sha256:63b52e3c866428a224f97cab011de738c36aec0185aa91cfacd418b5d58911d1', 'sha256:ec22d826a36ed72a7358ff3fe56cbd4ba69dd7a6718ffd450ff0e9df7a47ce6a'], 'index': 'local', 'version': '==2.19.1'} assert p.lockfile['default']['requests'] == expected_result
0788b0122e75ca5244ff4185336dc7958543f165
15
test_lock.py
263
Refactor test into a version that also check quiet mode, move to correct test file. Add test cases for pip freeze has expected output Add test for the package index restrictions.
2,991
0
239
140
54
19,475
75
pipenv
18
tests/integration/test_lock.py
Python
29
{ "docstring": "\n[[source]]\nurl = \"https://pypi.org/simple\"\nverify_ssl = true\nname = \"pypi\"\n\n[[source]]\nurl = \"{url}/simple\"\nverify_ssl = true\nname = \"local\"\n\n[packages]\nrequests = {requirement}\n ", "language": "en", "n_whitespaces": 30, "n_words": 24, "vocab_size": 13 }
https://github.com/pypa/pipenv.git
2
transpose
def transpose(self) -> Tuple[int, int]:
    if self.transpose_method is not None:
        # Safety: `transpose` takes an int rather than e.g. an IntEnum.
        # self.transpose_method is set above to be a value in
        # EXIF_TRANSPOSE_MAPPINGS, and that only contains correct values.
        with self.image:
            self.image = self.image.transpose(self.transpose_method)  # type: ignore[arg-type]
        self.width, self.height = self.image.size
        self.transpose_method = None
        # We don't need EXIF any more
        self.image.info["exif"] = None
    return self.image.size
5949ab86f8db0ef3dac2063e42210030f17786fb
13
thumbnailer.py
125
Fix potential thumbnail memory leaks. (#12932)
72,299
0
191
74
53
248,471
66
synapse
10
synapse/rest/media/v1/thumbnailer.py
Python
13
{ "docstring": "Transpose the image using its EXIF Orientation tag\n\n Returns:\n A tuple containing the new image size in pixels as (width, height).\n ", "language": "en", "n_whitespaces": 46, "n_words": 21, "vocab_size": 19 }
https://github.com/matrix-org/synapse.git
1
test_displayname_is_set_avatar_is_none
def test_displayname_is_set_avatar_is_none(self) -> None:
    channel = self.make_request(
        "POST",
        self.url,
        access_token=self.admin_user_tok,
        content={
            "user_id": self.other_user,
            "content": {"msgtype": "m.text", "body": "test msg"},
        },
    )
    self.assertEqual(200, channel.code, msg=channel.json_body)

    # user has one invite
    self._check_invite_and_join_status(self.other_user, 1, 0)
37f329c9adf6ed02df15661850f999edd9e5fd93
14
test_server_notice.py
129
Fix that sending server notices fail if avatar is `None` (#13566) Indroduced in #11846.
72,899
0
159
78
32
249,408
32
synapse
14
tests/rest/admin/test_server_notice.py
Python
17
{ "docstring": "\n Tests that sending a server notices is successfully,\n if a display_name is set, avatar_url is `None` and\n \"check avatar size and mime type\" is set.\n ", "language": "en", "n_whitespaces": 54, "n_words": 25, "vocab_size": 20 }
https://github.com/matrix-org/synapse.git
3
prepare_stocks_reservations_map
def prepare_stocks_reservations_map(self, variant_ids):
    stocks_reservations = defaultdict(int)
    if is_reservation_enabled(self.context.site.settings):  # type: ignore
        # Can't do second annotation on same queryset because it made
        # available_quantity annotated value incorrect thanks to how
        # Django's ORM builds SQLs with annotations
        reservations_qs = (
            Stock.objects.using(self.database_connection_name)
            .filter(product_variant_id__in=variant_ids)
            .annotate_reserved_quantity()
            .values_list("id", "reserved_quantity")
        )
        for stock_id, quantity_reserved in reservations_qs:
            stocks_reservations[stock_id] = quantity_reserved
    return stocks_reservations
fd5a3b24bcb941180bf2c27e6d0c34022d9e3ecd
18
dataloaders.py
124
Fix quantity available for stocks with cc warehouses (#10343) * Refactor AvailableQuantityByProductVariantIdCountryCodeAndChannelSlugLoader and fix quantity available calculation * Add more tests for variant quantity available
5,169
0
226
73
49
28,430
56
saleor
21
saleor/graphql/warehouse/dataloaders.py
Python
12
{ "docstring": "Prepare stock id to quantity reserved map for provided variant ids.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/saleor/saleor.git
3
get_current_student
def get_current_student():
    email = frappe.session.user
    if email in ("Administrator", "Guest"):
        return None
    try:
        student_id = frappe.get_all("Student", {"student_email_id": email}, ["name"])[0].name
        return frappe.get_doc("Student", student_id)
    except (IndexError, frappe.DoesNotExistError):
        return None
494bd9ef78313436f0424b918f200dab8fc7c20b
15
utils.py
117
style: format code with black
14,069
0
18
68
22
65,955
27
erpnext
11
erpnext/education/utils.py
Python
9
{ "docstring": "Returns current student from frappe.session.user\n\n\tReturns:\n\t object: Student Document\n\t", "language": "en", "n_whitespaces": 14, "n_words": 9, "vocab_size": 9 }
https://github.com/frappe/erpnext.git
6
copyDataFiles
def copyDataFiles():
    for included_datafile in getIncludedDataFiles():
        # TODO: directories should be resolved to files.
        if (
            not isinstance(included_datafile, (IncludedDataFile))
            or included_datafile.needsCopy()
        ):
            if shallMakeModule():
                options_logger.sysexit(
                )
            elif not isStandaloneMode():
                options_logger.sysexit(
                )

            _handleDataFile(
                included_datafile,
            )
c2e2b6fd58f535c3419f21a84c54ead13968ddd2
15
IncludedDataFiles.py
108
Plugins: Cleanups to how data files are handled
42,880
0
242
59
29
178,965
34
Nuitka
11
nuitka/freezer/IncludedDataFiles.py
Python
19
{ "docstring": "Copy the data files needed for standalone distribution.\n\n Notes:\n This is for data files only, not DLLs or even extension modules,\n those must be registered as entry points, and would not go through\n necessary handling if provided like this.\n \\\nError, data files for modules must be done via wheels, or commercial plugins '--embed-*' options.\\\nError, data files cannot be included in accelerated mode unless using commercial plugins '--embed-*' options.", "language": "en", "n_whitespaces": 94, "n_words": 70, "vocab_size": 53 }
https://github.com/Nuitka/Nuitka.git
1
test_commands_with_invalid_settings
def test_commands_with_invalid_settings(self):
    args = ["startproject"]
    out, err = self.run_django_admin(args, settings_file="bad_settings")
    self.assertNoOutput(out)
    self.assertOutput(err, "You must provide a project name", regex=True)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
tests.py
75
Refs #33476 -- Reformatted code with Black.
51,944
0
54
43
18
207,377
19
django
10
tests/admin_scripts/tests.py
Python
5
{ "docstring": "\n Commands that don't require settings succeed if the settings file\n doesn't exist.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 11 }
https://github.com/django/django.git
5
do_block
def do_block(parser, token):
    # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
    bits = token.contents.split()
    if len(bits) != 2:
        raise TemplateSyntaxError("'%s' tag takes only one argument" % bits[0])
    block_name = bits[1]
    # Keep track of the names of BlockNodes found in this template, so we can
    # check for duplication.
    try:
        if block_name in parser.__loaded_blocks:
            raise TemplateSyntaxError(
                "'%s' tag with name '%s' appears more than once" % (bits[0], block_name)
            )
        parser.__loaded_blocks.append(block_name)
    except AttributeError:
        # parser.__loaded_blocks isn't a list yet
        parser.__loaded_blocks = [block_name]
    nodelist = parser.parse(("endblock",))

    # This check is kept for backwards-compatibility. See #3100.
    endblock = parser.next_token()
    acceptable_endblocks = ("endblock", "endblock %s" % block_name)
    if endblock.contents not in acceptable_endblocks:
        parser.invalid_block_tag(endblock, "endblock", acceptable_endblocks)

    return BlockNode(block_name, nodelist)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
14
loader_tags.py
226
Refs #33476 -- Reformatted code with Black.
51,468
0
237
134
93
206,286
119
django
19
django/template/loader_tags.py
Python
19
{ "docstring": "\n Define a block that can be overridden by child templates.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
https://github.com/django/django.git
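A small sketch showing the {% block %} tag that the parser function above implements, driven through a standalone template Engine; settings.configure() is only there so the snippet runs outside a project.

from django.conf import settings
from django.template import Context, Engine

settings.configure()

engine = Engine()
template = engine.from_string(
    "{% block greeting %}hello{% endblock greeting %}"
)
print(template.render(Context()))   # -> "hello"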
1
test_wrapped_bleak_scanner
async def test_wrapped_bleak_scanner(hass, enable_bluetooth):
    scanner = HaBleakScannerWrapper()
    switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
    switchbot_adv = AdvertisementData(
        local_name="wohand", service_uuids=[], manufacturer_data={1: b"\x01"}
    )
    inject_advertisement(hass, switchbot_device, switchbot_adv)
    assert scanner.discovered_devices == [switchbot_device]
    assert await scanner.discover() == [switchbot_device]
1b144c0e4dd683e3b47668a89da5eb6da4ae5e08
12
test_models.py
118
Update to bleak 0.18.0 (#79008)
86,942
0
62
70
26
287,754
31
core
15
tests/components/bluetooth/test_models.py
Python
9
{ "docstring": "Test wrapped bleak scanner dispatches calls as expected.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
2
get_best_result
def get_best_result(self) -> Optional[Tuple[Union[int, str], Module, Dict[str, Dict[str, Tensor]], Optional[float], List[Dict]]]:
    if self._best_task_id is not None:
        compact_model = torch.load(Path(self._log_dir_root, 'best_result', 'model.pth'))
        compact_model_masks = torch.load(Path(self._log_dir_root, 'best_result', 'masks.pth'))
        with Path(self._log_dir_root, 'best_result', 'config_list.json').open('r') as f:
            config_list = json_tricks.load(f)
        return self._best_task_id, compact_model, compact_model_masks, self._best_score, config_list
    return None
cbac2c5c0f7606aca8ccf08fbd418ffe3adfe427
15
base.py
199
[Compression] fix typehints (#4800)
24,742
0
123
128
35
112,742
43
nni
24
nni/algorithms/compression/v2/pytorch/pruning/tools/base.py
Python
15
{ "docstring": "\n Returns\n -------\n Optional[Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]]\n If self._best_task_id is not None,\n return best task id, best compact model, masks on the compact model, score, config list used in this task.\n ", "language": "en", "n_whitespaces": 84, "n_words": 33, "vocab_size": 29 }
https://github.com/microsoft/nni.git
11
_parse_directive
def _parse_directive(self, directive): words = directive.split() if len(words) == 1 and words[0] not in ('include', 'exclude', 'global-include', 'global-exclude', 'recursive-include', 'recursive-exclude', 'graft', 'prune'): # no action given, let's use the default 'include' words.insert(0, 'include') action = words[0] patterns = thedir = dir_pattern = None if action in ('include', 'exclude', 'global-include', 'global-exclude'): if len(words) < 2: raise DistlibException( '%r expects <pattern1> <pattern2> ...' % action) patterns = [convert_path(word) for word in words[1:]] elif action in ('recursive-include', 'recursive-exclude'): if len(words) < 3: raise DistlibException( '%r expects <dir> <pattern1> <pattern2> ...' % action) thedir = convert_path(words[1]) patterns = [convert_path(word) for word in words[2:]] elif action in ('graft', 'prune'): if len(words) != 2: raise DistlibException( '%r expects a single <dir_pattern>' % action) dir_pattern = convert_path(words[1]) else: raise DistlibException('unknown action %r' % action) return action, patterns, thedir, dir_pattern
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
14
manifest.py
359
upd; format
12,847
0
670
211
72
62,040
132
transferlearning
14
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/manifest.py
Python
31
{ "docstring": "\n Validate a directive.\n :param directive: The directive to validate.\n :return: A tuple of action, patterns, thedir, dir_patterns\n ", "language": "en", "n_whitespaces": 46, "n_words": 17, "vocab_size": 17 }
https://github.com/jindongwang/transferlearning.git
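A sketch of the directive grammar this method validates, using the standalone distlib distribution (the record shows pip's vendored copy); paths and patterns are illustrative.

from distlib.manifest import Manifest

m = Manifest(".")
print(m._parse_directive("include README.rst *.txt"))
# ('include', ['README.rst', '*.txt'], None, None)
print(m._parse_directive("recursive-include docs *.rst"))
# ('recursive-include', ['*.rst'], 'docs', None)
print(m._parse_directive("graft examples"))
# ('graft', None, None, 'examples')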
3
parse
def parse(self, fname, num_splits, start, end, header, **kwargs):
    import pyarrow as pa
    import pyarrow.csv as csv

    with open(fname, "rb") as bio:
        # The header line for the CSV file
        first_line = bio.readline()
        bio.seek(start)
        to_read = header + first_line + bio.read(end - start)
    table = csv.read_csv(
        BytesIO(to_read), parse_options=csv.ParseOptions(header_rows=1)
    )
    chunksize = compute_chunksize(table.num_columns, num_splits)
    chunks = [
        pa.Table.from_arrays(table.columns[chunksize * i : chunksize * (i + 1)])
        for i in range(num_splits)
    ]
    return chunks + [
        table.num_rows,
        pandas.Series(
            [t.to_pandas_dtype() for t in table.schema.types],
            index=table.schema.names,
        ),
    ]
14015de80f2387703ca7be52ff917c99ef05fc68
14
parsers.py
264
REFACTOR-#5359: Fix code scanning alert - File is not always closed (#5362) Signed-off-by: Myachev <[email protected]>
36,340
0
300
171
62
155,337
83
modin
42
modin/experimental/core/storage_formats/pyarrow/parsers.py
Python
22
{ "docstring": "\n Parse CSV file into PyArrow tables.\n\n Parameters\n ----------\n fname : str\n Name of the CSV file to parse.\n num_splits : int\n Number of partitions to split the resulted PyArrow table into.\n start : int\n Position in the specified file to start parsing from.\n end : int\n Position in the specified file to end parsing at.\n header : str\n Header line that will be interpret as the first line of the parsed CSV file.\n **kwargs : kwargs\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n list\n List with split parse results and it's metadata:\n\n - First `num_split` elements are PyArrow tables, representing the corresponding chunk.\n - Next element is the number of rows in the parsed table.\n - Last element is the pandas Series, containing the data-types for each column of the parsed table.\n ", "language": "en", "n_whitespaces": 338, "n_words": 136, "vocab_size": 85 }
https://github.com/modin-project/modin.git
15
test_request_streaming
def test_request_streaming(tctx, why, transfer_encoding, response):
    server = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular))

    if why.startswith("body_size"):
        tctx.options.stream_large_bodies = why.replace("body_size=", "")
56eea20f6389b751d38079fb09b29237a0d2b262
11
test_http.py
102
tutils: add BytesMatching placeholder
73,598
0
43
683
18
251,118
21
mitmproxy
20
test/mitmproxy/proxy/layers/http/test_http.py
Python
104
{ "docstring": "\n Test HTTP request streaming\n\n This is a bit more contrived as we may receive server data while we are still sending the request.\n ", "language": "en", "n_whitespaces": 33, "n_words": 23, "vocab_size": 22 }
https://github.com/mitmproxy/mitmproxy.git
4
get_data
def get_data(filters): accounts = frappe.db.sql( , filters.company, as_dict=True, ) company_currency = filters.presentation_currency or erpnext.get_company_currency(filters.company) if not accounts: return None accounts, accounts_by_name, parent_children_map = filter_accounts(accounts) min_lft, max_rgt = frappe.db.sql( , (filters.company,), )[0] gl_entries_by_account = {} opening_balances = get_opening_balances(filters) # add filter inside list so that the query in financial_statements.py doesn't break if filters.project: filters.project = [filters.project] set_gl_entries_by_account( filters.company, filters.from_date, filters.to_date, min_lft, max_rgt, filters, gl_entries_by_account, ignore_closing_entries=not flt(filters.with_period_closing_entry), ) total_row = calculate_values( accounts, gl_entries_by_account, opening_balances, filters, company_currency ) accumulate_values_into_parents(accounts, accounts_by_name) data = prepare_data(accounts, filters, total_row, parent_children_map, company_currency) data = filter_out_zero_value_rows( data, parent_children_map, show_zero_values=filters.get("show_zero_values") ) return data
494bd9ef78313436f0424b918f200dab8fc7c20b
12
trial_balance.py
279
style: format code with black
13,872
0
55
187
67
65,377
93
erpnext
35
erpnext/accounts/report/trial_balance/trial_balance.py
Python
40
{ "docstring": "select name, account_number, parent_account, account_name, root_type, report_type, lft, rgt\n\n\t\tfrom `tabAccount` where company=%s order by lftselect min(lft), max(rgt) from `tabAccount`\n\t\twhere company=%s", "language": "en", "n_whitespaces": 19, "n_words": 22, "vocab_size": 18 }
https://github.com/frappe/erpnext.git
3
queryset_chunks
def queryset_chunks(self, qs, chunk_size=DEFAULT_CHUNK_SIZE):
    i = 0
    while True:
        items = list(qs[i * chunk_size :][:chunk_size])
        if not items:
            break
        yield items
        i += 1
d10f15e55806c6944827d801cd9c2d53f5da4186
14
update_index.py
73
Reformat with black
16,427
0
104
44
21
75,606
24
wagtail
8
wagtail/search/management/commands/update_index.py
Python
8
{ "docstring": "\n Yield a queryset in chunks of at most ``chunk_size``. The chunk yielded\n will be a list, not a queryset. Iterating over the chunks is done in a\n transaction so that the order and count of items in the queryset\n remains stable.\n ", "language": "en", "n_whitespaces": 77, "n_words": 41, "vocab_size": 31 }
https://github.com/wagtail/wagtail.git
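A standalone analogue of the chunking loop above using a plain list, so it runs without a Django queryset; the helper name and chunk size are illustrative.

def iter_chunks(seq, chunk_size=3):
    # Same slicing pattern as queryset_chunks, applied to any sliceable sequence.
    i = 0
    while True:
        items = list(seq[i * chunk_size:][:chunk_size])
        if not items:
            break
        yield items
        i += 1

print(list(iter_chunks(list(range(7)))))   # [[0, 1, 2], [3, 4, 5], [6]]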
3
prepare_sql_script
def prepare_sql_script(self, sql):
    return [
        sqlparse.format(statement, strip_comments=True)
        for statement in sqlparse.split(sql)
        if statement
    ]
9c19aff7c7561e3a82978a272ecdaad40dda5c00
9
operations.py
50
Refs #33476 -- Reformatted code with Black.
50,934
0
68
32
13
204,857
14
django
8
django/db/backends/base/operations.py
Python
6
{ "docstring": "\n Take an SQL script that may contain multiple lines and return a list\n of statements to feed to successive cursor.execute() calls.\n\n Since few databases are able to process raw SQL scripts in a single\n cursor.execute() call and PEP 249 doesn't talk about this use case,\n the default implementation is conservative.\n ", "language": "en", "n_whitespaces": 93, "n_words": 50, "vocab_size": 44 }
https://github.com/django/django.git
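A quick sketch of the sqlparse calls used above on a small script; the SQL itself is made up.

import sqlparse

script = """
-- bootstrap
CREATE TABLE t (id integer);  -- inline comment
INSERT INTO t VALUES (1);
"""

statements = [
    sqlparse.format(statement, strip_comments=True)
    for statement in sqlparse.split(script)
    if statement
]
print(statements)   # two cleaned statements, comments stripped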
6
angle_mod
def angle_mod(x, zero_2_2pi=False, degree=False):
    if isinstance(x, float):
        is_float = True
    else:
        is_float = False

    x = np.asarray(x).flatten()
    if degree:
        x = np.deg2rad(x)

    if zero_2_2pi:
        mod_angle = x % (2 * np.pi)
    else:
        mod_angle = (x + np.pi) % (2 * np.pi) - np.pi

    if degree:
        mod_angle = np.rad2deg(mod_angle)

    if is_float:
        return mod_angle.item()
    else:
        return mod_angle
32b545fe7c35b57f280cd9d570f62839886f2e4b
14
angle.py
185
Enhance dubins path docs (#664) * Engance dubins path docs * Update dubins_path.rst * fix doc artifact link in CI * wip * wip * wip * Update dubins_path.rst * wip * wip * wip * wip * wip
2,936
0
141
114
30
19,322
55
PythonRobotics
15
utils/angle.py
Python
18
{ "docstring": "\n Angle modulo operation\n Default angle modulo range is [-pi, pi)\n\n Parameters\n ----------\n x : float or array_like\n A angle or an array of angles. This array is flattened for\n the calculation. When an angle is provided, a float angle is returned.\n zero_2_2pi : bool, optional\n Change angle modulo range to [0, 2pi)\n Default is False.\n degree : bool, optional\n If True, then the given angles are assumed to be in degrees.\n Default is False.\n\n Returns\n -------\n ret : float or ndarray\n an angle or an array of modulated angle.\n\n Examples\n --------\n >>> angle_mod(-4.0)\n 2.28318531\n\n >>> angle_mod([-4.0])\n np.array(2.28318531)\n\n >>> angle_mod([-150.0, 190.0, 350], degree=True)\n array([-150., -170., -10.])\n\n >>> angle_mod(-60.0, zero_2_2pi=True, degree=True)\n array([300.])\n\n ", "language": "en", "n_whitespaces": 224, "n_words": 110, "vocab_size": 72 }
https://github.com/AtsushiSakai/PythonRobotics.git
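`angle_mod` normalizes angles with plain modulo arithmetic. A stripped-down NumPy sketch of the same wrapping, independent of the PythonRobotics helpers:

```python
import numpy as np

# Map angles into [-pi, pi) by default, or [0, 2*pi) on request.
def wrap_angle(theta, zero_2_2pi=False):
    theta = np.asarray(theta, dtype=float)
    if zero_2_2pi:
        return theta % (2 * np.pi)
    return (theta + np.pi) % (2 * np.pi) - np.pi

print(wrap_angle(-4.0))        # ~2.283, i.e. -4 + 2*pi
print(wrap_angle(7.0, True))   # ~0.717, i.e. 7 - 2*pi
```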
3
__ror__
def __ror__(self, other):
    if isinstance(other, str_type):
        other = self._literalStringClass(other)
    if not isinstance(other, ParserElement):
        raise TypeError(
            "Cannot combine element of type {} with ParserElement".format(
                type(other).__name__
            )
        )
    return other | self
f3166e673fe8d40277b804d35d77dcdb760fc3b3
14
core.py
86
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,433
0
140
52
26
20,578
30
pipenv
11
pipenv/patched/notpip/_vendor/pyparsing/core.py
Python
10
{ "docstring": "\n Implementation of ``|`` operator when left operand is not a :class:`ParserElement`\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
https://github.com/pypa/pipenv.git
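`__ror__` is the reflected form of `|`, which is what lets a bare string appear on the left-hand side of a pyparsing expression. An illustrative toy class (not the pyparsing implementation) showing the coercion pattern:

```python
# Toy sketch of the reflected-operator pattern: when Python sees
# "literal" | element, str has no __or__ for Element, so Element.__ror__
# runs, coerces the string, and delegates to the normal __or__.
class Element:
    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return f"Element({self.name!r})"

    def __or__(self, other):
        if isinstance(other, str):
            other = Element(other)
        return ("MatchFirst", self, other)

    def __ror__(self, other):
        if isinstance(other, str):
            other = Element(other)
        return other | self

expr = "keyword" | Element("identifier")   # invokes Element.__ror__
print(expr)   # ('MatchFirst', Element('keyword'), Element('identifier'))
```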
2
get_tables
def get_tables(self) -> Response:
    response = None
    log.info("%s: calling 'get_tables'", self.__class__.__name__)
    try:
        r = self._do("/get_tables")
        r = self._convert_response(r.json())
        response = Response(data_frame=r.get("data_frame", None),
                            resp_type=r.get("resp_type"),
                            error_code=r.get("error_code", 0),
                            error_message=r.get("error_message", None),
                            query=r.get("query"))
        log.info("%s: db service has replied. error_code - %s", self.__class__.__name__, response.error_code)
    except Exception as e:
        response = Response(error_message=str(e),
                            error_code=1,
                            resp_type=RESPONSE_TYPE.ERROR)
        log.error("call to db service has finished with an error: %s", traceback.format_exc())
    return response
e1d031d0375f0a3575fa2c4607fc0f5d5bc8044d
14
db_client.py
258
Move DB handlers in a separate service (#3063) * Move DB handlers in a separate service * add docker-compose initial
25,846
0
354
156
46
116,838
60
mindsdb
26
mindsdb/integrations/handlers_client/db_client.py
Python
24
{ "docstring": "List all tabels in the database without the system data.\n\n Returns:\n A list of all records in the database in case of success\n An error message and error code if case of fail\n ", "language": "en", "n_whitespaces": 69, "n_words": 33, "vocab_size": 23 }
https://github.com/mindsdb/mindsdb.git
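`get_tables` wraps a remote call and converts any failure into an error response instead of raising. A generic sketch of that pattern using `requests`; the endpoint path and the `SimpleResponse` class are hypothetical stand-ins for the MindsDB types:

```python
import requests

class SimpleResponse:
    def __init__(self, data=None, error_code=0, error_message=None):
        self.data = data
        self.error_code = error_code
        self.error_message = error_message

def get_tables(base_url):
    # Call the service, translate the JSON body into a response object,
    # and degrade gracefully to an error response on any exception.
    try:
        r = requests.get(f"{base_url}/get_tables", timeout=10)
        r.raise_for_status()
        body = r.json()
        return SimpleResponse(data=body.get("data_frame"),
                             error_code=body.get("error_code", 0),
                             error_message=body.get("error_message"))
    except Exception as exc:
        return SimpleResponse(error_code=1, error_message=str(exc))
```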
1
close_tilt_position
def close_tilt_position(self) -> PowerviewShadeMove:
    return PowerviewShadeMove(self._shade.close_position_tilt, {})
3ab294e8efc00c9f3cda2993318bb582ba675f8c
9
cover.py
34
Powerview refactor prep for all shade types (#79862)
87,917
0
21
20
7
288,765
7
core
5
homeassistant/components/hunterdouglas_powerview/cover.py
Python
3
{ "docstring": "Return the close tilt position and required additional positions.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
7
append
def append(self, key, _item):
    if not isinstance(_item, Item):
        _item = item(_item)

    self._value.append(key, _item)

    if isinstance(key, Key):
        key = next(iter(key)).key
        _item = self._value[key]

    if key is not None:
        dict.__setitem__(self, key, _item)

    m = re.match(r"(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
    if not m:
        return self

    indent = m.group(1)
    if not isinstance(_item, Whitespace):
        m = re.match("(?s)^([^ ]*)(.*)$", _item.trivia.indent)
        if not m:
            _item.trivia.indent = indent
        else:
            _item.trivia.indent = m.group(1) + indent + m.group(2)

    return self
8faa74cdc9da20cfdcc69f5ec29b91112c95b4c9
15
items.py
262
Update tomlkit==0.9.2 Used: python -m invoke vendoring.update --package=tomlkit
4,037
0
257
166
39
21,764
69
pipenv
21
pipenv/vendor/tomlkit/items.py
Python
20
{ "docstring": "\n Appends a (key, item) to the table.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
https://github.com/pypa/pipenv.git
1
get_preference
def get_preference(self, *args, **kwargs):  # type: (t.Any, t.Any) -> t.Union[float, int]
    raise NotImplementedError
143e7fb45e7b916fa973613000e97ee889f5666c
6
providers.py
25
ansible-galaxy - support resolvelib versions >= 0.5.3, < 0.9.0 (#77649) * ansible-galaxy - support resolvelib versions >= 0.5.3, <= 0.8.1 Test incompatibilities are removed for resolvelib >= 0.6.0 Test against the latest 0.8.x version and fix requirements * Fix tests - use a venv for testing the range of resolvelib versions * Update temporary hardcoded fallback for ansible-test * Update hardcoded upperbound for sanity tests * Make error check more flexible
78,911
0
34
14
13
267,451
13
ansible
5
lib/ansible/galaxy/dependency_resolution/providers.py
Python
2
{ "docstring": "Return sort key function return value for given requirement.\n\n This result should be based on preference that is defined as\n \"I think this requirement should be resolved first\".\n The lower the return value is, the more preferred this\n group of arguments is.\n\n resolvelib >=0.5.3, <0.7.0\n\n :param resolution: Currently pinned candidate, or ``None``.\n\n :param candidates: A list of possible candidates.\n\n :param information: A list of requirement information.\n\n Each ``information`` instance is a named tuple with two entries:\n\n * ``requirement`` specifies a requirement contributing to\n the current candidate list\n\n * ``parent`` specifies the candidate that provides\n (dependend on) the requirement, or `None`\n to indicate a root requirement.\n\n resolvelib >=0.7.0, < 0.8.0\n\n :param identifier: The value returned by ``identify()``.\n\n :param resolutions: Mapping of identifier, candidate pairs.\n\n :param candidates: Possible candidates for the identifer.\n Mapping of identifier, list of candidate pairs.\n\n :param information: Requirement information of each package.\n Mapping of identifier, list of named tuple pairs.\n The named tuples have the entries ``requirement`` and ``parent``.\n\n resolvelib >=0.8.0, <= 0.8.1\n\n :param identifier: The value returned by ``identify()``.\n\n :param resolutions: Mapping of identifier, candidate pairs.\n\n :param candidates: Possible candidates for the identifer.\n Mapping of identifier, list of candidate pairs.\n\n :param information: Requirement information of each package.\n Mapping of identifier, list of named tuple pairs.\n The named tuples have the entries ``requirement`` and ``parent``.\n\n :param backtrack_causes: Sequence of requirement information that were\n the requirements that caused the resolver to most recently backtrack.\n\n The preference could depend on a various of issues, including\n (not necessarily in this order):\n\n * Is this package pinned in the current resolution result?\n\n * How relaxed is the requirement? Stricter ones should\n probably be worked on first? (I don't know, actually.)\n\n * How many possibilities are there to satisfy this\n requirement? Those with few left should likely be worked on\n first, I guess?\n\n * Are there any known conflicts for this requirement?\n We should probably work on those with the most\n known conflicts.\n\n A sortable value should be returned (this will be used as the\n `key` parameter of the built-in sorting function). The smaller\n the value is, the more preferred this requirement is (i.e. the\n sorting function is called with ``reverse=False``).\n ", "language": "en", "n_whitespaces": 766, "n_words": 358, "vocab_size": 170 }
https://github.com/ansible/ansible.git
30
plot_wireframe
def plot_wireframe(self, X, Y, Z, **kwargs):
    had_data = self.has_data()
    if Z.ndim != 2:
        raise ValueError("Argument Z must be 2-dimensional.")
    # FIXME: Support masked arrays
    X, Y, Z = np.broadcast_arrays(X, Y, Z)
    rows, cols = Z.shape

    has_stride = 'rstride' in kwargs or 'cstride' in kwargs
    has_count = 'rcount' in kwargs or 'ccount' in kwargs

    if has_stride and has_count:
        raise ValueError("Cannot specify both stride and count arguments")

    rstride = kwargs.pop('rstride', 1)
    cstride = kwargs.pop('cstride', 1)
    rcount = kwargs.pop('rcount', 50)
    ccount = kwargs.pop('ccount', 50)

    if rcParams['_internal.classic_mode']:
        # Strides have priority over counts in classic mode.
        # So, only compute strides from counts
        # if counts were explicitly given
        if has_count:
            rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0
            cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0
    else:
        # If the strides are provided then it has priority.
        # Otherwise, compute the strides from the counts.
        if not has_stride:
            rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0
            cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0

    # We want two sets of lines, one running along the "rows" of
    # Z and another set of lines running along the "columns" of Z.
    # This transpose will make it easy to obtain the columns.
    tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z)

    if rstride:
        rii = list(range(0, rows, rstride))
        # Add the last index only if needed
        if rows > 0 and rii[-1] != (rows - 1):
            rii += [rows-1]
    else:
        rii = []
    if cstride:
        cii = list(range(0, cols, cstride))
        # Add the last index only if needed
        if cols > 0 and cii[-1] != (cols - 1):
            cii += [cols-1]
    else:
        cii = []

    if rstride == 0 and cstride == 0:
        raise ValueError("Either rstride or cstride must be non zero")

    # If the inputs were empty, then just
    # reset everything.
    if Z.size == 0:
        rii = []
        cii = []

    xlines = [X[i] for i in rii]
    ylines = [Y[i] for i in rii]
    zlines = [Z[i] for i in rii]

    txlines = [tX[i] for i in cii]
    tylines = [tY[i] for i in cii]
    tzlines = [tZ[i] for i in cii]

    lines = ([list(zip(xl, yl, zl))
              for xl, yl, zl in zip(xlines, ylines, zlines)]
             + [list(zip(xl, yl, zl))
                for xl, yl, zl in zip(txlines, tylines, tzlines)])

    linec = art3d.Line3DCollection(lines, **kwargs)
    self.add_collection(linec)
    self.auto_scale_xyz(X, Y, Z, had_data)

    return linec
6ef6b37fc2113c041f7d2643d70b553ec335d597
19
axes3d.py
846
Remove *args deprecations
22,518
0
1,017
539
193
106,941
393
matplotlib
52
lib/mpl_toolkits/mplot3d/axes3d.py
Python
54
{ "docstring": "\n Plot a 3D wireframe.\n\n .. note::\n\n The *rcount* and *ccount* kwargs, which both default to 50,\n determine the maximum number of samples used in each direction. If\n the input data is larger, it will be downsampled (by slicing) to\n these numbers of points.\n\n Parameters\n ----------\n X, Y, Z : 2D arrays\n Data values.\n\n rcount, ccount : int\n Maximum number of samples used in each direction. If the input\n data is larger, it will be downsampled (by slicing) to these\n numbers of points. Setting a count to zero causes the data to be\n not sampled in the corresponding direction, producing a 3D line\n plot rather than a wireframe plot. Defaults to 50.\n\n rstride, cstride : int\n Downsampling stride in each direction. These arguments are\n mutually exclusive with *rcount* and *ccount*. If only one of\n *rstride* or *cstride* is set, the other defaults to 1. Setting a\n stride to zero causes the data to be not sampled in the\n corresponding direction, producing a 3D line plot rather than a\n wireframe plot.\n\n 'classic' mode uses a default of ``rstride = cstride = 1`` instead\n of the new default of ``rcount = ccount = 50``.\n\n **kwargs\n Other arguments are forwarded to `.Line3DCollection`.\n ", "language": "en", "n_whitespaces": 474, "n_words": 198, "vocab_size": 105 }
https://github.com/matplotlib/matplotlib.git
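The `plot_wireframe` record documents the public Axes3D API. A typical call, assuming a standard Matplotlib install, exercising the `rcount`/`ccount` downsampling arguments described in the docstring:

```python
import numpy as np
import matplotlib.pyplot as plt

# Build a smooth surface and draw it as a wireframe, letting rcount/ccount
# downsample the 120x120 grid to at most 40 samples per direction.
x = np.linspace(-3, 3, 120)
X, Y = np.meshgrid(x, x)
Z = np.sin(np.hypot(X, Y))

fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.plot_wireframe(X, Y, Z, rcount=40, ccount=40, color="tab:blue")
plt.show()
```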
1
test_source_decode_2
def test_source_decode_2():
    import_str, op_str, op_obj = source_decode("sklearn.linear_model.LogisticReg")
    from sklearn.linear_model import LogisticRegression
    assert import_str == "sklearn.linear_model"
    assert op_str == "LogisticReg"
    assert op_obj is None
388616b6247ca4ea8de4e2f340d6206aee523541
9
tpot_tests.py
60
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,489
0
41
33
19
181,702
23
tpot
8
tests/tpot_tests.py
Python
6
{ "docstring": "Assert that the source_decode return None when sourcecode is not available.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/EpistasisLab/tpot.git
1
save
def save(self, loc, **kwargs) -> Plot:
    # TODO expose important keyword arguments in our signature?
    with theme_context(self._theme_with_defaults()):
        self._plot().save(loc, **kwargs)
    return self
762db897b52d16ab2f164d5103df4cc26c1d0503
11
plot.py
66
Add rudimentary themeing support (#2929) * WIP Plot.theme * Add default values for theme to match set_theme() * Depend on matplotib style defaults and update rcParams more selectively * Fix lines test * Improve test coverage
7,483
0
60
38
20
42,087
21
seaborn
8
seaborn/_core/plot.py
Python
16
{ "docstring": "\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n ", "language": "en", "n_whitespaces": 119, "n_words": 43, "vocab_size": 32 }
https://github.com/mwaskom/seaborn.git
1
test_update_single_object_with_values
def test_update_single_object_with_values(self):
    site2 = Site.objects.get(name='Site 2')
    original_cfvs = {**site2.custom_field_data}
    data = {
        'custom_fields': {
            'text_field': 'ABCD',
            'number_field': 1234,
        },
    }
    url = reverse('dcim-api:site-detail', kwargs={'pk': site2.pk})
    self.add_permissions('dcim.change_site')

    response = self.client.patch(url, data, format='json', **self.header)
    self.assertHttpStatus(response, status.HTTP_200_OK)

    # Validate response data
    response_cf = response.data['custom_fields']
    self.assertEqual(response_cf['text_field'], data['custom_fields']['text_field'])
    self.assertEqual(response_cf['number_field'], data['custom_fields']['number_field'])
    self.assertEqual(response_cf['longtext_field'], original_cfvs['longtext_field'])
    self.assertEqual(response_cf['boolean_field'], original_cfvs['boolean_field'])
    self.assertEqual(response_cf['date_field'], original_cfvs['date_field'])
    self.assertEqual(response_cf['url_field'], original_cfvs['url_field'])
    self.assertEqual(response_cf['json_field'], original_cfvs['json_field'])
    self.assertEqual(response_cf['choice_field'], original_cfvs['choice_field'])

    # Validate database data
    site2.refresh_from_db()
    self.assertEqual(site2.custom_field_data['text_field'], data['custom_fields']['text_field'])
    self.assertEqual(site2.custom_field_data['number_field'], data['custom_fields']['number_field'])
    self.assertEqual(site2.custom_field_data['longtext_field'], original_cfvs['longtext_field'])
    self.assertEqual(site2.custom_field_data['boolean_field'], original_cfvs['boolean_field'])
    self.assertEqual(site2.custom_field_data['date_field'], original_cfvs['date_field'])
    self.assertEqual(site2.custom_field_data['url_field'], original_cfvs['url_field'])
    self.assertEqual(site2.custom_field_data['json_field'], original_cfvs['json_field'])
    self.assertEqual(site2.custom_field_data['choice_field'], original_cfvs['choice_field'])
fa1e28e860c4bdb3e585a968bd248a2ac666e1f6
12
test_customfields.py
623
Initial work on #7006
77,617
0
333
361
59
264,131
78
netbox
26
netbox/extras/tests/test_customfields.py
Python
31
{ "docstring": "\n Update an object with existing custom field values. Ensure that only the updated custom field values are\n modified.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 16 }
https://github.com/netbox-community/netbox.git
2
__virtual__
def __virtual__():
    if salt.utils.napalm.virtual(__opts__, __virtualname__, __file__):
        return __virtualname__
    else:
        err_msg = "NAPALM is not installed."
        log.error("Unable to load %s beacon: %s", __virtualname__, err_msg)
        return False, err_msg
4e3632254fb73210ce3e1954ec507473433018b8
11
napalm_beacon.py
71
Align enhanced logging accross beacons
53,868
0
63
42
23
215,170
26
salt
11
salt/beacons/napalm_beacon.py
Python
7
{ "docstring": "\n This beacon can only work when running under a regular or a proxy minion, managed through napalm.\n ", "language": "en", "n_whitespaces": 24, "n_words": 17, "vocab_size": 16 }
https://github.com/saltstack/salt.git
5
get_preprocess_function
def get_preprocess_function(self, field, value, export_format):
    # Try to find a field specific function and return it
    format_dict = self.custom_field_preprocess.get(field, {})
    if export_format in format_dict:
        return format_dict[export_format]

    # Otherwise check for a value class specific function
    for value_classes, format_dict in self.custom_value_preprocess.items():
        if isinstance(value, value_classes) and export_format in format_dict:
            return format_dict[export_format]

    # Finally resort to force_str to prevent encoding errors
    return force_str
d10f15e55806c6944827d801cd9c2d53f5da4186
10
mixins.py
105
Reformat with black
15,891
0
153
67
40
72,424
60
wagtail
13
wagtail/admin/views/mixins.py
Python
8
{ "docstring": "Returns the preprocessing function for a given field name, field value, and export format", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
https://github.com/wagtail/wagtail.git
3
remove_experiment_folder
def remove_experiment_folder(experiment_path):
    fs = fsspec.get_mapper(experiment_path).fs
    checkpoint_files = fs.glob(experiment_path + "/*.pth")
    if not checkpoint_files:
        if fs.exists(experiment_path):
            fs.rm(experiment_path, recursive=True)
            print(" ! Run is removed from {}".format(experiment_path))
    else:
        print(" ! Run is kept in {}".format(experiment_path))
72d85e53c98b908345bbff70f7cfba2174e883ce
14
generic_utils.py
120
Update model file extension (#1422) * Update model file ext to ```.pth``` * Update docs * Rename more * Find model files
77,203
0
83
68
25
262,392
32
TTS
12
TTS/utils/generic_utils.py
Python
9
{ "docstring": "Check folder if there is a checkpoint, otherwise remove the folder", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
https://github.com/coqui-ai/TTS.git
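`remove_experiment_folder` works through `fsspec`, so the same logic applies to local paths and remote object stores. A small sketch against a local path; the directory name is made up for illustration:

```python
import fsspec

experiment_path = "/tmp/run-2024-01-01"   # hypothetical experiment directory
fs = fsspec.get_mapper(experiment_path).fs

# No *.pth checkpoints -> the run produced nothing worth keeping; drop it.
if not fs.glob(experiment_path + "/*.pth"):
    if fs.exists(experiment_path):
        fs.rm(experiment_path, recursive=True)
else:
    print(f"keeping {experiment_path}: checkpoints found")
```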
6
full_info
def full_info(self):
    retval = "\n============ System Information ============\n"
    sys_info = {"os_platform": self._system["platform"],
                "os_machine": self._system["machine"],
                "os_release": self._system["release"],
                "py_conda_version": self._conda_version,
                "py_implementation": self._python["implementation"],
                "py_version": self._python["version"],
                "py_command": self._fs_command,
                "py_virtual_env": self._is_virtual_env,
                "sys_cores": self._system["cpu_count"],
                "sys_processor": self._system["processor"],
                "sys_ram": self._format_ram(),
                "encoding": self._encoding,
                "git_branch": self._git_branch,
                "git_commits": self._git_commits,
                "gpu_cuda": self._cuda_version,
                "gpu_cudnn": self._cudnn_version,
                "gpu_driver": self._gpu["driver"],
                "gpu_devices": ", ".join([f"GPU_{idx}: {device}"
                                          for idx, device in enumerate(self._gpu["devices"])]),
                "gpu_vram": ", ".join([f"GPU_{idx}: {int(vram)}MB"
                                       for idx, vram in enumerate(self._gpu["vram"])]),
                "gpu_devices_active": ", ".join([f"GPU_{idx}"
                                                 for idx in self._gpu["devices_active"]])}
    for key in sorted(sys_info.keys()):
        retval += (f"{key + ':':<20} {sys_info[key]}\n")
    retval += "\n=============== Pip Packages ===============\n"
    retval += self._installed_pip
    if self._is_conda:
        retval += "\n\n============== Conda Packages ==============\n"
        retval += self._installed_conda
    retval += self._state_file
    retval += "\n\n================= Configs =================="
    retval += self._configs
    return retval
48c886b3dce3d3117ad16edaf35c8abd28dc51f5
16
sysinfo.py
519
Allow decoding errors
21,438
0
722
267
84
102,073
112
faceswap
30
lib/sysinfo.py
Python
36
{ "docstring": " Obtain extensive system information stats, formatted into a human readable format.\n\n Returns\n -------\n str\n The system information for the currently running system, formatted for output to\n console or a log file.\n ", "language": "en", "n_whitespaces": 82, "n_words": 31, "vocab_size": 26 }
https://github.com/deepfakes/faceswap.git
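`full_info` aligns its report with an f-string width specifier (`f"{key + ':':<20} ..."`). A stripped-down illustration of just that formatting trick on made-up values:

```python
# Left-pad each "key:" label to a fixed width so the values line up.
sys_info = {"os_platform": "Linux", "py_version": "3.10.4", "sys_ram": "32GB"}

report = ""
for key in sorted(sys_info):
    report += f"{key + ':':<20} {sys_info[key]}\n"
print(report)   # prints one aligned "key: value" line per entry
```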
4
_get_handles
def _get_handles(self):
    if self._is_plaidml:
        self._handles = self._plaid.devices
    elif IS_MACOS:
        self._handles = metal.get_handles()
    else:
        self._handles = [pynvml.nvmlDeviceGetHandleByIndex(i)
                         for i in range(self._device_count)]
    self._log("debug", "GPU Handles found: {}".format(len(self._handles)))
444762114c1b1ad2e72c871e825373bd74880aba
14
gpu_stats.py
123
Initial somewhat working version
19,776
0
121
73
21
100,266
25
faceswap
17
lib/gpu_stats.py
Python
9
{ "docstring": " Obtain the internal handle identifiers for the system GPUs and allocate to\n :attr:`_handles`. ", "language": "en", "n_whitespaces": 21, "n_words": 13, "vocab_size": 12 }
https://github.com/deepfakes/faceswap.git
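The non-macOS branch of `_get_handles` leans on NVML via `pynvml`. A minimal enumeration sketch, assuming an NVIDIA driver is present (`nvmlInit()` raises `NVMLError` otherwise):

```python
import pynvml

# Initialise NVML, grab one handle per device, and query a property
# through each handle, mirroring the handle bookkeeping above.
pynvml.nvmlInit()
count = pynvml.nvmlDeviceGetCount()
handles = [pynvml.nvmlDeviceGetHandleByIndex(i) for i in range(count)]
for idx, handle in enumerate(handles):
    print(f"GPU_{idx}: {pynvml.nvmlDeviceGetName(handle)}")
pynvml.nvmlShutdown()
```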
1
test_boost_mode
async def test_boost_mode(hass, aioclient_mock, mock_deconz_websocket):
    data = {
        "sensors": {
            "0": {
                "config": {
                    "battery": 58,
                    "heatsetpoint": 2200,
                    "locked": False,
                    "mode": "heat",
                    "offset": -200,
                    "on": True,
                    "preset": "manual",
                    "reachable": True,
                    "schedule": {},
                    "schedule_on": False,
                    "setvalve": False,
                    "windowopen_set": False,
                },
                "ep": 1,
                "etag": "404c15db68c318ebe7832ce5aa3d1e30",
                "lastannounced": "2022-08-31T03:00:59Z",
                "lastseen": "2022-09-19T11:58Z",
                "manufacturername": "_TZE200_b6wax7g0",
                "modelid": "TS0601",
                "name": "Thermostat",
                "state": {
                    "lastupdated": "2022-09-19T11:58:24.204",
                    "lowbattery": False,
                    "on": False,
                    "temperature": 2200,
                    "valve": 0,
                },
                "type": "ZHAThermostat",
                "uniqueid": "84:fd:27:ff:fe:8a:eb:89-01-0201",
            }
        }
    }
    with patch.dict(DECONZ_WEB_REQUEST, data):
        config_entry = await setup_deconz_integration(hass, aioclient_mock)

    assert len(hass.states.async_all()) == 3

    climate_thermostat = hass.states.get("climate.thermostat")
    assert climate_thermostat.state == HVACMode.HEAT
    assert climate_thermostat.attributes["preset_mode"] is DECONZ_PRESET_MANUAL
    assert climate_thermostat.attributes["hvac_action"] is HVACAction.IDLE

    # Event signals thermostat preset boost and valve 100 (real data)
    event_changed_sensor = {
        "t": "event",
        "e": "changed",
        "r": "sensors",
        "id": "0",
        "config": {"preset": "boost"},
        "state": {"valve": 100},
    }
    await mock_deconz_websocket(data=event_changed_sensor)
    await hass.async_block_till_done()

    climate_thermostat = hass.states.get("climate.thermostat")
    assert climate_thermostat.attributes["preset_mode"] is PRESET_BOOST
    assert climate_thermostat.attributes["hvac_action"] is HVACAction.HEATING

    # Verify service calls
    mock_deconz_put_request(aioclient_mock, config_entry.data, "/sensors/0/config")
7a6897c7578dffd6b67f57747ebd81b67b153e01
15
test_climate.py
549
Add deconz current hvac operation to thermostate based on "state" (#59989) * deconz - add current hvac operation to thermostate based on "state" * deconz - extend current hvac operation to thermostate based on "state" and "mode" * Add tests for current hvac action * Add boost mode as special case * format using Black * sort imports * Add test for device with mode none and state none * Update homeassistant/components/deconz/climate.py Co-authored-by: Robert Svensson <[email protected]> * Fix test_climate.py test_no_mode_no_state * Add test for boost mode Co-authored-by: Robert Svensson <[email protected]>
106,894
0
811
297
112
308,133
151
core
27
tests/components/deconz/test_climate.py
Python
58
{ "docstring": "Test that a climate device with boost mode and different state works.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/home-assistant/core.git
4
keyPressEvent
def keyPressEvent(self, e):
    if e.key() == Qt.Key.Key_Insert and e.modifiers() == Qt.KeyboardModifier.ShiftModifier:
        try:
            text = utils.get_clipboard(selection=True, fallback=True)
        except utils.ClipboardError:  # pragma: no cover
            e.ignore()
        else:
            e.accept()
            self.insert(text)
        return
    super().keyPressEvent(e)
0877fb0d78635692e481c8bde224fac5ad0dd430
13
prompt.py
137
Run scripts/dev/rewrite_enums.py
117,615
0
154
81
27
321,260
28
qutebrowser
20
qutebrowser/mainwindow/prompt.py
Python
11
{ "docstring": "Override keyPressEvent to paste primary selection on Shift + Ins.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/qutebrowser/qutebrowser.git
1
test_get_pdu_returns_nothing_when_event_does_not_exist
def test_get_pdu_returns_nothing_when_event_does_not_exist(self):
    remote_pdu = self.get_success(
        self.hs.get_federation_client().get_pdu(
            ["yet.another.server"],
            "event_should_not_exist",
            RoomVersions.V9,
        )
    )
    self.assertEqual(remote_pdu, None)
0f971ca68e808dd16f53f5594a6b33b7bddcc9a9
13
test_federation_client.py
71
Update `get_pdu` to return the original, pristine `EventBase` (#13320) Update `get_pdu` to return the untouched, pristine `EventBase` as it was originally seen over federation (no metadata added). Previously, we returned the same `event` reference that we stored in the cache which downstream code modified in place and added metadata like setting it as an `outlier` and essentially poisoned our cache. Now we always return a copy of the `event` so the original can stay pristine in our cache and re-used for the next cache call. Split out from https://github.com/matrix-org/synapse/pull/13205 As discussed at: - https://github.com/matrix-org/synapse/pull/13205#discussion_r918365746 - https://github.com/matrix-org/synapse/pull/13205#discussion_r918366125 Related to https://github.com/matrix-org/synapse/issues/12584. This PR doesn't fix that issue because it hits [`get_event` which exists from the local database before it tries to `get_pdu`](https://github.com/matrix-org/synapse/blob/7864f33e286dec22368dc0b11c06eebb1462a51e/synapse/federation/federation_client.py#L581-L594).
72,511
0
108
42
12
248,920
13
synapse
10
tests/federation/test_federation_client.py
Python
9
{ "docstring": "No event should be returned when the event does not exist", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
https://github.com/matrix-org/synapse.git
1
test_form
def test_form(self):
    form = self.EventPageForm(
        instance=self.event_page, for_user=self.commenting_user
    )
    self.assertIn("comments", form.formsets)

    comments_formset = form.formsets["comments"]
    self.assertEqual(len(comments_formset.forms), 1)
    self.assertEqual(comments_formset.forms[0].for_user, self.commenting_user)

    replies_formset = comments_formset.forms[0].formsets["replies"]
    self.assertEqual(len(replies_formset.forms), 2)
    self.assertEqual(replies_formset.forms[0].for_user, self.commenting_user)
5fe2554934e5f9d6f27cf2bd4333b8ebe24ae592
10
test_edit_handlers.py
182
Refactor commenting forms to receive the user object on instantiation Use the inherit_kwargs feature added to django-modelcluster in https://github.com/wagtail/django-modelcluster/pull/156 / https://github.com/wagtail/django-modelcluster/pull/157 to pass the for_user attribute from the root form to the comments formset; this means we don't have to construct a form class on the fly with the user embedded, and so don't need to bind to a request object before retrieving the form definition.
16,567
0
105
115
21
76,676
24
wagtail
15
wagtail/admin/tests/test_edit_handlers.py
Python
11
{ "docstring": "\n Check that the form has the comments/replies formsets, and that the\n user has been set on each CommentForm/CommentReplyForm instance\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 15 }
https://github.com/wagtail/wagtail.git
20
get_pydot_graph
def get_pydot_graph(caffe_net, rankdir, label_edges=True, phase=None):
    pydot_graph = pydot.Dot(caffe_net.name if caffe_net.name else 'Net',
                            graph_type='digraph',
                            rankdir=rankdir)
    pydot_nodes = {}
    pydot_edges = []
    for layer in caffe_net.layer:
        if phase is not None:
            included = False
            if len(layer.include) == 0:
                included = True
            if len(layer.include) > 0 and len(layer.exclude) > 0:
                raise ValueError('layer ' + layer.name + ' has both include '
                                 'and exclude specified.')
            for layer_phase in layer.include:
                included = included or layer_phase.phase == phase
            for layer_phase in layer.exclude:
                included = included and not layer_phase.phase == phase
            if not included:
                continue
        node_label = get_layer_label(layer, rankdir)
        node_name = "%s_%s" % (layer.name, layer.type)
        if (len(layer.bottom) == 1 and len(layer.top) == 1 and
                layer.bottom[0] == layer.top[0]):
            # We have an in-place neuron layer.
            pydot_nodes[node_name] = pydot.Node(node_label, **NEURON_LAYER_STYLE)
        else:
            layer_style = LAYER_STYLE_DEFAULT
            layer_style['fillcolor'] = choose_color_by_layertype(layer.type)
            pydot_nodes[node_name] = pydot.Node(node_label, **layer_style)
        for bottom_blob in layer.bottom:
            pydot_nodes[bottom_blob + '_blob'] = pydot.Node('%s' % bottom_blob,
                                                            **BLOB_STYLE)
            edge_label = '""'
            pydot_edges.append({'src': bottom_blob + '_blob',
                                'dst': node_name,
                                'label': edge_label})
        for top_blob in layer.top:
            pydot_nodes[top_blob + '_blob'] = pydot.Node('%s' % (top_blob))
            if label_edges:
                edge_label = get_edge_label(layer)
            else:
                edge_label = '""'
            pydot_edges.append({'src': node_name,
                                'dst': top_blob + '_blob',
                                'label': edge_label})
    # Now, add the nodes and edges to the graph.
    for node in pydot_nodes.values():
        pydot_graph.add_node(node)
    for edge in pydot_edges:
        pydot_graph.add_edge(
            pydot.Edge(pydot_nodes[edge['src']],
                       pydot_nodes[edge['dst']],
                       label=edge['label']))
    return pydot_graph
cc4d0564756ca067516f71718a3d135996525909
16
draw.py
667
Balanced joint maximum mean discrepancy for deep transfer learning
12,040
0
946
406
121
60,247
208
transferlearning
43
code/deep/BJMMD/caffe/python/caffe/draw.py
Python
54
{ "docstring": "Create a data structure which represents the `caffe_net`.\n\n Parameters\n ----------\n caffe_net : object\n rankdir : {'LR', 'TB', 'BT'}\n Direction of graph layout.\n label_edges : boolean, optional\n Label the edges (default is True).\n phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional\n Include layers from this network phase. If None, include all layers.\n (the default is None)\n\n Returns\n -------\n pydot graph object\n ", "language": "en", "n_whitespaces": 117, "n_words": 58, "vocab_size": 50 }
https://github.com/jindongwang/transferlearning.git
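`get_pydot_graph` builds its output through the `pydot` package. A minimal sketch of the same node/edge bookkeeping on a toy two-node graph; the node names are invented:

```python
import pydot

# Create a left-to-right directed graph, register nodes, then connect them.
graph = pydot.Dot("Net", graph_type="digraph", rankdir="LR")

data = pydot.Node("data", shape="octagon")
conv = pydot.Node("conv1_Convolution", shape="record")
graph.add_node(data)
graph.add_node(conv)
graph.add_edge(pydot.Edge(data, conv, label="blob"))

print(graph.to_string())        # DOT source
# graph.write_png("net.png")    # requires Graphviz to be installed
```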
10
_lstsq
def _lstsq(a, b, rcond, *, numpy_resid=False):
    # TODO: add lstsq to lax_linalg and implement this function via those wrappers.
    # TODO: add custom jvp rule for more robust lstsq differentiation
    a, b = _promote_dtypes_inexact(a, b)
    if a.shape[0] != b.shape[0]:
        raise ValueError("Leading dimensions of input arrays must match")
    b_orig_ndim = b.ndim
    if b_orig_ndim == 1:
        b = b[:, None]
    if a.ndim != 2:
        raise TypeError(
            f"{a.ndim}-dimensional array given. Array must be two-dimensional")
    if b.ndim != 2:
        raise TypeError(
            f"{b.ndim}-dimensional array given. Array must be one or two-dimensional")
    m, n = a.shape
    dtype = a.dtype
    if rcond is None:
        rcond = jnp.finfo(dtype).eps * max(n, m)
    else:
        rcond = jnp.where(rcond < 0, jnp.finfo(dtype).eps, rcond)
    u, s, vt = svd(a, full_matrices=False)
    mask = s >= rcond * s[0]
    rank = mask.sum()
    safe_s = jnp.where(mask, s, 1)
    s_inv = jnp.where(mask, 1 / safe_s, 0)[:, jnp.newaxis]
    uTb = jnp.matmul(u.conj().T, b, precision=lax.Precision.HIGHEST)
    x = jnp.matmul(vt.conj().T, s_inv * uTb, precision=lax.Precision.HIGHEST)
    # Numpy returns empty residuals in some cases. To allow compilation, we
    # default to returning full residuals in all cases.
    if numpy_resid and (rank < n or m <= n):
        resid = jnp.asarray([])
    else:
        b_estimate = jnp.matmul(a, x, precision=lax.Precision.HIGHEST)
        resid = norm(b - b_estimate, axis=0) ** 2
    if b_orig_ndim == 1:
        x = x.ravel()
    return x, resid, rank, s


_jit_lstsq = jit(partial(_lstsq, numpy_resid=False))


@_wraps(np.linalg.lstsq, lax_description=textwrap.dedent())
bb2682db6df5b9388ce0b161e3f449624238718b
@_wraps(np.linalg.lstsq, lax_description=textwrap.dedent("""\ It has two important differences: 1. In `numpy.linalg.lstsq`, the default `rcond` is `-1`, and warns that in the future the default will be `None`. Here, the default rcond is `None`. 2. In `np.linalg.lstsq` the returned residuals are empty for low-rank or over-determined solutions. Here, the residuals are returned in all cases, to make the function compatible with jit. The non-jit compatible numpy behavior can be recovered by passing numpy_resid=True. The lstsq function does not currently have a custom JVP rule, so the gradient is poorly behaved for some inputs, particularly for low-rank `a`. """))
14
linalg.py
580
remove numpy.linalg._promote_arg_dtypes in favor of numpy.util._promote_dtypes_inexact
26,804
1
280
336
145
120,239
218
jax
55
jax/_src/numpy/linalg.py
Python
34
{ "docstring": "\\\n It has two important differences:\n\n 1. In `numpy.linalg.lstsq`, the default `rcond` is `-1`, and warns that in the future\n the default will be `None`. Here, the default rcond is `None`.\n 2. In `np.linalg.lstsq` the returned residuals are empty for low-rank or over-determined\n solutions. Here, the residuals are returned in all cases, to make the function\n compatible with jit. The non-jit compatible numpy behavior can be recovered by\n passing numpy_resid=True.\n\n The lstsq function does not currently have a custom JVP rule, so the gradient is\n poorly behaved for some inputs, particularly for low-rank `a`.\n ", "language": "en", "n_whitespaces": 136, "n_words": 94, "vocab_size": 69 }
https://github.com/google/jax.git
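`_lstsq` backs the public `jnp.linalg.lstsq` wrapper. A usage sketch fitting a line by least squares; the sample data is made up:

```python
import jax.numpy as jnp

# Fit y = c0 + c1*x to four noisy points through the public wrapper.
x = jnp.array([0.0, 1.0, 2.0, 3.0])
y = jnp.array([-1.0, 0.2, 0.9, 2.1])
A = jnp.vstack([jnp.ones_like(x), x]).T      # design matrix, shape (4, 2)

coeffs, resid, rank, sv = jnp.linalg.lstsq(A, y)
print(coeffs)   # approximately [-0.95, 1.0]
```

Note that, per the docstring above, the residuals are always populated here (unlike NumPy), which keeps the function jit-compatible.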
2
_add_results
def _add_results(self, results, trial_id):
    for result in results:
        self.logger.debug("Appending result: %s" % result)
        result["trial_id"] = trial_id
        result_record = ResultRecord.from_json(result)
        result_record.save()
d2f0c3b2f64b41f6541f6521e98cf3a37577c016
11
collector.py
75
Clean up docstyle in data, ml, and tune packages (#25188)
31,929
0
78
44
19
140,345
20
ray
11
python/ray/tune/automlboard/backend/collector.py
Python
6
{ "docstring": "Add a list of results into db.\n\n Args:\n results: A list of json results.\n trial_id: Id of the trial.\n ", "language": "en", "n_whitespaces": 55, "n_words": 19, "vocab_size": 16 }
https://github.com/ray-project/ray.git
1
test_login_session_without_hash_session_key
def test_login_session_without_hash_session_key(self):
    user = User.objects.get(username="testclient")
    engine = import_module(settings.SESSION_ENGINE)
    session = engine.SessionStore()
    session[SESSION_KEY] = user.id
    session.save()
    original_session_key = session.session_key

    self.client.cookies[settings.SESSION_COOKIE_NAME] = original_session_key
    self.login()

    self.assertNotEqual(original_session_key, self.client.session.session_key)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
test_views.py
136
Refs #33476 -- Reformatted code with Black.
49,961
0
94
82
18
201,522
24
django
23
tests/auth_tests/test_views.py
Python
10
{ "docstring": "\n Session without django.contrib.auth.HASH_SESSION_KEY should login\n without an exception.\n ", "language": "en", "n_whitespaces": 30, "n_words": 8, "vocab_size": 7 }
https://github.com/django/django.git
1
house_graph
def house_graph(create_using=None):
    description = [
        "adjacencylist",
        "House Graph",
        5,
        [[2, 3], [1, 4], [1, 4, 5], [2, 3, 5], [3, 4]],
    ]
    G = make_small_undirected_graph(description, create_using)
    return G
dec723f072eb997a497a159dbe8674cd39999ee9
9
small.py
90
Docstrings for the small.py module (#5240) * added description for the first 5 small graphs * modified descriptions based on comment and added description for two more functions * added doctrings to all the functions * Minor touchups. Co-authored-by: Ross Barnowski <[email protected]>
41,724
0
71
64
24
176,154
28
networkx
5
networkx/generators/small.py
Python
9
{ "docstring": "\n Returns the House graph (square with triangle on top)\n\n The house graph is a simple undirected graph with\n 5 nodes and 6 edges [1]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n House graph in the form of a square with a triangle on top\n\n References\n ----------\n .. [1] https://mathworld.wolfram.com/HouseGraph.html\n ", "language": "en", "n_whitespaces": 121, "n_words": 68, "vocab_size": 51 }
https://github.com/networkx/networkx.git
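`house_graph` is exposed as `nx.house_graph`. A quick check of the "square with a triangle on top" structure described in the docstring:

```python
import networkx as nx

G = nx.house_graph()
print(G.number_of_nodes(), G.number_of_edges())   # 5 6
print(sorted(G.edges()))   # the four square edges plus the two roof edges
```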
10
getclosurevars
def getclosurevars(func):
    if ismethod(func):
        func = func.__func__

    if not isfunction(func):
        raise TypeError("{!r} is not a Python function".format(func))

    code = func.__code__
    # Nonlocal references are named in co_freevars and resolved
    # by looking them up in __closure__ by positional index
    if func.__closure__ is None:
        nonlocal_vars = {}
    else:
        nonlocal_vars = {
            var : cell.cell_contents
            for var, cell in zip(code.co_freevars, func.__closure__)
        }

    # Global and builtin references are named in co_names and resolved
    # by looking them up in __globals__ or __builtins__
    global_ns = func.__globals__
    builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
    if ismodule(builtin_ns):
        builtin_ns = builtin_ns.__dict__
    global_vars = {}
    builtin_vars = {}
    unbound_names = set()
    for name in code.co_names:
        if name in ("None", "True", "False"):
            # Because these used to be builtins instead of keywords, they
            # may still show up as name references. We ignore them.
            continue
        try:
            global_vars[name] = global_ns[name]
        except KeyError:
            try:
                builtin_vars[name] = builtin_ns[name]
            except KeyError:
                unbound_names.add(name)

    return ClosureVars(nonlocal_vars, global_vars,
                       builtin_vars, unbound_names)

# -------------------------------------------------- stack frame extraction

Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
8198943edd73a363c266633e1aa5b2a9e9c9f526
15
inspect.py
319
add python 3.10.4 for windows
55,282
0
419
181
113
218,397
165
XX-Net
34
python3.10.4/Lib/inspect.py
Python
32
{ "docstring": "\n Get the mapping of free variables to their current values.\n\n Returns a named tuple of dicts mapping the current nonlocal, global\n and builtin references as seen by the body of the function. A final\n set of unbound names that could not be resolved is also provided.\n ", "language": "en", "n_whitespaces": 62, "n_words": 46, "vocab_size": 38 }
https://github.com/XX-net/XX-Net.git
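`getclosurevars` is part of the public `inspect` module. A small demonstration of the nonlocal/global/builtin split it reports; the helper function here is invented for illustration:

```python
import inspect

LIMIT = 10   # module-level global referenced from the closure below

def make_counter(step):
    total = 0
    def bump():
        nonlocal total
        total += step                 # 'step' and 'total' resolve as nonlocals
        return min(total, LIMIT)      # 'LIMIT' is a global, 'min' a builtin
    return bump

cv = inspect.getclosurevars(make_counter(2))
print(cv.nonlocals)          # {'step': 2, 'total': 0}
print(cv.globals)            # {'LIMIT': 10}
print(sorted(cv.builtins))   # ['min']
```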
5
process_deferred
def process_deferred(self) -> None:
    update: typing.Dict[str, typing.Any] = {}
    for optname, value in self.deferred.items():
        if optname in self._options:
            if isinstance(value, _UnconvertedStrings):
                value = self._parse_setval(self._options[optname], value.val)
            update[optname] = value
    self.update(**update)
    for k in update.keys():
        del self.deferred[k]
6f0587734e2525eab49ba9a1d2052c87922125c7
16
optmanager.py
151
Refactor how we process `--set` options (#5067) * refactor how we process `--set` options * minor improvements based on @marwinxxii's review * fix nits * update changelog
73,536
0
141
97
27
250,719
35
mitmproxy
18
mitmproxy/optmanager.py
Python
14
{ "docstring": "\n Processes options that were deferred in previous calls to set, and\n have since been added.\n ", "language": "en", "n_whitespaces": 45, "n_words": 15, "vocab_size": 15 }
https://github.com/mitmproxy/mitmproxy.git
4
_minmax_mhlo
def _minmax_mhlo(op, cmp, x, y):
    tensor_type = ir.RankedTensorType(x.type)
    if ir.ComplexType.isinstance(tensor_type.element_type):
        rx = mhlo.RealOp(x).result
        ry = mhlo.RealOp(y).result
        dims = [tensor_type.get_dim_size(i) for i in range(tensor_type.rank)]
        bool_shape = ir.RankedTensorType.get(dims, ir.IntegerType.get_signless(1))
        if jax._src.lib.mlir_api_version >= 3:
            real_eq = mhlo.CompareOp(bool_shape, rx, ry,
                                     mhlo.ComparisonDirectionAttr.get("EQ"),
                                     mhlo.ComparisonTypeAttr.get("FLOAT"))
            real_cmp = mhlo.CompareOp(bool_shape, rx, ry,
                                      mhlo.ComparisonDirectionAttr.get(cmp),
                                      mhlo.ComparisonTypeAttr.get("FLOAT"))
            imag_cmp = mhlo.CompareOp(bool_shape,
                                      mhlo.ImagOp(x).result,
                                      mhlo.ImagOp(y).result,
                                      mhlo.ComparisonDirectionAttr.get(cmp),
                                      mhlo.ComparisonTypeAttr.get("FLOAT"))
        else:
            real_eq = mhlo.CompareOp(bool_shape, rx, ry,
                                     ir.StringAttr.get("EQ"),
                                     ir.StringAttr.get("FLOAT"))
            real_cmp = mhlo.CompareOp(bool_shape, rx, ry,
                                      ir.StringAttr.get(cmp),
                                      ir.StringAttr.get("FLOAT"))
            imag_cmp = mhlo.CompareOp(bool_shape,
                                      mhlo.ImagOp(x).result,
                                      mhlo.ImagOp(y).result,
                                      ir.StringAttr.get(cmp),
                                      ir.StringAttr.get("FLOAT"))
        which = mhlo.SelectOp(real_eq, imag_cmp, real_cmp).result
        return mhlo.SelectOp(which, x, y)
    else:
        return op(x, y)

min_mhlo = partial(_minmax_mhlo, mhlo.MinOp, "LT")
max_mhlo = partial(_minmax_mhlo, mhlo.MaxOp, "GT")
6cd9804163203e4da13b33171c5889b6d17e5f43
16
mlir.py
570
Replace (deprecated) StrEnumAttr with EnumAttr. ref: https://reviews.llvm.org/D120834 PiperOrigin-RevId: 435550738
26,700
0
556
347
56
119,845
97
jax
45
jax/interpreters/mlir.py
Python
32
{ "docstring": "Min/max that compares complex values lexicographically as pairs.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/google/jax.git
2
test_invites_by_users_ratelimit
def test_invites_by_users_ratelimit(self) -> None:
    for _ in range(3):
        room_id = self.helper.create_room_as(self.user_id)
        self.helper.invite(room_id, self.user_id, "@other-users:red")

    room_id = self.helper.create_room_as(self.user_id)
    self.helper.invite(room_id, self.user_id, "@other-users:red", expect_code=429)
2ffaf30803f93273a4d8a65c9e6c3110c8433488
11
test_rooms.py
115
Add type hints to `tests/rest/client` (#12108) * Add type hints to `tests/rest/client` * newsfile * fix imports * add `test_account.py` * Remove one type hint in `test_report_event.py` * change `on_create_room` to `async` * update new functions in `test_third_party_rules.py` * Add `test_filter.py` * add `test_rooms.py` * change to `assertEquals` to `assertEqual` * lint
71,582
0
71
72
16
247,311
21
synapse
10
tests/rest/client/test_rooms.py
Python
7
{ "docstring": "Tests that invites to a specific user are actually rate-limited.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/matrix-org/synapse.git