Dataset schema (column, dtype, observed range):

    complexity       int64     1 to 139
    fun_name         string    lengths 1 to 80
    code             string    lengths 101 to 62.2k
    commit_id        string    lengths 40 to 40
    ast_errors       string    lengths 0 to 3.11k
    ast_levels       int64     6 to 36
    file_name        string    lengths 5 to 79
    n_ast_nodes      int64     17 to 19.2k
    commit_message   string    lengths 3 to 15.3k
    d_id             int64     12 to 121k
    n_ast_errors     int64     0 to 9
    n_whitespaces    int64     4 to 10.8k
    token_counts     int64     5 to 3.06k
    vocab_size       int64     4 to 1.11k
    id               int64     20 to 338k
    n_words          int64     4 to 4.82k
    repo             string    lengths 3 to 22
    n_identifiers    int64     2 to 176
    path             string    lengths 7 to 134
    language         string    1 class
    nloc             int64     1 to 413
    documentation    dict
    url              string    lengths 31 to 59
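The layout above reads like a Hugging Face dataset viewer export: per-column dtype plus observed value or length ranges, followed by the rows themselves. Assuming that is the case, a record with this schema could be loaded and inspected roughly as below; the dataset identifier is a placeholder, since the dump never names the dataset.

    # Hypothetical loading sketch; "org/code-metrics-corpus" is a placeholder
    # name, not something stated anywhere in this dump.
    from datasets import load_dataset

    ds = load_dataset("org/code-metrics-corpus", split="train")
    row = ds[0]
    print(row["fun_name"], row["repo"], row["complexity"])
    print(row["code"])                        # full source of the sampled function
    print(row["documentation"]["docstring"])  # its extracted docstring

The records follow, one per function, with fields in the schema order above.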
fun_name: test_usage_lib_get_cluster_config_to_report
repo: ray | path: python/ray/tests/test_usage_stats.py | file_name: test_usage_stats.py | language: Python
url: https://github.com/ray-project/ray.git
commit_id: 5fb61abba3f583a1aa080623623af0b79de9e3f3
commit_message:
    [Usage Stats][Hotfix] Import usage reported from workers. (#25785)
    ## Why are these changes needed?
    We currently only record usage stats from drivers. This can lose some information when
    libraries are imported from workers (e.g., doing some rllib import from a trainable).
    @jjyao just for future reference.
metrics: complexity 1 | ast_levels 10 | n_ast_nodes 441 | n_ast_errors 1 | n_whitespaces 371 | token_counts 242 | vocab_size 49 | n_words 159 | n_identifiers 23 | nloc 99 | d_id 32,589 | id 142,091
ast_errors: @pytest.mark.skipif( sys.platform == "win32", reason="Test depends on runtime env feature not supported on Windows.", )
documentation: { "docstring": " Test minimal cluster config\ncluster_name: minimal\nmax_workers: 1\nprovider:\n type: aws\n region: us-west-2\n availability_zone: us-west-2a\n\ncluster_name: full\nmin_workers: 1\nprovider:\n type: gcp\nhead_node_type: head_node\navailable_node_types:\n head_node:\n node_config:\n InstanceType: m5.large\n min_workers: 0\n max_workers: 0\n aws_worker_node:\n node_config:\n InstanceType: m3.large\n min_workers: 0\n max_workers: 0\n azure_worker_node:\n node_config:\n azure_arm_parameters:\n vmSize: Standard_D2s_v3\n gcp_worker_node:\n node_config:\n machineType: n1-standard-2\n\ncluster_name: full\nhead_node_type: head_node\navailable_node_types:\n worker_node_1:\n node_config:\n ImageId: xyz\n worker_node_2:\n resources: {}\n worker_node_3:\n node_config:\n InstanceType: m5.large\n", "language": "en", "n_whitespaces": 246, "n_words": 66, "vocab_size": 41 }
code:

    def test_usage_lib_get_cluster_config_to_report(monkeypatch, tmp_path, reset_lib_usage):
        cluster_config_file_path = tmp_path / "ray_bootstrap_config.yaml"
        cluster_config_file_path.write_text(
        )
        cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
            cluster_config_file_path
        )
        assert cluster_config_to_report.cloud_provider == "aws"
        assert cluster_config_to_report.min_workers is None
        assert cluster_config_to_report.max_workers == 1
        assert cluster_config_to_report.head_node_instance_type is None
        assert cluster_config_to_report.worker_node_instance_types is None
        cluster_config_file_path.write_text(
        )
        cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
            cluster_config_file_path
        )
        assert cluster_config_to_report.cloud_provider == "gcp"
        assert cluster_config_to_report.min_workers == 1
        assert cluster_config_to_report.max_workers is None
        assert cluster_config_to_report.head_node_instance_type == "m5.large"
        assert cluster_config_to_report.worker_node_instance_types == list(
            {"m3.large", "Standard_D2s_v3", "n1-standard-2"}
        )
        cluster_config_file_path.write_text(
        )
        cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
            cluster_config_file_path
        )
        assert cluster_config_to_report.cloud_provider is None
        assert cluster_config_to_report.min_workers is None
        assert cluster_config_to_report.max_workers is None
        assert cluster_config_to_report.head_node_instance_type is None
        assert cluster_config_to_report.worker_node_instance_types == ["m5.large"]
        cluster_config_file_path.write_text("[invalid")
        cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
            cluster_config_file_path
        )
        assert cluster_config_to_report == ClusterConfigToReport()
        cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
            tmp_path / "does_not_exist.yaml"
        )
        assert cluster_config_to_report == ClusterConfigToReport()
        monkeypatch.setenv("KUBERNETES_SERVICE_HOST", "localhost")
        cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
            tmp_path / "does_not_exist.yaml"
        )
        assert cluster_config_to_report.cloud_provider == "kubernetes"
        assert cluster_config_to_report.min_workers is None
        assert cluster_config_to_report.max_workers is None
        assert cluster_config_to_report.head_node_instance_type is None
        assert cluster_config_to_report.worker_node_instance_types is None

    @pytest.mark.skipif(
        sys.platform == "win32",
        reason="Test depends on runtime env feature not supported on Windows.",
    )
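The numeric columns look like lexical metrics over the code field. As a rough illustration only — the dataset's exact definitions are not stated in this dump, so the counts below will not necessarily reproduce values such as token_counts 242 or vocab_size 49 in the record above — comparable numbers can be computed with Python's tokenize module:

    # Rough sketch of lexical metrics over a `code` string; the definitions are
    # assumptions, not the dataset's documented ones.
    import io
    import tokenize

    def rough_metrics(code: str) -> dict:
        toks = [
            t.string
            for t in tokenize.generate_tokens(io.StringIO(code).readline)
            if t.type not in (
                tokenize.NEWLINE, tokenize.NL, tokenize.INDENT,
                tokenize.DEDENT, tokenize.ENDMARKER,
            )
        ]
        return {
            "token_counts": len(toks),      # total lexical tokens (assumed meaning)
            "vocab_size": len(set(toks)),   # distinct tokens (assumed meaning)
            "nloc": sum(1 for ln in code.splitlines() if ln.strip()),
        }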
fun_name: _process_contour_level_args
repo: matplotlib | path: lib/matplotlib/contour.py | file_name: contour.py | language: Python
url: https://github.com/matplotlib/matplotlib.git
commit_id: 1068a6faa19767724437461bcfb88c6852ec435c
commit_message:
    Remove unnecessary np.{,as}array / astype calls.
    Quite often numpy will call asarray for us, saving us the need to call asarray
    explicitly. When we do call asarray (or array) ourselves, a dtype can directly
    be passed in, rather than immediately calling astype immediately after. Passing
    the dtype makes it unnecessary for asarray to infer the dtype of the passed-in
    container, and can also save an extra array allocation if asarray first has to
    allocate an array of a type and astype immediately has to allocate an array of
    another type.
metrics: complexity 10 | ast_levels 12 | n_ast_nodes 299 | n_ast_errors 0 | n_whitespaces 342 | token_counts 185 | vocab_size 66 | n_words 96 | n_identifiers 22 | nloc 23 | d_id 23,889 | id 110,019
documentation: { "docstring": "\n        Determine the contour levels and store in self.levels.\n        ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
code:

    def _process_contour_level_args(self, args):
        if self.levels is None:
            if len(args) == 0:
                levels_arg = 7  # Default, hard-wired.
            else:
                levels_arg = args[0]
        else:
            levels_arg = self.levels
        if isinstance(levels_arg, Integral):
            self.levels = self._autolev(levels_arg)
        else:
            self.levels = np.asarray(levels_arg, np.float64)
        if not self.filled:
            inside = (self.levels > self.zmin) & (self.levels < self.zmax)
            levels_in = self.levels[inside]
            if len(levels_in) == 0:
                self.levels = [self.zmin]
                _api.warn_external(
                    "No contour levels were found within the data range.")
        if self.filled and len(self.levels) < 2:
            raise ValueError("Filled contours require at least 2 levels.")
        if len(self.levels) > 1 and np.min(np.diff(self.levels)) <= 0.0:
            raise ValueError("Contour levels must be increasing")
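Similarly, ast_levels and n_ast_nodes look like the depth and node count of the parsed function. A stdlib-only sketch — again an assumption about what the columns measure, not the dataset's documented method:

    # Depth and node count of a Python AST; assumed (not documented) to be what
    # ast_levels and n_ast_nodes measure.
    import ast

    def ast_stats(code: str) -> tuple[int, int]:
        tree = ast.parse(code)

        def depth(node: ast.AST) -> int:
            children = list(ast.iter_child_nodes(node))
            return 1 + (max(map(depth, children)) if children else 0)

        return depth(tree), sum(1 for _ in ast.walk(tree))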
fun_name: test_delete_post
repo: wagtail | path: wagtail/images/tests/test_admin_views.py | file_name: test_admin_views.py | language: Python
url: https://github.com/wagtail/wagtail.git
commit_id: d10f15e55806c6944827d801cd9c2d53f5da4186
commit_message: Reformat with black
metrics: complexity 1 | ast_levels 14 | n_ast_nodes 232 | n_ast_errors 1 | n_whitespaces 155 | token_counts 128 | vocab_size 33 | n_words 40 | n_identifiers 25 | nloc 12 | d_id 16,361 | id 75,124
ast_errors: @override_settings(WAGTAILIMAGES_IMAGE_MODEL="tests.CustomImage")
documentation: { "docstring": "\n        This tests that a POST request to the delete view deletes the image\n        ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 12 }
code:

    def test_delete_post(self):
        # Send request
        response = self.client.post(
            reverse("wagtailimages:delete_multiple", args=(self.image.id,))
        )
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "application/json")
        # Make sure the image is deleted
        self.assertFalse(Image.objects.filter(id=self.image.id).exists())
        # Check JSON
        response_json = json.loads(response.content.decode())
        self.assertIn("image_id", response_json)
        self.assertIn("success", response_json)
        self.assertEqual(response_json["image_id"], self.image.id)
        self.assertTrue(response_json["success"])

    @override_settings(WAGTAILIMAGES_IMAGE_MODEL="tests.CustomImage")
fun_name: is_media_file
repo: mkdocs | path: mkdocs/structure/files.py | file_name: files.py | language: Python
url: https://github.com/mkdocs/mkdocs.git
commit_id: e7f07cc82ab2be920ab426ba07456d8b2592714d
commit_message: Remove spaces at the ends of docstrings, normalize quotes
metrics: complexity 2 | ast_levels 10 | n_ast_nodes 38 | n_ast_errors 0 | n_whitespaces 21 | token_counts 21 | vocab_size 7 | n_words 7 | n_identifiers 4 | nloc 2 | d_id 57,163 | id 224,016
documentation: { "docstring": "Return True if file is not a documentation or static page.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
code:

    def is_media_file(self):
        return not (self.is_documentation_page() or self.is_static_page())
fun_name: get_all
repo: OpenBBTerminal | path: openbb_terminal/stocks/tradinghours/bursa_model.py | file_name: bursa_model.py | language: Python
url: https://github.com/OpenBB-finance/OpenBBTerminal.git
commit_id: 33a041e5bf93ce93ab1a19adbc5ed74c2f1eb337
commit_message: Trading hours stock feature (#1697)
metrics: complexity 2 | ast_levels 10 | n_ast_nodes 109 | n_ast_errors 1 | n_whitespaces 58 | token_counts 56 | vocab_size 23 | n_words 27 | n_identifiers 14 | nloc 18 | d_id 84,727 | id 284,457
ast_errors: @log_start_end(log=logger)
documentation: { "docstring": "Get all exchanges.\n\n    Parameters\n    ----------\n\n    Returns\n    -------\n    pd.DataFrame\n        All available exchanges\n    ", "language": "en", "n_whitespaces": 36, "n_words": 11, "vocab_size": 11 }
code:

    def get_all() -> pd.DataFrame:
        bursa = all_bursa()
        is_open_list = []
        for exchange in bursa.index:
            is_open = check_if_open(bursa, exchange)
            is_open_list.append(is_open)
        bursa["open"] = is_open_list
        return bursa[["name", "short_name", "open"]]

    @log_start_end(log=logger)
fun_name: distro_release_info
repo: pipenv | path: pipenv/patched/notpip/_vendor/distro.py | file_name: distro.py | language: Python
url: https://github.com/pypa/pipenv.git
commit_id: f3166e673fe8d40277b804d35d77dcdb760fc3b3
commit_message:
    check point progress on only bringing in pip==22.0.4 (#4966)
    * vendor in pip==22.0.4
    * updating vendor packaging version
    * update pipdeptree to fix pipenv graph with new version of pip.
    * Vendoring of pip-shims 0.7.0
    * Vendoring of requirementslib 1.6.3
    * Update pip index safety restrictions patch for pip==22.0.4
    * Update patches
    * exclude pyproject.toml from black to see if that helps.
    * Move this part of the hash collection back to the top (like prior
      implementation) because it affects the outcome of this test now in pip 22.0.4
metrics: complexity 1 | ast_levels 6 | n_ast_nodes 20 | n_ast_errors 0 | n_whitespaces 31 | token_counts 10 | vocab_size 10 | n_words 10 | n_identifiers 3 | nloc 2 | d_id 3,197 | id 20,048
documentation: { "docstring": "\n        Return a dictionary containing key-value pairs for the information\n        items from the distro release file data source of the OS\n        distribution.\n\n        For details, see :func:`distro.distro_release_info`.\n        ", "language": "en", "n_whitespaces": 61, "n_words": 25, "vocab_size": 23 }
code:

    def distro_release_info(self):
        # type: () -> Dict[str, str]
        return self._distro_release_info
fun_name: set_constrained_layout
repo: matplotlib | path: lib/matplotlib/figure.py | file_name: figure.py | language: Python
url: https://github.com/matplotlib/matplotlib.git
commit_id: ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22
commit_message: ENH: implement and use base layout_engine for more flexible layout.
metrics: complexity 4 | ast_levels 12 | n_ast_nodes 96 | n_ast_errors 0 | n_whitespaces 91 | token_counts 58 | vocab_size 20 | n_words 27 | n_identifiers 13 | nloc 8 | d_id 22,600 | id 107,135
documentation: { "docstring": "\n        Set whether ``constrained_layout`` is used upon drawing. If None,\n        :rc:`figure.constrained_layout.use` value will be used.\n\n        When providing a dict containing the keys ``w_pad``, ``h_pad``\n        the default ``constrained_layout`` paddings will be\n        overridden. These pads are in inches and default to 3.0/72.0.\n        ``w_pad`` is the width padding and ``h_pad`` is the height padding.\n\n        See :doc:`/tutorials/intermediate/constrainedlayout_guide`.\n\n        Parameters\n        ----------\n        constrained : bool or dict or None\n        ", "language": "en", "n_whitespaces": 140, "n_words": 61, "vocab_size": 48 }
code:

    def set_constrained_layout(self, constrained):
        if constrained is None:
            constrained = mpl.rcParams['figure.constrained_layout.use']
        _constrained = bool(constrained)
        _parameters = constrained if isinstance(constrained, dict) else {}
        if _constrained:
            self.set_layout_engine(ConstrainedLayoutEngine(**_parameters))
        self.stale = True
fun_name: _default
repo: airflow | path: airflow/utils/json.py | file_name: json.py | language: Python
url: https://github.com/apache/airflow.git
commit_id: 451a6f4d9ff8b744075e2f25099046c77f28179e
commit_message:
    Speed up grid_data endpoint by 10x (#24284)
    * Speed up grid_data endpoint by 10x
      These changes make the endpoint go from almost 20s down to 1.5s and the
      changes are two fold:
      1. Keep datetimes as objects for as long as possible. Previously we were
         converting start/end dates for a task group to a string, and then in the
         parent parsing it back to a datetime to find the min and max of all the
         child nodes. The fix for that was to leave it as a datetime (or a
         pendulum.DateTime technically) and use the existing `AirflowJsonEncoder`
         class to "correctly" encode these objects on output.
      2. Reduce the number of DB queries from 1 per task to 1. The removed
         `get_task_summaries` function was called for each task, and was making a
         query to the database to find info for the given DagRuns. The helper
         function now makes just a single DB query for all tasks/runs and
         constructs a dict to efficiently look up the ti by run_id.
    * Add support for mapped tasks in the grid data
    * Don't fail when not all tasks have a finish date. Note that this possibly
      has incorrect behaviour, in that the end_date of a TaskGroup is set to the
      max of all the children's end dates, even if some are still running. (This
      is the existing behaviour and is not changed or altered by this change -
      limiting it to just performance fixes)
metrics: complexity 14 | ast_levels 12 | n_ast_nodes 386 | n_ast_errors 0 | n_whitespaces 597 | token_counts 253 | vocab_size 92 | n_words 137 | n_identifiers 47 | nloc 39 | d_id 7,849 | id 43,172
documentation: { "docstring": "Convert dates and numpy objects in a json serializable format.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
code:

    def _default(obj):
        if isinstance(obj, datetime):
            if is_naive(obj):
                obj = convert_to_utc(obj)
            return obj.isoformat()
        elif isinstance(obj, date):
            return obj.strftime('%Y-%m-%d')
        elif isinstance(obj, Decimal):
            _, _, exponent = obj.as_tuple()
            if exponent >= 0:
                # No digits after the decimal point.
                return int(obj)
            # Technically lossy due to floating point errors, but the best we
            # can do without implementing a custom encode function.
            return float(obj)
        elif np is not None and isinstance(
            obj,
            (
                np.int_,
                np.intc,
                np.intp,
                np.int8,
                np.int16,
                np.int32,
                np.int64,
                np.uint8,
                np.uint16,
                np.uint32,
                np.uint64,
            ),
        ):
            return int(obj)
        elif np is not None and isinstance(obj, np.bool_):
            return bool(obj)
        elif np is not None and isinstance(
            obj,
            (np.float_, np.float16, np.float32, np.float64, np.complex_, np.complex64, np.complex128)
        ):
            return float(obj)
        elif k8s is not None and isinstance(obj, (k8s.V1Pod, k8s.V1ResourceRequirements)):
            from airflow.kubernetes.pod_generator import PodGenerator
            return PodGenerator.serialize_pod(obj)
        raise TypeError(f"Object of type '{obj.__class__.__name__}' is not JSON serializable")
fun_name: repeat_elements
repo: keras | path: keras/backend.py | file_name: backend.py | language: Python
url: https://github.com/keras-team/keras.git
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf
commit_message: Reformatting the codebase with black. PiperOrigin-RevId: 450093126
metrics: complexity 4 | ast_levels 12 | n_ast_nodes 339 | n_ast_errors 1 | n_whitespaces 266 | token_counts 195 | vocab_size 102 | n_words 156 | n_identifiers 37 | nloc 21 | d_id 80,172 | id 269,546
ast_errors: @keras_export("keras.backend.repeat") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
documentation: { "docstring": "Repeats the elements of a tensor along an axis, like `np.repeat`.\n\n    If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output\n    will have shape `(s1, s2 * rep, s3)`.\n\n    Args:\n        x: Tensor or variable.\n        rep: Python integer, number of times to repeat.\n        axis: Axis along which to repeat.\n\n    Returns:\n        A tensor.\n\n    Example:\n\n    >>> b = tf.constant([1, 2, 3])\n    >>> tf.keras.backend.repeat_elements(b, rep=2, axis=0)\n    <tf.Tensor: shape=(6,), dtype=int32,\n        numpy=array([1, 1, 2, 2, 3, 3], dtype=int32)>\n\n    ", "language": "en", "n_whitespaces": 153, "n_words": 75, "vocab_size": 65 }
code:

    def repeat_elements(x, rep, axis):
        x_shape = x.shape.as_list()
        # For static axis
        if x_shape[axis] is not None:
            # slices along the repeat axis
            splits = tf.split(value=x, num_or_size_splits=x_shape[axis], axis=axis)
            # repeat each slice the given number of reps
            x_rep = [s for s in splits for _ in range(rep)]
            return concatenate(x_rep, axis)

        # Here we use tf.tile to mimic behavior of np.repeat so that
        # we can handle dynamic shapes (that include None).
        # To do that, we need an auxiliary axis to repeat elements along
        # it and then merge them along the desired axis.

        # Repeating
        auxiliary_axis = axis + 1
        x_shape = tf.shape(x)
        x_rep = tf.expand_dims(x, axis=auxiliary_axis)
        reps = np.ones(len(x.shape) + 1)
        reps[auxiliary_axis] = rep
        x_rep = tf.tile(x_rep, reps)

        # Merging
        reps = np.delete(reps, auxiliary_axis)
        reps[axis] = rep
        reps = tf.constant(reps, dtype="int32")
        x_shape *= reps
        x_rep = tf.reshape(x_rep, x_shape)

        # Fix shape representation
        x_shape = x.shape.as_list()
        x_rep.set_shape(x_shape)
        x_rep._keras_shape = tuple(x_shape)
        return x_rep

    @keras_export("keras.backend.repeat")
    @tf.__internal__.dispatch.add_dispatch_support
    @doc_controls.do_not_generate_docs
fun_name: test_annotate_signature
repo: matplotlib | path: lib/matplotlib/tests/test_axes.py | file_name: test_axes.py | language: Python
url: https://github.com/matplotlib/matplotlib.git
commit_id: c4116bb841f6d2c0911c87c89817998342d67c5c
commit_message:
    Make signature of Axes.annotate() more explicit.
    The signature is identical to Annotation. This makes the parameters explicit
    instead of `*args`, which improves usability. On the downside, this introduces
    redundancy. But we can bear that. The signature will not change often and I've
    added a test that ensures the signatures stay synchronized.
metrics: complexity 2 | ast_levels 10 | n_ast_nodes 159 | n_ast_errors 1 | n_whitespaces 53 | token_counts 80 | vocab_size 24 | n_words 29 | n_identifiers 21 | nloc 7 | d_id 22,805 | id 107,549
ast_errors: @image_comparison(['fill_units.png'], savefig_kwarg={'dpi': 60})
documentation: { "docstring": "Check that the signature of Axes.annotate() matches Annotation.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
code:

    def test_annotate_signature():
        fig, ax = plt.subplots()
        annotate_params = inspect.signature(ax.annotate).parameters
        annotation_params = inspect.signature(mtext.Annotation).parameters
        assert list(annotate_params.keys()) == list(annotation_params.keys())
        for p1, p2 in zip(annotate_params.values(), annotation_params.values()):
            assert p1 == p2

    @image_comparison(['fill_units.png'], savefig_kwarg={'dpi': 60})
fun_name: _check_thread
repo: XX-Net | path: python3.10.4/Lib/asyncio/base_events.py | file_name: base_events.py | language: Python
url: https://github.com/XX-net/XX-Net.git
commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526
commit_message: add python 3.10.4 for windows
metrics: complexity 3 | ast_levels 11 | n_ast_nodes 63 | n_ast_errors 0 | n_whitespaces 109 | token_counts 34 | vocab_size 27 | n_words 29 | n_identifiers 7 | nloc 8 | d_id 55,970 | id 220,346
documentation: { "docstring": "Check that the current thread is the thread running the event loop.\n\n        Non-thread-safe methods of this class make this assumption and will\n        likely behave incorrectly when the assumption is violated.\n\n        Should only be called when (self._debug == True). The caller is\n        responsible for checking this condition for performance reasons.\n        ", "language": "en", "n_whitespaces": 85, "n_words": 49, "vocab_size": 38 }
code:

    def _check_thread(self):
        if self._thread_id is None:
            return
        thread_id = threading.get_ident()
        if thread_id != self._thread_id:
            raise RuntimeError(
                "Non-thread-safe operation invoked on an event loop other "
                "than the current one")
fun_name: add_gridspec
repo: matplotlib | path: lib/matplotlib/figure.py | file_name: figure.py | language: Python
url: https://github.com/matplotlib/matplotlib.git
commit_id: 9b6abd0b4933811e0a45c2535ab8fd107db65dd9
commit_message: DOC: improve grammar and consistency
metrics: complexity 1 | ast_levels 9 | n_ast_nodes 75 | n_ast_errors 0 | n_whitespaces 54 | token_counts 48 | vocab_size 23 | n_words 25 | n_identifiers 10 | nloc 4 | d_id 24,018 | id 110,276
documentation: { "docstring": "\n        Return a `.GridSpec` that has this figure as a parent.  This allows\n        complex layout of Axes in the figure.\n\n        Parameters\n        ----------\n        nrows : int, default: 1\n            Number of rows in grid.\n\n        ncols : int, default: 1\n            Number of columns in grid.\n\n        Returns\n        -------\n        `.GridSpec`\n\n        Other Parameters\n        ----------------\n        **kwargs\n            Keyword arguments are passed to `.GridSpec`.\n\n        See Also\n        --------\n        matplotlib.pyplot.subplots\n\n        Examples\n        --------\n        Adding a subplot that spans two rows::\n\n            fig = plt.figure()\n            gs = fig.add_gridspec(2, 2)\n            ax1 = fig.add_subplot(gs[0, 0])\n            ax2 = fig.add_subplot(gs[1, 0])\n            # spans two rows:\n            ax3 = fig.add_subplot(gs[:, 1])\n\n        ", "language": "en", "n_whitespaces": 324, "n_words": 90, "vocab_size": 67 }
code:

    def add_gridspec(self, nrows=1, ncols=1, **kwargs):
        _ = kwargs.pop('figure', None)  # pop in case user has added this...
        gs = GridSpec(nrows=nrows, ncols=ncols, figure=self, **kwargs)
        return gs
fun_name: _register_arrow_data_serializer
repo: ray | path: python/ray/data/_internal/arrow_serialization.py | file_name: arrow_serialization.py | language: Python
url: https://github.com/ray-project/ray.git
commit_id: c1d62d46495f0157faf3168aa87eed350802e10f
commit_message:
    [Datasets] Arrow 7.0.0+ Support: Use Arrow IPC format for pickling Arrow data
    to circumvent slice view buffer truncation bug. (#29055)
    This PR registers a custom serializer for Arrow arrays, chunked arrays, record
    batches, and tables that works around an Arrow serialization bug that
    serializes the entire underlying data buffer when serializing zero-copy slice
    views. The custom serializer uses the Arrow IPC format as the underlying
    pickled representation.
metrics: complexity 3 | ast_levels 9 | n_ast_nodes 124 | n_ast_errors 0 | n_whitespaces 149 | token_counts 73 | vocab_size 38 | n_words 61 | n_identifiers 19 | nloc 16 | d_id 28,745 | id 128,560
documentation: { "docstring": "Custom reducer for Arrow data that works around a zero-copy slicing pickling\n    bug by using the Arrow IPC format for the underlying serialization.\n\n    Background:\n        Arrow has both array-level slicing and buffer-level slicing; both are zero-copy,\n        but the former has a serialization bug where the entire buffer is serialized\n        instead of just the slice, while the latter's serialization works as expected\n        and only serializes the slice of the buffer. I.e., array-level slicing doesn't\n        propagate the slice down to the buffer when serializing the array.\n\n        All that these copy methods do is, at serialization time, take the array-level\n        slicing and translate them to buffer-level slicing, so only the buffer slice is\n        sent over the wire instead of the entire buffer.\n\n        See https://issues.apache.org/jira/browse/ARROW-10739.\n    ", "language": "en", "n_whitespaces": 188, "n_words": 120, "vocab_size": 75 }
code:

    def _register_arrow_data_serializer(serialization_context):
        import pyarrow as pa

        if os.environ.get(RAY_DISABLE_CUSTOM_ARROW_DATA_SERIALIZATION, "0") == "1":
            return

        # Register custom reducer for Arrow Arrays.
        array_types = _get_arrow_array_types()
        for array_type in array_types:
            serialization_context._register_cloudpickle_reducer(
                array_type, _arrow_array_reduce
            )
        # Register custom reducer for Arrow ChunkedArrays.
        serialization_context._register_cloudpickle_reducer(
            pa.ChunkedArray, _arrow_chunkedarray_reduce
        )
        # Register custom reducer for Arrow RecordBatches.
        serialization_context._register_cloudpickle_reducer(
            pa.RecordBatch, _arrow_recordbatch_reduce
        )
        # Register custom reducer for Arrow Tables.
        serialization_context._register_cloudpickle_reducer(pa.Table, _arrow_table_reduce)
fun_name: get_swisscom_data
repo: core | path: homeassistant/components/swisscom/device_tracker.py | file_name: device_tracker.py | language: Python
url: https://github.com/home-assistant/core.git
commit_id: 2bab6447a924d63d253b210f9a6ab3ea3ca67e7d
commit_message: Replaces aiohttp.hdrs CONTENT_TYPE with plain string for the Swisscom integration (#76568)
metrics: complexity 4 | ast_levels 14 | n_ast_nodes 279 | n_ast_errors 0 | n_whitespaces 354 | token_counts 158 | vocab_size 53 | n_words 71 | n_identifiers 22 | nloc 28 | d_id 102,529 | id 303,714
documentation: { "docstring": "Retrieve data from Swisscom and return parsed result.\n        {\"service\":\"Devices\", \"method\":\"get\",\n        \"parameters\":{\"expression\":\"lan and not self\"}}", "language": "en", "n_whitespaces": 27, "n_words": 14, "vocab_size": 13 }
code:

    def get_swisscom_data(self):
        url = f"http://{self.host}/ws"
        headers = {"Content-Type": "application/x-sah-ws-4-call+json"}
        data = devices = {}
        try:
            request = requests.post(url, headers=headers, data=data, timeout=10)
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.Timeout,
            requests.exceptions.ConnectTimeout,
        ):
            _LOGGER.info("No response from Swisscom Internet Box")
            return devices
        if "status" not in request.json():
            _LOGGER.info("No status in response from Swisscom Internet Box")
            return devices
        for device in request.json()["status"]:
            with suppress(KeyError, requests.exceptions.RequestException):
                devices[device["Key"]] = {
                    "ip": device["IPAddress"],
                    "mac": device["PhysAddress"],
                    "host": device["Name"],
                    "status": device["Active"],
                }
        return devices
fun_name: run_interpret
repo: gradio | path: gradio/interpretation.py | file_name: interpretation.py | language: Python
url: https://github.com/gradio-app/gradio.git
commit_id: 85b0d2df3458236b5e8c343febc507f94c1e60ad
commit_message: brought back sessions for TF 1.x
metrics: complexity 22 | ast_levels 22 | n_ast_nodes 701 | n_ast_errors 0 | n_whitespaces 1,546 | token_counts 674 | vocab_size 117 | n_words 221 | n_identifiers 46 | nloc 93 | d_id 42,917 | id 179,183
documentation: { "docstring": "\n    Runs the interpretation command for the machine learning model. Handles both the \"default\" out-of-the-box\n    interpretation for a certain set of UI component types, as well as the custom interpretation case.\n    Parameters:\n    raw_input: a list of raw inputs to apply the interpretation(s) on.\n    ", "language": "en", "n_whitespaces": 58, "n_words": 42, "vocab_size": 32 }
code (truncated in this dump):

    def run_interpret(interface, raw_input):
        if isinstance(interface.interpretation, list):  # Either "default" or "shap"
            processed_input = [input_component.preprocess(raw_input[i])
                               for i, input_component in enumerate(interface.input_components)]
            original_output = interface.run_prediction(processed_input)
            scores, alternative_outputs = [], []
            for i, (x, interp) in enumerate(zip(raw_input, interface.interpretation)):
                if interp == "default":
                    input_component = interface.input_components[i]
                    neighbor_raw_input = list(raw_input)
                    if input_component.interpret_by_tokens:
                        tokens, neighbor_values, masks = input_component.tokenize(
                            x)
                        interface_scores = []
                        alternative_output = []
                        for neighbor_input in neighbor_values:
                            neighbor_raw_input[i] = neighbor_input
                            processed_neighbor_input = [input_component.preprocess(neighbor_raw_input[i])
                                                        for i, input_component in enumerate(interface.input_components)]
                            neighbor_output = interface.run_prediction(
                                processed_neighbor_input)
                            processed_neighbor_output = [output_component.postprocess(
                                neighbor_output[i])
                                for i, output_component in enumerate(interface.output_components)]
                            alternative_output.append(
                                processed_neighbor_output)
                            interface_scores.append(quantify_difference_in_label(
                                interface, original_output, neighbor_output))
                        alternative_outputs.append(alternative_output)
                        scores.append(
                            input_component.get_interpretation_scores(
                                raw_input[i], neighbor_values,
                                interface_scores, masks=masks, tokens=tokens))
                    else:
                        neighbor_values, interpret_kwargs = input_component.get_interpretation_neighbors(
                            x)
                        interface_scores = []
                        alternative_output = []
                        for neighbor_input in neighbor_values:
                            neighbor_raw_input[i] = neighbor_input
                            processed_neighbor_input = [input_component.preprocess(neighbor_raw_input[i])
                                                        for i, input_component in enumerate(interface.input_components)]
                            neighbor_output = interface.run_prediction(
                                processed_neighbor_input)
                            processed_neighbor_output = [output_component.postprocess(
                                neighbor_output[i])
                                for i, output_component in enumerate(interface.output_components)]
                            alternative_output.append(
                                processed_neighbor_output)
                            interface_scores.append(quantify_difference_in_label(
                                interface, original_output, neighbor_output))
                        alternative_outputs.append(alternative_output)
                        interface_scores = [-score for score in interface_scores]
                        scores.append(
                            input_component.get_interpretation_scores(
                                raw_input[i], neighbor_values,
                                interface_scores, **interpret_kwargs))
                elif interp == "shap" or interp == "shapley":
                    try:
                        import shap  # type: ignore
                    except (ImportError, ModuleNotFoundError):
                        raise ValueError(
                            "The package `shap` is required for this interpretation method. Try: `pip install shap`")
                    input_component = interface.input_components[i]
                    if not (input_component.interpret_by_tokens):
                        raise ValueError(
                            "Input component {} does not support `shap` interpretation".format(input_component))
                    tokens, _, masks = input_component.tokenize(x)
                    # construct a masked version of the input
fun_name: parse_args
repo: certbot | path: tools/finish_release.py | file_name: finish_release.py | language: Python
url: https://github.com/certbot/certbot.git
commit_id: 6e1696ba32ef8c1162bb0cd85df5a22113952828
commit_message:
    Add Signed Windows Installer Workflow (#9076)
    * Add Code Signing action for Windows Installer
    * Clean up variable names and input
    * Amend and add to documentation per PR guidelines
    * Update tools/finish_release.py
      Co-authored-by: Brad Warren <[email protected]>
    * Update tools/finish_release.py
      Amend typo
      Co-authored-by: Brad Warren <[email protected]>
    * Amend release script for better work flow - SCP commands to upload and
      download unsigned & signed installers from CSS
    * Collapse spaces
    * Update tools/finish_release.py
      Co-authored-by: Brad Warren <[email protected]>
    * Create new windows signer function
    * Update Windows Installer Script - Update change log - add new function for
      signing and document - @TODO Streamline SSH session
    * Remove Azure and Github release methods - Methods moved to CSS - Reduced to
      a ssh function that triggers the process on a CSS
    * Amend Changelog and Remove Unneeded Deps
    * Update tools/finish_release.py
      Co-authored-by: Brad Warren <[email protected]>
    * Add Version Fetch Function - For the purpose of snap releases - Add back
      package to dev extras for function
    * Change path in ssh command
    * Amend release script
    * Amend the ssh command for CSS
    * Update tools/finish_release.py
      Co-authored-by: Brad Warren <[email protected]>
    * Update script with proper path and subprocess call
    * Update ssh command
    * Correct typo in path
    * Fix typo in path
    * Update certbot/CHANGELOG.md
      Co-authored-by: ohemorange <[email protected]>
    * Remove missed conflict text
    Co-authored-by: Brad Warren <[email protected]>
    Co-authored-by: ohemorange <[email protected]>
metrics: complexity 1 | ast_levels 10 | n_ast_nodes 160 | n_ast_errors 0 | n_whitespaces 224 | token_counts 90 | vocab_size 71 | n_words 102 | n_identifiers 18 | nloc 10 | d_id 45,621 | id 186,787
documentation: { "docstring": "Parse command line arguments.\n\n    :param args: command line arguments with the program name removed. This is\n        usually taken from sys.argv[1:].\n    :type args: `list` of `str`\n\n    :returns: parsed arguments\n    :rtype: argparse.Namespace\n\n    ", "language": "en", "n_whitespaces": 52, "n_words": 30, "vocab_size": 26 }
code:

    def parse_args(args):
        # Use the file's docstring for the help text and don't let argparse reformat it.
        parser = argparse.ArgumentParser(description=__doc__,
                                         formatter_class=argparse.RawDescriptionHelpFormatter)
        parser.add_argument('--css', type=str, required=True,
                            help='hostname of code signing server')
        group = parser.add_mutually_exclusive_group()
        # We use 'store_false' and a destination related to the other type of
        # artifact to cause the flag being set to disable publishing of the other
        # artifact. This makes using the parsed arguments later on a little simpler
        # and cleaner.
        group.add_argument('--snaps-only', action='store_false', dest='publish_windows',
                           help='Skip publishing other artifacts and only publish the snaps')
        group.add_argument('--windows-only', action='store_false', dest='publish_snaps',
                           help='Skip publishing other artifacts and only publish the Windows installer')
        return parser.parse_args(args)
fun_name: get_context_data
repo: wagtail | path: wagtail/admin/action_menu.py | file_name: action_menu.py | language: Python
url: https://github.com/wagtail/wagtail.git
commit_id: e9183a95c88fe2eaf4c1d3aff9833633509713f3
commit_message: Update docs links to reference new domain
metrics: complexity 2 | ast_levels 13 | n_ast_nodes 182 | n_ast_errors 0 | n_whitespaces 252 | token_counts 106 | vocab_size 44 | n_words 48 | n_identifiers 18 | nloc 20 | d_id 15,517 | id 70,549
documentation: { "docstring": "Defines context for the template, overridable to use more data", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
code:

    def get_context_data(self, parent_context):
        context = parent_context.copy()
        if requires_request_arg(self.get_url):
            warn(
                "%s.get_url should no longer take a 'request' argument. "
                "See https://docs.wagtail.org/en/stable/releases/2.15.html#template-components-2-15"
                % type(self).__name__,
                category=RemovedInWagtail217Warning
            )
            url = self.get_url(parent_context['request'], parent_context)
        else:
            url = self.get_url(parent_context)
        context.update({
            'label': self.label,
            'url': url,
            'name': self.name,
            'classname': self.classname,
            'icon_name': self.icon_name,
            'request': parent_context['request'],
        })
        return context
fun_name: _parse_tag
repo: scapy | path: scapy/__init__.py | file_name: __init__.py | language: Python
url: https://github.com/secdev/scapy.git
commit_id: 402677d50cc007cb5b39dc0fdb8934dae36cf6ac
commit_message: Update git format
metrics: complexity 2 | ast_levels 11 | n_ast_nodes 91 | n_ast_errors 0 | n_whitespaces 82 | token_counts 50 | vocab_size 31 | n_words 39 | n_identifiers 6 | nloc 6 | d_id 52,591 | id 209,070
documentation: { "docstring": "\n    Parse a tag from ``git describe`` into a version.\n\n    Example::\n\n        v2.3.2-346-g164a52c075c8 -> '2.3.2.dev346'\n    ", "language": "en", "n_whitespaces": 30, "n_words": 13, "vocab_size": 12 }
code:

    def _parse_tag(tag):
        # type: (str) -> str
        match = re.match('^v?(.+?)-(\\d+)-g[a-f0-9]+$', tag)
        if match:
            # remove the 'v' prefix and add a '.devN' suffix
            return '%s.dev%s' % (match.group(1), match.group(2))
        else:
            # just remove the 'v' prefix
            return re.sub('^v', '', tag)
fun_name: handle
repo: awx | path: awx/main/management/commands/bottleneck.py | file_name: bottleneck.py | language: Python
url: https://github.com/ansible/awx.git
commit_id: d3eb2c197595c29c4a3f7b38cd609ce953009623
commit_message: Add new flake8 rules to do some meaningful corrections
metrics: complexity 14 | ast_levels 13 | n_ast_nodes 150 | n_ast_errors 0 | n_whitespaces 124 | token_counts 398 | vocab_size 30 | n_words 35 | n_identifiers 19 | nloc 67 | d_id 17,294 | id 82,002
documentation: { "docstring": "\n            SELECT\n                b.id, b.job_id, b.host_name, b.created - a.created delta,\n                b.task task,\n                b.event_data::json->'task_action' task_action,\n                b.event_data::json->'task_path' task_path\n            FROM main_jobevent a JOIN main_jobevent b\n            ON b.parent_uuid = a.parent_uuid AND a.host_name = b.host_name\n            WHERE\n                a.event = 'runner_on_start' AND\n                b.event != 'runner_on_start' AND\n                b.event != 'runner_on_skipped' AND\n                b.failed = false AND\n                a.job_id IN (\n                    SELECT unifiedjob_ptr_id FROM main_job\n                    WHERE job_template_id={jt}\n                    ORDER BY unifiedjob_ptr_id DESC\n                    LIMIT {history}\n                )\n            ORDER BY delta DESC;\n            ", "language": "en", "n_whitespaces": 439, "n_words": 65, "vocab_size": 48 }
code (the f-string body, the SQL above, is elided in this dump):

    def handle(self, *args, **options):
        jt = options['jt']
        threshold = options['threshold']
        history = options['history']
        ignore = options['ignore']
        print('## ' + JobTemplate.objects.get(pk=jt).name + f' (last {history} runs)\n')
        with connection.cursor() as cursor:
            cursor.execute(
                f
            )
            slowest_events = cursor.fetchall()
fun_name: test_multiple_output_keys_error
repo: langchain | path: tests/unit_tests/chains/test_base.py | file_name: test_base.py | language: Python
url: https://github.com/hwchase17/langchain.git
commit_id: 8d0869c6d3ed63b2b15d4f75ea664e089dcc569d
commit_message:
    change run to use args and kwargs (#367)
    Before, `run` was not able to be called with multiple arguments. This expands
    the functionality.
metrics: complexity 1 | ast_levels 11 | n_ast_nodes 64 | n_ast_errors 0 | n_whitespaces 27 | token_counts 33 | vocab_size 11 | n_words 11 | n_identifiers 8 | nloc 5 | d_id 46,799 | id 191,685
documentation: { "docstring": "Test run with multiple output keys errors as expected.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
code:

    def test_multiple_output_keys_error() -> None:
        chain = FakeChain(the_output_keys=["foo", "bar"])
        with pytest.raises(ValueError):
            chain.run("bar")
fun_name: get_best_result
repo: ray | path: python/ray/tune/result_grid.py | file_name: result_grid.py | language: Python
url: https://github.com/ray-project/ray.git
commit_id: 4b28bc3f09ebcc9c0d6262355b394452a11667fc
commit_message: [Tuner part1] Add Tuner interface. (#22975)
metrics: complexity 1 | ast_levels 9 | n_ast_nodes 33 | n_ast_errors 0 | n_whitespaces 20 | token_counts 19 | vocab_size 6 | n_words 6 | n_identifiers 6 | nloc 9 | d_id 33,655 | id 146,296
documentation: { "docstring": "Get the best result from all the trials run.\n\n        Note, \"best\" here is determined by \"metric\" and \"mode\" specified in your Tuner's\n        TuneConfig.\n\n        Trials are compared using their \"last\" results. In a similar notion, the last\n        checkpoint of the best trial is returned as part of the result.", "language": "en", "n_whitespaces": 75, "n_words": 48, "vocab_size": 41 }
code:

    def get_best_result(self) -> Result:
        return self._trial_to_result(self._experiment_analysis.best_trial)
fun_name: get_lookup
repo: sentry | path: src/django_picklefield/fields.py | file_name: fields.py | language: Python
url: https://github.com/getsentry/sentry.git
commit_id: 0ad4aa9d9fe3beb3fd20aaedd961b3c2c800efb1
commit_message:
    ref(django-3.2): Vendor django picklefield (#35727)
    * ref(django-3.2): Vendor django picklefield
      Django-picklefield hasn't been updated in 2 years, but we need it. We also
      need to upgrade to django 3.2 which means we need to update picklefield.
    * Remove reference to django-picklefield
    * Fix the module name to use underscore
    * style(lint): Auto commit lint changes
    * Remove versioning code
    Co-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com>
metrics: complexity 2 | ast_levels 11 | n_ast_nodes 65 | n_ast_errors 0 | n_whitespaces 53 | token_counts 36 | vocab_size 20 | n_words 21 | n_identifiers 5 | nloc 4 | d_id 18,772 | id 91,646
documentation: { "docstring": "\n        We need to limit the lookup types.\n        ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
code:

    def get_lookup(self, lookup_name):
        if lookup_name not in ["exact", "in", "isnull"]:
            raise TypeError("Lookup type %s is not supported." % lookup_name)
        return super().get_lookup(lookup_name)
fun_name: test_legend_label_with_leading_underscore
repo: matplotlib | path: lib/matplotlib/tests/test_legend.py | file_name: test_legend.py | language: Python
url: https://github.com/matplotlib/matplotlib.git
commit_id: 8573a52853df59579236a6096abbecd409c26982
commit_message:
    Clarify warning about labels with leading underscores.
    Replaces and closes #17488.
    Co-authored-by: Clement Walter <[email protected]>
metrics: complexity 1 | ast_levels 12 | n_ast_nodes 128 | n_ast_errors 1 | n_whitespaces 70 | token_counts 65 | vocab_size 25 | n_words 28 | n_identifiers 18 | nloc 7 | d_id 22,829 | id 107,585
ast_errors: @image_comparison(['legend_labels_first.png'], remove_text=True)
documentation: { "docstring": "\n    Test that artists with labels starting with an underscore are not added to\n    the legend, and that a warning is issued if one tries to add them\n    explicitly.\n    ", "language": "en", "n_whitespaces": 41, "n_words": 28, "vocab_size": 25 }
code:

    def test_legend_label_with_leading_underscore():
        fig, ax = plt.subplots()
        line, = ax.plot([0, 1], label='_foo')
        with pytest.warns(UserWarning,
                          match=r"starts with '_'.*excluded from the legend."):
            legend = ax.legend(handles=[line])
        assert len(legend.legendHandles) == 0

    @image_comparison(['legend_labels_first.png'], remove_text=True)
fun_name: _async_process_on_unload
repo: core | path: homeassistant/config_entries.py | file_name: config_entries.py | language: Python
url: https://github.com/home-assistant/core.git
commit_id: 00810235c92b492a966c6021021d49360ffb3cdd
commit_message:
    Track tasks adding entities (#73828)
    * Track tasks adding entities
    * Update homeassistant/config_entries.py
    * fix cast tests
    Co-authored-by: J. Nick Koston <[email protected]>
metrics: complexity 7 | ast_levels 13 | n_ast_nodes 120 | n_ast_errors 0 | n_whitespaces 125 | token_counts 71 | vocab_size 25 | n_words 30 | n_identifiers 11 | nloc 10 | d_id 113,486 | id 314,885
documentation: { "docstring": "Process the on_unload callbacks and wait for pending tasks.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
code:

    async def _async_process_on_unload(self) -> None:
        if self._on_unload is not None:
            while self._on_unload:
                self._on_unload.pop()()
        while self._pending_tasks:
            pending = [task for task in self._pending_tasks if not task.done()]
            self._pending_tasks.clear()
            if pending:
                await asyncio.gather(*pending)
fun_name: cursor_text_start
repo: textual | path: src/textual/_text_backend.py | file_name: _text_backend.py | language: Python
url: https://github.com/Textualize/textual.git
commit_id: dd18ecbdbe744812509630935a877424202f2a70
commit_message: Docstring improvements
metrics: complexity 2 | ast_levels 7 | n_ast_nodes 41 | n_ast_errors 0 | n_whitespaces 54 | token_counts 24 | vocab_size 13 | n_words 15 | n_identifiers 4 | nloc 10 | d_id 44,183 | id 183,431
documentation: { "docstring": "Move the cursor to the start of the text\n\n        Returns:\n            bool: True if the cursor moved. False otherwise.\n        ", "language": "en", "n_whitespaces": 43, "n_words": 18, "vocab_size": 14 }
code:

    def cursor_text_start(self) -> bool:
        if self.cursor_index == 0:
            return False
        self.cursor_index = 0
        return True
fun_name: async_create_post_interval_update_cb
repo: core | path: homeassistant/components/sia/sia_entity_base.py | file_name: sia_entity_base.py | language: Python
url: https://github.com/home-assistant/core.git
commit_id: af4e37339a39badd5596e8bc9ba86d6c1994aa1b
commit_message:
    Add Connectivity sensor to SIA (#64305)
    * implemented connectivity sensor
    * further cleanup of update code
    * cleanup and tighter behaviour for attributes
    * added separate connectivity class to binary sensor
    * callbacks and keys
    * redid name and unique_id logic, non-breaking result
    * using entry more in inits
    * Fix import
    * fix ping_interval in sia_entity_base
    * added ping_interval default to next
    * fixed next
    Co-authored-by: Martin Hjelmare <[email protected]>
metrics: complexity 1 | ast_levels 11 | n_ast_nodes 48 | n_ast_errors 0 | n_whitespaces 65 | token_counts 30 | vocab_size 11 | n_words 11 | n_identifiers 8 | nloc 7 | d_id 91,047 | id 291,944
documentation: { "docstring": "Create a post interval update cb and store the callback.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
code:

    def async_create_post_interval_update_cb(self) -> None:
        self._post_interval_update_cb_canceller = async_call_later(
            self.hass,
            get_unavailability_interval(self.ping_interval),
            self.async_post_interval_update,
        )
fun_name: test_doorbell_update_via_pubnub
repo: core | path: tests/components/august/test_binary_sensor.py | file_name: test_binary_sensor.py | language: Python
url: https://github.com/home-assistant/core.git
commit_id: ea5b18c1ef16b64cd7916f2540692ab5de2d2edf
commit_message: Split august motion and image capture binary sensors (#62154)
metrics: complexity 1 | ast_levels 17 | n_ast_nodes 819 | n_ast_errors 0 | n_whitespaces 1,162 | token_counts 475 | vocab_size 87 | n_words 211 | n_identifiers 33 | nloc 109 | d_id 107,857 | id 309,149
documentation: { "docstring": "Test creation of a doorbell that can be updated via pubnub.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
code:

    async def test_doorbell_update_via_pubnub(hass):
        doorbell_one = await _mock_doorbell_from_fixture(hass, "get_doorbell.json")
        pubnub = AugustPubNub()

        await _create_august_with_devices(hass, [doorbell_one], pubnub=pubnub)
        assert doorbell_one.pubsub_channel == "7c7a6672-59c8-3333-ffff-dcd98705cccc"

        binary_sensor_k98gidt45gul_name_motion = hass.states.get(
            "binary_sensor.k98gidt45gul_name_motion"
        )
        assert binary_sensor_k98gidt45gul_name_motion.state == STATE_OFF
        binary_sensor_k98gidt45gul_name_ding = hass.states.get(
            "binary_sensor.k98gidt45gul_name_ding"
        )
        assert binary_sensor_k98gidt45gul_name_ding.state == STATE_OFF

        pubnub.message(
            pubnub,
            Mock(
                channel=doorbell_one.pubsub_channel,
                timetoken=_timetoken(),
                message={
                    "status": "imagecapture",
                    "data": {
                        "result": {
                            "created_at": "2021-03-16T01:07:08.817Z",
                            "secure_url": "https://dyu7azbnaoi74.cloudfront.net/zip/images/zip.jpeg",
                        },
                    },
                },
            ),
        )
        await hass.async_block_till_done()

        binary_sensor_k98gidt45gul_name_image_capture = hass.states.get(
            "binary_sensor.k98gidt45gul_name_image_capture"
        )
        assert binary_sensor_k98gidt45gul_name_image_capture.state == STATE_ON

        pubnub.message(
            pubnub,
            Mock(
                channel=doorbell_one.pubsub_channel,
                timetoken=_timetoken(),
                message={
                    "status": "doorbell_motion_detected",
                    "data": {
                        "event": "doorbell_motion_detected",
                        "image": {
                            "height": 640,
                            "width": 480,
                            "format": "jpg",
                            "created_at": "2021-03-16T02:36:26.886Z",
                            "bytes": 14061,
                            "secure_url": "https://dyu7azbnaoi74.cloudfront.net/images/1f8.jpeg",
                            "url": "https://dyu7azbnaoi74.cloudfront.net/images/1f8.jpeg",
                            "etag": "09e839331c4ea59eef28081f2caa0e90",
                        },
                        "doorbellName": "Front Door",
                        "callID": None,
                        "origin": "mars-api",
                        "mutableContent": True,
                    },
                },
            ),
        )
        await hass.async_block_till_done()

        binary_sensor_k98gidt45gul_name_motion = hass.states.get(
            "binary_sensor.k98gidt45gul_name_motion"
        )
        assert binary_sensor_k98gidt45gul_name_motion.state == STATE_ON
        binary_sensor_k98gidt45gul_name_ding = hass.states.get(
            "binary_sensor.k98gidt45gul_name_ding"
        )
        assert binary_sensor_k98gidt45gul_name_ding.state == STATE_OFF

        new_time = dt_util.utcnow() + datetime.timedelta(seconds=40)
        native_time = datetime.datetime.now() + datetime.timedelta(seconds=40)
        with patch(
            "homeassistant.components.august.binary_sensor._native_datetime",
            return_value=native_time,
        ):
            async_fire_time_changed(hass, new_time)
            await hass.async_block_till_done()

        binary_sensor_k98gidt45gul_name_image_capture = hass.states.get(
            "binary_sensor.k98gidt45gul_name_image_capture"
        )
        assert binary_sensor_k98gidt45gul_name_image_capture.state == STATE_OFF

        pubnub.message(
            pubnub,
            Mock(
                channel=doorbell_one.pubsub_channel,
                timetoken=_timetoken(),
                message={
                    "status": "buttonpush",
                },
            ),
        )
        await hass.async_block_till_done()

        binary_sensor_k98gidt45gul_name_ding = hass.states.get(
            "binary_sensor.k98gidt45gul_name_ding"
        )
        assert binary_sensor_k98gidt45gul_name_ding.state == STATE_ON

        new_time = dt_util.utcnow() + datetime.timedelta(seconds=40)
        native_time = datetime.datetime.now() + datetime.timedelta(seconds=40)
        with patch(
            "homeassistant.components.august.binary_sensor._native_datetime",
            return_value=native_time,
        ):
            async_fire_time_changed(hass, new_time)
            await hass.async_block_till_done()

        binary_sensor_k98gidt45gul_name_ding = hass.states.get(
            "binary_sensor.k98gidt45gul_name_ding"
        )
        assert binary_sensor_k98gidt45gul_name_ding.state == STATE_OFF
fun_name: test_prefill_form_backcompat
repo: airflow | path: tests/www/views/test_views_connection.py | file_name: test_views_connection.py | language: Python
url: https://github.com/apache/airflow.git
commit_id: 1dfae80412377eef0a38637535d6a1d3393cc4fe
commit_message:
    Enable use of custom conn extra fields without prefix (#22607)
    Previously, connection "extra" fields which were added as custom fields in the
    webserver connection form had to be named with prefix `extra__<conn_type>__`.
    This was because custom fields are registered globally on the connection view
    model, so the prefix was necessary to prevent collisions. But the prefix is
    ugly and cumbersome in the `extra` field. So now what we do is add this prefix
    when defining the field internally in the model, and strip it when saving the
    connection. This doesn't change any providers -- each of those will have to be
    updated in order to use no-prefix custom fields, with special care to handle
    backcompat.
metrics: complexity 1 | ast_levels 10 | n_ast_nodes 197 | n_ast_errors 1 | n_whitespaces 65 | token_counts 77 | vocab_size 37 | n_words 41 | n_identifiers 23 | nloc 8 | d_id 9,302 | id 47,930
ast_errors: @pytest.mark.parametrize('field_name', ['extra__test__custom_field', 'custom_field']) @mock.patch('airflow.utils.module_loading.import_string') @mock.patch('airflow.providers_manager.ProvidersManager.hooks', new_callable=PropertyMock)
documentation: { "docstring": "\n    When populating custom fields in the connection form we should first check for the non-prefixed\n    value (since prefixes in extra are deprecated) and then fallback to the prefixed value.\n\n    Either way, the field is known internally to the model view as the prefixed value.\n    ", "language": "en", "n_whitespaces": 57, "n_words": 44, "vocab_size": 35 }
code:

    def test_prefill_form_backcompat(extras, expected):
        mock_form = mock.Mock()
        mock_form.data = {"conn_id": "test", "extra": json.dumps(extras), "conn_type": "test"}
        cmv = ConnectionModelView()
        cmv.extra_fields = ['extra__test__my_param']
        # this is set by `lazy_add_provider_discovered_options_to_connection_form`
        cmv.extra_field_name_mapping['extra__test__my_param'] = 'my_param'
        cmv.prefill_form(form=mock_form, pk=1)
        assert mock_form.extra__test__my_param.data == expected

    @pytest.mark.parametrize('field_name', ['extra__test__custom_field', 'custom_field'])
    @mock.patch('airflow.utils.module_loading.import_string')
    @mock.patch('airflow.providers_manager.ProvidersManager.hooks', new_callable=PropertyMock)
fun_name: _build_test_case
repo: ansible | path: lib/ansible/plugins/callback/junit.py | file_name: junit.py | language: Python
url: https://github.com/ansible/ansible.git
commit_id: fbb5d56bd274c44b193cb95f0230b9352f62aab2
commit_message:
    ansible-test - Use relative paths in junit output. (#76871)
    * ansible-test - Use relative paths in junit output. Also fix a traceback in
      the junit callback during automatic fact gathering.
    * ansible-test - Handle out-of-tree JUnit paths.
metrics: complexity 13 | ast_levels 17 | n_ast_nodes 589 | n_ast_errors 0 | n_whitespaces 509 | token_counts 360 | vocab_size 81 | n_words 138 | n_identifiers 46 | nloc 37 | d_id 78,457 | id 266,525
documentation: { "docstring": " build a TestCase from the given TaskData and HostData ", "language": "en", "n_whitespaces": 10, "n_words": 9, "vocab_size": 9 }
code:

    def _build_test_case(self, task_data, host_data):
        name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
        duration = host_data.finish - task_data.start

        if self._task_relative_path and task_data.path:
            junit_classname = os.path.relpath(task_data.path, self._task_relative_path)
        else:
            junit_classname = task_data.path

        if self._replace_out_of_tree_path is not None and junit_classname.startswith('../'):
            junit_classname = self._replace_out_of_tree_path + os.path.basename(junit_classname)

        if self._task_class == 'true':
            junit_classname = re.sub(r'\.yml:[0-9]+$', '', junit_classname)

        if host_data.status == 'included':
            return TestCase(name=name, classname=junit_classname, time=duration,
                            system_out=str(host_data.result))

        res = host_data.result._result
        rc = res.get('rc', 0)
        dump = self._dump_results(res, indent=0)
        dump = self._cleanse_string(dump)

        if host_data.status == 'ok':
            return TestCase(name=name, classname=junit_classname, time=duration,
                            system_out=dump)

        test_case = TestCase(name=name, classname=junit_classname, time=duration)

        if host_data.status == 'failed':
            if 'exception' in res:
                message = res['exception'].strip().split('\n')[-1]
                output = res['exception']
                test_case.errors.append(TestError(message=message, output=output))
            elif 'msg' in res:
                message = res['msg']
                test_case.failures.append(TestFailure(message=message, output=dump))
            else:
                test_case.failures.append(TestFailure(message='rc=%s' % rc, output=dump))
        elif host_data.status == 'skipped':
            if 'skip_reason' in res:
                message = res['skip_reason']
            else:
                message = 'skipped'
            test_case.skipped = message

        return test_case
fun_name: _check_lazy_references
repo: django | path: django/core/checks/model_checks.py | file_name: model_checks.py | language: Python
url: https://github.com/django/django.git
commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00
commit_message: Refs #33476 -- Reformatted code with Black.
metrics: complexity 7 | ast_levels 12 | n_ast_nodes 112 | n_ast_errors 0 | n_whitespaces 90 | token_counts 155 | vocab_size 38 | n_words 41 | n_identifiers 17 | nloc 32 | d_id 50,730 | id 204,433
documentation: { "docstring": "\n    Ensure all lazy (i.e. string) model references have been resolved.\n\n    Lazy references are used in various places throughout Django, primarily in\n    related fields and model signals. Identify those common cases and provide\n    more helpful error messages for them.\n\n    The ignore parameter is used by StateApps to exclude swappable models from\n    this check.\n    ", "language": "en", "n_whitespaces": 74, "n_words": 52, "vocab_size": 47 }
code (truncated in this dump):

    def _check_lazy_references(apps, ignore=None):
        pending_models = set(apps._pending_operations) - (ignore or set())

        # Short circuit if there aren't any errors.
        if not pending_models:
            return []

        from django.db.models import signals

        model_signals = {
            signal: name
            for name, signal in vars(signals).items()
            if isinstance(signal, signals.ModelSignal)
        }
fun_name: activated
repo: pipenv | path: pipenv/environment.py | file_name: environment.py | language: Python
url: https://github.com/pypa/pipenv.git
commit_id: 9a3b3ce70621af6f9adaa9eeac9cf83fa149319c
commit_message:
    Issue 4993 Add standard pre commit hooks and apply linting. (#4994)
    * Add .pre-commit-config.yaml to the project and exclude tests (for now).
      This does not include the MyPy linting that pip does but does include
      everything else.
metrics: complexity 9 | ast_levels 16 | n_ast_nodes 743 | n_ast_errors 0 | n_whitespaces 783 | token_counts 440 | vocab_size 84 | n_words 130 | n_identifiers 51 | nloc 52 | d_id 3,062 | id 19,684
documentation: { "docstring": "Helper context manager to activate the environment.\n\n        This context manager will set the following variables for the duration\n        of its activation:\n\n            * sys.prefix\n            * sys.path\n            * os.environ[\"VIRTUAL_ENV\"]\n            * os.environ[\"PATH\"]\n\n        In addition, it will make any distributions passed into `extra_dists` available\n        on `sys.path` while inside the context manager, as well as making `passa` itself\n        available.\n\n        The environment's `prefix` as well as `scripts_dir` properties are both prepended\n        to `os.environ[\"PATH\"]` to ensure that calls to `~Environment.run()` use the\n        environment's path preferentially.\n        ", "language": "en", "n_whitespaces": 185, "n_words": 78, "vocab_size": 59 }
code:

    def activated(self, include_extras=True, extra_dists=None):
        if not extra_dists:
            extra_dists = []
        original_path = sys.path
        original_prefix = sys.prefix
        parent_path = Path(__file__).absolute().parent
        vendor_dir = parent_path.joinpath("vendor").as_posix()
        patched_dir = parent_path.joinpath("patched").as_posix()
        parent_path = parent_path.as_posix()
        self.add_dist("pip")
        prefix = self.prefix.as_posix()
        with vistir.contextmanagers.temp_environ(), vistir.contextmanagers.temp_path():
            os.environ["PATH"] = os.pathsep.join(
                [
                    vistir.compat.fs_str(self.script_basedir),
                    vistir.compat.fs_str(self.prefix.as_posix()),
                    os.environ.get("PATH", ""),
                ]
            )
            os.environ["PYTHONIOENCODING"] = vistir.compat.fs_str("utf-8")
            os.environ["PYTHONDONTWRITEBYTECODE"] = vistir.compat.fs_str("1")
            if self.is_venv:
                os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"]
                os.environ["VIRTUAL_ENV"] = vistir.compat.fs_str(prefix)
            else:
                if not self.project.s.PIPENV_USE_SYSTEM and not os.environ.get(
                    "VIRTUAL_ENV"
                ):
                    os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"]
                    os.environ.pop("PYTHONHOME", None)
            sys.path = self.sys_path
            sys.prefix = self.sys_prefix
            site.addsitedir(self.base_paths["purelib"])
            pip = self.safe_import("pip")  # noqa
            pip_vendor = self.safe_import("pip._vendor")
            pep517_dir = os.path.join(os.path.dirname(pip_vendor.__file__), "pep517")
            site.addsitedir(pep517_dir)
            os.environ["PYTHONPATH"] = os.pathsep.join(
                [os.environ.get("PYTHONPATH", self.base_paths["PYTHONPATH"]), pep517_dir]
            )
            if include_extras:
                site.addsitedir(parent_path)
                sys.path.extend([parent_path, patched_dir, vendor_dir])
                extra_dists = list(self.extra_dists) + extra_dists
                for extra_dist in extra_dists:
                    if extra_dist not in self.get_working_set():
                        extra_dist.activate(self.sys_path)
            try:
                yield
            finally:
                sys.path = original_path
                sys.prefix = original_prefix
fun_name: delete_tasks_annotations
repo: label-studio | path: label_studio/data_manager/actions/basic.py | file_name: basic.py | language: Python
url: https://github.com/heartexlabs/label-studio.git
commit_id: 85152f2c8c7f8b301b28fcd771f13b5c166c59eb
commit_message:
    fix: DEV-1486: fix dm action when deleting all annotations, finished state is
    not updated (#1923)
    Co-authored-by: Max Tkachenko <[email protected]>
metrics: complexity 1 | ast_levels 11 | n_ast_nodes 158 | n_ast_errors 0 | n_whitespaces 72 | token_counts 93 | vocab_size 29 | n_words 34 | n_identifiers 23 | nloc 10 | d_id 42,445 | id 177,572
documentation: { "docstring": " Delete all annotations by tasks ids\n\n    :param project: project instance\n    :param queryset: filtered tasks db queryset\n    ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 14 }
code:

    def delete_tasks_annotations(project, queryset, **kwargs):
        task_ids = queryset.values_list('id', flat=True)
        annotations = Annotation.objects.filter(task__id__in=task_ids)
        count = annotations.count()
        annotations_ids = list(annotations.values('id'))
        annotations.delete()
        emit_webhooks_for_instance(project.organization, project,
                                  WebhookAction.ANNOTATIONS_DELETED, annotations_ids)
        bulk_update_stats_project_tasks(queryset)
        return {'processed_items': count,
                'detail': 'Deleted ' + str(count) + ' annotations'}
fun_name: load
repo: XX-Net | path: python3.10.4/Lib/http/cookies.py | file_name: cookies.py | language: Python
url: https://github.com/XX-net/XX-Net.git
commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526
commit_message: add python 3.10.4 for windows
metrics: complexity 3 | ast_levels 12 | n_ast_nodes 70 | n_ast_errors 0 | n_whitespaces 100 | token_counts 42 | vocab_size 23 | n_words 24 | n_identifiers 9 | nloc 7 | d_id 54,959 | id 217,834
documentation: { "docstring": "Load cookies from a string (presumably HTTP_COOKIE) or\n        from a dictionary.  Loading cookies from a dictionary 'd'\n        is equivalent to calling:\n            map(Cookie.__setitem__, d.keys(), d.values())\n        ", "language": "en", "n_whitespaces": 57, "n_words": 24, "vocab_size": 19 }
code:

    def load(self, rawdata):
        if isinstance(rawdata, str):
            self.__parse_string(rawdata)
        else:
            # self.update() wouldn't call our custom __setitem__
            for key, value in rawdata.items():
                self[key] = value
        return
fun_name: _assert_same_graph
repo: keras | path: keras/backend.py | file_name: backend.py | language: Python
url: https://github.com/keras-team/keras.git
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf
commit_message: Reformatting the codebase with black. PiperOrigin-RevId: 450093126
metrics: complexity 4 | ast_levels 11 | n_ast_nodes 84 | n_ast_errors 0 | n_whitespaces 92 | token_counts 53 | vocab_size 34 | n_words 44 | n_identifiers 7 | nloc 8 | d_id 80,129 | id 269,497
documentation: { "docstring": "Fail if the 2 items are from different graphs.\n\n    Args:\n        original_item: Original item to check against.\n        item: Item to check.\n\n    Raises:\n        ValueError: if graphs do not match.\n    ", "language": "en", "n_whitespaces": 51, "n_words": 27, "vocab_size": 25 }
code:

    def _assert_same_graph(original_item, item):
        original_graph = getattr(original_item, "graph", None)
        graph = getattr(item, "graph", None)
        if original_graph and graph and original_graph is not graph:
            raise ValueError(
                "%s must be from the same graph as %s (graphs are %s and %s)."
                % (item, original_item, graph, original_graph)
            )
fun_name: rewrite
repo: sympy | path: sympy/combinatorics/fp_groups.py | file_name: fp_groups.py | language: Python
url: https://github.com/sympy/sympy.git
commit_id: 498015021131af4dbb07eb110e5badaba8250c7b
commit_message: Updated import locations
metrics: complexity 2 | ast_levels 12 | n_ast_nodes 107 | n_ast_errors 0 | n_whitespaces 61 | token_counts 70 | vocab_size 24 | n_words 29 | n_identifiers 14 | nloc 7 | d_id 47,555 | id 196,055
documentation: { "docstring": "\n    Parameters\n    ==========\n\n    C: CosetTable\n    alpha: A live coset\n    w: A word in `A*`\n\n    Returns\n    =======\n\n    rho(tau(alpha), w)\n\n    Examples\n    ========\n\n    >>> from sympy.combinatorics.fp_groups import FpGroup, CosetTable, define_schreier_generators, rewrite\n    >>> from sympy.combinatorics import free_group\n    >>> F, x, y = free_group(\"x, y\")\n    >>> f = FpGroup(F, [x**2, y**3, (x*y)**6])\n    >>> C = CosetTable(f, [])\n    >>> C.table = [[1, 1, 2, 3], [0, 0, 4, 5], [4, 4, 3, 0], [5, 5, 0, 2], [2, 2, 5, 1], [3, 3, 1, 4]]\n    >>> C.p = [0, 1, 2, 3, 4, 5]\n    >>> define_schreier_generators(C)\n    >>> rewrite(C, 0, (x*y)**6)\n    x_4*y_2*x_3*x_1*x_2*y_4*x_5\n\n    ", "language": "en", "n_whitespaces": 158, "n_words": 94, "vocab_size": 67 }
code:

    def rewrite(C, alpha, w):
        v = C._schreier_free_group.identity
        for i in range(len(w)):
            x_i = w[i]
            v = v*C.P[alpha][C.A_dict[x_i]]
            alpha = C.table[alpha][C.A_dict[x_i]]
        return v

    # Pg 350, section 2.5.2 from [2]
fun_name: login
repo: django | path: django/contrib/admin/sites.py | file_name: sites.py | language: Python
url: https://github.com/django/django.git
commit_id: 0a4a5e5bacc354df3132d0fcf706839c21afb89d
commit_message:
    Refs #32681 -- Fixed VariableDoesNotExist when rendering some admin template.
    Regression in 84609b3205905097d7d3038d32e6101f012c0619.
    Follow up to 4e5bbb6ef2287126badd32842b239f4a8a7394ca.
    Thanks Sourav Kumar for the report.
metrics: complexity 8 | ast_levels 12 | n_ast_nodes 308 | n_ast_errors 0 | n_whitespaces 362 | token_counts 187 | vocab_size 83 | n_words 110 | n_identifiers 33 | nloc 24 | d_id 50,220 | id 203,036
documentation: { "docstring": "\n        Display the login form for the given HttpRequest.\n        ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 7 }
code:

    def login(self, request, extra_context=None):
        if request.method == 'GET' and self.has_permission(request):
            # Already logged-in, redirect to admin index
            index_path = reverse('admin:index', current_app=self.name)
            return HttpResponseRedirect(index_path)

        # Since this module gets imported in the application's root package,
        # it cannot import models from other applications at the module level,
        # and django.contrib.admin.forms eventually imports User.
        from django.contrib.admin.forms import AdminAuthenticationForm
        from django.contrib.auth.views import LoginView

        context = {
            **self.each_context(request),
            'title': _('Log in'),
            'subtitle': None,
            'app_path': request.get_full_path(),
            'username': request.user.get_username(),
        }
        if (REDIRECT_FIELD_NAME not in request.GET and
                REDIRECT_FIELD_NAME not in request.POST):
            context[REDIRECT_FIELD_NAME] = reverse('admin:index', current_app=self.name)
        context.update(extra_context or {})

        defaults = {
            'extra_context': context,
            'authentication_form': self.login_form or AdminAuthenticationForm,
            'template_name': self.login_template or 'admin/login.html',
        }
        request.current_app = self.name
        return LoginView.as_view(**defaults)(request)
fun_name: get_font_preamble
repo: matplotlib | path: lib/matplotlib/texmanager.py | file_name: texmanager.py | language: Python
url: https://github.com/matplotlib/matplotlib.git
commit_id: 13147992b317c29c6e832ca7f6d05bf48aeb0718
commit_message:
    Move towards making texmanager stateless.
    Previously, TexManager needed to call get_font_config at a specific place in
    the middle of processing to update some internal attributes before proceeding
    with TeX source generation. Instead, move towards making TexManager stateless
    (except for caching), i.e. the user facing API should be thought of as a bunch
    of independently callable functions `make_tex()`, `make_dvi()`, etc. (they
    will probably stay as methods on a "empty" TexManager object for a long time
    for backcompat, in fact).
metrics: complexity 1 | ast_levels 8 | n_ast_nodes 31 | n_ast_errors 0 | n_whitespaces 29 | token_counts 17 | vocab_size 8 | n_words 8 | n_identifiers 5 | nloc 3 | d_id 23,019 | id 108,020
documentation: { "docstring": "\n        Return a string containing font configuration for the tex preamble.\n        ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
code:

    def get_font_preamble(cls):
        font_preamble, command = cls._get_font_preamble_and_command()
        return font_preamble
9
addIncludedDataFilesFromPackageOptions
def addIncludedDataFilesFromPackageOptions():
    # Cyclic dependency
    from nuitka import ModuleRegistry

    for module in ModuleRegistry.getDoneModules():
        if module.isCompiledPythonPackage() or module.isUncompiledPythonPackage():
            package_name = module.getFullName()

            match, reason = package_name.matchesToShellPatterns(
                patterns=getShallIncludePackageData()
            )

            if match:
                package_directory = module.getCompileTimeDirectory()

                pkg_filenames = getFileList(
                    package_directory,
                    ignore_dirs=("__pycache__",),
                    ignore_suffixes=(".py", ".pyw", ".pyc", ".pyo", ".dll")
                    + getSharedLibrarySuffixes(),
                )

                if pkg_filenames:
                    file_reason = "package '%s' %s" % (package_name, reason)

                    for pkg_filename in pkg_filenames:
                        rel_path = os.path.join(
                            package_name.asPath(),
                            os.path.relpath(pkg_filename, package_directory),
                        )

                        addIncludedDataFile(
                            makeIncludedDataFile(
                                source_path=pkg_filename,
                                dest_path=rel_path,
                                reason=file_reason,
                                tracer=options_logger,
                                tags="user,package_data",
                            )
                        )

    # Circular dependency
    from nuitka.plugins.Plugins import Plugins

    # Plugins provide per module through this.
    for module in ModuleRegistry.getDoneModules():
        for included_datafile in Plugins.considerDataFiles(module=module):
            addIncludedDataFile(included_datafile)
abfb99b0a05dd76d2ecc6ebc20732a271857c6c8
21
IncludedDataFiles.py
319
Plugins: Massive cleanup of data file handling * Move data file handling out of standalone only, allowing support for other modes as well. * Attach logger and tags to data file objects.
42,860
0
734
196
70
178,915
97
Nuitka
40
nuitka/freezer/IncludedDataFiles.py
Python
36
{ "docstring": "Late data files, from plugins and user options that work with packages", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/Nuitka/Nuitka.git
1
get_xframe_options_value
def get_xframe_options_value(self, request, response):
    return getattr(settings, "X_FRAME_OPTIONS", "DENY").upper()
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
clickjacking.py
40
Refs #33476 -- Reformatted code with Black.
51,375
0
22
23
8
206,133
8
django
7
django/middleware/clickjacking.py
Python
2
{ "docstring": "\n Get the value to set for the X_FRAME_OPTIONS header. Use the value from\n the X_FRAME_OPTIONS setting, or 'DENY' if not set.\n\n This method can be overridden if needed, allowing it to vary based on\n the request or response.\n ", "language": "en", "n_whitespaces": 74, "n_words": 38, "vocab_size": 29 }
https://github.com/django/django.git
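The method above is the documented override hook for Django's clickjacking middleware. A minimal sketch of such an override, assuming the standard `XFrameOptionsMiddleware` base class; the subclass name and the `/embed/` prefix are illustrative, not from the record:

from django.middleware.clickjacking import XFrameOptionsMiddleware

class EmbedFriendlyXFrameOptionsMiddleware(XFrameOptionsMiddleware):
    # Hypothetical subclass: relax the header for one assumed path prefix.
    def get_xframe_options_value(self, request, response):
        if request.path.startswith("/embed/"):
            return "SAMEORIGIN"
        return super().get_xframe_options_value(request, response)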
2
validate_config
def validate_config(self, config):
    site_dir = config_options.SiteDir()
    docs_dir = config_options.Dir()
    fname = os.path.join(os.path.abspath('..'), 'mkdocs.yml')
    config['docs_dir'] = docs_dir.validate(config['docs_dir'])
    config['site_dir'] = site_dir.validate(config['site_dir'])

    schema = [
        ('site_dir', site_dir),
        ('docs_dir', docs_dir),
    ]
    cfg = Config(schema, fname)
    cfg.load_dict(config)

    failed, warned = cfg.validate()

    if failed:
        raise config_options.ValidationError(failed)

    return True
e7f07cc82ab2be920ab426ba07456d8b2592714d
11
config_options_tests.py
203
Remove spaces at the ends of docstrings, normalize quotes
57,194
0
166
120
35
224,047
42
mkdocs
21
mkdocs/tests/config/config_options_tests.py
Python
16
{ "docstring": "Given a config with values for site_dir and doc_dir, run site_dir post_validation.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
https://github.com/mkdocs/mkdocs.git
1
axis
def axis(self):
    axis = self.vector_part().normalize()
    return Quaternion(0, axis.b, axis.c, axis.d)
72a2eb0cd1dda000a1fe56bb0fc8ff1f3c119bd9
10
quaternion.py
55
made changes
48,054
0
31
34
10
196,621
10
sympy
8
sympy/algebras/quaternion.py
Python
3
{ "docstring": "\n Returns the axis($\\mathbf{Ax}(q)$) of the quaternion.\n\n Explanation\n ===========\n\n Given a quaternion $q = a + bi + cj + dk$ and returns $\\mathbf{Ax}(q)$ ie normalized versor (vector part) of that quaternion.\n The axis is always an imaginary unit with square equal to $-1 + 0i + 0j + 0k$.\n\n Examples\n ========\n\n >>> from sympy.algebras.quaternion import Quaternion\n >>> q = Quaternion(1, 1, 1, 1)\n >>> q.axis()\n 0 + sqrt(3)/3*i + sqrt(3)/3*j + sqrt(3)/3*k\n\n See Also\n ========\n\n vector_part\n\n ", "language": "en", "n_whitespaces": 182, "n_words": 76, "vocab_size": 59 }
https://github.com/sympy/sympy.git
1
editor_attributes
def editor_attributes(self, image, alt_text):
    return {
        "data-embedtype": "image",
        "data-id": image.id,
        "data-format": self.name,
        "data-alt": escape(alt_text),
    }
d10f15e55806c6944827d801cd9c2d53f5da4186
9
formats.py
63
Reformat with black
16,343
0
80
36
15
75,039
15
wagtail
7
wagtail/images/formats.py
Python
7
{ "docstring": "\n Return additional attributes to go on the HTML element\n when outputting this image within a rich text editor field\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 19 }
https://github.com/wagtail/wagtail.git
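For orientation, a hedged sketch of the mapping this method produces; the format name, image id, and alt text below are invented, not from the record:

# Shape of the returned attributes for a format named "left", image id 42,
# and alt text containing a character that needs HTML escaping:
# {
#     "data-embedtype": "image",
#     "data-id": 42,
#     "data-format": "left",
#     "data-alt": "Fish &amp; chips",
# }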
9
find_head_tail
def find_head_tail(self, points, orientation_thr):
    assert points.ndim == 2
    assert points.shape[0] >= 4
    assert points.shape[1] == 2
    assert isinstance(orientation_thr, float)

    if len(points) > 4:
        pad_points = np.vstack([points, points[0]])
        edge_vec = pad_points[1:] - pad_points[:-1]

        theta_sum = []
        adjacent_vec_theta = []
        for i, edge_vec1 in enumerate(edge_vec):
            adjacent_ind = [x % len(edge_vec) for x in [i - 1, i + 1]]
            adjacent_edge_vec = edge_vec[adjacent_ind]
            temp_theta_sum = np.sum(
                self.vector_angle(edge_vec1, adjacent_edge_vec))
            temp_adjacent_theta = self.vector_angle(adjacent_edge_vec[0],
                                                    adjacent_edge_vec[1])
            theta_sum.append(temp_theta_sum)
            adjacent_vec_theta.append(temp_adjacent_theta)
        theta_sum_score = np.array(theta_sum) / np.pi
        adjacent_theta_score = np.array(adjacent_vec_theta) / np.pi
        poly_center = np.mean(points, axis=0)
        edge_dist = np.maximum(
            norm(
                pad_points[1:] - poly_center, axis=-1),
            norm(
                pad_points[:-1] - poly_center, axis=-1))
        dist_score = edge_dist / np.max(edge_dist)
        position_score = np.zeros(len(edge_vec))
        score = 0.5 * theta_sum_score + 0.15 * adjacent_theta_score
        score += 0.35 * dist_score
        if len(points) % 2 == 0:
            position_score[(len(score) // 2 - 1)] += 1
            position_score[-1] += 1
        score += 0.1 * position_score
        pad_score = np.concatenate([score, score])
        score_matrix = np.zeros((len(score), len(score) - 3))
        x = np.arange(len(score) - 3) / float(len(score) - 4)
        gaussian = 1. / (np.sqrt(2. * np.pi) * 0.5) * np.exp(-np.power(
            (x - 0.5) / 0.5, 2.) / 2)
        gaussian = gaussian / np.max(gaussian)

        for i in range(len(score)):
            score_matrix[i, :] = score[i] + pad_score[(i + 2):(i + len(
                score) - 1)] * gaussian * 0.3

        head_start, tail_increment = np.unravel_index(score_matrix.argmax(),
                                                      score_matrix.shape)
        tail_start = (head_start + tail_increment + 2) % len(points)
        head_end = (head_start + 1) % len(points)
        tail_end = (tail_start + 1) % len(points)

        if head_end > tail_end:
            head_start, tail_start = tail_start, head_start
            head_end, tail_end = tail_end, head_end
        head_inds = [head_start, head_end]
        tail_inds = [tail_start, tail_end]
    else:
        if self.vector_slope(points[1] - points[0]) + self.vector_slope(
                points[3] - points[2]) < self.vector_slope(points[
                    2] - points[1]) + self.vector_slope(points[0] - points[
                        3]):
            horizontal_edge_inds = [[0, 1], [2, 3]]
            vertical_edge_inds = [[3, 0], [1, 2]]
        else:
            horizontal_edge_inds = [[3, 0], [1, 2]]
            vertical_edge_inds = [[0, 1], [2, 3]]

        vertical_len_sum = norm(points[vertical_edge_inds[0][0]] - points[
            vertical_edge_inds[0][1]]) + norm(points[vertical_edge_inds[1][
                0]] - points[vertical_edge_inds[1][1]])
        horizontal_len_sum = norm(points[horizontal_edge_inds[0][
            0]] - points[horizontal_edge_inds[0][1]]) + norm(points[
                horizontal_edge_inds[1][0]] - points[horizontal_edge_inds[1]
                                                     [1]])

        if vertical_len_sum > horizontal_len_sum * orientation_thr:
            head_inds = horizontal_edge_inds[0]
            tail_inds = horizontal_edge_inds[1]
        else:
            head_inds = vertical_edge_inds[0]
            tail_inds = vertical_edge_inds[1]

    return head_inds, tail_inds
9f62b610dea6161627200ed85d92e19b1923279a
20
fce_targets.py
1,235
add fcenet
4,527
0
1,486
831
182
23,181
345
PaddleOCR
64
ppocr/data/imaug/fce_targets.py
Python
78
{ "docstring": "Find the head edge and tail edge of a text polygon.\n\n Args:\n points (ndarray): The points composing a text polygon.\n orientation_thr (float): The threshold for distinguishing between\n head edge and tail edge among the horizontal and vertical edges\n of a quadrangle.\n\n Returns:\n head_inds (list): The indexes of two points composing head edge.\n tail_inds (list): The indexes of two points composing tail edge.\n ", "language": "en", "n_whitespaces": 157, "n_words": 62, "vocab_size": 33 }
https://github.com/PaddlePaddle/PaddleOCR.git
1
_is_dataframe
def _is_dataframe(self) -> bool:
    return issubclass(self._pandas_class, pandas.DataFrame)
2ebc9cf51bfc773e3d4c898f5a33c0f60ad7ebc5
8
base.py
32
REFACTOR-#5310: Remove some hasattr('columns') checks. (#5311) Signed-off-by: mvashishtha <[email protected]>
36,344
0
21
19
7
155,342
7
modin
7
modin/pandas/base.py
Python
14
{ "docstring": "\n Tell whether this is a dataframe.\n\n Ideally, other methods of BasePandasDataset shouldn't care whether this\n is a dataframe or a series, but sometimes we need to know. This method\n is better than hasattr(self, \"columns\"), which for series will call\n self.__getattr__(\"columns\"), which requires materializing the index.\n\n Returns\n -------\n bool : Whether this is a dataframe.\n ", "language": "en", "n_whitespaces": 118, "n_words": 54, "vocab_size": 43 }
https://github.com/modin-project/modin.git
2
forward_loss
def forward_loss(self, pixel_values, pred, mask):
    target = self.patchify(pixel_values)
    if self.config.norm_pix_loss:
        mean = tf.reduce_mean(target, axis=-1, keepdims=True)
        var = tf.math.reduce_variance(target, axis=-1, keepdims=True)
        target = (target - mean) / (var + 1.0e-6) ** 0.5

    loss = (pred - target) ** 2
    loss = tf.reduce_mean(loss, axis=-1)  # [batch_size, num_patches], mean loss per patch

    loss = tf.reduce_sum(loss * mask) / tf.reduce_sum(mask)  # mean loss on removed patches
    return loss
b681e12d5963490d29c2a77ba7346ee050e46def
12
modeling_tf_vit_mae.py
185
[ViTMAE] Fix docstrings and variable names (#17710)

* Fix docstrings and variable names
* Rename x to something better
* Improve messages
* Fix docstrings and add test for greyscale images

Co-authored-by: Niels Rogge <[email protected]>
5,737
0
148
125
44
31,420
64
transformers
19
src/transformers/models/vit_mae/modeling_tf_vit_mae.py
Python
10
{ "docstring": "\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, height, width, num_channels)`):\n Pixel values.\n pred (`tf.Tensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:\n Predicted pixel values.\n mask (`tf.Tensor` of shape `(batch_size, sequence_length)`):\n Tensor indicating which patches are masked (1) and which are not (0).\n\n Returns:\n `tf.Tensor`: Pixel reconstruction loss.\n ", "language": "en", "n_whitespaces": 157, "n_words": 46, "vocab_size": 34 }
https://github.com/huggingface/transformers.git
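The masked-mean reduction at the end of the method above is easy to sanity-check in isolation; a small sketch with invented values:

import tensorflow as tf

# Only masked (removed) patches contribute to the final scalar loss.
per_patch_loss = tf.constant([[1.0, 2.0, 3.0, 4.0]])  # [batch, num_patches]
mask = tf.constant([[0.0, 1.0, 1.0, 0.0]])            # 1 marks a masked patch
loss = tf.reduce_sum(per_patch_loss * mask) / tf.reduce_sum(mask)
print(float(loss))  # 2.5, i.e. (2.0 + 3.0) / 2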
5
extract_frames
def extract_frames(self, bpf_buffer):
    # Ensure that the BPF buffer contains at least the header
    len_bb = len(bpf_buffer)
    if len_bb < _bpf_hdr_len:
        return

    # Extract useful information from the BPF header
    bh_hdr = bpf_hdr.from_buffer_copy(bpf_buffer)
    if bh_hdr.bh_datalen == 0:
        return

    # Get and store the Scapy object
    frame_str = bpf_buffer[
        bh_hdr.bh_hdrlen:bh_hdr.bh_hdrlen + bh_hdr.bh_caplen
    ]
    if _NANOTIME:
        ts = bh_hdr.bh_tstamp.tv_sec + 1e-9 * bh_hdr.bh_tstamp.tv_nsec
    else:
        ts = bh_hdr.bh_tstamp.tv_sec + 1e-6 * bh_hdr.bh_tstamp.tv_usec
    self.received_frames.append(
        (self.guessed_cls, frame_str, ts)
    )

    # Extract the next frame
    end = self.bpf_align(bh_hdr.bh_hdrlen, bh_hdr.bh_caplen)
    if (len_bb - end) >= 20:
        self.extract_frames(bpf_buffer[end:])
adaa923db82be88e5bd84b4046faa23f2b69d0ed
13
supersocket.py
218
Major BPF improvements

This adds:
- timestamps support
- structures of bpf_hdr
- nanosecond precision on FREEBSD
- cleanups
52,575
0
286
140
65
208,924
90
scapy
24
scapy/arch/bpf/supersocket.py
Python
20
{ "docstring": "\n Extract all frames from the buffer and stored them in the received list\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 12 }
https://github.com/secdev/scapy.git
26
as_content_primitive
def as_content_primitive(self, radical=False, clear=True):
    con, prim = self.func(*[_keep_coeff(*a.as_content_primitive(
        radical=radical, clear=clear)) for a in self.args]).primitive()
    if not clear and not con.is_Integer and prim.is_Add:
        con, d = con.as_numer_denom()
        _p = prim/d
        if any(a.as_coeff_Mul()[0].is_Integer for a in _p.args):
            prim = _p
        else:
            con /= d
    if radical and prim.is_Add:
        # look for common radicals that can be removed
        args = prim.args
        rads = []
        common_q = None
        for m in args:
            term_rads = defaultdict(list)
            for ai in Mul.make_args(m):
                if ai.is_Pow:
                    b, e = ai.as_base_exp()
                    if e.is_Rational and b.is_Integer:
                        term_rads[e.q].append(abs(int(b))**e.p)
            if not term_rads:
                break
            if common_q is None:
                common_q = set(term_rads.keys())
            else:
                common_q = common_q & set(term_rads.keys())
            if not common_q:
                break
            rads.append(term_rads)
        else:
            # process rads
            # keep only those in common_q
            for r in rads:
                for q in list(r.keys()):
                    if q not in common_q:
                        r.pop(q)
                for q in r:
                    r[q] = Mul(*r[q])
            # find the gcd of bases for each q
            G = []
            for q in common_q:
                g = reduce(igcd, [r[q] for r in rads], 0)
                if g != 1:
                    G.append(g**Rational(1, q))
            if G:
                G = Mul(*G)
                args = [ai/G for ai in args]
                prim = G*prim.func(*args)

    return con, prim
19114acf6514bc87f5c8cfde35e0fcab88965be7
22
add.py
587
Code optimizations
49,018
0
989
369
99
198,655
188
sympy
46
sympy/core/add.py
Python
47
{ "docstring": "Return the tuple (R, self/R) where R is the positive Rational\n extracted from self. If radical is True (default is False) then\n common radicals will be removed and included as a factor of the\n primitive expression.\n\n Examples\n ========\n\n >>> from sympy import sqrt\n >>> (3 + 3*sqrt(2)).as_content_primitive()\n (3, 1 + sqrt(2))\n\n Radical content can also be factored out of the primitive:\n\n >>> (2*sqrt(2) + 4*sqrt(10)).as_content_primitive(radical=True)\n (2, sqrt(2)*(1 + 2*sqrt(5)))\n\n See docstring of Expr.as_content_primitive for more examples.\n ", "language": "en", "n_whitespaces": 167, "n_words": 76, "vocab_size": 62 }
https://github.com/sympy/sympy.git
2
_add_projection
def _add_projection(self, frame):
    proj = CalciteProjectionNode(
        frame._table_cols, [self._ref(frame, col) for col in frame._table_cols]
    )
    self._push(proj)
    return proj
e5b1888cd932909e49194d58035da34b210b91c4
11
calcite_builder.py
64
FEAT-#4946: Replace OmniSci with HDK (#4947) Co-authored-by: Iaroslav Igoshev <[email protected]> Signed-off-by: Andrey Pavlenko <[email protected]>
36,062
0
63
41
16
154,552
17
modin
9
modin/experimental/core/execution/native/implementations/hdk_on_native/calcite_builder.py
Python
6
{ "docstring": "\n Add a projection node to the resulting sequence.\n\n Added node simply selects all frame's columns. This method can be used\n to discard a virtual 'rowid' column provided by all scan nodes.\n\n Parameters\n ----------\n frame : HdkOnNativeDataframe\n An input frame for a projection.\n\n Returns\n -------\n CalciteProjectionNode\n Created projection node.\n ", "language": "en", "n_whitespaces": 141, "n_words": 48, "vocab_size": 41 }
https://github.com/modin-project/modin.git
2
closing
def closing(pool):
    # type: (Pool) -> Iterator[Pool]
    try:
        yield pool
    finally:
        # For Pool.imap*, close and join are needed
        # for the returned iterator to begin yielding.
        pool.close()
        pool.join()
        pool.terminate()
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
10
parallel.py
55
upd; format
12,490
0
84
27
28
61,288
30
transferlearning
5
.venv/lib/python3.8/site-packages/pip/_internal/utils/parallel.py
Python
7
{ "docstring": "Return a context manager making sure the pool closes properly.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/jindongwang/transferlearning.git
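A hedged usage sketch for the context manager above, assuming the generator is wrapped with `contextlib.contextmanager` as in pip's source; the thread-backed Pool keeps the demo self-contained:

from multiprocessing.dummy import Pool  # thread-based Pool, cheap to demo

with closing(Pool(4)) as pool:
    # imap yields lazily; the close/join in the finally block let it finish.
    for word in pool.imap(str.upper, ["a", "b", "c"]):
        print(word)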
8
_cov
def _cov(self, min_periods, ddof, numeric_only):  # noqa: PR01, RT01, D200
    if not numeric_only:
        return self._default_to_pandas(
            pandas.DataFrame.cov,
            min_periods=min_periods,
            ddof=ddof,
            numeric_only=numeric_only,
        )
    numeric_df = self.drop(
        columns=[
            i for i in self.dtypes.index if not is_numeric_dtype(self.dtypes[i])
        ]
    )

    is_notna = True

    if all(numeric_df.notna().all()):
        if min_periods is not None and min_periods > len(numeric_df):
            result = np.empty((numeric_df.shape[1], numeric_df.shape[1]))
            result.fill(np.nan)
            return numeric_df.__constructor__(result)
        else:
            cols = numeric_df.columns
            idx = cols.copy()
            numeric_df = numeric_df.astype(dtype="float64")
            denom = 1.0 / (len(numeric_df) - ddof)
            means = numeric_df.mean(axis=0)
            result = numeric_df - means
            result = result.T._query_compiler.conj().dot(result._query_compiler)
    else:
        result = numeric_df._query_compiler.cov(min_periods=min_periods)
        is_notna = False

    if is_notna:
        result = numeric_df.__constructor__(
            query_compiler=result, index=idx, columns=cols
        )
        result *= denom
    else:
        result = numeric_df.__constructor__(query_compiler=result)
    return result
7871c7bc5adeeaca9b6fc4b88482be8b18333faa
17
dataframe.py
413
FEAT-#4989: Switch pandas version to 1.5 (#5037) Co-authored-by: Mahesh Vashishtha <[email protected]> Co-authored-by: Anatoly Myachev <[email protected]> Co-authored-by: Jonathan Shi <[email protected]> Co-authored-by: Iaroslav Igoshev <[email protected]> Signed-off-by: Vasily Litvinov <[email protected]>
36,194
0
551
265
68
154,972
108
modin
41
modin/pandas/dataframe.py
Python
38
{ "docstring": "\n Compute pairwise covariance of columns, excluding NA/null values.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/modin-project/modin.git
1
_object2proto
def _object2proto(self) -> PublishDatasetMessage_PB:
    return PublishDatasetMessage_PB(
        msg_id=serialize(self.id),
        address=serialize(self.address),
        reply_to=serialize(self.reply_to),
        dataset_id=self.dataset_id,
        deployment_id=self.deployment_id,
        host_or_ip=self.host_or_ip,
        protocol=self.protocol,
        port=self.port,
        client=serialize(self.client),
    )
54c0a2f6738090252dc2b2863eb3c13b1bcb9e6a
11
oblv_messages.py
115
Changes for publishing dataset and passing actions to enclave
310
0
142
78
20
2,529
22
PySyft
14
packages/syft/src/syft/core/node/common/node_service/oblv/oblv_messages.py
Python
23
{ "docstring": "Returns a protobuf serialization of self.\n As a requirement of all objects which inherit from Serializable,\n this method transforms the current object into the corresponding\n Protobuf object so that it can be further serialized.\n :return: returns a protobuf object\n :rtype: PublishDatasetMessage_PB\n .. note::\n This method is purely an internal method. Please use serialize(object) or one of\n the other public serialization methods if you wish to serialize an\n object.\n ", "language": "en", "n_whitespaces": 150, "n_words": 68, "vocab_size": 56 }
https://github.com/OpenMined/PySyft.git
3
async_update
async def async_update(self) -> None:
    _LOGGER.debug("Updating %s", self._config_entry_id)
    if self._manager:
        status_info = self._manager.status_info()
        if status_info:
            self._update_from_status_info(status_info)
b043053aadab57435c768d1e0fe3892367686999
11
sensor.py
72
Improve entity type hints [g] (#77145)
103,647
0
74
41
15
304,852
16
core
8
homeassistant/components/gdacs/sensor.py
Python
7
{ "docstring": "Update this entity from the data held in the feed manager.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
https://github.com/home-assistant/core.git
2
test_notification_permission_workaround
def test_notification_permission_workaround():
    try:
        notifications = QWebEnginePage.Feature.Notifications
    except AttributeError:
        pytest.skip("No Notifications member")

    permissions = webenginetab._WebEnginePermissions
    assert permissions._options[notifications] == 'content.notifications.enabled'
    assert permissions._messages[notifications] == 'show notifications'
0877fb0d78635692e481c8bde224fac5ad0dd430
11
test_webenginetab.py
82
Run scripts/dev/rewrite_enums.py
117,667
0
55
46
20
321,330
23
qutebrowser
13
tests/unit/browser/webengine/test_webenginetab.py
Python
8
{ "docstring": "Make sure the value for QWebEnginePage::Notifications is correct.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/qutebrowser/qutebrowser.git
1
test_data_parameter_replacement
def test_data_parameter_replacement():
    program = (
        "import logging; "
        "logging.basicConfig(level=logging.DEBUG); "
        "import matplotlib.pyplot as plt"
    )
    cmd = [sys.executable, "-c", program]
    completed_proc = subprocess.run(cmd, text=True, capture_output=True)
    assert 'data parameter docstring error' not in completed_proc.stderr
686c9e5a413e31c46bb049407d5eca285bcab76d
9
test_preprocess_data.py
81
Fix spelling errors
23,206
0
72
46
29
108,475
33
matplotlib
11
lib/matplotlib/tests/test_preprocess_data.py
Python
9
{ "docstring": "\n Test that the docstring contains the correct *data* parameter stub\n for all methods that we run _preprocess_data() on.\n ", "language": "en", "n_whitespaces": 28, "n_words": 18, "vocab_size": 16 }
https://github.com/matplotlib/matplotlib.git
3
submit_failure
def submit_failure(cls, trigger_id, exc=None, session=None):
    for task_instance in session.query(TaskInstance).filter(
        TaskInstance.trigger_id == trigger_id, TaskInstance.state == State.DEFERRED
    ):
        # Add the error and set the next_method to the fail state
        traceback = format_exception(type(exc), exc, exc.__traceback__) if exc else None
        task_instance.next_method = "__fail__"
        task_instance.next_kwargs = {"error": "Trigger failure", "traceback": traceback}
        # Remove ourselves as its trigger
        task_instance.trigger_id = None
        # Finally, mark it as scheduled so it gets re-queued
        task_instance.state = State.SCHEDULED
4ad21f5f7c2d416cf813a860564bc2bf3e161d46
13
trigger.py
149
Log traceback in trigger excs (#21213)
8,565
0
189
92
56
45,435
69
airflow
19
airflow/models/trigger.py
Python
9
{ "docstring": "\n Called when a trigger has failed unexpectedly, and we need to mark\n everything that depended on it as failed. Notably, we have to actually\n run the failure code from a worker as it may have linked callbacks, so\n hilariously we have to re-schedule the task instances to a worker just\n so they can then fail.\n\n We use a special __fail__ value for next_method to achieve this that\n the runtime code understands as immediate-fail, and pack the error into\n next_kwargs.\n\n TODO: Once we have shifted callback (and email) handling to run on\n workers as first-class concepts, we can run the failure code here\n in-process, but we can't do that right now.\n ", "language": "en", "n_whitespaces": 195, "n_words": 110, "vocab_size": 74 }
https://github.com/apache/airflow.git
1
uname_attr
def uname_attr(self, attribute):
    # type: (str) -> str
    return self._uname_info.get(attribute, "")
f3166e673fe8d40277b804d35d77dcdb760fc3b3
8
distro.py
34
check point progress on only bringing in pip==22.0.4 (#4966)

* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for pip==22.0.4
* Update patches
* exclude pyptoject.toml from black to see if that helps.
* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,231
0
32
19
11
20,088
11
pipenv
5
pipenv/patched/notpip/_vendor/distro.py
Python
2
{ "docstring": "\n Return a single named information item from the uname command\n output data source of the OS distribution.\n\n For details, see :func:`distro.uname_attr`.\n ", "language": "en", "n_whitespaces": 50, "n_words": 21, "vocab_size": 20 }
https://github.com/pypa/pipenv.git
1
_set_item_existing_loc
def _set_item_existing_loc(self, row_loc, col_loc, item):
    row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
    self._setitem_positional(
        row_lookup,
        col_lookup,
        item,
        axis=self._determine_setitem_axis(
            row_lookup, col_lookup, is_scalar(row_loc), is_scalar(col_loc)
        ),
    )
11ba4811e6db11740e11fd33d3cdfba8ce5bec54
12
indexing.py
81
FIX-#3764: Ensure df.loc with a scalar out of bounds appends to df (#3765) Co-authored-by: Devin Petersohn <[email protected]> Co-authored-by: Bill Wang <[email protected]> Co-authored-by: Vasily Litvinov <[email protected]>
36,224
0
119
56
18
155,050
21
modin
12
modin/pandas/indexing.py
Python
10
{ "docstring": "\n Assign `item` value to dataset located by `row_loc` and `col_loc` with existing rows and columns.\n\n Parameters\n ----------\n row_loc : scalar, slice, list, array or tuple\n Row locator.\n col_loc : scalar, slice, list, array or tuple\n Columns locator.\n item : modin.pandas.DataFrame, modin.pandas.Series or scalar\n Value that should be assigned to located dataset.\n ", "language": "en", "n_whitespaces": 134, "n_words": 51, "vocab_size": 38 }
https://github.com/modin-project/modin.git
1
test_exclude_glob_case5_include_strong
def test_exclude_glob_case5_include_strong():
    incl_dom = {}
    incl_glob = {}
    incl_ent = {"binary_sensor.working"}
    excl_dom = {"binary_sensor"}
    excl_glob = {"binary_sensor.*"}
    excl_ent = {"light.ignoreme", "sensor.notworking"}
    testfilter = generate_filter(
        incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob
    )

    assert testfilter("sensor.test")
    assert testfilter("sensor.notworking") is False
    assert testfilter("light.test")
    assert testfilter("light.ignoreme") is False
    assert testfilter("binary_sensor.working")
    assert testfilter("binary_sensor.another") is False
    assert testfilter("sun.sun") is True
a8349a4866d22cddbca9ac9367d4affae39a8325
9
test_entityfilter.py
172
Adjust entity filters to make includes stronger than excludes (#74080)

* Adjust entity filters to make includes stronger than excludes

  Fixes #59080

* adjust test for stronger entity glob includes
* sync with docs
113,352
0
108
94
34
314,748
53
core
9
tests/helpers/test_entityfilter.py
Python
17
{ "docstring": "Test case 5 - include and exclude specified, with excluded glob, and a specifically included entity.", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 15 }
https://github.com/home-assistant/core.git
4
_bool_arith_fallback
def _bool_arith_fallback(op_str, a, b):
    if _has_bool_dtype(a) and _has_bool_dtype(b):
        if op_str in _BOOL_OP_UNSUPPORTED:
            warnings.warn(
                f"evaluating in Python space because the {repr(op_str)} "
                "operator is not supported by numexpr for the bool dtype, "
                f"use {repr(_BOOL_OP_UNSUPPORTED[op_str])} instead.",
                stacklevel=find_stack_level(inspect.currentframe()),
            )
            return True
    return False
e94faa23e24c0abf9db74d79cfebe06676577867
17
expressions.py
108
WARN,TST check stacklevel for all warnings (#47998)

* use find_stack_level everywhere
* fixup
* pyx fixups
* fixup test_optional_dependency
* fixup api
* set check_stacklevel=False for some tests
* use lru_cache for currentframe
* fixup import in __init__
* add missing imports to pyx files
* add missing import
* fixup import in conversion
* revert some __init__ changes
* start n=1
* temporarily dont check stacklevel in _check_plot_works
* catch some more warnings
* dont check stacklevel in check_plot_works
* fixup
* ignore stacklevel in check_plot_works
40,296
0
150
52
36
168,434
41
pandas
13
pandas/core/computation/expressions.py
Python
11
{ "docstring": "\n Check if we should fallback to the python `_evaluate_standard` in case\n of an unsupported operation by numexpr, which is the case for some\n boolean ops.\n ", "language": "en", "n_whitespaces": 38, "n_words": 25, "vocab_size": 23 }
https://github.com/pandas-dev/pandas.git
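The guard above backs the user-facing pandas behavior: an arithmetic-style operator on bool data falls back to Python-space evaluation with a warning that names the boolean equivalent. A hedged illustration:

import pandas as pd

s = pd.Series([True, False, True])
print(s | s)   # '|' is supported by numexpr, no fallback needed
# s + s        # would hit the fallback path and warn to use '|' instead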
3
compute_output_shape
def compute_output_shape(self, input_shape):
    if tf.executing_eagerly():
        # In this case we build the model first in order to do shape inference.
        # This is acceptable because the framework only calls
        # `compute_output_shape` on shape values that the layer would later be
        # built for. It would however cause issues in case a user attempts to
        # use `compute_output_shape` manually with shapes that are incompatible
        # with the shape the Layer will be called on (these users will have to
        # implement `compute_output_shape` themselves).
        self._maybe_build(input_shape)
        graph_name = str(self.name) + "_scratch_graph"
        with tf.__internal__.FuncGraph(graph_name).as_default():
            input_shape = tf_utils.convert_shapes(
                input_shape, to_tuples=False
            )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
14
base_layer.py
105
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,542
0
269
131
70
270,719
96
keras
15
keras/engine/base_layer.py
Python
26
{ "docstring": "Computes the output shape of the layer.\n\n This method will cause the layer's state to be built, if that has not\n happened before. This requires that the layer will later be used with\n inputs that match the input shape provided here.\n\n Args:\n input_shape: Shape tuple (tuple of integers)\n or list of shape tuples (one per output tensor of the layer).\n Shape tuples can include None for free dimensions,\n instead of an integer.\n\n Returns:\n An input shape tuple.\n ", "language": "en", "n_whitespaces": 186, "n_words": 77, "vocab_size": 56 }
https://github.com/keras-team/keras.git
1
test_error_wrong_input_output_name
def test_error_wrong_input_output_name(self) -> None:
    m1, m2 = _load_model(m1_def), _load_model(m2_def)
    self.assertRaises(ValueError, compose.merge_models, m1, m2,
                      io_map=[("wrong_outname", "B01"), ("B10", "B11"),
                              ("B20", "B21")])  # Wrong output name

    self.assertRaises(ValueError, compose.merge_models, m1, m2,
                      io_map=[("B00", "wrong_input"), ("B10", "B11"),
                              ("B20", "B21")])
83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd
11
compose_test.py
154
Use Python type annotations rather than comments (#3962)

* These have been supported since Python 3.5.
  ONNX doesn't support Python < 3.6, so we can use the annotations.
  Diffs generated by https://pypi.org/project/com2ann/.
  Signed-off-by: Gary Miguel <[email protected]>
* Remove MYPY conditional logic in gen_proto.py
  It breaks the type annotations and shouldn't be needed.
  Signed-off-by: Gary Miguel <[email protected]>
* Get rid of MYPY bool from more scripts
  Signed-off-by: Gary Miguel <[email protected]>
* move Descriptors class above where its referenced in type annotation
  Signed-off-by: Gary Miguel <[email protected]>
* fixes
  Signed-off-by: Gary Miguel <[email protected]>
* remove extra blank line
  Signed-off-by: Gary Miguel <[email protected]>
* fix type annotations
  Signed-off-by: Gary Miguel <[email protected]>
* fix type annotation in gen_docs
  Signed-off-by: Gary Miguel <[email protected]>
* fix Operators.md
  Signed-off-by: Gary Miguel <[email protected]>
* fix TestCoverage.md
  Signed-off-by: Gary Miguel <[email protected]>
* fix protoc-gen-mypy.py
  Signed-off-by: Gary Miguel <[email protected]>
74,741
0
168
93
24
255,401
33
onnx
12
onnx/test/compose_test.py
Python
11
{ "docstring": "\n Tests that providing a non existing output/input name in the io_map argument produces an error.\n ", "language": "en", "n_whitespaces": 30, "n_words": 15, "vocab_size": 15 }
https://github.com/onnx/onnx.git
1
get_local_am_pm
def get_local_am_pm():
    am_local = time(1).strftime("%p")
    pm_local = time(13).strftime("%p")
    return am_local, pm_local


@pytest.fixture(params=["string", "pathlike", "buffer"])
ae6dc976d334e791b3e215cf6e63a267675cccbe
@pytest.fixture(params=["string", "pathlike", "buffer"])
10
test_format.py
86
BUG: Fixed Unicode decoding error in `Period.strftime` when a locale-specific directive is used (#46405)

* Added test representative of #46319. Should fail on CI
* Added a gha worker with non utf 8 zh_CN encoding
* Attempt to fix the encoding so that locale works
* Added the fix, but not using it for now, until CI is able to reproduce the issue.
* Crazy idea: maybe simply removing the .utf8 modifier will use the right encoding !
* Hopefully fixing the locale not available error
* Now simply generating the locale, not updating the ubuntu one
* Trying to install the locale without enabling it
* Stupid mistake
* Testing the optional locale generator condition
* Put back all runners
* Added whatsnew
* Now using the fix
* As per code review: moved locale-switching fixture `overridden_locale` to conftest
* Flake8
* Added comments on the runner
* Added a non-utf8 locale in the `it_IT` runner. Added the zh_CN.utf8 locale in the tests
* Improved readability of fixture `overridden_locale` as per code review
* Added two comments on default encoding
* Fixed #46319 by adding a new `char_to_string_locale` function in the `tslibs.util` module, able to decode char* using the current locale.
* As per code review: modified the test to contain non-utf8 chars. Fixed the resulting issue.
* Split the test in two for clarity
* Fixed test and flake8 error.
* Updated whatsnew to ref #46468 . Updated test name
* Removing wrong whatsnew bullet
* Nitpick on whatsnew as per code review
* Fixed build error rst directive
* Names incorrectly reverted in last merge commit
* Fixed test_localization so that #46595 can be demonstrated on windows targets (even if today these do not run on windows targets, see #46597)
* Fixed `tm.set_locale` context manager, it could error and leak when category LC_ALL was used. Fixed #46595
* Removed the fixture as per code review, and added corresponding parametrization in tests.
* Dummy mod to trigger CI again
* reverted dummy mod
* Attempt to fix the remaining error on the numpy worker
* Fixed issue in `_from_ordinal`
* Added asserts to try to understand
* Reverted debugging asserts and applied fix for numpy repeat from #47670.
* Fixed the last issue on numpy dev: a TypeError message had changed
* Code review: Removed `EXTRA_LOC`
* Code review: removed commented line
* Code review: reverted out of scope change
* Code review: reverted out of scope change
* Fixed unused import
* Fixed revert mistake
* Moved whatsnew to 1.6.0
* Update pandas/tests/io/parser/test_quoting.py

Co-authored-by: Sylvain MARIE <[email protected]>
40,362
1
25
31
12
168,984
14
pandas
8
pandas/tests/io/formats/test_format.py
Python
4
{ "docstring": "Return the AM and PM strings returned by strftime in current locale.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/pandas-dev/pandas.git
2
_get_directory_stash
def _get_directory_stash(self, path):
    # type: (str) -> str
    try:
        save_dir = AdjacentTempDirectory(path)  # type: TempDirectory
    except OSError:
        save_dir = TempDirectory(kind="uninstall")
    self._save_dirs[os.path.normcase(path)] = save_dir

    return save_dir.path
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
13
req_uninstall.py
80
upd; format
12,394
0
90
46
19
61,043
25
transferlearning
11
.venv/lib/python3.8/site-packages/pip/_internal/req/req_uninstall.py
Python
7
{ "docstring": "Stashes a directory.\n\n Directories are stashed adjacent to their original location if\n possible, or else moved/copied into the user's temp dir.", "language": "en", "n_whitespaces": 34, "n_words": 21, "vocab_size": 21 }
https://github.com/jindongwang/transferlearning.git
1
sqrt
def sqrt(x):
    zero = _constant_to_tensor(0.0, x.dtype.base_dtype)
    x = tf.maximum(x, zero)
    return tf.sqrt(x)


@keras_export("keras.backend.exp")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.backend.exp") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
10
backend.py
86
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,245
1
24
37
14
269,628
15
keras
14
keras/backend.py
Python
4
{ "docstring": "Element-wise square root.\n\n This function clips negative tensor values to 0 before computing the\n square root.\n\n Args:\n x: Tensor or variable.\n\n Returns:\n A tensor.\n ", "language": "en", "n_whitespaces": 59, "n_words": 24, "vocab_size": 22 }
https://github.com/keras-team/keras.git
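A quick sketch of the clipping behavior documented above: negative inputs are clamped to zero rather than producing NaN.

import tensorflow as tf
from keras import backend as K

x = tf.constant([-4.0, 0.0, 9.0])
print(K.sqrt(x).numpy())  # [0. 0. 3.]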
5
_handle_random_seed
def _handle_random_seed(seed=None):
    if isinstance(seed, np.random.RandomState):
        rng = seed
    else:
        try:
            # General interface for seeding on numpy >= 1.17
            rng = np.random.default_rng(seed)
        except AttributeError:
            # We are on numpy < 1.17, handle options ourselves
            if isinstance(seed, (numbers.Integral, np.integer)):
                rng = np.random.RandomState(seed)
            elif seed is None:
                rng = np.random.RandomState()
            else:
                err = "{} cannot be used to seed the random number generator"
                raise ValueError(err.format(seed))
    return rng
5910d6ef50196c8bd1f4ed40a5da202a39d7f62c
18
algorithms.py
162
docs: fix typos (#2899)

* Small typo fixes
* Catch an additional typo

Co-authored-by: Michael Waskom <[email protected]>
7,462
0
224
97
49
42,023
65
seaborn
15
seaborn/algorithms.py
Python
15
{ "docstring": "Given a seed in one of many formats, return a random number generator.\n\n Generalizes across the numpy 1.17 changes, preferring newer functionality.\n\n ", "language": "en", "n_whitespaces": 28, "n_words": 22, "vocab_size": 21 }
https://github.com/mwaskom/seaborn.git
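A hedged usage sketch of the helper above: every accepted seed format comes back as a ready-to-use generator object (a `numpy.random.Generator` on numpy >= 1.17, a `RandomState` otherwise):

import numpy as np

rng_from_int = _handle_random_seed(0)        # reproducible draws
rng_fresh = _handle_random_seed(None)        # seeded from OS entropy
rng_passthrough = _handle_random_seed(np.random.RandomState(0))  # returned unchanged
print(rng_from_int.uniform(), rng_passthrough.uniform())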
3
get_admin_form_fields
def get_admin_form_fields(self, response):
    fields = []
    for fieldset in response.context["adminform"]:
        for field_line in fieldset:
            fields.extend(field_line)
    return fields
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
tests.py
58
Refs #33476 -- Reformatted code with Black.
52,074
0
71
35
14
207,732
17
django
8
tests/admin_views/tests.py
Python
6
{ "docstring": "\n Return a list of AdminFields for the AdminForm in the response.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 10 }
https://github.com/django/django.git
10
_fit
def _fit(self, X, y, max_iter, alpha, fit_path, normalize, Xy=None):
    n_features = X.shape[1]

    X, y, X_offset, y_offset, X_scale = _preprocess_data(
        X, y, self.fit_intercept, normalize, self.copy_X
    )

    if y.ndim == 1:
        y = y[:, np.newaxis]

    n_targets = y.shape[1]

    Gram = self._get_gram(self.precompute, X, y)

    self.alphas_ = []
    self.n_iter_ = []
    self.coef_ = np.empty((n_targets, n_features), dtype=X.dtype)

    if fit_path:
        self.active_ = []
        self.coef_path_ = []
        for k in range(n_targets):
            this_Xy = None if Xy is None else Xy[:, k]
            alphas, active, coef_path, n_iter_ = lars_path(
                X,
                y[:, k],
                Gram=Gram,
                Xy=this_Xy,
                copy_X=self.copy_X,
                copy_Gram=True,
                alpha_min=alpha,
                method=self.method,
                verbose=max(0, self.verbose - 1),
                max_iter=max_iter,
                eps=self.eps,
                return_path=True,
                return_n_iter=True,
                positive=self.positive,
            )
            self.alphas_.append(alphas)
            self.active_.append(active)
            self.n_iter_.append(n_iter_)
            self.coef_path_.append(coef_path)
            self.coef_[k] = coef_path[:, -1]

        if n_targets == 1:
            self.alphas_, self.active_, self.coef_path_, self.coef_ = [
                a[0]
                for a in (self.alphas_, self.active_, self.coef_path_, self.coef_)
            ]
            self.n_iter_ = self.n_iter_[0]
    else:
        for k in range(n_targets):
            this_Xy = None if Xy is None else Xy[:, k]
            alphas, _, self.coef_[k], n_iter_ = lars_path(
                X,
                y[:, k],
                Gram=Gram,
                Xy=this_Xy,
                copy_X=self.copy_X,
                copy_Gram=True,
                alpha_min=alpha,
                method=self.method,
                verbose=max(0, self.verbose - 1),
                max_iter=max_iter,
                eps=self.eps,
                return_path=False,
                return_n_iter=True,
                positive=self.positive,
            )
            self.alphas_.append(alphas)
            self.n_iter_.append(n_iter_)
        if n_targets == 1:
            self.alphas_ = self.alphas_[0]
            self.n_iter_ = self.n_iter_[0]

    self._set_intercept(X_offset, y_offset, X_scale)
    return self
1c094728a33f05bb6c83d7b856b87254964e0e8c
18
_least_angle.py
740
CLN clean _preprocess_data in linear_model (#22762)
75,616
0
1,212
519
99
259,163
186
scikit-learn
51
sklearn/linear_model/_least_angle.py
Python
70
{ "docstring": "Auxiliary method to fit the model using X, y as training data", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/scikit-learn/scikit-learn.git
1
delete_instance
def delete_instance(self, instance_id):
    request = DeleteInstanceRequest()
    request.set_InstanceId(instance_id)
    request.set_Force(True)
    logging.info("Delete %s command submit successfully", instance_id)
    self._send_request(request)
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
8
utils.py
68
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,248
0
57
39
15
130,362
15
ray
10
python/ray/autoscaler/_private/aliyun/utils.py
Python
6
{ "docstring": "Release a pay-as-you-go instance or\n an expired subscription instance.\n\n :param instance_id: The ID of the instance that you want to release.\n ", "language": "en", "n_whitespaces": 46, "n_words": 21, "vocab_size": 20 }
https://github.com/ray-project/ray.git
1
__aenter__
async def __aenter__(self):
    await self.before_transition(self.context)
    self.context.rule_signature.append(str(self.__class__))
    return self.context
e935e13e0f2abf2be02b548bbd7e874d131162b0
10
rules.py
59
Push nullified transition logic into GlobalTransforms - we may choose to run some bookkeeping even if a transition is aborted
11,937
0
36
34
8
59,729
8
prefect
8
src/prefect/orion/orchestration/rules.py
Python
4
{ "docstring": "\n Enter an async runtime context governed by this transform.\n\n The `with` statement will bind a governed `OrchestrationContext` to the target\n specified by the `as` clause. If the transition proposed by the\n `OrchestrationContext` has been nullified on entry and `context.proposed_state`\n is `None`, entering this context will do nothing. Otherwise\n `self.before_transition` will fire.\n ", "language": "en", "n_whitespaces": 101, "n_words": 51, "vocab_size": 40 }
https://github.com/PrefectHQ/prefect.git
1
write_hashes
def write_hashes(filename):
    [head, tail] = os.path.split(filename)
    new_filename = f"{head}/hashed-{tail}"
    shutil.copy(filename, new_filename)

    with open(filename) as in_file:
        data = json.loads(in_file.read())

    click.echo(f"Event ID: {data['event_id']}")
    click.echo("Writing span hashes")

    config = load_span_grouping_config({"id": DEFAULT_CONFIG_ID})
    results = config.execute_strategy(data)

    with open(new_filename, "w") as out_file:
        results.write_to_event(data)
        out_file.write(json.dumps(data, indent=4))

    click.echo("Done")
    click.echo("\n")
9738a416a27cd77a3d9a2e506c31d8280e0bf1af
12
spans.py
240
feat(suspect-spans): Add CLI script for testing hashing output (#40668) Adds a CLI script for testing our spans hashes. Given a file, runs grouping, and writes the result to a new JSON file.
18,307
0
98
126
35
87,430
41
sentry
28
src/sentry/runner/commands/spans.py
Python
15
{ "docstring": "\n Runs span hash grouping on event data in the supplied filename using the\n default grouping strategy. Write the results to a copy of the file.\n Filename should be a path to a JSON event data file.\n ", "language": "en", "n_whitespaces": 49, "n_words": 36, "vocab_size": 26 }
https://github.com/getsentry/sentry.git
13
_get_ordered_dummies
def _get_ordered_dummies(mul, verbose=False):
    # setup dicts to avoid repeated calculations in key()
    args = Mul.make_args(mul)
    fac_dum = { fac: fac.atoms(Dummy) for fac in args }
    fac_repr = { fac: __kprint(fac) for fac in args }
    all_dums = set().union(*fac_dum.values())
    mask = {}
    for d in all_dums:
        if d.assumptions0.get('below_fermi'):
            mask[d] = '0'
        elif d.assumptions0.get('above_fermi'):
            mask[d] = '1'
        else:
            mask[d] = '2'
    dum_repr = {d: __kprint(d) for d in all_dums}
24f1e7730119fe958cc8e28411f790c9a5ec04eb
12
secondquant.py
211
Fix various typos Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`
49,638
0
148
258
43
200,424
67
sympy
21
sympy/physics/secondquant.py
Python
26
{ "docstring": "Returns all dummies in the mul sorted in canonical order.\n\n Explanation\n ===========\n\n The purpose of the canonical ordering is that dummies can be substituted\n consistently across terms with the result that equivalent terms can be\n simplified.\n\n It is not possible to determine if two terms are equivalent based solely on\n the dummy order. However, a consistent substitution guided by the ordered\n dummies should lead to trivially (non-)equivalent terms, thereby revealing\n the equivalence. This also means that if two terms have identical sequences of\n dummies, the (non-)equivalence should already be apparent.\n\n Strategy\n --------\n\n The canonical order is given by an arbitrary sorting rule. A sort key\n is determined for each dummy as a tuple that depends on all factors where\n the index is present. The dummies are thereby sorted according to the\n contraction structure of the term, instead of sorting based solely on the\n dummy symbol itself.\n\n After all dummies in the term has been assigned a key, we check for identical\n keys, i.e. unorderable dummies. If any are found, we call a specialized\n method, _determine_ambiguous(), that will determine a unique order based\n on recursive calls to _get_ordered_dummies().\n\n Key description\n ---------------\n\n A high level description of the sort key:\n\n 1. Range of the dummy index\n 2. Relation to external (non-dummy) indices\n 3. Position of the index in the first factor\n 4. Position of the index in the second factor\n\n The sort key is a tuple with the following components:\n\n 1. A single character indicating the range of the dummy (above, below\n or general.)\n 2. A list of strings with fully masked string representations of all\n factors where the dummy is present. By masked, we mean that dummies\n are represented by a symbol to indicate either below fermi, above or\n general. No other information is displayed about the dummies at\n this point. The list is sorted stringwise.\n 3. An integer number indicating the position of the index, in the first\n factor as sorted in 2.\n 4. An integer number indicating the position of the index, in the second\n factor as sorted in 2.\n\n If a factor is either of type AntiSymmetricTensor or SqOperator, the index\n position in items 3 and 4 is indicated as 'upper' or 'lower' only.\n (Creation operators are considered upper and annihilation operators lower.)\n\n If the masked factors are identical, the two factors cannot be ordered\n unambiguously in item 2. In this case, items 3, 4 are left out. If several\n indices are contracted between the unorderable factors, it will be handled by\n _determine_ambiguous()\n\n\n ", "language": "en", "n_whitespaces": 650, "n_words": 415, "vocab_size": 207 }
https://github.com/sympy/sympy.git
4
_pprint_semantics_within_frame
def _pprint_semantics_within_frame(self, vnframe, indent=""): pieces = [] for predicate in vnframe["semantics"]: arguments = [argument["value"] for argument in predicate["arguments"]] pieces.append( f"{'¬' if predicate['negated'] else ''}{predicate['predicate_value']}({', '.join(arguments)})" ) return "\n".join(f"{indent}* {piece}" for piece in pieces)
8b43b49b0cd8c12cae1d48df27edfdd98cf859fd
14
verbnet.py
151
Read 'bool' field from VerbNet
7,592
0
109
58
28
42,525
33
nltk
11
nltk/corpus/reader/verbnet.py
Python
8
{ "docstring": "Returns a pretty printed version of semantics within frame in a VerbNet class\n\n Return a string containing a pretty-printed representation of\n the given VerbNet frame semantics.\n\n :param vnframe: An ElementTree containing the xml contents of\n a VerbNet frame.\n ", "language": "en", "n_whitespaces": 77, "n_words": 38, "vocab_size": 27 }
https://github.com/nltk/nltk.git
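For orientation, a sketch of the input shape the method above expects and the line it renders; the frame contents are invented, not from the corpus:

vnframe = {
    "semantics": [
        {
            "negated": False,
            "predicate_value": "motion",
            "arguments": [{"value": "during(E)"}, {"value": "Theme"}],
        }
    ]
}
# reader._pprint_semantics_within_frame(vnframe, indent="  ")
# would return: "  * motion(during(E), Theme)"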
3
dev_clone_iter
def dev_clone_iter(xs, devs):
    if isinstance(devs, str):
        devs = [devs]
    return DevClonedIter([dev_clone(x, devs) for x in xs], devs)
d743336b1f3654cd0315f380f43eed4116997c1d
10
device.py
60
renamed dev_str arg to dev for all methods.
53,667
0
33
39
16
213,595
17
ivy
8
ivy/core/device.py
Python
4
{ "docstring": "\n Clone elements of the iterbale xs to each of the specified devices.\n\n :param xs: The iterable of items to clone.\n :type xs: iterable of any\n :param devs: The devices to clone each of the iterable elements to.\n :type devs: sequence of strs\n :return: iterable with each element cloned to each of the target devices\n ", "language": "en", "n_whitespaces": 76, "n_words": 54, "vocab_size": 29 }
https://github.com/unifyai/ivy.git
1
test_ts
def test_ts(self, mock_handler):
    # TS
    df2 = pd.DataFrame(pd.date_range(start='1/1/2018', end='1/31/2018'), columns=['t'])
    df3 = df2.copy()

    df2['a'] = 'a'
    df2['x'] = range(1, 32)
    df3['a'] = 'b'
    df3['x'] = range(11, 42)

    df = pd.concat([df2, df3])

    self.set_handler(mock_handler, name='pg', tables={'df': df})

    # create project
    self.run_sql('create database proj')

    # TS predictor
    # create predictor
    self.run_sql()

    self.wait_predictor('proj', 'modelx')

    # run predict
    ret = self.run_sql()

    # LW can predict
    assert list(ret.x) == [42, 43, 44]
9ce5a21dd6359fd7e8ebf78051ce9e97bd195ec9
12
test_lightwood.py
258
ML handler supbrocess (#3377)

* log -> logger

  dividing components:
  app initialize: parse args, set env.MINDSDB_CONFIG_PATH
  config: requiers env.MINDSDB_CONFIG_PATH, sets env.MINDSDB_DB_CON, Config() - makes initialization
  log: uses config, initialize_log - makes initialization
  database: uses env.MINDSDB_DB_CON, have init() method
  file storage: uses config

* partial sync for model storage interfaces
* ml handler in subprocess interface
* fix delete model
* fix: model with error in success status
* fix: remove hf predictor
* fix pg handler
* MLHandlerPersistWrapper keeps wrapper process opened
* predictor with error keeps 'success' status #3362
* lock for sending tasks to subprocess one by one
* check target of predictor before run learn in subproccess
* fix check target
* fix: json_ai override and problem definition generation
* fix None case
* folder for ml handler tests
* fix: add timeseries_settings key to learn_args
* fixes in lw_handler
* fix: del join_learn_process
* tests for LW handler
* finish unit test for LW
* changes in tests:
  - set-project -> to base class
  - return of ml handler is dataframe
  - draft for project structure test
* merge from staging
* create_validation method to check learn params before send to subprocess
  fixes of HF
  fixed version of transformers in HF requirements

Co-authored-by: Patricio Cerda Mardini <[email protected]>
26,011
0
198
146
49
117,468
66
mindsdb
23
tests/unit/ml_handlers/test_lightwood.py
Python
27
{ "docstring": "\n create model proj.modelx\n from pg (select * from df)\n predict x\n order by t\n group by a\n window 5\n horizon 3\n \n SELECT p.*\n FROM pg.df as t \n JOIN proj.modelx as p\n where t.a='b' and t.t > latest\n ", "language": "en", "n_whitespaces": 164, "n_words": 37, "vocab_size": 32 }
https://github.com/mindsdb/mindsdb.git
5
sys_tags
def sys_tags(**kwargs):
    # type: (bool) -> Iterator[Tag]
    warn = _warn_keyword_parameter("sys_tags", kwargs)

    interp_name = interpreter_name()
    if interp_name == "cp":
        for tag in cpython_tags(warn=warn):
            yield tag
    else:
        for tag in generic_tags():
            yield tag

    for tag in compatible_tags():
        yield tag
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
11
tags.py
102
upd; format
13,065
0
101
57
24
62,899
37
transferlearning
10
.venv/lib/python3.8/site-packages/pip/_vendor/packaging/tags.py
Python
11
{ "docstring": "\n Returns the sequence of tag triples for the running interpreter.\n\n The order of the sequence corresponds to priority order for the\n interpreter, from most to least important.\n ", "language": "en", "n_whitespaces": 40, "n_words": 27, "vocab_size": 19 }
https://github.com/jindongwang/transferlearning.git
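The same generator ships in the standalone `packaging` library, which this vendored copy mirrors; a hedged sketch of how the priority ordering is typically consumed:

from packaging import tags

best = next(tags.sys_tags())  # first yielded tag is the most preferred
print(best.interpreter, best.abi, best.platform)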
1
mixin_http_gateway_parser
def mixin_http_gateway_parser(parser=None):
    gp = add_arg_group(parser, title='HTTP Gateway')

    gp.add_argument(
        '--title',
        type=str,
        help='The title of this HTTP server. It will be used in automatics docs such as Swagger UI.',
    )

    gp.add_argument(
        '--description',
        type=str,
        help='The description of this HTTP server. It will be used in automatics docs such as Swagger UI.',
    )

    gp.add_argument(
        '--cors',
        action='store_true',
        default=False,
        help=,
    )

    gp.add_argument(
        '--default-swagger-ui',
        action='store_true',
        default=False,
        help='If set, the default swagger ui is used for `/docs` endpoint. ',
    )

    gp.add_argument(
        '--no-debug-endpoints',
        action='store_true',
        default=False,
        help='If set, /status /post endpoints are removed from HTTP interface. ',
    )

    gp.add_argument(
        '--no-crud-endpoints',
        action='store_true',
        default=False,
        help=,
    )

    gp.add_argument(
        '--expose-endpoints',
        type=str,
        help=,
    )

    gp.add_argument(
        '--uvicorn-kwargs',
        action=KVAppendAction,
        metavar='KEY: VALUE',
        nargs='*',
        help=,
    )

    gp.add_argument(
        '--grpc-server-kwargs',
        action=KVAppendAction,
        metavar='KEY: VALUE',
        nargs='*',
        help=,
    )

    gp.add_argument(
        '--ssl-certfile',
        type=str,
        help=,
        dest='ssl_certfile',
    )

    gp.add_argument(
        '--ssl-keyfile',
        type=str,
        help=,
        dest='ssl_keyfile',
    )
dd5f08e9d0c535de2f0e6b106db3b21c597752cd
10
remote.py
378
feat: add grpc tls support on gateway (#4522)
2,141
0
500
223
66
11,938
127
jina
15
jina/parsers/orchestrate/runtimes/remote.py
Python
86
{ "docstring": "Add the options to rest server\n\n :param parser: the parser\n \n If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.\n \n If set, /index, /search, /update, /delete endpoints are removed from HTTP interface.\n\n Any executor that has `@requests(on=...)` bind with those values will receive data requests.\n \n A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.\n \nDictionary of kwargs arguments that will be passed to Uvicorn server when starting the server\n\nMore details can be found in Uvicorn docs: https://www.uvicorn.org/settings/\n\n\n Dictionary of kwargs arguments that will be passed to the grpc server when starting the server # todo update\n \n the path to the certificate file\n \n the path to the key file\n ", "language": "en", "n_whitespaces": 211, "n_words": 118, "vocab_size": 75 }
https://github.com/jina-ai/jina.git
14
next
def next(self):
    self._check("ra")
    if self.firstmember is not None:
        m = self.firstmember
        self.firstmember = None
        return m

    # Read the next block.
    self.fileobj.seek(self.offset)
    tarinfo = None
    while True:
        try:
            tarinfo = self.tarinfo.fromtarfile(self)
        except EOFHeaderError as e:
            if self.ignore_zeros:
                self._dbg(2, "0x%X: %s" % (self.offset, e))
                self.offset += BLOCKSIZE
                continue
        except InvalidHeaderError as e:
            if self.ignore_zeros:
                self._dbg(2, "0x%X: %s" % (self.offset, e))
                self.offset += BLOCKSIZE
                continue
            elif self.offset == 0:
                raise ReadError(str(e))
        except EmptyHeaderError:
            if self.offset == 0:
                raise ReadError("empty file")
        except TruncatedHeaderError as e:
            if self.offset == 0:
                raise ReadError(str(e))
        except SubsequentHeaderError as e:
            raise ReadError(str(e))
        break

    if tarinfo is not None:
        self.members.append(tarinfo)
    else:
        self._loaded = True

    return tarinfo

#--------------------------------------------------------------------------
# Little helper methods:
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
17
tarfile.py
353
Vendor in pip 22.1.2
3,847
0
596
211
58
21,454
112
pipenv
24
pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py
Python
37
{ "docstring": "Return the next member of the archive as a TarInfo object, when\n TarFile is opened for reading. Return None if there is no more\n available.\n ", "language": "en", "n_whitespaces": 52, "n_words": 25, "vocab_size": 22 }
https://github.com/pypa/pipenv.git
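The standard-library `tarfile` exposes the same iteration contract as this backport; a short sketch (the archive name is a placeholder):

import tarfile

with tarfile.open("example.tar") as archive:  # placeholder filename
    for member in archive:  # iteration calls next() until it returns None
        print(member.name)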
4
generate_config_style_dict
def generate_config_style_dict(self) -> dict[str, str]:
    keys_converting_dict = {
        "fill": ("color", "fill_color"),
        "fill-opacity": ("opacity", "fill_opacity"),
        "stroke": ("color", "stroke_color"),
        "stroke-opacity": ("opacity", "stroke_opacity"),
        "stroke-width": ("stroke_width",),
    }
    svg_default_dict = self.svg_default
    result = {}
    for svg_key, style_keys in keys_converting_dict.items():
        for style_key in style_keys:
            if svg_default_dict[style_key] is None:
                continue
            result[svg_key] = str(svg_default_dict[style_key])
    return result
309c9d41eb734ca85a7aea5533f88a6d4ee7c944
13
svg_mobject.py
180
Ported improved implementation of :class:`.SVGMobject` from 3b1b/manim (#2898)

* port SVGMobject from 3b1b/manim
* added svgelements as dependency
* revert change of default values
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* set default stroke_width of svg elements to 0 if not set
* fix handling of circles with different rx/ry
* turn more methods into staticmethods
* removed duplicated method
* set/adapt stroke-width of some test SVGs
* updated control data
* forgot some control data
* fixed init_colors in tex_mobject and text_mobject
* minor changes, added docstrings
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* module docstring, removed import
* vector_to_coords changed again
* nail sphinx version to below 5.1 to fix rtd (?)
* update test_text control data for science
* changed Brace to use VMobjectFromSVGPath
* remove unused classes and methods depending on old SVG path implementation
* remove style_utils and svg_path modules
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* change test_text to use monospace font
* restore geometry.polygram
* added get_mobject_type_class auxiliary method; changed polyline implementation to ad-hoc approach
* restore test_text to previous version
* skip Use tags as svgelements already populates them

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
46,283
0
212
104
40
189,989
48
manim
12
manim/mobject/svg/svg_mobject.py
Python
17
{ "docstring": "Generate a dictionary holding the default style information.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/ManimCommunity/manim.git
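A standalone sketch of the key-conversion loop in the record above, with a hand-written dict standing in for `SVGMobject.svg_default` (hypothetical values): only non-None defaults make it into the result, and later style keys for the same SVG key overwrite earlier ones.

# Hypothetical stand-in for self.svg_default.
svg_default = {"color": None, "fill_color": "#58C4DD", "opacity": None, "fill_opacity": 0.5}
keys_converting_dict = {
    "fill": ("color", "fill_color"),
    "fill-opacity": ("opacity", "fill_opacity"),
}
result = {}
for svg_key, style_keys in keys_converting_dict.items():
    for style_key in style_keys:
        if svg_default[style_key] is None:
            continue
        result[svg_key] = str(svg_default[style_key])
print(result)  # {'fill': '#58C4DD', 'fill-opacity': '0.5'}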
1
test_update_from_select
def test_update_from_select(self, mock_handler):
    self.set_handler(mock_handler, name='pg', tables={'tasks': self.df})

    # --- use predictor ---
    predictor = {
        'name': 'task_model',
        'predict': 'p',
        'dtypes': {
            'p': dtype.float,
            'a': dtype.integer,
            'b': dtype.categorical,
            'c': dtype.datetime
        },
        'predicted_value': 'ccc'
    }
    self.set_predictor(predictor)
    sql = 
    ret = self.command_executor.execute_command(
        parse_sql(sql, dialect='mindsdb'))
    assert ret.error_code is None

    # 1 select and 2 updates
    assert mock_handler().query.call_count == 3

    # second is update
    assert mock_handler().query.call_args_list[1][0][0].to_string() == "update table2 set a1=1, c1='ccc' where (a1 = 1) AND (b1 = 'ccc')"

# @patch('mindsdb.integrations.handlers.postgres_handler.Handler')
# def test_union_type_mismatch(self, mock_handler):
#     self.set_handler(mock_handler, name='pg', tables={'tasks': self.df})
#
#     sql = 
#     from mindsdb.api.mysql.mysql_proxy.utilities import ErSqlWrongArguments
#     with pytest.raises(ErSqlWrongArguments):
#         self.command_executor.execute_command(parse_sql(sql, dialect='mindsdb'))
0dadd5cecec68f252a08637f695b0e4b573b316f
15
test_executor.py
247
support of update command #2454
25,796
0
360
135
71
116,615
101
mindsdb
25
tests/unit/test_executor.py
Python
37
{ "docstring": "\n update \n pg.table2 \n set\n a1 = df.a,\n c1 = df.c\n from \n (\n SELECT model.a as a, model.b as b, model.p as c\n FROM pg.tasks as t\n JOIN mindsdb.task_model as model\n WHERE t.a=1 \n )\n as df\n where \n table2.a1 = df.a \n and table2.b1 = df.b \n \n # SELECT a, b FROM pg.tasks\n # UNION\n # SELECT b, a FROM pg.tasks\n # ", "language": "en", "n_whitespaces": 410, "n_words": 57, "vocab_size": 38 }
https://github.com/mindsdb/mindsdb.git
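As elsewhere in this dump, the SQL literal stripped from `sql = ` in the code above survives in the record's documentation field; lightly reflowed, the inlined assignment would read:

# SQL recovered from the record's documentation field (whitespace reflowed).
sql = '''
    update pg.table2 set
        a1 = df.a,
        c1 = df.c
    from (
        SELECT model.a as a, model.b as b, model.p as c
        FROM pg.tasks as t
        JOIN mindsdb.task_model as model
        WHERE t.a=1
    ) as df
    where table2.a1 = df.a and table2.b1 = df.b
'''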
1
synchronize_between_processes
def synchronize_between_processes(self):
    t = reduce_across_processes([self.count, self.total])
    t = t.tolist()
    self.count = int(t[0])
    self.total = t[1]
e556640badaa3d565e8a1238154265eaccf9f49c
10
utils.py
75
Reduce variance of evaluation in reference (#5819) * Change code to reduce variance in eval * Remove unnecessary new line * Fix missing import warnings * Fix the warning on video_classification * Fix bug to get len of UniformClipSampler
46,906
0
50
46
11
192,761
15
vision
8
references/segmentation/utils.py
Python
5
{ "docstring": "\n Warning: does not synchronize the deque!\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
https://github.com/pytorch/vision.git
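For context, `reduce_across_processes` (defined in the same torchvision utils module) boils down to an all_reduce; a minimal sketch under `torch.distributed`, assuming CUDA and an initialized process group on the distributed path:

import torch
import torch.distributed as dist

def reduce_across_processes(val):
    # Sum a value across ranks; fall back to a plain tensor when
    # no process group is initialized (single-process runs).
    if not (dist.is_available() and dist.is_initialized()):
        return torch.tensor(val)
    t = torch.tensor(val, device="cuda")
    dist.barrier()
    dist.all_reduce(t)
    return t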
2
convert_types
def convert_types(self, schema, col_type_dict, row, stringify_dict=False) -> list:
    return [
        self.convert_type(value, col_type_dict.get(name), stringify_dict=stringify_dict)
        for name, value in zip(schema, row)
    ]
766726f2e3a282fcd2662f5dc6e9926dc38a6540
10
sql_to_gcs.py
71
Fix `PostgresToGCSOperator` does not allow nested JSON (#23063) * Avoid double json.dumps for json data export in PostgresToGCSOperator. * Fix CI
9,394
0
63
49
20
48,187
20
airflow
12
airflow/providers/google/cloud/transfers/sql_to_gcs.py
Python
6
{ "docstring": "Convert values from DBAPI to output-friendly formats.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/apache/airflow.git
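A standalone sketch of the zip pattern in `convert_types` above, with a trivial stand-in for `self.convert_type` (hypothetical type names): each column name is looked up in the type map and its value converted in row order.

# Hypothetical converter standing in for self.convert_type.
def convert_type(value, col_type, stringify_dict=False):
    return str(value) if col_type == "string" else value

schema = ["id", "name"]
col_type_dict = {"id": "int", "name": "string"}
row = (1, 42)
print([convert_type(v, col_type_dict.get(n)) for n, v in zip(schema, row)])
# [1, '42']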
6
__next__
def __next__(self):
    if "run" not in self.unresolved_spec:
        raise TuneError("Must specify `run` in {}".format(self.unresolved_spec))

    if self.variants and self.variants.has_next():
        # This block will be skipped upon instantiation.
        # `variants` will be set later after the first loop.
        resolved_vars, spec = next(self.variants)
        return self.create_trial(resolved_vars, spec)

    if self.points_to_evaluate:
        config = self.points_to_evaluate.pop(0)
        self.num_samples_left -= 1
        self.variants = _VariantIterator(
            get_preset_variants(
                self.unresolved_spec,
                config,
                constant_grid_search=self.constant_grid_search,
                random_state=self.random_state,
            ),
            lazy_eval=self.lazy_eval,
        )
        resolved_vars, spec = next(self.variants)
        return self.create_trial(resolved_vars, spec)
    elif self.num_samples_left > 0:
        self.variants = _VariantIterator(
            generate_variants(
                self.unresolved_spec,
                constant_grid_search=self.constant_grid_search,
                random_state=self.random_state,
            ),
            lazy_eval=self.lazy_eval,
        )
        self.num_samples_left -= 1
        resolved_vars, spec = next(self.variants)
        return self.create_trial(resolved_vars, spec)
    else:
        raise StopIteration
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
15
basic_variant.py
312
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,734
0
547
200
58
132,303
95
ray
22
python/ray/tune/suggest/basic_variant.py
Python
34
{ "docstring": "Generates Trial objects with the variant generation process.\n\n Uses a fixed point iteration to resolve variants. All trials\n should be able to be generated at once.\n\n See also: `ray.tune.suggest.variant_generator`.\n\n Returns:\n Trial object\n ", "language": "en", "n_whitespaces": 78, "n_words": 32, "vocab_size": 29 }
https://github.com/ray-project/ray.git
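A minimal sketch of how this generator is typically fed, assuming the Ray Tune of this era is installed: `points_to_evaluate` entries are consumed first (the middle branch above), after which grid/random variants are generated until the sample budget (`num_samples_left`) is exhausted.

from ray.tune.suggest.basic_variant import BasicVariantGenerator

# The first trial uses the preset config; later trials come from generate_variants.
searcher = BasicVariantGenerator(points_to_evaluate=[{"lr": 1e-3}])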
1
test_tweedie_link_auto
def test_tweedie_link_auto(power, expected_link_class):
    y = np.array([0.1, 0.5])  # in range of all distributions
    X = np.array([[1], [2]])
    glm = TweedieRegressor(link="auto", power=power).fit(X, y)
    assert isinstance(glm._linear_loss.base_loss.link, expected_link_class)


@pytest.mark.parametrize("power", [0, 1, 1.5, 2, 3])
@pytest.mark.parametrize("link", ["log", "identity"])
75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc
@pytest.mark.parametrize("power", [0, 1, 1.5, 2, 3]) @pytest.mark.parametrize("link", ["log", "identity"])
12
test_glm.py
164
ENH migrate GLMs / TweedieRegressor to linear loss (#22548) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
75,784
1
48
72
32
259,453
34
scikit-learn
17
sklearn/linear_model/_glm/tests/test_glm.py
Python
5
{ "docstring": "Test that link='auto' delivers the expected link function", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/scikit-learn/scikit-learn.git
5
gmean
def gmean(a, axis=0, dtype=None, weights=None):
    r
    if not isinstance(a, np.ndarray):
        # if not an ndarray object attempt to convert it
        log_a = np.log(np.array(a, dtype=dtype))
    elif dtype:
        # Must change the default dtype allowing array type
        if isinstance(a, np.ma.MaskedArray):
            log_a = np.log(np.ma.asarray(a, dtype=dtype))
        else:
            log_a = np.log(np.asarray(a, dtype=dtype))
    else:
        log_a = np.log(a)

    if weights is not None:
        weights = np.asanyarray(weights, dtype=dtype)

    return np.exp(np.average(log_a, axis=axis, weights=weights))


@_axis_nan_policy_factory(
    lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
    result_unpacker=lambda x: (x,), kwd_samples=['weights'])
56869131c8e0a0d6e1af86cc1a000c61e83efcf6
@_axis_nan_policy_factory( lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True, result_unpacker=lambda x: (x,), kwd_samples=['weights'])
17
_stats_py.py
286
DOC: stats: correct doc display
69,761
1
177
148
57
242,047
76
scipy
25
scipy/stats/_stats_py.py
Python
79
{ "docstring": "Compute the weighted geometric mean along the specified axis.\n\n The weighted geometric mean of the array :math:`a_i` associated to weights\n :math:`w_i` is:\n\n .. math::\n\n \\exp \\left( \\frac{ \\sum_{i=1}^n w_i \\log a_i }{ \\sum_{i=1}^n w_i }\n \\right) \\, ,\n\n and, with equal weights, it falls backs to:\n\n .. math::\n\n \\sqrt[n]{ \\prod_{i=1}^n a_i } \\, .\n\n Parameters\n ----------\n a : array_like\n Input array or object that can be converted to an array.\n axis : int or None, optional\n Axis along which the geometric mean is computed. Default is 0.\n If None, compute over the whole array `a`.\n dtype : dtype, optional\n Type of the returned array and of the accumulator in which the\n elements are summed. If dtype is not specified, it defaults to the\n dtype of a, unless a has an integer dtype with a precision less than\n that of the default platform integer. In that case, the default\n platform integer is used.\n weights : array_like, optional\n The `weights` array must be broadcastable to the same shape as `a`.\n Default is None, which gives each value a weight of 1.0.\n\n Returns\n -------\n gmean : ndarray\n See `dtype` parameter above.\n\n See Also\n --------\n numpy.mean : Arithmetic average\n numpy.average : Weighted average\n hmean : Harmonic mean\n\n Notes\n -----\n The geometric average is computed over a single dimension of the input\n array, axis=0 by default, or all values in the array if axis=None.\n float64 intermediate and return values are used for integer inputs.\n\n References\n ----------\n .. [1] \"Weighted Geometric Mean\", *Wikipedia*,\n https://en.wikipedia.org/wiki/Weighted_geometric_mean.\n\n Examples\n --------\n >>> from scipy.stats import gmean\n >>> gmean([1, 4])\n 2.0\n >>> gmean([1, 2, 3, 4, 5, 6, 7])\n 3.3800151591412964\n >>> gmean([1, 4, 7], weights=[3, 1, 3])\n 2.80668351922014\n\n ", "language": "en", "n_whitespaces": 506, "n_words": 276, "vocab_size": 173 }
https://github.com/scipy/scipy.git
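The docstring's weighted example can be reproduced directly from the identity the implementation uses, exp of the weighted average of logs:

import numpy as np

a = np.array([1.0, 4.0, 7.0])
w = np.array([3.0, 1.0, 3.0])
# exp(average(log a, weights=w)) is the weighted geometric mean.
print(np.exp(np.average(np.log(a), weights=w)))  # 2.80668351922014, as in the docstring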
3
_wrap_model
def _wrap_model(self):
    if not self.is_wrapped:
        for _, wrapper in reversed(self.get_modules_wrapper().items()):
            _setattr(self.bound_model, wrapper.name, wrapper)
            wrapper._weight2buffer()
        self.is_wrapped = True
2566badb06095b9e3ea16eb6f00fd58da65a95fd
13
pruner.py
86
[Model Compression] Pruning Wrapper Refactor (#4488)
24,562
0
83
52
17
112,049
17
nni
12
nni/algorithms/compression/v2/pytorch/base/pruner.py
Python
6
{ "docstring": "\n Wrap all modules that needed to be compressed.\n Different from the parent function, call `wrapper._weight2buffer()` after replace the origin module to wrapper.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 20 }
https://github.com/microsoft/nni.git
5
check_convert_parameter
def check_convert_parameter(name, value, parameter):
    try:
        # Try to cast the value to the correct type if int or float
        if parameter.type_ is int:
            new_value = parameter.type_(float(value))
        elif parameter.type_ is float:
            new_value = parameter.type_(value)
        elif parameter.type_ is bool:
            new_value = strtobool(value)
        else:
            new_value = value
    except ValueError:
        new_value = parameter.default
        console.print(
            f"[red]'{name}' format should be '{parameter.type_.__name__}' type[/red]",
            f"[red]and could not be converted. Setting default '{new_value}'.\n[/red]",
        )

    return new_value
faca7ab67d1ce5d0ae0e5c862332bcfc37f72ea9
14
params_helpers.py
157
Portfolio optimization bug fixes (#3675) * remove hcp * add prams dict to statics * little controller bug * fix view bug * avoid crash if one stock * check params on setup * create parameter class * check and convert parameters using class * change params name to avoid confusion * create a parameter statics dict * change some funcs names * remove unused imports * remove default dict * optional type * add multi choices * cast only int and float * fix completer * fix bugs with parameter validation * fix bugs with parameter validation * add excel formatting * sdk needs mapping as well, controller takes care of this in terminal * small formating * add some safe guard try except * controller bugs * oops * change export path of parameters to portfolio folder * add more commands to scripts * catch optimization exception * log errors * black and exceptions * add flag to test * black * flake8 * forgot this * pylint * change defaults * fix ef default * fix bl * sync sdk defaults * sync last defaults * fix plot heat and add more choices to controller autocomplete * patch weights * fix wrong bool parsing Co-authored-by: James Maslek <[email protected]>
86,053
0
201
84
48
286,830
67
OpenBBTerminal
15
openbb_terminal/portfolio/portfolio_optimization/parameters/params_helpers.py
Python
17
{ "docstring": "Converts a parameter to the correct type\n\n Parameters\n ----------\n name: str\n The name of the received parameter\n value: str\n The value of the received parameter\n parameter: Parameter\n The parameter object\n\n Returns\n -------\n The converted parameter\n ", "language": "en", "n_whitespaces": 83, "n_words": 35, "vocab_size": 23 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
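A usage sketch assuming a minimal stand-in for the real Parameter class (only the two attributes the function reads); the second call hits the ValueError branch and falls back to the default.

from collections import namedtuple

# Hypothetical stand-in; the real class lives in the same OpenBB module.
Parameter = namedtuple("Parameter", ["type_", "default"])

print(check_convert_parameter("maxnan", "0.05", Parameter(float, 0.0)))  # 0.05
print(check_convert_parameter("maxnan", "oops", Parameter(float, 0.0)))  # prints a warning, returns 0.0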
1
test_remember_collapsed
def test_remember_collapsed(self):
    # Sidebar should not be collapsed
    self.client.cookies["wagtail_sidebar_collapsed"] = "0"
    response = self.client.get(reverse("wagtailadmin_home"))
    self.assertNotContains(response, "sidebar-collapsed")

    # Sidebar should be collapsed
    self.client.cookies["wagtail_sidebar_collapsed"] = "1"
    response = self.client.get(reverse("wagtailadmin_home"))
    self.assertContains(response, "sidebar-collapsed")
d10f15e55806c6944827d801cd9c2d53f5da4186
11
test_menu.py
127
Reformat with black
15,818
0
92
68
17
72,022
29
wagtail
9
wagtail/admin/tests/test_menu.py
Python
7
{ "docstring": "Sidebar should render with collapsed class applied.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/wagtail/wagtail.git
1
test_minibatch_dictionary_learning_warns_and_ignore_n_iter
def test_minibatch_dictionary_learning_warns_and_ignore_n_iter():
    warn_msg = "'n_iter' is deprecated in version 1.1"
    with pytest.warns(FutureWarning, match=warn_msg):
        model = MiniBatchDictionaryLearning(batch_size=256, n_iter=2, max_iter=2).fit(X)
    assert model.n_iter_ == 2
06834fc8e197f1f308f5ab690ec73a66b6b0b10b
13
test_dict_learning.py
79
DEPR deprecate n_iter in MiniBatchSparsePCA (#23726) Co-authored-by: Jérémie du Boisberranger <[email protected]> Co-authored-by: jeremie du boisberranger <[email protected]> Co-authored-by: Olivier Grisel <[email protected]>
76,179
0
41
47
21
260,317
22
scikit-learn
14
sklearn/decomposition/tests/test_dict_learning.py
Python
5
{ "docstring": "Check that we always raise a warning when `n_iter` is set even if it is\n ignored if `max_iter` is set.\n ", "language": "en", "n_whitespaces": 26, "n_words": 20, "vocab_size": 17 }
https://github.com/scikit-learn/scikit-learn.git
3
installed_with_setuptools_egg_info
def installed_with_setuptools_egg_info(self) -> bool:
    info_location = self.info_location
    if not info_location:
        return False
    if not info_location.endswith(".egg-info"):
        return False
    return pathlib.Path(info_location).is_dir()
f3166e673fe8d40277b804d35d77dcdb760fc3b3
9
base.py
72
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,143
0
76
41
14
19,906
19
pipenv
8
pipenv/patched/notpip/_internal/metadata/base.py
Python
16
{ "docstring": "Whether this distribution is installed with the ``.egg-info`` format.\n\n This usually indicates the distribution was installed with setuptools\n with an old pip version or with ``single-version-externally-managed``.\n\n Note that this ensure the metadata store is a directory. distutils can\n also installs an ``.egg-info``, but as a file, not a directory. This\n property is *False* for that case. Also see ``installed_by_distutils``.\n ", "language": "en", "n_whitespaces": 101, "n_words": 59, "vocab_size": 43 }
https://github.com/pypa/pipenv.git
2
generate_hash
def generate_hash():
    generate = True
    conn = sqlite3.connect(DB_FILE)
    c = conn.cursor()
    while generate:
        hash = uuid.uuid4().hex
        c.execute(
            ,
            (hash,),
        )
        generate = c.fetchone() is not None
    conn.commit()
    return hash
cc0cff893f9d7d472788adc2510c123967b384fe
11
queueing.py
105
Format The Codebase - black formatting - isort formatting
42,942
0
96
63
23
179,292
29
gradio
15
gradio/queueing.py
Python
16
{ "docstring": "\n SELECT hash FROM queue\n WHERE hash = ?;\n ", "language": "en", "n_whitespaces": 38, "n_words": 8, "vocab_size": 7 }
https://github.com/gradio-app/gradio.git
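The query string stripped from the `c.execute` call above is preserved in the record's documentation field; inlined, the collision check reads:

c.execute(
    "SELECT hash FROM queue WHERE hash = ?;",  # recovered from the docstring field
    (hash,),
)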
1
unrank_gray
def unrank_gray(self, rank, superset):
    graycode_bitlist = GrayCode.unrank(len(superset), rank)
    return Subset.subset_from_bitlist(superset, graycode_bitlist)
498015021131af4dbb07eb110e5badaba8250c7b
10
subsets.py
50
Updated import locations
47,703
0
32
32
11
196,203
11
sympy
10
sympy/combinatorics/subsets.py
Python
3
{ "docstring": "\n Gets the Gray code ordered subset of the specified rank.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Subset\n >>> Subset.unrank_gray(4, ['a', 'b', 'c']).subset\n ['a', 'b']\n >>> Subset.unrank_gray(0, ['a', 'b', 'c']).subset\n []\n\n See Also\n ========\n\n iterate_graycode, rank_gray\n ", "language": "en", "n_whitespaces": 120, "n_words": 35, "vocab_size": 27 }
https://github.com/sympy/sympy.git
3
resample_sidelines
def resample_sidelines(self, sideline1, sideline2, resample_step):
    assert sideline1.ndim == sideline2.ndim == 2
    assert sideline1.shape[1] == sideline2.shape[1] == 2
    assert sideline1.shape[0] >= 2
    assert sideline2.shape[0] >= 2
    assert isinstance(resample_step, float)

    length1 = sum([
        norm(sideline1[i + 1] - sideline1[i])
        for i in range(len(sideline1) - 1)
    ])
    length2 = sum([
        norm(sideline2[i + 1] - sideline2[i])
        for i in range(len(sideline2) - 1)
    ])

    total_length = (length1 + length2) / 2
    resample_point_num = max(int(float(total_length) / resample_step), 1)

    resampled_line1 = self.resample_line(sideline1, resample_point_num)
    resampled_line2 = self.resample_line(sideline2, resample_point_num)

    return resampled_line1, resampled_line2
9f62b610dea6161627200ed85d92e19b1923279a
15
fce_targets.py
266
add fcenet
4,537
0
231
178
49
23,193
82
PaddleOCR
23
ppocr/data/imaug/fce_targets.py
Python
19
{ "docstring": "Resample two sidelines to be of the same points number according to\n step size.\n\n Args:\n sideline1 (ndarray): The points composing a sideline of a text\n polygon.\n sideline2 (ndarray): The points composing another sideline of a\n text polygon.\n resample_step (float): The resampled step size.\n\n Returns:\n resampled_line1 (ndarray): The resampled line 1.\n resampled_line2 (ndarray): The resampled line 2.\n ", "language": "en", "n_whitespaces": 169, "n_words": 56, "vocab_size": 33 }
https://github.com/PaddlePaddle/PaddleOCR.git
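A worked instance of the point-count arithmetic above with two hypothetical sidelines: both have length 10, so the average is 10 and a step of 2.0 yields max(int(10 / 2.0), 1) == 5 resampled points per line.

import numpy as np
from numpy.linalg import norm

top = np.array([[0.0, 0.0], [10.0, 0.0]])
bottom = np.array([[0.0, 4.0], [5.0, 4.0], [10.0, 4.0]])
length = lambda line: sum(norm(line[i + 1] - line[i]) for i in range(len(line) - 1))
total_length = (length(top) + length(bottom)) / 2
print(max(int(total_length / 2.0), 1))  # 5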
3
set_read_only
def set_read_only(self, names, msg="Attribute is read-only"):
    new_read_only = {name: msg for name in names}
    if getattr(self, "_read_only", False):
        self._read_only.update(new_read_only)
    else:
        object.__setattr__(self, "_read_only", new_read_only)
b3bb96da2002383625b7ff595d9862a4f0085c17
11
_utils.py
87
Make AttributeDict additive ._read_only attribute is now a dict of name -> msgs Overrides .update() method to use __setitem__ implementation Includes updates to unit tests
7,256
0
73
53
22
39,793
23
dash
11
dash/_utils.py
Python
6
{ "docstring": "\n Designate named attributes as read-only with the corresponding msg\n\n Method is additive. Making additional calls to this method will update\n existing messages and add to the current set of _read_only names.\n ", "language": "en", "n_whitespaces": 60, "n_words": 31, "vocab_size": 29 }
https://github.com/plotly/dash.git
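A quick sketch of the additive behavior, attaching the function above to a bare class for illustration (its real host is dash's AttributeDict); the second call updates the existing dict rather than replacing it.

class Box:
    pass

Box.set_read_only = set_read_only  # illustrative attachment

b = Box()
b.set_read_only(["width"], msg="width is fixed")
b.set_read_only(["height"])
print(b._read_only)
# {'width': 'width is fixed', 'height': 'Attribute is read-only'}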
1
test_help_text_examples_are_contextualized
def test_help_text_examples_are_contextualized():
    rendered_inline = render(spacing_invalid_value("padding", "inline"))
    assert "widget.styles.padding" in rendered_inline

    rendered_css = render(spacing_invalid_value("padding", "css"))
    assert "padding:" in rendered_css
91783b7c1e06a45e93fd89dbdb6aa3d1a9c2e990
11
test_help_text.py
70
Testing for help text
44,041
0
33
35
12
183,062
18
textual
5
tests/css/test_help_text.py
Python
5
{ "docstring": "Ensure that if the user is using CSS, they see CSS-specific examples\n and if they're using inline styles they see inline-specific examples.", "language": "en", "n_whitespaces": 24, "n_words": 22, "vocab_size": 18 }
https://github.com/Textualize/textual.git
1
test_unlock_post_own_snippet_with_bad_permissions
def test_unlock_post_own_snippet_with_bad_permissions(self):
    # Remove privileges from user
    self.user.is_superuser = False
    self.user.groups.add(Group.objects.get(name="Editors"))
    self.user.save()

    # Lock the snippet
    self.lock_snippet(self.user)

    next_url = reverse("wagtailadmin_home")
    response = self.client.post(self.get_url("unlock"), {"next": next_url})
    self.refresh_snippet()

    # Check response
    self.assertRedirects(response, next_url)

    # Check that the snippet is not locked
    self.assertFalse(self.snippet.locked)
    self.assertIsNone(self.snippet.locked_by)
    self.assertIsNone(self.snippet.locked_at)
10dbbddaf35607e4257f50dd960520a1268dd225
11
test_locking.py
202
Add tests for locking snippets
17,036
0
154
117
33
80,230
42
wagtail
26
wagtail/snippets/tests/test_locking.py
Python
12
{ "docstring": "User can unlock a snippet they have locked without the unlock permission.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
https://github.com/wagtail/wagtail.git
6
to_integral_value
def to_integral_value(self, rounding=None, context=None):
    if context is None:
        context = getcontext()
    if rounding is None:
        rounding = context.rounding
    if self._is_special:
        ans = self._check_nans(context=context)
        if ans:
            return ans
        return Decimal(self)
    if self._exp >= 0:
        return Decimal(self)
    else:
        return self._rescale(0, rounding)

# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
_pydecimal.py
138
add python 3.10.4 for windows
55,831
0
196
82
39
219,818
56
XX-Net
12
python3.10.4/Lib/_pydecimal.py
Python
14
{ "docstring": "Rounds to the nearest integer, without raising inexact, rounded.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/XX-net/XX-Net.git
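The behavior is observable through the public decimal module, which this pure-Python backport mirrors; the default context rounding is ROUND_HALF_EVEN, so ties go to the even neighbor.

from decimal import Decimal, ROUND_FLOOR

print(Decimal("2.5").to_integral_value())                       # 2 (half-even)
print(Decimal("3.5").to_integral_value())                       # 4 (half-even)
print(Decimal("-2.5").to_integral_value(rounding=ROUND_FLOOR))  # -3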
4
_update_trial_info
def _update_trial_info(self, expr_dir):
    trial_id = expr_dir[-8:]

    meta_file = os.path.join(expr_dir, EXPR_META_FILE)
    meta = parse_json(meta_file)

    result_file = os.path.join(expr_dir, EXPR_RESULT_FILE)
    offset = self._result_offsets.get(trial_id, 0)
    results, new_offset = parse_multiple_json(result_file, offset)
    self._add_results(results, trial_id)
    self._result_offsets[trial_id] = new_offset

    if meta:
        TrialRecord.objects.filter(trial_id=trial_id).update(
            trial_status=meta["status"],
            end_time=timestamp2date(meta.get("end_time", None)),
        )
    elif len(results) > 0:
        metrics = {
            "episode_reward": results[-1].get("episode_reward_mean", None),
            "accuracy": results[-1].get("mean_accuracy", None),
            "loss": results[-1].get("loss", None),
        }
        if results[-1].get("done"):
            TrialRecord.objects.filter(trial_id=trial_id).update(
                trial_status="TERMINATED",
                end_time=results[-1].get("date", None),
                metrics=str(metrics),
            )
        else:
            TrialRecord.objects.filter(trial_id=trial_id).update(
                metrics=str(metrics)
            )
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
17
collector.py
407
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,665
0
421
253
50
132,072
67
ray
30
python/ray/tune/automlboard/backend/collector.py
Python
30
{ "docstring": "Update information for given trial.\n\n Meta file will be loaded if exists, and the trial information\n in db backend will be updated.\n\n Args:\n expr_dir(str)\n ", "language": "en", "n_whitespaces": 63, "n_words": 24, "vocab_size": 21 }
https://github.com/ray-project/ray.git
1
test_avatar_allowed_mime_type_per_room
def test_avatar_allowed_mime_type_per_room(self):
    self._setup_local_files(
        {
            "good": {"mimetype": "image/png"},
            "bad": {"mimetype": "application/octet-stream"},
        }
    )
    room_id = self.helper.create_room_as(tok=self.owner_tok)

    channel = self.make_request(
        "PUT",
        f"/rooms/{room_id}/state/m.room.member/{self.owner}",
        content={"membership": "join", "avatar_url": "mxc://test/bad"},
        access_token=self.owner_tok,
    )
    self.assertEqual(channel.code, 403, channel.result)
    self.assertEqual(
        channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body
    )

    channel = self.make_request(
        "PUT",
        f"/rooms/{room_id}/state/m.room.member/{self.owner}",
        content={"membership": "join", "avatar_url": "mxc://test/good"},
        access_token=self.owner_tok,
    )
    self.assertEqual(channel.code, 200, channel.result)
bf60da1a60096fac5fb778b732ff2214862ac808
12
test_profile.py
276
Configurable limits on avatars (#11846) Only allow files which file size and content types match configured limits to be set as avatar. Most of the inspiration from the non-test code comes from matrix-org/synapse-dinsic#19
71,034
0
283
150
32
246,133
48
synapse
19
tests/rest/client/test_profile.py
Python
25
{ "docstring": "Tests that the MIME type whitelist for avatars is enforced when updating a\n per-room profile.\n ", "language": "en", "n_whitespaces": 29, "n_words": 15, "vocab_size": 15 }
https://github.com/matrix-org/synapse.git
1
test_blocked_public_room_list_over_federation
def test_blocked_public_room_list_over_federation(self):
    channel = self.make_signed_federation_request(
        "GET",
        "/_matrix/federation/v1/publicRooms",
    )
    self.assertEquals(403, channel.code)
c3db7a0b59d48b8872bc24096f9a2467ef35f703
9
test_server.py
48
Tests: replace mocked Authenticator with the real thing (#11913) If we prepopulate the test homeserver with a key for a remote homeserver, we can make federation requests to it without having to stub out the authenticator. This has two advantages: * means that what we are testing is closer to reality (ie, we now have complete tests for the incoming-request-authorisation flow) * some tests require that other objects be signed by the remote server (eg, the event in `/send_join`), and doing that would require a whole separate set of mocking out. It's much simpler just to use real keys.
71,153
0
60
27
10
246,320
10
synapse
6
tests/federation/transport/test_server.py
Python
6
{ "docstring": "Test that unauthenticated requests to the public rooms directory 403 when\n allow_public_rooms_over_federation is False.\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 14 }
https://github.com/matrix-org/synapse.git
1
test_background_populate_rooms_creator_column
def test_background_populate_rooms_creator_column(self) -> None:
    # Insert a room without the creator
    room_id = self._generate_room()
    self.get_success(
        self.store.db_pool.simple_update(
            table="rooms",
            keyvalues={"room_id": room_id},
            updatevalues={"creator": None},
            desc="test",
        )
    )

    # Make sure the test is starting out with a room without a creator
    room_creator_before = self.get_success(
        self.store.db_pool.simple_select_one_onecol(
            table="rooms",
            keyvalues={"room_id": room_id},
            retcol="creator",
            allow_none=True,
        )
    )
    self.assertEqual(room_creator_before, None)

    # Insert and run the background update.
    self.get_success(
        self.store.db_pool.simple_insert(
            "background_updates",
            {
                "update_name": _BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN,
                "progress_json": "{}",
            },
        )
    )

    # ... and tell the DataStore that it hasn't finished all updates yet
    self.store.db_pool.updates._all_done = False

    # Now let's actually drive the updates to completion
    self.wait_for_background_updates()

    # Make sure the background update filled in the room creator
    room_creator_after = self.get_success(
        self.store.db_pool.simple_select_one_onecol(
            table="rooms",
            keyvalues={"room_id": room_id},
            retcol="creator",
            allow_none=True,
        )
    )
    self.assertEqual(room_creator_after, self.user_id)
3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b
14
test_room.py
316
Require types in tests.storage. (#14646) Adds missing type hints to `tests.storage` package and does not allow untyped definitions.
73,263
0
610
188
73
250,085
119
synapse
25
tests/storage/databases/main/test_room.py
Python
42
{ "docstring": "Test that the background update to populate the rooms creator column\n works properly.\n ", "language": "en", "n_whitespaces": 27, "n_words": 13, "vocab_size": 12 }
https://github.com/matrix-org/synapse.git