/pyka-1.3.6.0.tar.gz/pyka-1.3.6.0/kafka/coordinator/consumer.py

from __future__ import absolute_import, division
import collections
import copy
import logging
import time
from kafka.vendor import six
from .base import BaseCoordinator, Generation
from .assignors.range import RangePartitionAssignor
from .assignors.roundrobin import RoundRobinPartitionAssignor
from .protocol import ConsumerProtocol
from .. import errors as Errors
from ..future import Future
from ..metrics import AnonMeasurable
from ..metrics.stats import Avg, Count, Max, Rate
from ..protocol.commit import OffsetCommitRequest, OffsetFetchRequest
from ..structs import OffsetAndMetadata, TopicPartition
from ..util import WeakMethod
log = logging.getLogger(__name__)
class ConsumerCoordinator(BaseCoordinator):
"""This class manages the coordination process with the consumer coordinator."""
DEFAULT_CONFIG = {
'group_id': 'kafka-python-default-group',
'enable_auto_commit': True,
'auto_commit_interval_ms': 5000,
'default_offset_commit_callback': None,
'assignors': (RangePartitionAssignor, RoundRobinPartitionAssignor),
'session_timeout_ms': 10000,
'heartbeat_interval_ms': 3000,
'max_poll_interval_ms': 300000,
'retry_backoff_ms': 100,
'api_version': (0, 10, 1),
'exclude_internal_topics': True,
'metric_group_prefix': 'consumer'
}
def __init__(self, client, subscription, metrics, **configs):
"""Initialize the coordination manager.
Keyword Arguments:
group_id (str): name of the consumer group to join for dynamic
partition assignment (if enabled), and to use for fetching and
committing offsets. Default: 'kafka-python-default-group'
enable_auto_commit (bool): If true the consumer's offset will be
periodically committed in the background. Default: True.
auto_commit_interval_ms (int): milliseconds between automatic
offset commits, if enable_auto_commit is True. Default: 5000.
default_offset_commit_callback (callable): called as
callback(offsets, exception); exception will be either None
or an Exception. This callback can be used to trigger custom
actions when a commit request completes.
assignors (list): List of objects to use to distribute partition
ownership amongst consumer instances when group management is
used. Default: [RangePartitionAssignor, RoundRobinPartitionAssignor]
heartbeat_interval_ms (int): The expected time in milliseconds
between heartbeats to the consumer coordinator when using
Kafka's group management feature. Heartbeats are used to ensure
that the consumer's session stays active and to facilitate
rebalancing when new consumers join or leave the group. The
value must be set lower than session_timeout_ms, but typically
should be set no higher than 1/3 of that value. It can be
adjusted even lower to control the expected time for normal
rebalances. Default: 3000
session_timeout_ms (int): The timeout used to detect failures when
using Kafka's group management facilities. Default: 10000
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
exclude_internal_topics (bool): Whether records from internal topics
(such as offsets) should be exposed to the consumer. If set to
True the only way to receive records from an internal topic is
subscribing to it. Requires 0.10+. Default: True
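Example (illustrative sketch only; assumes an already-configured
KafkaClient, subscription state and Metrics instance, which are
not shown here):
    coordinator = ConsumerCoordinator(
        client, subscription, metrics,
        group_id='my-group',
        enable_auto_commit=True,
        auto_commit_interval_ms=5000)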
"""
super(ConsumerCoordinator, self).__init__(client, metrics, **configs)
self.config = copy.copy(self.DEFAULT_CONFIG)
for key in self.config:
if key in configs:
self.config[key] = configs[key]
self._subscription = subscription
self._metadata_snapshot = self._build_metadata_snapshot(subscription, client.cluster)
self._assignment_snapshot = None
self._cluster = client.cluster
self.auto_commit_interval = self.config['auto_commit_interval_ms'] / 1000
self.next_auto_commit_deadline = None
self.completed_offset_commits = collections.deque()
if self.config['default_offset_commit_callback'] is None:
self.config['default_offset_commit_callback'] = self._default_offset_commit_callback
if self.config['group_id'] is not None:
if self.config['api_version'] >= (0, 9):
if not self.config['assignors']:
raise Errors.KafkaConfigurationError('Coordinator requires assignors')
if self.config['api_version'] < (0, 10, 1):
if self.config['max_poll_interval_ms'] != self.config['session_timeout_ms']:
raise Errors.KafkaConfigurationError("Broker version %s does not support "
"different values for max_poll_interval_ms "
"and session_timeout_ms")
if self.config['enable_auto_commit']:
if self.config['api_version'] < (0, 8, 1):
log.warning('Broker version (%s) does not support offset'
' commits; disabling auto-commit.',
self.config['api_version'])
self.config['enable_auto_commit'] = False
elif self.config['group_id'] is None:
log.warning('group_id is None: disabling auto-commit.')
self.config['enable_auto_commit'] = False
else:
self.next_auto_commit_deadline = time.time() + self.auto_commit_interval
self.consumer_sensors = ConsumerCoordinatorMetrics(
metrics, self.config['metric_group_prefix'], self._subscription)
self._cluster.request_update()
self._cluster.add_listener(WeakMethod(self._handle_metadata_update))
def __del__(self):
if hasattr(self, '_cluster') and self._cluster:
self._cluster.remove_listener(WeakMethod(self._handle_metadata_update))
super(ConsumerCoordinator, self).__del__()
def protocol_type(self):
return ConsumerProtocol.PROTOCOL_TYPE
def group_protocols(self):
"""Returns list of preferred (protocols, metadata)"""
topics = self._subscription.subscription
assert topics is not None, 'Consumer has not subscribed to topics'
metadata_list = []
for assignor in self.config['assignors']:
metadata = assignor.metadata(topics)
group_protocol = (assignor.name, metadata)
metadata_list.append(group_protocol)
return metadata_list
def _handle_metadata_update(self, cluster):
# if we encounter any unauthorized topics, raise an exception
if cluster.unauthorized_topics:
raise Errors.TopicAuthorizationFailedError(cluster.unauthorized_topics)
if self._subscription.subscribed_pattern:
topics = []
for topic in cluster.topics(self.config['exclude_internal_topics']):
if self._subscription.subscribed_pattern.match(topic):
topics.append(topic)
if set(topics) != self._subscription.subscription:
self._subscription.change_subscription(topics)
self._client.set_topics(self._subscription.group_subscription())
# check if there are any changes to the metadata which should trigger
# a rebalance
if self._subscription_metadata_changed(cluster):
if (self.config['api_version'] >= (0, 9)
and self.config['group_id'] is not None):
self._subscription.mark_for_reassignment()
# If we haven't got group coordinator support,
# just assign all partitions locally
else:
self._subscription.assign_from_subscribed([
TopicPartition(topic, partition)
for topic in self._subscription.subscription
for partition in self._metadata_snapshot[topic]
])
def _build_metadata_snapshot(self, subscription, cluster):
metadata_snapshot = {}
for topic in subscription.group_subscription():
partitions = cluster.partitions_for_topic(topic) or []
metadata_snapshot[topic] = set(partitions)
return metadata_snapshot
def _subscription_metadata_changed(self, cluster):
if not self._subscription.partitions_auto_assigned():
return False
metadata_snapshot = self._build_metadata_snapshot(self._subscription, cluster)
if self._metadata_snapshot != metadata_snapshot:
self._metadata_snapshot = metadata_snapshot
return True
return False
def _lookup_assignor(self, name):
for assignor in self.config['assignors']:
if assignor.name == name:
return assignor
return None
def _on_join_complete(self, generation, member_id, protocol,
member_assignment_bytes):
# if we were the assignor, then we need to make sure that there have
# been no metadata updates since the rebalance began. Otherwise, we
# won't rebalance again until the next metadata change
if self._assignment_snapshot is not None and self._assignment_snapshot != self._metadata_snapshot:
self._subscription.mark_for_reassignment()
return
assignor = self._lookup_assignor(protocol)
assert assignor, 'Coordinator selected invalid assignment protocol: %s' % protocol
assignment = ConsumerProtocol.ASSIGNMENT.decode(member_assignment_bytes)
# set the flag to refresh last committed offsets
self._subscription.needs_fetch_committed_offsets = True
# update partition assignment
self._subscription.assign_from_subscribed(assignment.partitions())
# give the assignor a chance to update internal state
# based on the received assignment
assignor.on_assignment(assignment)
# reschedule the auto commit starting from now
self.next_auto_commit_deadline = time.time() + self.auto_commit_interval
assigned = set(self._subscription.assigned_partitions())
log.info("Setting newly assigned partitions %s for group %s",
assigned, self.group_id)
# execute the user's callback after rebalance
if self._subscription.listener:
try:
self._subscription.listener.on_partitions_assigned(assigned)
except Exception:
log.exception("User provided listener %s for group %s"
" failed on partition assignment: %s",
self._subscription.listener, self.group_id,
assigned)
def poll(self):
"""
Poll for coordinator events. Only applicable if group_id is set, and
broker version supports GroupCoordinators. This ensures that the
coordinator is known, and if using automatic partition assignment,
ensures that the consumer has joined the group. This also handles
periodic offset commits if they are enabled.
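Example (illustrative sketch; ``coordinator`` is assumed to be built as in
the constructor example above, and record fetching is not shown):
    while True:
        coordinator.poll()
        timeout = coordinator.time_to_next_poll()
        # fetch and process records, waiting at most ``timeout`` seconds
        # before calling poll() again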
"""
if self.group_id is None or self.config['api_version'] < (0, 8, 2):
return
self._invoke_completed_offset_commit_callbacks()
self.ensure_coordinator_ready()
if self.config['api_version'] >= (0, 9) and self._subscription.partitions_auto_assigned():
if self.need_rejoin():
# due to a race condition between the initial metadata fetch and the
# initial rebalance, we need to ensure that the metadata is fresh
# before joining initially, and then request the metadata update. If
# metadata update arrives while the rebalance is still pending (for
# example, when the join group is still inflight), then we will lose
# track of the fact that we need to rebalance again to reflect the
# change to the topic subscription. Without ensuring that the
# metadata is fresh, any metadata update that changes the topic
# subscriptions and arrives while a rebalance is in progress will
# essentially be ignored. See KAFKA-3949 for the complete
# description of the problem.
if self._subscription.subscribed_pattern:
metadata_update = self._client.cluster.request_update()
self._client.poll(future=metadata_update)
self.ensure_active_group()
self.poll_heartbeat()
self._maybe_auto_commit_offsets_async()
def time_to_next_poll(self):
"""Return seconds (float) remaining until :meth:`.poll` should be called again"""
if not self.config['enable_auto_commit']:
return self.time_to_next_heartbeat()
if time.time() > self.next_auto_commit_deadline:
return 0
return min(self.next_auto_commit_deadline - time.time(),
self.time_to_next_heartbeat())
def _perform_assignment(self, leader_id, assignment_strategy, members):
assignor = self._lookup_assignor(assignment_strategy)
assert assignor, 'Invalid assignment protocol: %s' % assignment_strategy
member_metadata = {}
all_subscribed_topics = set()
for member_id, metadata_bytes in members:
metadata = ConsumerProtocol.METADATA.decode(metadata_bytes)
member_metadata[member_id] = metadata
all_subscribed_topics.update(metadata.subscription) # pylint: disable-msg=no-member
# the leader will begin watching for changes to any of the topics
# the group is interested in, which ensures that all metadata changes
# will eventually be seen
# Because assignment typically happens within response callbacks,
# we cannot block on metadata updates here (no recursion into poll())
self._subscription.group_subscribe(all_subscribed_topics)
self._client.set_topics(self._subscription.group_subscription())
# keep track of the metadata used for assignment so that we can check
# after rebalance completion whether anything has changed
self._cluster.request_update()
self._assignment_snapshot = self._metadata_snapshot
log.debug("Performing assignment for group %s using strategy %s"
" with subscriptions %s", self.group_id, assignor.name,
member_metadata)
assignments = assignor.assign(self._cluster, member_metadata)
log.debug("Finished assignment for group %s: %s", self.group_id, assignments)
group_assignment = {}
for member_id, assignment in six.iteritems(assignments):
group_assignment[member_id] = assignment
return group_assignment
def _on_join_prepare(self, generation, member_id):
# commit offsets prior to rebalance if auto-commit enabled
self._maybe_auto_commit_offsets_sync()
# execute the user's callback before rebalance
log.info("Revoking previously assigned partitions %s for group %s",
self._subscription.assigned_partitions(), self.group_id)
if self._subscription.listener:
try:
revoked = set(self._subscription.assigned_partitions())
self._subscription.listener.on_partitions_revoked(revoked)
except Exception:
log.exception("User provided subscription listener %s"
" for group %s failed on_partitions_revoked",
self._subscription.listener, self.group_id)
self._assignment_snapshot = None
self._subscription.mark_for_reassignment()
def need_rejoin(self):
"""Check whether the group should be rejoined
Returns:
bool: True if consumer should rejoin group, False otherwise
"""
return (self._subscription.partitions_auto_assigned() and
(super(ConsumerCoordinator, self).need_rejoin() or
self._subscription.needs_partition_assignment))
def refresh_committed_offsets_if_needed(self):
"""Fetch committed offsets for assigned partitions."""
if self._subscription.needs_fetch_committed_offsets:
offsets = self.fetch_committed_offsets(self._subscription.assigned_partitions())
for partition, offset in six.iteritems(offsets):
# verify assignment is still active
if self._subscription.is_assigned(partition):
self._subscription.assignment[partition].committed = offset.offset
self._subscription.needs_fetch_committed_offsets = False
def fetch_committed_offsets(self, partitions):
"""Fetch the current committed offsets for specified partitions
Arguments:
partitions (list of TopicPartition): partitions to fetch
Returns:
dict: {TopicPartition: OffsetAndMetadata}
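Example (illustrative sketch; the topic name is hypothetical):
    tp = TopicPartition('my-topic', 0)
    committed = coordinator.fetch_committed_offsets([tp])
    last_committed = committed[tp].offset if tp in committed else None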
"""
if not partitions:
return {}
while True:
self.ensure_coordinator_ready()
# contact coordinator to fetch committed offsets
future = self._send_offset_fetch_request(partitions)
self._client.poll(future=future)
if future.succeeded():
return future.value
if not future.retriable():
raise future.exception # pylint: disable-msg=raising-bad-type
time.sleep(self.config['retry_backoff_ms'] / 1000)
def close(self, autocommit=True):
"""Close the coordinator, leave the current group,
and reset local generation / member_id.
Keyword Arguments:
autocommit (bool): If auto-commit is configured for this consumer,
this optional flag causes the consumer to attempt to commit any
pending consumed offsets prior to close. Default: True
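Example (illustrative sketch; commits any pending consumed offsets and
then leaves the group):
    coordinator.close(autocommit=True)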
"""
try:
if autocommit:
self._maybe_auto_commit_offsets_sync()
finally:
super(ConsumerCoordinator, self).close()
def _invoke_completed_offset_commit_callbacks(self):
while self.completed_offset_commits:
callback, offsets, exception = self.completed_offset_commits.popleft()
callback(offsets, exception)
def commit_offsets_async(self, offsets, callback=None):
"""Commit specific offsets asynchronously.
Arguments:
offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit
callback (callable, optional): called as callback(offsets, response)
response will be either an Exception or an OffsetCommitResponse
struct. This callback can be used to trigger custom actions when
a commit request completes.
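Example (illustrative sketch; the offsets are built by hand here, whereas
a consumer normally derives them from its subscription state):
    offsets = {TopicPartition('my-topic', 0): OffsetAndMetadata(42, '')}
    coordinator.commit_offsets_async(
        offsets,
        callback=lambda offs, res: log.info('commit result: %s', res))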
"""
self._invoke_completed_offset_commit_callbacks()
if not self.coordinator_unknown():
self._do_commit_offsets_async(offsets, callback)
else:
# we don't know the current coordinator, so try to find it and then
# send the commit or fail (we don't want recursive retries which can
# cause offset commits to arrive out of order). Note that there may
# be multiple offset commits chained to the same coordinator lookup
# request. This is fine because the listeners will be invoked in the
# same order that they were added. Note also that BaseCoordinator
# prevents multiple concurrent coordinator lookup requests.
future = self.lookup_coordinator()
future.add_callback(self._do_commit_offsets_async, offsets, callback)
if callback:
future.add_errback(lambda e: self.completed_offset_commits.appendleft((callback, offsets, e)))
# ensure the commit has a chance to be transmitted (without blocking on
# its completion). Note that commits are treated as heartbeats by the
# coordinator, so there is no need to explicitly allow heartbeats
# through delayed task execution.
self._client.poll() # no wakeup if we add that feature
def _do_commit_offsets_async(self, offsets, callback=None):
assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
assert all(map(lambda k: isinstance(k, TopicPartition), offsets))
assert all(map(lambda v: isinstance(v, OffsetAndMetadata),
offsets.values()))
if callback is None:
callback = self.config['default_offset_commit_callback']
self._subscription.needs_fetch_committed_offsets = True
future = self._send_offset_commit_request(offsets)
future.add_both(lambda res: self.completed_offset_commits.appendleft((callback, offsets, res)))
return future
def commit_offsets_sync(self, offsets):
"""Commit specific offsets synchronously.
This method will retry until the commit completes successfully or an
unrecoverable error is encountered.
Arguments:
offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit
Raises error on failure
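Example (illustrative sketch; blocks until the commit succeeds or raises
an unrecoverable error):
    offsets = {TopicPartition('my-topic', 0): OffsetAndMetadata(42, '')}
    coordinator.commit_offsets_sync(offsets)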
"""
assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
assert all(map(lambda k: isinstance(k, TopicPartition), offsets))
assert all(map(lambda v: isinstance(v, OffsetAndMetadata),
offsets.values()))
self._invoke_completed_offset_commit_callbacks()
if not offsets:
return
while True:
self.ensure_coordinator_ready()
future = self._send_offset_commit_request(offsets)
self._client.poll(future=future)
if future.succeeded():
return future.value
if not future.retriable():
raise future.exception # pylint: disable-msg=raising-bad-type
time.sleep(self.config['retry_backoff_ms'] / 1000)
def _maybe_auto_commit_offsets_sync(self):
if self.config['enable_auto_commit']:
try:
self.commit_offsets_sync(self._subscription.all_consumed_offsets())
# The three main group membership errors are known and should not
# require a stacktrace -- just a warning
except (Errors.UnknownMemberIdError,
Errors.IllegalGenerationError,
Errors.RebalanceInProgressError):
log.warning("Offset commit failed: group membership out of date"
" This is likely to cause duplicate message"
" delivery.")
except Exception:
log.exception("Offset commit failed: This is likely to cause"
" duplicate message delivery")
def _send_offset_commit_request(self, offsets):
"""Commit offsets for the specified list of topics and partitions.
This is a non-blocking call which returns a request future that can be
polled in the case of a synchronous commit or ignored in the
asynchronous case.
Arguments:
offsets (dict of {TopicPartition: OffsetAndMetadata}): what should
be committed
Returns:
Future: indicating whether the commit was successful or not
"""
assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
assert all(map(lambda k: isinstance(k, TopicPartition), offsets))
assert all(map(lambda v: isinstance(v, OffsetAndMetadata),
offsets.values()))
if not offsets:
log.debug('No offsets to commit')
return Future().success(None)
node_id = self.coordinator()
if node_id is None:
return Future().failure(Errors.GroupCoordinatorNotAvailableError)
# create the offset commit request
offset_data = collections.defaultdict(dict)
for tp, offset in six.iteritems(offsets):
offset_data[tp.topic][tp.partition] = offset
if self._subscription.partitions_auto_assigned():
generation = self.generation()
else:
generation = Generation.NO_GENERATION
# if the generation is None, we are not part of an active group
# (and we expect to be). The only thing we can do is fail the commit
# and let the user rejoin the group in poll()
if self.config['api_version'] >= (0, 9) and generation is None:
return Future().failure(Errors.CommitFailedError())
if self.config['api_version'] >= (0, 9):
request = OffsetCommitRequest[2](
self.group_id,
generation.generation_id,
generation.member_id,
OffsetCommitRequest[2].DEFAULT_RETENTION_TIME,
[(
topic, [(
partition,
offset.offset,
offset.metadata
) for partition, offset in six.iteritems(partitions)]
) for topic, partitions in six.iteritems(offset_data)]
)
elif self.config['api_version'] >= (0, 8, 2):
request = OffsetCommitRequest[1](
self.group_id, -1, '',
[(
topic, [(
partition,
offset.offset,
-1,
offset.metadata
) for partition, offset in six.iteritems(partitions)]
) for topic, partitions in six.iteritems(offset_data)]
)
elif self.config['api_version'] >= (0, 8, 1):
request = OffsetCommitRequest[0](
self.group_id,
[(
topic, [(
partition,
offset.offset,
offset.metadata
) for partition, offset in six.iteritems(partitions)]
) for topic, partitions in six.iteritems(offset_data)]
)
log.debug("Sending offset-commit request with %s for group %s to %s",
offsets, self.group_id, node_id)
future = Future()
_f = self._client.send(node_id, request)
_f.add_callback(self._handle_offset_commit_response, offsets, future, time.time())
_f.add_errback(self._failed_request, node_id, request, future)
return future
def _handle_offset_commit_response(self, offsets, future, send_time, response):
# TODO look at adding request_latency_ms to response (like java kafka)
self.consumer_sensors.commit_latency.record((time.time() - send_time) * 1000)
unauthorized_topics = set()
for topic, partitions in response.topics:
for partition, error_code in partitions:
tp = TopicPartition(topic, partition)
offset = offsets[tp]
error_type = Errors.for_code(error_code)
if error_type is Errors.NoError:
log.debug("Group %s committed offset %s for partition %s",
self.group_id, offset, tp)
if self._subscription.is_assigned(tp):
self._subscription.assignment[tp].committed = offset.offset
elif error_type is Errors.GroupAuthorizationFailedError:
log.error("Not authorized to commit offsets for group %s",
self.group_id)
future.failure(error_type(self.group_id))
return
elif error_type is Errors.TopicAuthorizationFailedError:
unauthorized_topics.add(topic)
elif error_type in (Errors.OffsetMetadataTooLargeError,
Errors.InvalidCommitOffsetSizeError):
# raise the error to the user
log.debug("OffsetCommit for group %s failed on partition %s"
" %s", self.group_id, tp, error_type.__name__)
future.failure(error_type())
return
elif error_type is Errors.GroupLoadInProgressError:
# just retry
log.debug("OffsetCommit for group %s failed: %s",
self.group_id, error_type.__name__)
future.failure(error_type(self.group_id))
return
elif error_type in (Errors.GroupCoordinatorNotAvailableError,
Errors.NotCoordinatorForGroupError,
Errors.RequestTimedOutError):
log.debug("OffsetCommit for group %s failed: %s",
self.group_id, error_type.__name__)
self.coordinator_dead(error_type())
future.failure(error_type(self.group_id))
return
elif error_type in (Errors.UnknownMemberIdError,
Errors.IllegalGenerationError,
Errors.RebalanceInProgressError):
# need to re-join group
error = error_type(self.group_id)
log.debug("OffsetCommit for group %s failed: %s",
self.group_id, error)
self.reset_generation()
future.failure(Errors.CommitFailedError(
"Commit cannot be completed since the group has"
" already rebalanced and assigned the partitions to"
" another member. This means that the time between"
" subsequent calls to poll() was longer than the"
" configured session_timeout_ms, which typically"
" implies that the poll loop is spending too much time"
" message processing. You can address this either by"
" increasing the session timeout or by reducing the"
" maximum size of batches returned in poll() with"
" max_poll_records."))
return
else:
log.error("Group %s failed to commit partition %s at offset"
" %s: %s", self.group_id, tp, offset,
error_type.__name__)
future.failure(error_type())
return
if unauthorized_topics:
log.error("Not authorized to commit to topics %s for group %s",
unauthorized_topics, self.group_id)
future.failure(Errors.TopicAuthorizationFailedError(unauthorized_topics))
else:
future.success(None)
def _send_offset_fetch_request(self, partitions):
"""Fetch the committed offsets for a set of partitions.
This is a non-blocking call. The returned future can be polled to get
the actual offsets returned from the broker.
Arguments:
partitions (list of TopicPartition): the partitions to fetch
Returns:
Future: resolves to dict of offsets: {TopicPartition: int}
"""
assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
assert all(map(lambda k: isinstance(k, TopicPartition), partitions))
if not partitions:
return Future().success({})
node_id = self.coordinator()
if node_id is None:
return Future().failure(Errors.GroupCoordinatorNotAvailableError)
# Verify node is ready
if not self._client.ready(node_id):
log.debug("Node %s not ready -- failing offset fetch request",
node_id)
return Future().failure(Errors.NodeNotReadyError)
log.debug("Group %s fetching committed offsets for partitions: %s",
self.group_id, partitions)
# construct the request
topic_partitions = collections.defaultdict(set)
for tp in partitions:
topic_partitions[tp.topic].add(tp.partition)
if self.config['api_version'] >= (0, 8, 2):
request = OffsetFetchRequest[1](
self.group_id,
list(topic_partitions.items())
)
else:
request = OffsetFetchRequest[0](
self.group_id,
list(topic_partitions.items())
)
# send the request with a callback
future = Future()
_f = self._client.send(node_id, request)
_f.add_callback(self._handle_offset_fetch_response, future)
_f.add_errback(self._failed_request, node_id, request, future)
return future
def _handle_offset_fetch_response(self, future, response):
offsets = {}
for topic, partitions in response.topics:
for partition, offset, metadata, error_code in partitions:
tp = TopicPartition(topic, partition)
error_type = Errors.for_code(error_code)
if error_type is not Errors.NoError:
error = error_type()
log.debug("Group %s failed to fetch offset for partition"
" %s: %s", self.group_id, tp, error)
if error_type is Errors.GroupLoadInProgressError:
# just retry
future.failure(error)
elif error_type is Errors.NotCoordinatorForGroupError:
# re-discover the coordinator and retry
self.coordinator_dead(error_type())
future.failure(error)
elif error_type is Errors.UnknownTopicOrPartitionError:
log.warning("OffsetFetchRequest -- unknown topic %s"
" (have you committed any offsets yet?)",
topic)
continue
else:
log.error("Unknown error fetching offsets for %s: %s",
tp, error)
future.failure(error)
return
elif offset >= 0:
# record the position with the offset
# (-1 indicates no committed offset to fetch)
offsets[tp] = OffsetAndMetadata(offset, metadata)
else:
log.debug("Group %s has no committed offset for partition"
" %s", self.group_id, tp)
future.success(offsets)
def _default_offset_commit_callback(self, offsets, exception):
if exception is not None:
log.error("Offset commit failed: %s", exception)
def _commit_offsets_async_on_complete(self, offsets, exception):
if exception is not None:
log.warning("Auto offset commit failed for group %s: %s",
self.group_id, exception)
if getattr(exception, 'retriable', False):
self.next_auto_commit_deadline = min(time.time() + self.config['retry_backoff_ms'] / 1000, self.next_auto_commit_deadline)
else:
log.debug("Completed autocommit of offsets %s for group %s",
offsets, self.group_id)
def _maybe_auto_commit_offsets_async(self):
if self.config['enable_auto_commit']:
if self.coordinator_unknown():
self.next_auto_commit_deadline = time.time() + self.config['retry_backoff_ms'] / 1000
elif time.time() > self.next_auto_commit_deadline:
self.next_auto_commit_deadline = time.time() + self.auto_commit_interval
self.commit_offsets_async(self._subscription.all_consumed_offsets(),
self._commit_offsets_async_on_complete)
class ConsumerCoordinatorMetrics(object):
def __init__(self, metrics, metric_group_prefix, subscription):
self.metrics = metrics
self.metric_group_name = '%s-coordinator-metrics' % metric_group_prefix
self.commit_latency = metrics.sensor('commit-latency')
self.commit_latency.add(metrics.metric_name(
'commit-latency-avg', self.metric_group_name,
'The average time taken for a commit request'), Avg())
self.commit_latency.add(metrics.metric_name(
'commit-latency-max', self.metric_group_name,
'The max time taken for a commit request'), Max())
self.commit_latency.add(metrics.metric_name(
'commit-rate', self.metric_group_name,
'The number of commit calls per second'), Rate(sampled_stat=Count()))
num_parts = AnonMeasurable(lambda config, now:
len(subscription.assigned_partitions()))
metrics.add_metric(metrics.metric_name(
'assigned-partitions', self.metric_group_name,
'The number of partitions currently assigned to this consumer'),
num_parts)
/pyramids-gis-0.4.2.tar.gz/pyramids-gis-0.4.2/pyramids/dem.py

from typing import Dict
import numpy as np
from osgeo import gdal
from pyramids.dataset import Dataset
import sys
sys.setrecursionlimit(5000)
class DEM(Dataset):
"""GISCatchment class contains methods to deal with the MED and generate the flow direction based on the D8 method and process the DEM.
Methods:
1- D8
2- FlowDirectIndex
3- FlowDirectionTable
4- DeleteBasins
5- NearestCell
6- GroupNeighbours
7- Cluster
8- ListAttributes
"""
def __init__(self, src: gdal.Dataset):
super().__init__(src)
def D8(self):
"""D8 method generate flow direction raster from DEM and fill sinks.
Returns
-------
flow_direction_cell: [numpy array]
with the same dimensions as the raster and 2 layers:
the first layer holds the row index and the second layer holds the column index
elev_sinkless: [numpy array]
DEM after filling sinks
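Example (illustrative sketch; the DEM file path is hypothetical):
    src = gdal.Open("dem.tif")
    dem = DEM(src)
    flow_direction_cell, elev_sinkless = dem.D8()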
"""
cellsize = self.cell_size
dist2 = cellsize * np.sqrt(2)
no_columns = self.columns
no_rows = self.rows
elev = self.read_array(band=0)
# get the value stores in novalue cells
dem_no_val = self.no_data_value[0]
elev = elev.astype(np.float32)
elev[np.isclose(elev, dem_no_val, rtol=0.00001)] = np.nan
slopes = np.ones((no_rows, no_columns, 9)) * np.nan
distances = [cellsize, dist2, cellsize, dist2, cellsize, dist2, cellsize, dist2]
# filling sinks
elev_sinkless = elev
for i in range(1, no_rows - 1):
for j in range(1, no_columns - 1):
# get elevation of surrounding cells
f = [
elev[i - 1, j],
elev[i - 1, j - 1],
elev[i, j - 1],
elev[i + 1, j - 1],
elev[i + 1, j],
elev[i + 1, j + 1],
elev[i, j + 1],
elev[i - 1, j + 1],
]
if elev[i, j] < min(f):
elev_sinkless[i, j] = min(f) + 0.1
flow_direction = np.ones((no_rows, no_columns)) * np.nan
for i in range(1, no_rows - 1):
for j in range(1, no_columns - 1):
# calculate only if cell in elev is not nan
if not np.isnan(elev[i, j]):
# calculate slope
# slope with cell to the right
slopes[i, j, 0] = (
elev_sinkless[i, j] - elev_sinkless[i, j + 1]
) / distances[0]
# slope with cell to the top right
slopes[i, j, 1] = (
elev_sinkless[i, j] - elev_sinkless[i - 1, j + 1]
) / distances[1]
# slope with cell to the top
slopes[i, j, 2] = (
elev_sinkless[i, j] - elev_sinkless[i - 1, j]
) / distances[2]
# slope with cell to the top left
slopes[i, j, 3] = (
elev_sinkless[i, j] - elev_sinkless[i - 1, j - 1]
) / distances[3]
# slope with cell to the left
slopes[i, j, 4] = (
elev_sinkless[i, j] - elev_sinkless[i, j - 1]
) / distances[4]
# slope with cell to the bottom left
slopes[i, j, 5] = (
elev_sinkless[i, j] - elev_sinkless[i + 1, j - 1]
) / distances[5]
# slope with cell to the bottom
slopes[i, j, 6] = (
elev_sinkless[i, j] - elev_sinkless[i + 1, j]
) / distances[6]
# slope with cell to the bottom right
slopes[i, j, 7] = (
elev_sinkless[i, j] - elev_sinkless[i + 1, j + 1]
) / distances[7]
# get the flow direction index
flow_direction[i, j] = np.where(
slopes[i, j, :] == np.nanmax(slopes[i, j, :])
)[0][0]
slopes[i, j, 8] = np.nanmax(slopes[i, j, :])
# first rows without corners
for i in [0]:
for j in range(1, no_columns - 1): # all columns
if not np.isnan(elev[i, j]):
# slope with cell to the right
slopes[i, j, 0] = (
elev_sinkless[i, j] - elev_sinkless[i, j + 1]
) / distances[0]
# slope with cell to the left
slopes[i, j, 4] = (
elev_sinkless[i, j] - elev_sinkless[i, j - 1]
) / distances[4]
# slope with cell to the bottom left
slopes[i, j, 5] = (
elev_sinkless[i, j] - elev_sinkless[i + 1, j - 1]
) / distances[5]
# slope with cell to the bottom
slopes[i, j, 6] = (
elev_sinkless[i, j] - elev_sinkless[i + 1, j]
) / distances[6]
# slope with cell to the bottom right
slopes[i, j, 7] = (
elev_sinkless[i, j] - elev_sinkless[i + 1, j + 1]
) / distances[7]
flow_direction[i, j] = np.where(
slopes[i, j, :] == np.nanmax(slopes[i, j, :])
)[0][0]
slopes[i, j, 8] = np.nanmax(slopes[i, j, :])
# last rows without corners
for i in [no_rows - 1]:
for j in range(1, no_columns - 1): # all columns
if not np.isnan(elev[i, j]):
# slope with cell to the right
slopes[i, j, 0] = (
elev_sinkless[i, j] - elev_sinkless[i, j + 1]
) / distances[0]
# slope with cell to the top right
slopes[i, j, 1] = (
elev_sinkless[i, j] - elev_sinkless[i - 1, j + 1]
) / distances[1]
# slope with cell to the top
slopes[i, j, 2] = (
elev_sinkless[i, j] - elev_sinkless[i - 1, j]
) / distances[2]
# slope with cell to the top left
slopes[i, j, 3] = (
elev_sinkless[i, j] - elev_sinkless[i - 1, j - 1]
) / distances[3]
# slope with cell to the left
slopes[i, j, 4] = (
elev_sinkless[i, j] - elev_sinkless[i, j - 1]
) / distances[4]
flow_direction[i, j] = np.where(
slopes[i, j, :] == np.nanmax(slopes[i, j, :])
)[0][0]
slopes[i, j, 8] = np.nanmax(slopes[i, j, :])
# top left corner
i = 0
j = 0
if not np.isnan(elev[i, j]):
# slope with cell to the right
slopes[i, j, 0] = (
elev_sinkless[i, j] - elev_sinkless[i, j + 1]
) / distances[0]
# slope with cell to the bottom
slopes[i, j, 6] = (
elev_sinkless[i, j] - elev_sinkless[i + 1, j]
) / distances[6]
# slope with cell to the bottom right
slopes[i, j, 7] = (
elev_sinkless[i, j] - elev_sinkless[i + 1, j + 1]
) / distances[7]
flow_direction[i, j] = np.where(
slopes[i, j, :] == np.nanmax(slopes[i, j, :])
)[0][0]
slopes[i, j, 8] = np.nanmax(slopes[i, j, :])
# top right corner
i = 0
j = no_columns - 1
if not np.isnan(elev[i, j]):
# slope with cell to the left
slopes[i, j, 4] = (
elev_sinkless[i, j] - elev_sinkless[i, j - 1]
) / distances[4]
# slope with cell to the bottom left
slopes[i, j, 5] = (
elev_sinkless[i, j] - elev_sinkless[i + 1, j - 1]
) / distances[5]
# slope with cell to the bottom
slopes[i, j, 6] = (
elev_sinkless[i, j] - elev_sinkless[i + 1, j]
) / distances[6]
flow_direction[i, j] = np.where(
slopes[i, j, :] == np.nanmax(slopes[i, j, :])
)[0][0]
slopes[i, j, 8] = np.nanmax(slopes[i, j, :])
# bottom left corner
i = no_rows - 1
j = 0
if not np.isnan(elev[i, j]):
# slope with cell to the right
slopes[i, j, 0] = (
elev_sinkless[i, j] - elev_sinkless[i, j + 1]
) / distances[0]
# slope with cell to the top right
slopes[i, j, 1] = (
elev_sinkless[i, j] - elev_sinkless[i - 1, j + 1]
) / distances[1]
# slope with cell to the top
slopes[i, j, 2] = (
elev_sinkless[i, j] - elev_sinkless[i - 1, j]
) / distances[2]
flow_direction[i, j] = np.where(
slopes[i, j, :] == np.nanmax(slopes[i, j, :])
)[0][0]
slopes[i, j, 8] = np.nanmax(slopes[i, j, :])
# bottom right
i = no_rows - 1
j = no_columns - 1
if not np.isnan(elev[i, j]):
# slope with cell to the top
slopes[i, j, 2] = (
elev_sinkless[i, j] - elev_sinkless[i - 1, j]
) / distances[2]
# slope with cell to the top left
slopes[i, j, 3] = (
elev_sinkless[i, j] - elev_sinkless[i - 1, j - 1]
) / distances[3]
# slope with cell to the left
slopes[i, j, 4] = (
elev_sinkless[i, j] - elev_sinkless[i, j - 1]
) / distances[4]
flow_direction[i, j] = np.where(
slopes[i, j, :] == np.nanmax(slopes[i, j, :])
)[0][0]
slopes[i, j, 8] = np.nanmax(slopes[i, j, :])
# first column
for i in range(1, no_rows - 1):
for j in [0]:
if not np.isnan(elev[i, j]):
# slope with cell to the right
slopes[i, j, 0] = (
elev_sinkless[i, j] - elev_sinkless[i, j + 1]
) / distances[0]
# slope with cell to the top right
slopes[i, j, 1] = (
elev_sinkless[i, j] - elev_sinkless[i - 1, j + 1]
) / distances[1]
# slope with cell to the top
slopes[i, j, 2] = (
elev_sinkless[i, j] - elev_sinkless[i - 1, j]
) / distances[2]
# slope with cell to the bottom
slopes[i, j, 6] = (
elev_sinkless[i, j] - elev_sinkless[i + 1, j]
) / distances[6]
# slope with cell to the bottom right
slopes[i, j, 7] = (
elev_sinkless[i, j] - elev_sinkless[i + 1, j + 1]
) / distances[7]
# get the flow direction index
flow_direction[i, j] = np.where(
slopes[i, j, :] == np.nanmax(slopes[i, j, :])
)[0][0]
slopes[i, j, 8] = np.nanmax(slopes[i, j, :])
# last column
for i in range(1, no_rows - 1):
for j in [no_columns - 1]:
if not np.isnan(elev[i, j]):
# slope with cell to the top
slopes[i, j, 2] = (
elev_sinkless[i, j] - elev_sinkless[i - 1, j]
) / distances[2]
# slope with cell to the top left
slopes[i, j, 3] = (
elev_sinkless[i, j] - elev_sinkless[i - 1, j - 1]
) / distances[3]
# slope with cell to the left
slopes[i, j, 4] = (
elev_sinkless[i, j] - elev_sinkless[i, j - 1]
) / distances[4]
# slope with cell to the bottom left
slopes[i, j, 5] = (
elev_sinkless[i, j] - elev_sinkless[i + 1, j - 1]
) / distances[5]
# slope with cell to the bottom
slopes[i, j, 6] = (
elev_sinkless[i, j] - elev_sinkless[i + 1, j]
) / distances[6]
# get the flow direction index
flow_direction[i, j] = np.where(
slopes[i, j, :] == np.nanmax(slopes[i, j, :])
)[0][0]
slopes[i, j, 8] = np.nanmax(slopes[i, j, :])
# print(str(i)+","+str(j))
flow_direction_cell = np.ones((no_rows, no_columns, 2)) * np.nan
# for i in range(1,no_rows-1):
# for j in range(1,no_columns-1):
for i in range(no_rows):
for j in range(no_columns):
if flow_direction[i, j] == 0:
flow_direction_cell[i, j, 0] = i # index of the rows
flow_direction_cell[i, j, 1] = j + 1 # index of the column
elif flow_direction[i, j] == 1:
flow_direction_cell[i, j, 0] = i - 1
flow_direction_cell[i, j, 1] = j + 1
elif flow_direction[i, j] == 2:
flow_direction_cell[i, j, 0] = i - 1
flow_direction_cell[i, j, 1] = j
elif flow_direction[i, j] == 3:
flow_direction_cell[i, j, 0] = i - 1
flow_direction_cell[i, j, 1] = j - 1
elif flow_direction[i, j] == 4:
flow_direction_cell[i, j, 0] = i
flow_direction_cell[i, j, 1] = j - 1
elif flow_direction[i, j] == 5:
flow_direction_cell[i, j, 0] = i + 1
flow_direction_cell[i, j, 1] = j - 1
elif flow_direction[i, j] == 6:
flow_direction_cell[i, j, 0] = i + 1
flow_direction_cell[i, j, 1] = j
elif flow_direction[i, j] == 7:
flow_direction_cell[i, j, 0] = i + 1
flow_direction_cell[i, j, 1] = j + 1
return flow_direction_cell, elev_sinkless
def flowDirectionIndex(self) -> np.ndarray:
"""this function takes flow firection raster and convert codes for the 8 directions (1,2,4,8,16,32,64,128) into indices of the Downstream cell.
flow_direct:
[gdal.dataset] flow direction raster obtained from catchment delineation
it only contains values [1,2,4,8,16,32,64,128]
Returns
-------
fd_indices:
[numpy array] with the same dimensions as the raster and 2 layers:
the first layer holds the row index and the second layer holds the column index
Example:
----------
fd = gdal.Open("Flowdir.tif")
fd_indices = DEM(fd).flowDirectionIndex()
"""
# check flow direction input raster
no_val = self.no_data_value[0]
cols = self.columns
rows = self.rows
fd = self.read_array(band=0)
fd_val = np.unique(fd[~np.isclose(fd, no_val, rtol=0.00001)])
fd_should = [1, 2, 4, 8, 16, 32, 64, 128]
if not all(fd_val[i] in fd_should for i in range(len(fd_val))):
raise ValueError(
"flow direction raster should contain values 1,2,4,8,16,32,64,128 only "
)
fd_cell = np.ones((rows, cols, 2)) * np.nan
for i in range(rows):
for j in range(cols):
if fd[i, j] == 1:
fd_cell[i, j, 0] = i # index of the rows
fd_cell[i, j, 1] = j + 1 # index of the column
elif fd[i, j] == 128:
fd_cell[i, j, 0] = i - 1
fd_cell[i, j, 1] = j + 1
elif fd[i, j] == 64:
fd_cell[i, j, 0] = i - 1
fd_cell[i, j, 1] = j
elif fd[i, j] == 32:
fd_cell[i, j, 0] = i - 1
fd_cell[i, j, 1] = j - 1
elif fd[i, j] == 16:
fd_cell[i, j, 0] = i
fd_cell[i, j, 1] = j - 1
elif fd[i, j] == 8:
fd_cell[i, j, 0] = i + 1
fd_cell[i, j, 1] = j - 1
elif fd[i, j] == 4:
fd_cell[i, j, 0] = i + 1
fd_cell[i, j, 1] = j
elif fd[i, j] == 2:
fd_cell[i, j, 0] = i + 1
fd_cell[i, j, 1] = j + 1
return fd_cell
def flowDirectionTable(self) -> Dict:
"""Flow Direction Table.
- This function takes the flow direction indices created by the flowDirectionIndex method and creates a
dictionary with the cell indices as keys and the indices of the directly upstream cells as values (list of tuples).
The flow direction raster is obtained from catchment delineation and only
contains the values [1, 2, 4, 8, 16, 32, 64, 128].
Returns
-------
flowAccTable:
[Dict] dictionary with the cells indices as a key and indices of directly
upstream cells as values (list of tuples)
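Example (illustrative sketch; the file path is hypothetical):
    fd = gdal.Open("Flowdir.tif")
    flow_acc_table = DEM(fd).flowDirectionTable()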
"""
FDI = self.flowDirectionIndex()
rows = self.rows
cols = self.columns
celli = []
cellj = []
celli_content = []
cellj_content = []
for i in range(rows):
for j in range(cols):
if not np.isnan(FDI[i, j, 0]):
# store the indexes of not empty cells and the indexes stored inside these cells
celli.append(i)
cellj.append(j)
# store the index of the receiving cells
celli_content.append(FDI[i, j, 0])
cellj_content.append(FDI[i, j, 1])
flow_acc_table = {}
# for each cell store the directly giving cells
for i in range(rows):
for j in range(cols):
if not np.isnan(FDI[i, j, 0]):
# get the indexes of the cell and use it as a key in a dictionary
name = str(i) + "," + str(j)
flow_acc_table[name] = []
for k in range(len(celli_content)):
# search if any cell are giving this cell
if i == celli_content[k] and j == cellj_content[k]:
flow_acc_table[name].append((celli[k], cellj[k]))
return flow_acc_table
@staticmethod
def deleteBasins(basins, pathout):
"""Delete Basins
- This function deletes all the basins in a basin raster created when delineating a catchment and leaves
only the first basin, which is the biggest basin in the raster.
Parameters
----------
basins: [gdal.dataset]
raster you create during delineation of a catchment
values of its cells are the number of the basin it belongs to
pathout: [str]
path you want to save the resulted raster to it should include
the extension ".tif"
Returns
-------
raster with only one basin (the basin whose id is 1)
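Example (illustrative sketch; the file paths are hypothetical):
    basins = gdal.Open("basins.tif")
    DEM.deleteBasins(basins, "largest_basin.tif")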
"""
assert type(pathout) == str, "pathout input should be string type"
assert (
type(basins) == gdal.Dataset
), "basins raster should be a gdal.Dataset; please read it using the gdal library"
# get number of rows
rows = basins.RasterYSize
# get number of columns
cols = basins.RasterXSize
# array
basins_A = basins.ReadAsArray()
# no data value
no_val = np.float32(basins.GetRasterBand(1).GetNoDataValue())
# get the number of basins and their names
basins_val = list(
set(
[
int(basins_A[i, j])
for i in range(rows)
for j in range(cols)
if basins_A[i, j] != no_val
]
)
)
# keep the first basin and delete the others by filling their cells by nodata value
for i in range(rows):
for j in range(cols):
if basins_A[i, j] != no_val and basins_A[i, j] != basins_val[0]:
basins_A[i, j] = no_val
Dataset.dataset_like(basins, basins_A, pathout)
def listAttributes(self):
"""Print Attributes List."""
print("\n")
print(
"Attributes List of: "
+ repr(self.__dict__["name"])
+ " - "
+ self.__class__.__name__
+ " Instance\n"
)
self_keys = list(self.__dict__.keys())
self_keys.sort()
for key in self_keys:
if key != "name":
print(str(key) + " : " + repr(self.__dict__[key]))
print("\n") | PypiClean |
/ensmallen_graph-0.6.0-cp37-cp37m-manylinux2010_x86_64.whl/ensmallen_graph/datasets/string/roseivivaxsp22iis10s.py

from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def RoseivivaxSp22iis10s(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Roseivivax sp. 22IIs10s graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of the Roseivivax sp. 22IIs10s graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 21:49:52.471274
The undirected graph Roseivivax sp. 22IIs10s has 4231 nodes and 595764
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.06658 and has 15 connected components, where the component
with most nodes has 4191 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 250, the mean node degree is 281.62,
and the node degree mode is 7. The top 5 most central nodes are 1317118.ATO8_05251
(degree 1738), 1317118.ATO8_12061 (degree 1627), 1317118.ATO8_01455 (degree
1398), 1317118.ATO8_19529 (degree 1386) and 1317118.ATO8_08996 (degree
1282).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import RoseivivaxSp22iis10s
# Then load the graph
graph = RoseivivaxSp22iis10s()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="RoseivivaxSp22iis10s",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
/jhc_cf_sdk_test-1.2.5-py3-none-any.whl/jhc_cf_sdk_test/paths/api_v1_admin_access_rules_rule_id_versions/get.py

from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from jhc_cf_sdk_test import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from jhc_cf_sdk_test import schemas # noqa: F401
from jhc_cf_sdk_test.model.access_rule_detail import AccessRuleDetail
from . import path
# Path params
RuleIdSchema = schemas.StrSchema
RequestRequiredPathParams = typing_extensions.TypedDict(
'RequestRequiredPathParams',
{
'ruleId': typing.Union[RuleIdSchema, str, ],
}
)
RequestOptionalPathParams = typing_extensions.TypedDict(
'RequestOptionalPathParams',
{
},
total=False
)
class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
pass
request_path_rule_id = api_client.PathParameter(
name="ruleId",
style=api_client.ParameterStyle.SIMPLE,
schema=RuleIdSchema,
required=True,
)
class SchemaFor200ResponseBodyApplicationJson(
schemas.DictSchema
):
class MetaOapg:
required = {
"next",
"accessRules",
}
class properties:
class accessRules(
schemas.ListSchema
):
class MetaOapg:
@staticmethod
def items() -> typing.Type['AccessRuleDetail']:
return AccessRuleDetail
def __new__(
cls,
arg: typing.Union[typing.Tuple['AccessRuleDetail'], typing.List['AccessRuleDetail']],
_configuration: typing.Optional[schemas.Configuration] = None,
) -> 'accessRules':
return super().__new__(
cls,
arg,
_configuration=_configuration,
)
def __getitem__(self, i: int) -> 'AccessRuleDetail':
return super().__getitem__(i)
class next(
schemas.StrBase,
schemas.NoneBase,
schemas.Schema,
schemas.NoneStrMixin
):
def __new__(
cls,
*args: typing.Union[None, str, ],
_configuration: typing.Optional[schemas.Configuration] = None,
) -> 'next':
return super().__new__(
cls,
*args,
_configuration=_configuration,
)
__annotations__ = {
"accessRules": accessRules,
"next": next,
}
next: MetaOapg.properties.next
accessRules: MetaOapg.properties.accessRules
@typing.overload
def __getitem__(self, name: typing_extensions.Literal["accessRules"]) -> MetaOapg.properties.accessRules: ...
@typing.overload
def __getitem__(self, name: typing_extensions.Literal["next"]) -> MetaOapg.properties.next: ...
@typing.overload
def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
def __getitem__(self, name: typing.Union[typing_extensions.Literal["accessRules", "next", ], str]):
# dict_instance[name] accessor
return super().__getitem__(name)
@typing.overload
def get_item_oapg(self, name: typing_extensions.Literal["accessRules"]) -> MetaOapg.properties.accessRules: ...
@typing.overload
def get_item_oapg(self, name: typing_extensions.Literal["next"]) -> MetaOapg.properties.next: ...
@typing.overload
def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["accessRules", "next", ], str]):
return super().get_item_oapg(name)
def __new__(
cls,
*args: typing.Union[dict, frozendict.frozendict, ],
next: typing.Union[MetaOapg.properties.next, None, str, ],
accessRules: typing.Union[MetaOapg.properties.accessRules, list, tuple, ],
_configuration: typing.Optional[schemas.Configuration] = None,
**kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
) -> 'SchemaFor200ResponseBodyApplicationJson':
return super().__new__(
cls,
*args,
next=next,
accessRules=accessRules,
_configuration=_configuration,
**kwargs,
)
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
@dataclass
class ApiResponseFor401(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: schemas.Unset = schemas.unset
headers: schemas.Unset = schemas.unset
_response_for_401 = api_client.OpenApiResponse(
response_cls=ApiResponseFor401,
)
@dataclass
class ApiResponseFor404(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: schemas.Unset = schemas.unset
headers: schemas.Unset = schemas.unset
_response_for_404 = api_client.OpenApiResponse(
response_cls=ApiResponseFor404,
)
_status_code_to_response = {
'200': _response_for_200,
'401': _response_for_401,
'404': _response_for_404,
}
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
@typing.overload
def _admin_get_access_rule_versions_oapg(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def _admin_get_access_rule_versions_oapg(
self,
skip_deserialization: typing_extensions.Literal[True],
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _admin_get_access_rule_versions_oapg(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _admin_get_access_rule_versions_oapg(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
Get Access Rule version history
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
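Example (illustrative sketch; the rule id is hypothetical and a default
ApiClient configuration is assumed):
    from jhc_cf_sdk_test.api_client import ApiClient
    api = ApiForget(ApiClient())
    api_response = api.get(path_params={'ruleId': 'rul_123'})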
"""
self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params)
used_path = path.value
_path_params = {}
for parameter in (
request_path_rule_id,
):
parameter_data = path_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_path_params.update(serialized_data)
for k, v in _path_params.items():
used_path = used_path.replace('{%s}' % k, v)
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='get'.upper(),
headers=_headers,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class AdminGetAccessRuleVersions(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def admin_get_access_rule_versions(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def admin_get_access_rule_versions(
self,
skip_deserialization: typing_extensions.Literal[True],
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def admin_get_access_rule_versions(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def admin_get_access_rule_versions(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._admin_get_access_rule_versions_oapg(
path_params=path_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForget(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def get(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def get(
self,
skip_deserialization: typing_extensions.Literal[True],
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def get(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def get(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._admin_get_access_rule_versions_oapg(
path_params=path_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
) | PypiClean |
/pysaml2-7.4.2-py3-none-any.whl/saml2/response.py | import calendar
import logging
from saml2 import SAMLError
from saml2 import class_name
from saml2 import extension_elements_to_elements
from saml2 import saml
from saml2 import samlp
from saml2 import time_util
from saml2 import xmldsig as ds
from saml2 import xmlenc as xenc
from saml2.attribute_converter import to_local
from saml2.s_utils import RequestVersionTooHigh
from saml2.s_utils import RequestVersionTooLow
from saml2.saml import SCM_BEARER
from saml2.saml import SCM_HOLDER_OF_KEY
from saml2.saml import SCM_SENDER_VOUCHES
from saml2.saml import XSI_TYPE
from saml2.saml import attribute_from_string
from saml2.saml import encrypted_attribute_from_string
from saml2.samlp import STATUS_AUTHN_FAILED
from saml2.samlp import STATUS_INVALID_ATTR_NAME_OR_VALUE
from saml2.samlp import STATUS_INVALID_NAMEID_POLICY
from saml2.samlp import STATUS_NO_AUTHN_CONTEXT
from saml2.samlp import STATUS_NO_AVAILABLE_IDP
from saml2.samlp import STATUS_NO_PASSIVE
from saml2.samlp import STATUS_NO_SUPPORTED_IDP
from saml2.samlp import STATUS_PARTIAL_LOGOUT
from saml2.samlp import STATUS_PROXY_COUNT_EXCEEDED
from saml2.samlp import STATUS_REQUEST_DENIED
from saml2.samlp import STATUS_REQUEST_UNSUPPORTED
from saml2.samlp import STATUS_REQUEST_VERSION_DEPRECATED
from saml2.samlp import STATUS_REQUEST_VERSION_TOO_HIGH
from saml2.samlp import STATUS_REQUEST_VERSION_TOO_LOW
from saml2.samlp import STATUS_RESOURCE_NOT_RECOGNIZED
from saml2.samlp import STATUS_RESPONDER
from saml2.samlp import STATUS_TOO_MANY_RESPONSES
from saml2.samlp import STATUS_UNKNOWN_ATTR_PROFILE
from saml2.samlp import STATUS_UNKNOWN_PRINCIPAL
from saml2.samlp import STATUS_UNSUPPORTED_BINDING
from saml2.samlp import STATUS_VERSION_MISMATCH
from saml2.sigver import DecryptError
from saml2.sigver import SignatureError
from saml2.sigver import security_context
from saml2.sigver import signed
from saml2.time_util import later_than
from saml2.time_util import str_to_time
from saml2.validate import NotValid
from saml2.validate import valid_address
from saml2.validate import valid_instance
from saml2.validate import validate_before
from saml2.validate import validate_on_or_after
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
class IncorrectlySigned(SAMLError):
pass
class InvalidAssertion(SAMLError):
pass
class DecryptionFailed(SAMLError):
pass
class VerificationError(SAMLError):
pass
class StatusError(SAMLError):
pass
class UnsolicitedResponse(SAMLError):
pass
class StatusVersionMismatch(StatusError):
pass
class StatusAuthnFailed(StatusError):
pass
class StatusInvalidAttrNameOrValue(StatusError):
pass
class StatusInvalidAuthnResponseStatement(StatusError):
pass
class StatusInvalidNameidPolicy(StatusError):
pass
class StatusNoAuthnContext(StatusError):
pass
class StatusNoAvailableIdp(StatusError):
pass
class StatusNoPassive(StatusError):
pass
class StatusNoSupportedIdp(StatusError):
pass
class StatusPartialLogout(StatusError):
pass
class StatusProxyCountExceeded(StatusError):
pass
class StatusRequestDenied(StatusError):
pass
class StatusRequestUnsupported(StatusError):
pass
class StatusRequestVersionDeprecated(StatusError):
pass
class StatusRequestVersionTooHigh(StatusError):
pass
class StatusRequestVersionTooLow(StatusError):
pass
class StatusResourceNotRecognized(StatusError):
pass
class StatusTooManyResponses(StatusError):
pass
class StatusUnknownAttrProfile(StatusError):
pass
class StatusUnknownPrincipal(StatusError):
pass
class StatusUnsupportedBinding(StatusError):
pass
class StatusResponder(StatusError):
pass
STATUSCODE2EXCEPTION = {
STATUS_VERSION_MISMATCH: StatusVersionMismatch,
STATUS_AUTHN_FAILED: StatusAuthnFailed,
STATUS_INVALID_ATTR_NAME_OR_VALUE: StatusInvalidAttrNameOrValue,
STATUS_INVALID_NAMEID_POLICY: StatusInvalidNameidPolicy,
STATUS_NO_AUTHN_CONTEXT: StatusNoAuthnContext,
STATUS_NO_AVAILABLE_IDP: StatusNoAvailableIdp,
STATUS_NO_PASSIVE: StatusNoPassive,
STATUS_NO_SUPPORTED_IDP: StatusNoSupportedIdp,
STATUS_PARTIAL_LOGOUT: StatusPartialLogout,
STATUS_PROXY_COUNT_EXCEEDED: StatusProxyCountExceeded,
STATUS_REQUEST_DENIED: StatusRequestDenied,
STATUS_REQUEST_UNSUPPORTED: StatusRequestUnsupported,
STATUS_REQUEST_VERSION_DEPRECATED: StatusRequestVersionDeprecated,
STATUS_REQUEST_VERSION_TOO_HIGH: StatusRequestVersionTooHigh,
STATUS_REQUEST_VERSION_TOO_LOW: StatusRequestVersionTooLow,
STATUS_RESOURCE_NOT_RECOGNIZED: StatusResourceNotRecognized,
STATUS_TOO_MANY_RESPONSES: StatusTooManyResponses,
STATUS_UNKNOWN_ATTR_PROFILE: StatusUnknownAttrProfile,
STATUS_UNKNOWN_PRINCIPAL: StatusUnknownPrincipal,
STATUS_UNSUPPORTED_BINDING: StatusUnsupportedBinding,
STATUS_RESPONDER: StatusResponder,
}
# ---------------------------------------------------------------------------
def _dummy(_):
return None
def for_me(conditions, myself):
"""Am I among the intended audiences"""
if not conditions.audience_restriction: # No audience restriction
return True
for restriction in conditions.audience_restriction:
if not restriction.audience:
continue
for audience in restriction.audience:
if audience.text and audience.text.strip() == myself:
return True
else:
logger.debug(f"AudienceRestriction - One condition not satisfied: {audience.text} != {myself}")
logger.debug("AudienceRestrictions not satisfied!")
return False
def authn_response(
conf,
return_addrs,
outstanding_queries=None,
timeslack=0,
asynchop=True,
allow_unsolicited=False,
want_assertions_signed=False,
conv_info=None,
):
sec = security_context(conf)
if not timeslack:
try:
timeslack = int(conf.accepted_time_diff)
except TypeError:
timeslack = 0
return AuthnResponse(
sec,
conf.attribute_converters,
conf.entityid,
return_addrs,
outstanding_queries,
timeslack,
asynchop=asynchop,
allow_unsolicited=allow_unsolicited,
want_assertions_signed=want_assertions_signed,
conv_info=conv_info,
)
# comes in over SOAP so synchronous
def attribute_response(conf, return_addrs, timeslack=0, asynchop=False, test=False, conv_info=None):
sec = security_context(conf)
if not timeslack:
try:
timeslack = int(conf.accepted_time_diff)
except TypeError:
timeslack = 0
return AttributeResponse(
sec,
conf.attribute_converters,
conf.entityid,
return_addrs,
timeslack,
asynchop=asynchop,
test=test,
conv_info=conv_info,
)
class StatusResponse:
msgtype = "status_response"
def __init__(self, sec_context, return_addrs=None, timeslack=0, request_id=0, asynchop=True, conv_info=None):
self.sec = sec_context
self.return_addrs = return_addrs or []
self.timeslack = timeslack
self.request_id = request_id
self.xmlstr = ""
self.origxml = ""
self.name_id = None
self.response = None
self.not_on_or_after = 0
self.in_response_to = None
self.signature_check = self.sec.correctly_signed_response
self.require_signature = False
self.require_response_signature = False
self.require_signature_or_response_signature = False
self.not_signed = False
self.asynchop = asynchop
self.do_not_verify = False
self.conv_info = conv_info or {}
def _clear(self):
self.xmlstr = ""
self.name_id = None
self.response = None
self.not_on_or_after = 0
def _postamble(self):
if not self.response:
logger.warning("Response was not correctly signed")
if self.xmlstr:
logger.debug("Response: %s", self.xmlstr)
raise IncorrectlySigned()
logger.debug("response: %s", self.response)
try:
valid_instance(self.response)
except NotValid as exc:
logger.warning("Not valid response: %s", exc.args[0])
self._clear()
return self
self.in_response_to = self.response.in_response_to
return self
def load_instance(self, instance):
if signed(instance):
# This will check signature on Assertion which is the default
try:
self.response = self.sec.check_signature(instance)
except SignatureError:
# The response as a whole might be signed or not
self.response = self.sec.check_signature(instance, f"{samlp.NAMESPACE}:Response")
else:
self.not_signed = True
self.response = instance
return self._postamble()
def _loads(self, xmldata, decode=True, origxml=None):
# own copy
if isinstance(xmldata, bytes):
self.xmlstr = xmldata[:].decode("utf-8")
else:
self.xmlstr = xmldata[:]
logger.debug("xmlstr: %s", self.xmlstr)
if origxml:
self.origxml = origxml
else:
self.origxml = self.xmlstr
if self.do_not_verify:
args = {"do_not_verify": True}
else:
args = {}
try:
self.response = self.signature_check(
xmldata,
origdoc=origxml,
must=self.require_signature,
require_response_signature=self.require_response_signature,
**args,
)
except TypeError:
raise
except SignatureError:
raise
except Exception as excp:
logger.exception("EXCEPTION: %s", str(excp))
raise
return self._postamble()
def status_ok(self):
status = self.response.status
logger.debug("status: %s", status)
if not status or status.status_code.value == samlp.STATUS_SUCCESS:
return True
err_code = status.status_code.status_code.value if status.status_code.status_code else None
err_msg = status.status_message.text if status.status_message else err_code or "Unknown error"
err_cls = STATUSCODE2EXCEPTION.get(err_code, StatusError)
msg = f"Unsuccessful operation: {status}\n{err_msg} from {err_code}"
logger.debug(msg)
raise err_cls(msg)
def issue_instant_ok(self):
"""Check that the response was issued at a reasonable time"""
upper = time_util.shift_time(time_util.time_in_a_while(days=1), self.timeslack).timetuple()
lower = time_util.shift_time(time_util.time_a_while_ago(days=1), -self.timeslack).timetuple()
# print("issue_instant: %s" % self.response.issue_instant)
# print("%s < x < %s" % (lower, upper))
issued_at = str_to_time(self.response.issue_instant)
return lower < issued_at < upper
def _verify(self):
if self.request_id and self.in_response_to and self.in_response_to != self.request_id:
logger.error("Not the id I expected: %s != %s", self.in_response_to, self.request_id)
return None
if self.response.version != "2.0":
_ver = float(self.response.version)
if _ver < 2.0:
raise RequestVersionTooLow()
else:
raise RequestVersionTooHigh()
if self.asynchop:
if self.response.destination and self.response.destination not in self.return_addrs:
logger.error(
"destination '%s' not in return addresses '%s'", self.response.destination, self.return_addrs
)
return None
valid = self.issue_instant_ok() and self.status_ok()
return valid
def loads(self, xmldata, decode=True, origxml=None):
return self._loads(xmldata, decode, origxml)
def verify(self, keys=None):
try:
return self._verify()
except AssertionError:
logger.exception("verify")
return None
def update(self, mold):
self.xmlstr = mold.xmlstr
self.in_response_to = mold.in_response_to
self.response = mold.response
def issuer(self):
issuer_value = (self.response.issuer.text if self.response.issuer is not None else "").strip()
return issuer_value
class LogoutResponse(StatusResponse):
msgtype = "logout_response"
def __init__(self, sec_context, return_addrs=None, timeslack=0, asynchop=True, conv_info=None):
StatusResponse.__init__(self, sec_context, return_addrs, timeslack, asynchop=asynchop, conv_info=conv_info)
self.signature_check = self.sec.correctly_signed_logout_response
class NameIDMappingResponse(StatusResponse):
msgtype = "name_id_mapping_response"
def __init__(self, sec_context, return_addrs=None, timeslack=0, request_id=0, asynchop=True, conv_info=None):
StatusResponse.__init__(self, sec_context, return_addrs, timeslack, request_id, asynchop, conv_info=conv_info)
self.signature_check = self.sec.correctly_signed_name_id_mapping_response
class ManageNameIDResponse(StatusResponse):
msgtype = "manage_name_id_response"
def __init__(self, sec_context, return_addrs=None, timeslack=0, request_id=0, asynchop=True, conv_info=None):
StatusResponse.__init__(self, sec_context, return_addrs, timeslack, request_id, asynchop, conv_info=conv_info)
self.signature_check = self.sec.correctly_signed_manage_name_id_response
# ----------------------------------------------------------------------------
class AuthnResponse(StatusResponse):
"""This is where all the profile compliance is checked.
This one does saml2int compliance."""
msgtype = "authn_response"
def __init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs=None,
outstanding_queries=None,
timeslack=0,
asynchop=True,
allow_unsolicited=False,
test=False,
allow_unknown_attributes=False,
want_assertions_signed=False,
want_assertions_or_response_signed=False,
want_response_signed=False,
conv_info=None,
**kwargs,
):
StatusResponse.__init__(self, sec_context, return_addrs, timeslack, asynchop=asynchop, conv_info=conv_info)
self.entity_id = entity_id
self.attribute_converters = attribute_converters
if outstanding_queries:
self.outstanding_queries = outstanding_queries
else:
self.outstanding_queries = {}
self.context = "AuthnReq"
self.came_from = None
self.ava = None
self.assertion = None
self.assertions = []
self.session_not_on_or_after = 0
self.allow_unsolicited = allow_unsolicited
self.require_signature = want_assertions_signed
self.require_signature_or_response_signature = want_assertions_or_response_signed
self.require_response_signature = want_response_signed
self.test = test
self.allow_unknown_attributes = allow_unknown_attributes
#
try:
self.extension_schema = kwargs["extension_schema"]
except KeyError:
self.extension_schema = {}
def check_subject_confirmation_in_response_to(self, irp):
for assertion in self.response.assertion:
for _sc in assertion.subject.subject_confirmation:
if _sc.subject_confirmation_data.in_response_to != irp:
return False
return True
def loads(self, xmldata, decode=True, origxml=None):
self._loads(xmldata, decode, origxml)
if self.asynchop:
if self.in_response_to in self.outstanding_queries:
self.came_from = self.outstanding_queries[self.in_response_to]
# del self.outstanding_queries[self.in_response_to]
try:
if not self.check_subject_confirmation_in_response_to(self.in_response_to):
raise UnsolicitedResponse(f"Unsolicited response: {self.in_response_to}")
except AttributeError:
pass
elif self.allow_unsolicited:
# Should check that I haven't seen this before
pass
else:
raise UnsolicitedResponse(f"Unsolicited response: {self.in_response_to}")
return self
def clear(self):
self._clear()
self.came_from = None
self.ava = None
self.assertion = None
def authn_statement_ok(self, optional=False):
n_authn_statements = len(self.assertion.authn_statement)
if n_authn_statements != 1:
if optional:
return True
else:
msg = f"Invalid number of AuthnStatement found in Response: {n_authn_statements}"
raise ValueError(msg)
authn_statement = self.assertion.authn_statement[0]
if authn_statement.session_not_on_or_after:
if validate_on_or_after(authn_statement.session_not_on_or_after, self.timeslack):
self.session_not_on_or_after = calendar.timegm(
time_util.str_to_time(authn_statement.session_not_on_or_after)
)
else:
return False
return True
# check authn_statement.session_index
def condition_ok(self, lax=False):
if not self.assertion.conditions:
            # Conditions is optional for an Assertion, so if it is absent we
            # assume that it is valid
return True
if self.test:
lax = True
conditions = self.assertion.conditions
logger.debug("conditions: %s", conditions)
# if no sub-elements or elements are supplied, then the
# assertion is considered to be valid.
if not conditions.keyswv():
return True
# if both are present NotBefore must be earlier than NotOnOrAfter
if conditions.not_before and conditions.not_on_or_after:
if not later_than(conditions.not_on_or_after, conditions.not_before):
return False
try:
if conditions.not_on_or_after:
self.not_on_or_after = validate_on_or_after(conditions.not_on_or_after, self.timeslack)
if conditions.not_before:
validate_before(conditions.not_before, self.timeslack)
except Exception as excp:
logger.error("Exception on conditions: %s", str(excp))
if not lax:
raise
else:
self.not_on_or_after = 0
if not for_me(conditions, self.entity_id):
if not lax:
raise Exception(f"AudienceRestrictions conditions not satisfied! (Local entity_id={self.entity_id})")
if conditions.condition: # extra conditions
for cond in conditions.condition:
try:
if cond.extension_attributes[XSI_TYPE] in self.extension_schema:
pass
else:
raise Exception("Unknown condition")
except KeyError:
raise Exception("Missing xsi:type specification")
return True
def decrypt_attributes(self, attribute_statement, keys=None):
"""
        Decrypts any encrypted attributes and adds the decrypted attributes to
        the list of attributes.
:param attribute_statement: A SAML.AttributeStatement which might
contain both encrypted attributes and attributes.
"""
# _node_name = [
# "urn:oasis:names:tc:SAML:2.0:assertion:EncryptedData",
# "urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAttribute"]
for encattr in attribute_statement.encrypted_attribute:
if not encattr.encrypted_key:
_decr = self.sec.decrypt_keys(encattr.encrypted_data, keys=keys)
_attr = attribute_from_string(_decr)
attribute_statement.attribute.append(_attr)
else:
_decr = self.sec.decrypt_keys(encattr, keys=keys)
enc_attr = encrypted_attribute_from_string(_decr)
attrlist = enc_attr.extensions_as_elements("Attribute", saml)
attribute_statement.attribute.extend(attrlist)
def read_attribute_statement(self, attr_statem):
logger.debug("Attribute Statement: %s", attr_statem)
# for aconv in self.attribute_converters:
# logger.debug("Converts name format: %s", aconv.name_format)
self.decrypt_attributes(attr_statem)
return to_local(self.attribute_converters, attr_statem, self.allow_unknown_attributes)
def get_identity(self):
"""The assertion can contain zero or more attributeStatements"""
ava = {}
for _assertion in self.assertions:
if _assertion.advice:
if _assertion.advice.assertion:
for tmp_assertion in _assertion.advice.assertion:
if tmp_assertion.attribute_statement:
n_attr_statements = len(tmp_assertion.attribute_statement)
if n_attr_statements != 1:
msg = "Invalid number of AuthnStatement found in Response: {n}".format(
n=n_attr_statements
)
raise ValueError(msg)
ava.update(self.read_attribute_statement(tmp_assertion.attribute_statement[0]))
if _assertion.attribute_statement:
logger.debug("Assertion contains %s attribute statement(s)", (len(self.assertion.attribute_statement)))
for _attr_statem in _assertion.attribute_statement:
logger.debug(f"Attribute Statement: {_attr_statem}")
ava.update(self.read_attribute_statement(_attr_statem))
if not ava:
logger.debug("Assertion contains no attribute statements")
return ava
def _bearer_confirmed(self, data):
if not data:
return False
if data.address:
if not valid_address(data.address):
return False
# verify that I got it from the correct sender
# These two will raise exception if untrue
validate_on_or_after(data.not_on_or_after, self.timeslack)
validate_before(data.not_before, self.timeslack)
# not_before must be < not_on_or_after
if not later_than(data.not_on_or_after, data.not_before):
return False
if self.asynchop and self.came_from is None:
if data.in_response_to:
if data.in_response_to in self.outstanding_queries:
self.came_from = self.outstanding_queries[data.in_response_to]
# del self.outstanding_queries[data.in_response_to]
elif self.allow_unsolicited:
pass
else:
                    # This is where I don't allow unsolicited responses
# Either in_response_to == None or has a value I don't
# recognize
logger.debug("in response to: '%s'", data.in_response_to)
logger.info("outstanding queries: %s", self.outstanding_queries.keys())
raise Exception("Combination of session id and requestURI I don't " "recall")
return True
def _holder_of_key_confirmed(self, data):
if not data or not data.extension_elements:
return False
has_keyinfo = False
for element in extension_elements_to_elements(data.extension_elements, [samlp, saml, xenc, ds]):
if isinstance(element, ds.KeyInfo):
has_keyinfo = True
return has_keyinfo
def get_subject(self, keys=None):
"""The assertion must contain a Subject"""
if not self.assertion:
raise ValueError("Missing assertion")
if not self.assertion.subject:
raise ValueError(f"Invalid assertion subject: {self.assertion.subject}")
subject = self.assertion.subject
subjconf = []
if not self.verify_attesting_entity(subject.subject_confirmation):
raise VerificationError("No valid attesting address")
for subject_confirmation in subject.subject_confirmation:
_data = subject_confirmation.subject_confirmation_data
if subject_confirmation.method == SCM_BEARER:
if not self._bearer_confirmed(_data):
continue
elif subject_confirmation.method == SCM_HOLDER_OF_KEY:
if not self._holder_of_key_confirmed(_data):
continue
elif subject_confirmation.method == SCM_SENDER_VOUCHES:
pass
else:
raise ValueError(f"Unknown subject confirmation method: {subject_confirmation.method}")
_recip = _data.recipient
if not _recip or not self.verify_recipient(_recip):
raise VerificationError("No valid recipient")
subjconf.append(subject_confirmation)
if not subjconf:
raise VerificationError("No valid subject confirmation")
subject.subject_confirmation = subjconf
# The subject may contain a name_id
if subject.name_id:
self.name_id = subject.name_id
elif subject.encrypted_id:
# decrypt encrypted ID
_name_id_str = self.sec.decrypt_keys(subject.encrypted_id.encrypted_data.to_string(), keys=keys)
_name_id = saml.name_id_from_string(_name_id_str)
self.name_id = _name_id
logger.info("Subject NameID: %s", self.name_id)
return self.name_id
def _assertion(self, assertion, verified=False):
"""
        Check the assertion
        :param assertion: The assertion to check.
        :return: True/False depending on whether the assertion is sane or not
"""
if not hasattr(assertion, "signature") or not assertion.signature:
logger.debug("unsigned")
if self.require_signature:
raise SignatureError("Signature missing for assertion")
else:
logger.debug("signed")
if not verified and self.do_not_verify is False:
try:
self.sec.check_signature(assertion, class_name(assertion), self.xmlstr)
except Exception as exc:
logger.error("correctly_signed_response: %s", exc)
raise
self.assertion = assertion
logger.debug("assertion context: %s", self.context)
logger.debug("assertion keys: %s", assertion.keyswv())
logger.debug("outstanding_queries: %s", self.outstanding_queries)
# if self.context == "AuthnReq" or self.context == "AttrQuery":
if self.context == "AuthnReq":
self.authn_statement_ok()
# elif self.context == "AttrQuery":
# self.authn_statement_ok(True)
if not self.condition_ok():
raise VerificationError("Condition not OK")
logger.debug("--- Getting Identity ---")
# if self.context == "AuthnReq" or self.context == "AttrQuery":
# self.ava = self.get_identity()
# logger.debug("--- AVA: {0}".format(self.ava))
try:
self.get_subject()
if self.asynchop:
if self.allow_unsolicited:
pass
elif self.came_from is None:
raise VerificationError("Came from")
return True
except Exception:
logger.exception("get subject")
raise
def decrypt_assertions(self, encrypted_assertions, decr_txt, issuer=None, verified=False):
"""Moves the decrypted assertion from the encrypted assertion to a
list.
:param encrypted_assertions: A list of encrypted assertions.
:param decr_txt: The string representation containing the decrypted
data. Used when verifying signatures.
:param issuer: The issuer of the response.
:param verified: If True do not verify signatures, otherwise verify
the signature if it exists.
:return: A list of decrypted assertions.
"""
res = []
for encrypted_assertion in encrypted_assertions:
if encrypted_assertion.extension_elements:
assertions = extension_elements_to_elements(encrypted_assertion.extension_elements, [saml, samlp])
for assertion in assertions:
if assertion.signature and not verified:
if not self.sec.check_signature(
assertion, origdoc=decr_txt, node_name=class_name(assertion), issuer=issuer
):
logger.error("Failed to verify signature on '%s'", assertion)
raise SignatureError()
res.append(assertion)
return res
def find_encrypt_data_assertion(self, enc_assertions):
"""Verifies if a list of encrypted assertions contains encrypted data.
:param enc_assertions: A list of encrypted assertions.
        :return: True if encrypted data exists, otherwise False.
"""
        for _assertion in enc_assertions:
            if _assertion.encrypted_data is not None:
                return True
        return False
def find_encrypt_data_assertion_list(self, _assertions):
"""Verifies if a list of assertions contains encrypted data in the
advice element.
:param _assertions: A list of assertions.
        :return: True if encrypted data exists, otherwise False.
"""
for _assertion in _assertions:
if _assertion.advice:
if _assertion.advice.encrypted_assertion:
res = self.find_encrypt_data_assertion(_assertion.advice.encrypted_assertion)
if res:
                        return True
        return False
def find_encrypt_data(self, resp):
"""Verifies if a saml response contains encrypted assertions with
encrypted data.
:param resp: A saml response.
        :return: True if encrypted data exists, otherwise False.
"""
if resp.encrypted_assertion:
res = self.find_encrypt_data_assertion(resp.encrypted_assertion)
if res:
return True
if resp.assertion:
for tmp_assertion in resp.assertion:
if tmp_assertion.advice:
if tmp_assertion.advice.encrypted_assertion:
res = self.find_encrypt_data_assertion(tmp_assertion.advice.encrypted_assertion)
if res:
return True
return False
def parse_assertion(self, keys=None):
"""Parse the assertions for a saml response.
        :param keys: A string representing an RSA key or a list of strings
containing RSA keys.
:return: True if the assertions are parsed otherwise False.
"""
if self.context == "AuthnQuery":
# can contain one or more assertions
pass
else:
# This is a saml2int limitation
n_assertions = len(self.response.assertion)
n_assertions_enc = len(self.response.encrypted_assertion)
if n_assertions != 1 and n_assertions_enc != 1 and self.assertion is None:
raise InvalidAssertion(f"Invalid number of assertions in Response: {n_assertions + n_assertions_enc}")
if self.response.assertion:
logger.debug("***Unencrypted assertion***")
for assertion in self.response.assertion:
if not self._assertion(assertion, False):
return False
if self.find_encrypt_data(self.response):
logger.debug("***Encrypted assertion/-s***")
_enc_assertions = []
resp = self.response
decr_text = str(self.response)
decr_text_old = None
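            # Decrypt repeatedly until no encrypted data remains or another
            # pass no longer changes the decrypted text.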
while self.find_encrypt_data(resp) and decr_text_old != decr_text:
decr_text_old = decr_text
try:
decr_text = self.sec.decrypt_keys(decr_text, keys=keys)
except DecryptError:
continue
else:
resp = samlp.response_from_string(decr_text)
# check and prepare for comparison between str and unicode
if type(decr_text_old) != type(decr_text):
if isinstance(decr_text_old, bytes):
decr_text_old = decr_text_old.decode("utf-8")
else:
decr_text_old = decr_text_old.encode("utf-8")
_enc_assertions = self.decrypt_assertions(resp.encrypted_assertion, decr_text)
decr_text_old = None
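            # Second pass: also decrypt encrypted assertions nested in the
            # advice element of the assertions decrypted above.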
while (
self.find_encrypt_data(resp) or self.find_encrypt_data_assertion_list(_enc_assertions)
) and decr_text_old != decr_text:
decr_text_old = decr_text
try:
decr_text = self.sec.decrypt_keys(decr_text, keys=keys)
except DecryptError:
continue
else:
resp = samlp.response_from_string(decr_text)
_enc_assertions = self.decrypt_assertions(resp.encrypted_assertion, decr_text, verified=True)
# check and prepare for comparison between str and unicode
if type(decr_text_old) != type(decr_text):
if isinstance(decr_text_old, bytes):
decr_text_old = decr_text_old.decode("utf-8")
else:
decr_text_old = decr_text_old.encode("utf-8")
all_assertions = _enc_assertions
if resp.assertion:
all_assertions = all_assertions + resp.assertion
if len(all_assertions) > 0:
for tmp_ass in all_assertions:
if tmp_ass.advice and tmp_ass.advice.encrypted_assertion:
advice_res = self.decrypt_assertions(
tmp_ass.advice.encrypted_assertion, decr_text, tmp_ass.issuer
)
if tmp_ass.advice.assertion:
tmp_ass.advice.assertion.extend(advice_res)
else:
tmp_ass.advice.assertion = advice_res
if len(advice_res) > 0:
tmp_ass.advice.encrypted_assertion = []
self.response.assertion = resp.assertion
for assertion in _enc_assertions:
if not self._assertion(assertion, True):
return False
else:
self.assertions.append(assertion)
self.xmlstr = decr_text
if len(_enc_assertions) > 0:
self.response.encrypted_assertion = []
if self.response.assertion:
for assertion in self.response.assertion:
self.assertions.append(assertion)
if self.assertions and len(self.assertions) > 0:
self.assertion = self.assertions[0]
if self.context == "AuthnReq" or self.context == "AttrQuery":
self.ava = self.get_identity()
logger.debug(f"--- AVA: {self.ava}")
return True
def verify(self, keys=None):
"""Verify that the assertion is syntactically correct and the
signature is correct if present.
        :param keys: If the default key file should not be used, then use one
of these.
"""
try:
res = self._verify()
except AssertionError as err:
logger.error("Verification error on the response: %s", str(err))
raise
else:
if not res:
return None
if not isinstance(self.response, samlp.Response):
return self
if self.parse_assertion(keys):
return self
else:
logger.error("Could not parse the assertion")
return None
def session_id(self):
"""Returns the SessionID of the response"""
return self.response.in_response_to
def id(self):
"""Return the ID of the response"""
return self.response.id
def authn_info(self):
res = []
for statement in getattr(self.assertion, "authn_statement", []):
authn_instant = getattr(statement, "authn_instant", "")
context = statement.authn_context
if not context:
continue
authn_class = (
getattr(context.authn_context_class_ref, "text", None)
or getattr(context.authn_context_decl_ref, "text", None)
or ""
)
authenticating_authorities = getattr(context, "authenticating_authority", [])
authn_auth = [authority.text for authority in authenticating_authorities]
res.append((authn_class, authn_auth, authn_instant))
return res
def authz_decision_info(self):
res = {"permit": [], "deny": [], "indeterminate": []}
for adstat in self.assertion.authz_decision_statement:
# one of 'Permit', 'Deny', 'Indeterminate'
res[adstat.decision.text.lower()] = adstat
return res
def session_info(self):
"""Returns a predefined set of information gleened from the
response.
:returns: Dictionary with information
"""
if self.session_not_on_or_after > 0:
nooa = self.session_not_on_or_after
else:
nooa = self.not_on_or_after
if self.context == "AuthzQuery":
return {
"name_id": self.name_id,
"came_from": self.came_from,
"issuer": self.issuer(),
"not_on_or_after": nooa,
"authz_decision_info": self.authz_decision_info(),
}
elif getattr(self.assertion, "authn_statement", None):
authn_statement = self.assertion.authn_statement[0]
return {
"ava": self.ava,
"name_id": self.name_id,
"came_from": self.came_from,
"issuer": self.issuer(),
"not_on_or_after": nooa,
"authn_info": self.authn_info(),
"session_index": authn_statement.session_index,
}
else:
raise StatusInvalidAuthnResponseStatement("The Authn Response Statement is not valid")
def __str__(self):
return self.xmlstr
def verify_recipient(self, recipient):
"""
Verify that I'm the recipient of the assertion
:param recipient: A URI specifying the entity or location to which an
attesting entity can present the assertion.
:return: True/False
"""
if not self.conv_info:
return True
_info = self.conv_info
try:
if recipient == _info["entity_id"]:
return True
except KeyError:
pass
try:
if recipient in self.return_addrs:
return True
except KeyError:
pass
return False
def verify_attesting_entity(self, subject_confirmation):
"""
At least one address specification has to be correct.
        :param subject_confirmation: A SubjectConfirmation instance
:return: True/False
"""
try:
address = self.conv_info["remote_addr"]
except KeyError:
address = "0.0.0.0"
correct = 0
for subject_conf in subject_confirmation:
if subject_conf.subject_confirmation_data is None:
correct += 1 # In reality undefined
elif subject_conf.subject_confirmation_data.address:
if address == "0.0.0.0": # accept anything
correct += 1
elif subject_conf.subject_confirmation_data.address == address:
correct += 1
else:
correct += 1
if correct:
return True
else:
return False
class AuthnQueryResponse(AuthnResponse):
msgtype = "authn_query_response"
def __init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs=None,
timeslack=0,
asynchop=False,
test=False,
conv_info=None,
):
AuthnResponse.__init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs,
timeslack=timeslack,
asynchop=asynchop,
test=test,
conv_info=conv_info,
)
self.entity_id = entity_id
self.attribute_converters = attribute_converters
self.assertion = None
self.context = "AuthnQuery"
def condition_ok(self, lax=False): # Should I care about conditions ?
return True
class AttributeResponse(AuthnResponse):
msgtype = "attribute_response"
def __init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs=None,
timeslack=0,
asynchop=False,
test=False,
conv_info=None,
):
AuthnResponse.__init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs,
timeslack=timeslack,
asynchop=asynchop,
test=test,
conv_info=conv_info,
)
self.entity_id = entity_id
self.attribute_converters = attribute_converters
self.assertion = None
self.context = "AttrQuery"
class AuthzResponse(AuthnResponse):
"""A successful response will be in the form of assertions containing
authorization decision statements."""
msgtype = "authz_decision_response"
def __init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs=None,
timeslack=0,
asynchop=False,
conv_info=None,
):
AuthnResponse.__init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs,
timeslack=timeslack,
asynchop=asynchop,
conv_info=conv_info,
)
self.entity_id = entity_id
self.attribute_converters = attribute_converters
self.assertion = None
self.context = "AuthzQuery"
class ArtifactResponse(AuthnResponse):
msgtype = "artifact_response"
def __init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs=None,
timeslack=0,
asynchop=False,
test=False,
conv_info=None,
):
AuthnResponse.__init__(
self,
sec_context,
attribute_converters,
entity_id,
return_addrs,
timeslack=timeslack,
asynchop=asynchop,
test=test,
conv_info=conv_info,
)
self.entity_id = entity_id
self.attribute_converters = attribute_converters
self.assertion = None
self.context = "ArtifactResolve"
def response_factory(
xmlstr,
conf,
return_addrs=None,
outstanding_queries=None,
timeslack=0,
decode=True,
request_id=0,
origxml=None,
asynchop=True,
allow_unsolicited=False,
want_assertions_signed=False,
conv_info=None,
):
sec_context = security_context(conf)
if not timeslack:
try:
timeslack = int(conf.accepted_time_diff)
except TypeError:
timeslack = 0
attribute_converters = conf.attribute_converters
entity_id = conf.entityid
extension_schema = conf.extension_schema
response = StatusResponse(sec_context, return_addrs, timeslack, request_id, asynchop, conv_info=conv_info)
try:
response.loads(xmlstr, decode, origxml)
if response.response.assertion or response.response.encrypted_assertion:
authnresp = AuthnResponse(
sec_context,
attribute_converters,
entity_id,
return_addrs,
outstanding_queries,
timeslack,
asynchop,
allow_unsolicited,
extension_schema=extension_schema,
want_assertions_signed=want_assertions_signed,
conv_info=conv_info,
)
authnresp.update(response)
return authnresp
except TypeError:
response.signature_check = sec_context.correctly_signed_logout_response
response.loads(xmlstr, decode, origxml)
logoutresp = LogoutResponse(sec_context, return_addrs, timeslack, asynchop=asynchop, conv_info=conv_info)
logoutresp.update(response)
return logoutresp
return response
# ===========================================================================
# A class of its own
class AssertionIDResponse:
msgtype = "assertion_id_response"
def __init__(self, sec_context, attribute_converters, timeslack=0, **kwargs):
self.sec = sec_context
self.timeslack = timeslack
self.xmlstr = ""
self.origxml = ""
self.name_id = ""
self.response = None
self.not_signed = False
self.attribute_converters = attribute_converters
self.assertion = None
self.context = "AssertionIdResponse"
self.signature_check = self.sec.correctly_signed_assertion_id_response
# Because this class is not a subclass of StatusResponse we need
# to add these attributes directly so that the _parse_response()
# method of the Entity class can treat instances of this class
# like all other responses.
self.require_signature = False
self.require_response_signature = False
self.require_signature_or_response_signature = False
def loads(self, xmldata, decode=True, origxml=None):
# own copy
self.xmlstr = xmldata[:]
logger.debug("xmlstr: %s", self.xmlstr)
self.origxml = origxml
try:
self.response = self.signature_check(xmldata, origdoc=origxml)
self.assertion = self.response
except TypeError:
raise
except SignatureError:
raise
except Exception as excp:
logger.exception("EXCEPTION: %s", str(excp))
raise
# print("<", self.response)
return self._postamble()
def verify(self, keys=None):
try:
valid_instance(self.response)
except NotValid as exc:
logger.error("Not valid response: %s", exc.args[0])
raise
return self
def _postamble(self):
if not self.response:
logger.warning("Response was not correctly signed")
if self.xmlstr:
logger.debug("Response: %s", self.xmlstr)
raise IncorrectlySigned()
logger.debug("response: %s", self.response)
return self | PypiClean |
/OpenCobolIDE-4.7.6.tar.gz/OpenCobolIDE-4.7.6/open_cobol_ide/extlibs/pyqode/core/panels/marker.py | import logging
from pyqode.core.api import TextDecoration
from pyqode.core.api.panel import Panel
from pyqode.core.api.utils import DelayJobRunner, TextHelper
from pyqode.qt import QtCore, QtWidgets, QtGui
def _logger():
""" Gets module's logger """
return logging.getLogger(__name__)
class Marker(QtCore.QObject):
"""
    A marker is an icon drawn on a marker panel at a specific line position and
with a possible tooltip.
"""
@property
def position(self):
"""
Gets the marker position (line number)
:type: int
"""
try:
return self.block.blockNumber()
except AttributeError:
return self._position # not added yet
@property
def icon(self):
"""
        Gets the marker icon, resolved to a QIcon. Read-only.
"""
if isinstance(self._icon, str):
if QtGui.QIcon.hasThemeIcon(self._icon):
return QtGui.QIcon.fromTheme(self._icon)
else:
return QtGui.QIcon(self._icon)
elif isinstance(self._icon, tuple):
return QtGui.QIcon.fromTheme(self._icon[0],
QtGui.QIcon(self._icon[1]))
elif isinstance(self._icon, QtGui.QIcon):
return self._icon
return QtGui.QIcon()
@property
def description(self):
""" Gets the marker description. """
return self._description
def __init__(self, position, icon="", description="", parent=None):
"""
:param position: The marker position/line number.
:type position: int
:param icon: The icon to display
:type icon: QtGui.QIcon
:param parent: The optional parent object.
:type parent: QtCore.QObject or None
"""
QtCore.QObject.__init__(self, parent)
#: The position of the marker (line number)
self._position = position
self._icon = icon
self._description = description
class MarkerPanel(Panel):
"""
General purpose marker panel.
    This panel takes care of drawing icons at a specific line number.
    Use add_marker, remove_marker and clear_markers to manage the collection of
    displayed markers.
You can create a user editable panel (e.g. a breakpoints panel) by using
the following signals:
- :attr:`pyqode.core.panels.MarkerPanel.add_marker_requested`
- :attr:`pyqode.core.panels.MarkerPanel.remove_marker_requested`
"""
#: Signal emitted when the user clicked in a place where there is no
#: marker.
add_marker_requested = QtCore.Signal(int)
#: Signal emitted when the user right clicked on an existing marker.
edit_marker_requested = QtCore.Signal(int)
#: Signal emitted when the user left clicked on an existing marker.
remove_marker_requested = QtCore.Signal(int)
@property
def background(self):
"""
Marker background color in editor. Use None if no text decoration
should be used.
"""
return self._background
@background.setter
def background(self, value):
self._background = value
def __init__(self):
Panel.__init__(self)
self._background = QtGui.QColor('#FFC8C8')
self._markers = []
self._icons = {}
self._previous_line = -1
self.scrollable = True
self._job_runner = DelayJobRunner(delay=100)
self.setMouseTracking(True)
self._to_remove = []
@property
def markers(self):
"""
Gets all markers.
"""
return self._markers
def add_marker(self, marker):
"""
Adds the marker to the panel.
:param marker: Marker to add
:type marker: pyqode.core.modes.Marker
"""
self._markers.append(marker)
doc = self.editor.document()
assert isinstance(doc, QtGui.QTextDocument)
block = doc.findBlockByLineNumber(marker._position)
marker.block = block
d = TextDecoration(block)
d.set_full_width()
if self._background:
d.set_background(QtGui.QBrush(self._background))
marker.decoration = d
self.editor.decorations.append(d)
self.repaint()
def remove_marker(self, marker):
"""
Removes a marker from the panel
:param marker: Marker to remove
:type marker: pyqode.core.Marker
"""
self._markers.remove(marker)
self._to_remove.append(marker)
if hasattr(marker, 'decoration'):
self.editor.decorations.remove(marker.decoration)
self.repaint()
def clear_markers(self):
""" Clears the markers list """
while len(self._markers):
self.remove_marker(self._markers[0])
def marker_for_line(self, line):
"""
        Returns the markers displayed at the specified line number, if any.
        :param line: The marker line.
        :return: A list of markers found at the specified line (possibly empty).
        :rtype: list of pyqode.core.Marker
"""
markers = []
for marker in self._markers:
if line == marker.position:
markers.append(marker)
return markers
def sizeHint(self):
"""
        Returns the panel size hint. (fixed width of 16px)
"""
metrics = QtGui.QFontMetricsF(self.editor.font())
size_hint = QtCore.QSize(metrics.height(), metrics.height())
if size_hint.width() > 16:
size_hint.setWidth(16)
return size_hint
def paintEvent(self, event):
Panel.paintEvent(self, event)
painter = QtGui.QPainter(self)
for top, block_nbr, block in self.editor.visible_blocks:
for marker in self._markers:
if marker.block == block and marker.icon:
rect = QtCore.QRect()
rect.setX(0)
rect.setY(top)
rect.setWidth(self.sizeHint().width())
rect.setHeight(self.sizeHint().height())
marker.icon.paint(painter, rect)
def mousePressEvent(self, event):
# Handle mouse press:
# - emit add marker signal if there were no marker under the mouse
# cursor
        # - emit remove marker signal (left click) or edit marker signal (any
        #   other button) if there were one or more markers under the cursor.
line = TextHelper(self.editor).line_nbr_from_position(event.pos().y())
if self.marker_for_line(line):
if event.button() == QtCore.Qt.LeftButton:
self.remove_marker_requested.emit(line)
else:
self.edit_marker_requested.emit(line)
else:
self.add_marker_requested.emit(line)
def mouseMoveEvent(self, event):
# Requests a tooltip if the cursor is currently over a marker.
line = TextHelper(self.editor).line_nbr_from_position(event.pos().y())
markers = self.marker_for_line(line)
text = '\n'.join([marker.description for marker in markers if
marker.description])
if len(markers):
if self._previous_line != line:
top = TextHelper(self.editor).line_pos_from_number(
markers[0].position)
if top:
self._job_runner.request_job(self._display_tooltip,
text, top)
else:
self._job_runner.cancel_requests()
self._previous_line = line
def leaveEvent(self, *args, **kwargs):
"""
Hide tooltip when leaving the panel region.
"""
QtWidgets.QToolTip.hideText()
self._previous_line = -1
def _display_tooltip(self, tooltip, top):
"""
Display tooltip at the specified top position.
"""
QtWidgets.QToolTip.showText(self.mapToGlobal(QtCore.QPoint(
self.sizeHint().width(), top)), tooltip, self) | PypiClean |
/aws_terraform_casper-0.3.0-py3-none-any.whl/casper/states/aws.py | from casper.states.terraform import TerraformState
import re
import boto3
import json
import tempfile
class AWSState(TerraformState):
def __init__(
self,
profile=None,
bucket=None,
state_file=None,
load_state=False
):
self._resource_group_remap = {
'aws_spot_instance_request': 'aws_instance',
'aws_lb': 'aws_alb'
}
self.session = boto3.Session()
self.state_object = state_file
if profile:
self.session = boto3.Session(profile_name=profile)
super().__init__(profile=profile, bucket=bucket, load_state=load_state)
def _save_state(self):
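        # Persist the collected state resources as JSON to the configured
        # S3 bucket under the given state object key.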
s3_client = self.session.client('s3')
with tempfile.NamedTemporaryFile(mode='w+') as fid:
fid.write(json.dumps(self.state_resources))
fid.flush()
s3_client.upload_file(
fid.name, self.bucket, self.state_object
)
def _load_state(self):
s3 = self.session.resource('s3')
obj = s3.Object(self.bucket, self.state_object)
data = obj.get()['Body'].read()
self.state_resources = json.loads(data)
@classmethod
def _get_field(cls, field, resource):
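        # Extract the value of a "<field> = <value>" line from the textual
        # representation of a Terraform resource.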
pattern = f"(\\n|^)({field}\\s+.*?)(\\n)"
match = re.search(pattern, resource)[0]
value = match.split("=")[1].strip()
return value
def _get_state_aws_instance(self, text):
return self._get_field('id', text)
def _get_state_aws_autoscaling_group(self, text):
return self._get_field('id', text)
def _get_state_aws_spot_instance_request(self, text):
return self._get_field('spot_instance_id', text)
def _get_state_aws_security_group(self, text):
return self._get_field('id', text)
def _get_state_aws_s3_bucket(self, text):
return self._get_field('id', text)
def _get_state_aws_iam_user(self, text):
return self._get_field('id', text)
def _get_state_aws_iam_role(self, text):
return self._get_field('id', text)
def _get_state_aws_elb(self, text):
return self._get_field('name', text)
def _get_state_aws_alb(self, text):
return self._get_field('name', text)
def _get_state_aws_lb(self, text):
return self._get_field('name', text) | PypiClean |
/async-suds-v7k-1.0.8.3.tar.gz/async-suds-v7k-1.0.8.3/asyncsuds/sudsobject.py | from logging import getLogger
from asyncsuds import *
log = getLogger(__name__)
def items(sobject):
"""
Extract the I{items} from a suds object.
Much like the items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
for item in sobject:
yield item
def asdict(sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
    @return: A Python dictionary of the items contained in I{sobject}.
@rtype: dict
"""
return dict(items(sobject))
def merge(a, b):
"""
Merge all attributes and metadata from I{a} to I{b}.
@param a: A I{source} object
@type a: L{Object}
@param b: A I{destination} object
@type b: L{Object}
"""
for item in a:
setattr(b, item[0], item[1])
b.__metadata__ = b.__metadata__
return b
def footprint(sobject):
"""
Get the I{virtual footprint} of the object.
This is really a count of all the significant value attributes in the
branch.
@param sobject: A suds object.
@type sobject: L{Object}
@return: The branch footprint.
@rtype: int
"""
n = 0
for a in sobject.__keylist__:
v = getattr(sobject, a)
if v is None:
continue
if isinstance(v, Object):
n += footprint(v)
continue
if hasattr(v, "__len__"):
if len(v):
n += 1
continue
n += 1
return n
class Factory:
cache = {}
@classmethod
def subclass(cls, name, bases, dict={}):
if not isinstance(bases, tuple):
bases = (bases,)
# name is of type unicode in python 2 -> not accepted by type()
name = str(name)
key = ".".join((name, str(bases)))
subclass = cls.cache.get(key)
if subclass is None:
subclass = type(name, bases, dict)
cls.cache[key] = subclass
return subclass
@classmethod
def object(cls, classname=None, dict={}):
if classname is not None:
subclass = cls.subclass(classname, Object)
inst = subclass()
else:
inst = Object()
for a in dict.items():
setattr(inst, a[0], a[1])
return inst
@classmethod
def metadata(cls):
return Metadata()
@classmethod
def property(cls, name, value=None):
subclass = cls.subclass(name, Property)
return subclass(value)
class Object(object):
def __init__(self):
self.__keylist__ = []
self.__printer__ = Printer()
self.__metadata__ = Metadata()
def __setattr__(self, name, value):
builtin = name.startswith("__") and name.endswith("__")
if not builtin and name not in self.__keylist__:
self.__keylist__.append(name)
self.__dict__[name] = value
def __delattr__(self, name):
try:
del self.__dict__[name]
builtin = name.startswith("__") and name.endswith("__")
if not builtin:
self.__keylist__.remove(name)
except Exception:
self.__class__.__name__
raise AttributeError
def __getitem__(self, name):
if isinstance(name, int):
name = self.__keylist__[int(name)]
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __iter__(self):
return Iter(self)
def __len__(self):
return len(self.__keylist__)
def __contains__(self, name):
return name in self.__keylist__
def __repr__(self):
return str(self)
def __str__(self):
return self.__printer__.tostr(self)
class Iter:
def __init__(self, sobject):
self.sobject = sobject
self.keylist = self.__keylist(sobject)
self.index = 0
def __next__(self):
keylist = self.keylist
nkeys = len(self.keylist)
while self.index < nkeys:
k = keylist[self.index]
self.index += 1
if hasattr(self.sobject, k):
v = getattr(self.sobject, k)
return (k, v)
raise StopIteration()
def __keylist(self, sobject):
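        # Honor an explicit ordering from the object's metadata when it covers
        # all keys; otherwise fall back to the insertion-ordered key list.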
keylist = sobject.__keylist__
try:
keyset = set(keylist)
ordering = sobject.__metadata__.ordering
ordered = set(ordering)
if not ordered.issuperset(keyset):
log.debug(
"%s must be superset of %s, ordering ignored", keylist, ordering
)
raise KeyError()
return ordering
except Exception:
return keylist
def __iter__(self):
return self
class Metadata(Object):
def __init__(self):
self.__keylist__ = []
self.__printer__ = Printer()
class Facade(Object):
def __init__(self, name):
Object.__init__(self)
md = self.__metadata__
md.facade = name
class Property(Object):
def __init__(self, value):
Object.__init__(self)
self.value = value
def items(self):
for item in self:
if item[0] != "value":
yield item
def get(self):
return self.value
def set(self, value):
self.value = value
return self
class Printer:
"""Pretty printing of a Object object."""
@classmethod
def indent(cls, n):
return "%*s" % (n * 3, " ")
def tostr(self, object, indent=-2):
"""Get s string representation of object."""
history = []
return self.process(object, history, indent)
def process(self, object, h, n=0, nl=False):
"""Print object using the specified indent (n) and newline (nl)."""
if object is None:
return "None"
if isinstance(object, Object):
if len(object) == 0:
return "<empty>"
return self.print_object(object, h, n + 2, nl)
if isinstance(object, dict):
if len(object) == 0:
return "<empty>"
return self.print_dictionary(object, h, n + 2, nl)
if isinstance(object, (list, tuple)):
if len(object) == 0:
return "<empty>"
return self.print_collection(object, h, n + 2)
if isinstance(object, str):
return '"%s"' % (tostr(object),)
return "%s" % (tostr(object),)
def print_object(self, d, h, n, nl=False):
"""Print complex using the specified indent (n) and newline (nl)."""
s = []
cls = d.__class__
if d in h:
s.append("(")
s.append(cls.__name__)
s.append(")")
s.append("...")
return "".join(s)
h.append(d)
if nl:
s.append("\n")
s.append(self.indent(n))
if cls != Object:
s.append("(")
if isinstance(d, Facade):
s.append(d.__metadata__.facade)
else:
s.append(cls.__name__)
s.append(")")
s.append("{")
for item in d:
if self.exclude(d, item):
continue
item = self.unwrap(d, item)
s.append("\n")
s.append(self.indent(n + 1))
if isinstance(item[1], (list, tuple)):
s.append(item[0])
s.append("[]")
else:
s.append(item[0])
s.append(" = ")
s.append(self.process(item[1], h, n, True))
s.append("\n")
s.append(self.indent(n))
s.append("}")
h.pop()
return "".join(s)
def print_dictionary(self, d, h, n, nl=False):
"""Print complex using the specified indent (n) and newline (nl)."""
if d in h:
return "{}..."
h.append(d)
s = []
if nl:
s.append("\n")
s.append(self.indent(n))
s.append("{")
for item in d.items():
s.append("\n")
s.append(self.indent(n + 1))
if isinstance(item[1], (list, tuple)):
s.append(tostr(item[0]))
s.append("[]")
else:
s.append(tostr(item[0]))
s.append(" = ")
s.append(self.process(item[1], h, n, True))
s.append("\n")
s.append(self.indent(n))
s.append("}")
h.pop()
return "".join(s)
def print_collection(self, c, h, n):
"""Print collection using the specified indent (n) and newline (nl)."""
if c in h:
return "[]..."
h.append(c)
s = []
for item in c:
s.append("\n")
s.append(self.indent(n))
s.append(self.process(item, h, n - 2))
s.append(",")
h.pop()
return "".join(s)
def unwrap(self, d, item):
"""Translate (unwrap) using an optional wrapper function."""
try:
md = d.__metadata__
pmd = getattr(md, "__print__", None)
if pmd is None:
return item
wrappers = getattr(pmd, "wrappers", {})
fn = wrappers.get(item[0], lambda x: x)
return (item[0], fn(item[1]))
except Exception:
pass
return item
def exclude(self, d, item):
"""Check metadata for excluded items."""
try:
md = d.__metadata__
pmd = getattr(md, "__print__", None)
if pmd is None:
return False
excludes = getattr(pmd, "excludes", [])
return item[0] in excludes
except Exception:
pass
return False | PypiClean |
/africunia-0.7.1.tar.gz/africunia-0.7.1/africuniabase/account.py | import hashlib
import sys
from binascii import hexlify, unhexlify
from graphenebase.account import Address as GPHAddress
from graphenebase.account import BrainKey as GPHBrainKey
from graphenebase.account import PasswordKey as GPHPasswordKey
from graphenebase.account import PrivateKey as GPHPrivateKey
from graphenebase.account import PublicKey as GPHPublicKey
from graphenebase.account import Prefix
default_prefix = "AFCASH"
class PasswordKey(GPHPasswordKey):
"""
This class derives a private key given the account name, the role and a password.
It leverages the technology of Brainkeys and allows people to have a secure private
key by providing a passphrase only.
"""
prefix = default_prefix
class BrainKey(GPHBrainKey):
"""
Brainkey implementation similar to the graphene-ui web-wallet.
:param str brainkey: Brain Key
:param int sequence: Sequence number for consecutive keys
Keys in Graphene are derived from a seed brain key which is a string of
16 words out of a predefined dictionary with 49744 words. It is a
simple single-chain key derivation scheme that is not compatible with
BIP44 but easy to use.
Given the brain key, a private key is derived as::
privkey = SHA256(SHA512(brainkey + " " + sequence))
Incrementing the sequence number yields a new key that can be
regenerated given the brain key.
"""
prefix = default_prefix
class Address(GPHAddress):
"""
Address class.
This class serves as an address representation for Public Keys.
:param str address: Base58 encoded address (defaults to ``None``)
:param str pubkey: Base58 encoded pubkey (defaults to ``None``)
:param str prefix: Network prefix (defaults to ``AFCASH``)
Example::
Address("BTSFN9r6VYzBK8EKtMewfNbfiGCr56pHDBFi")
"""
prefix = default_prefix
class PublicKey(GPHPublicKey):
"""
This class deals with Public Keys and inherits ``Address``.
:param str pk: Base58 encoded public key
:param str prefix: Network prefix (defaults to ``AFCASH``)
    Example::
PublicKey("BTS6UtYWWs3rkZGV8JA86qrgkG6tyFksgECefKE1MiH4HkLD8PFGL")
.. note:: By default, graphene-based networks deal with **compressed**
public keys. If an **uncompressed** key is required, the
method ``unCompressed`` can be used::
PublicKey("xxxxx").unCompressed()
"""
prefix = default_prefix
class PrivateKey(GPHPrivateKey):
"""
Derives the compressed and uncompressed public keys and constructs two instances of
``PublicKey``:
:param str wif: Base58check-encoded wif key
:param str prefix: Network prefix (defaults to ``AFCASH``)
    Example::
PrivateKey("5HqUkGuo62BfcJU5vNhTXKJRXuUi9QSE6jp8C3uBJ2BVHtB8WSd")
Compressed vs. Uncompressed:
* ``PrivateKey("w-i-f").pubkey``:
Instance of ``PublicKey`` using compressed key.
* ``PrivateKey("w-i-f").pubkey.address``:
Instance of ``Address`` using compressed key.
* ``PrivateKey("w-i-f").uncompressed``:
Instance of ``PublicKey`` using uncompressed key.
* ``PrivateKey("w-i-f").uncompressed.address``:
Instance of ``Address`` using uncompressed key.
"""
prefix = default_prefix | PypiClean |
/qiskit-dell-runtime-0.1.4.tar.gz/qiskit-dell-runtime-0.1.4/doc/examples.ipynb | # Qiskit Dell Runtime Examples
## Local Execution
The following program walks through a (simple) example usage of the
Qiskit Dell Runtime in a local execution environment: i.e. using a locally
installed simulator, or calling a remote simulator or QPU directly from a
local machine.
```
from dell_runtime import DellRuntimeProvider
from qiskit import QuantumCircuit
import logging
import requests
import time
import os
```
If the program that interacts with the simulator/QPU is small enough,
it can be stored as a string in the file that interfaces with the
provider. Both directories and files can be taken as input, as well.
```
RUNTIME_PROGRAM = """
# This code is part of qiskit-runtime.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from qiskit.compiler import transpile, schedule
def main(
backend,
user_messenger,
circuits,
**kwargs,
):
circuits = transpile(
circuits,
)
user_messenger.publish({'results': 'intermittently'}, final=False)
if not isinstance(circuits, list):
circuits = [circuits]
# Compute raw results using either simulator or QPU backend.
result = backend.run(circuits, **kwargs).result()
user_messenger.publish(result.to_dict(), final=True)
"""
RUNTIME_PROGRAM_METADATA = {
"max_execution_time": 600,
"description": "Qiskit test program"
}
PROGRAM_PREFIX = 'qiskit-test'
REMOTE_RUNTIME = os.getenv("SERVER_URL")
logging.basicConfig(level=logging.DEBUG)
```
The `DellRuntimeProvider` is an interface that offers a choice of runtime (local or remote). Through this interface the client selects whether or not to run their code in a remote environment.
```
provider = DellRuntimeProvider()
```
The runtime is a service that allows clients to upload, update,
view, and run programs inside an execution environment. Since the client
has not specified a remote runtime to the provider, it defaults to local.
```
program_id = provider.runtime.upload_program(RUNTIME_PROGRAM, metadata=RUNTIME_PROGRAM_METADATA)
print(f"PROGRAM ID: {program_id}")
programs = provider.runtime.pprint_programs(refresh=True)
```
The following updates the existing program with a new description - this can be done for any of the metadata fields or the program data itself, though changes to the program data are not shown in the `pprint_programs` output.
```
provider.runtime.update_program(program_id, description="IBM/Dell Updated Qiskit Runtime Program")
programs = provider.runtime.pprint_programs(refresh=True)
```
Below we use the Qiskit QuantumCircuit to create a circuit for our program to run. We then place that circuit in `program_inputs` - a dictionary of things that will be provided to our runtime program.
```
qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])
program_inputs = {
'circuits': qc,
}
```
Through the `provider` we are able to run an instance of our program with the inputs we have created.
When we run a job locally, a new process is started. This new process returns results to the main process via a socket connection.
```
job = provider.runtime.run(program_id, options=None, inputs=program_inputs)
```
We can obtain a job's final results and specify a timeout for how long we are willing to wait. If no timeout is specified, the function will return `None` or the final results if they are present.
```
results = job.result(timeout=60)
print(results)
```
We can also provide a callback function to the runtime for a job. A thread launched in the client process to poll for messages will call the callback when a non-final message is received.
```
def callback_function(msg):
print(f'******************\n\n\nFrom Callback Function: {msg}\n\n\n******************')
job = provider.runtime.run(program_id, inputs=program_inputs, options=None, callback=callback_function)
```
You may also specify a different backend on which you would like the quantum code to run. The default backend is the Qiskit Aer simulator.
```
program_inputs['backend_name'] = 'emulator'
job = provider.runtime.run(program_id, inputs=program_inputs, options=None, callback=callback_function)
```
## Remote Execution
The following example does mainly the same things as the local version, but establishes a connection to a remote server on which to run bundled code. The program starts identically to the local example:
```
from dell_runtime import DellRuntimeProvider
from qiskit import QuantumCircuit
import logging
import requests
import time
import os
RUNTIME_PROGRAM = """
# This code is part of qiskit-runtime.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from qiskit.compiler import transpile, schedule
def main(
backend,
user_messenger,
circuits,
**kwargs,
):
circuits = transpile(
circuits,
)
user_messenger.publish({'results': 'intermittently'}, final=False)
if not isinstance(circuits, list):
circuits = [circuits]
# Compute raw results
result = backend.run(circuits, **kwargs).result()
user_messenger.publish(result.to_dict(), final=True)
"""
RUNTIME_PROGRAM_METADATA = {
"max_execution_time": 600,
"description": "Qiskit test program"
}
PROGRAM_PREFIX = 'qiskit-test'
REMOTE_RUNTIME = os.getenv("SERVER_URL")
logging.basicConfig(level=logging.DEBUG)
```
Here we get our first difference - the `provider.remote()` call establishes a connection to our remote server running on Kubernetes.
If SSO is not enabled on the server, the client is returned a user ID that they may save and set as an environment variable (`$QDR_ID`) to return to uploaded data.
If SSO is enabled on the server, the client will follow the usual set of SSO authentication steps (logging in using a pop-up browser window with their credentials) and the server will authenticate them using a token they send back.
```
provider = DellRuntimeProvider()
provider.remote(REMOTE_RUNTIME)
```
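If SSO is disabled and you want to return to the same uploaded data in a later session, one approach is to set `$QDR_ID` before reconnecting. This is only a sketch - how you obtain and persist the user ID depends on your deployment:
```
import os

# hypothetical: the user ID you saved from your first session
os.environ["QDR_ID"] = "<your-user-id>"

provider = DellRuntimeProvider()
provider.remote(REMOTE_RUNTIME)
```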
Uploading a program looks exactly the same as it did in the local version.
```
text_program_id = provider.runtime.upload_program(RUNTIME_PROGRAM, metadata=RUNTIME_PROGRAM_METADATA)
print(f"PROGRAM ID: {text_program_id}")
```
Printing out program metadata and updating programs also works the same as locally.
```
programs = provider.runtime.pprint_programs(refresh=True)
provider.runtime.update_program(text_program_id, description="IBM/Dell Updated Qiskit Runtime Program")
programs = provider.runtime.pprint_programs(refresh=True)
```
It is also possible to upload programs stored in files or directories. To do so, instead of providing a string containing the entire program to `provider.runtime.run()` you may provide a path to a file or directory:
```
file_program_id = provider.runtime.upload_program("qka.py", description="File Upload to Orchestrator")
dir_program_id = provider.runtime.upload_program("./qkad", description="Directory Upload to Orchestrator")
```
You'll be able to see those programs uploaded when you print out the list:
```
provider.runtime.pprint_programs(refresh=True)
```
From here we'll do the same things that we did in the local version. Set up a circuit, pass it as input to an instance of our circuit runner program, and then obtain our results:
```
qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])
program_inputs = {
'circuits': qc,
}
job = provider.runtime.run(text_program_id, options=None, inputs=program_inputs)
results = job.result(timeout=60)
print(results)
```
We can also use the same callback feature we saw locally (re-using the `callback_function` defined in the local example) and run on a backend provided by the remote server:
```
program_inputs['backend_name'] = 'emulator'
job = provider.runtime.run(text_program_id, inputs=program_inputs, options=None, callback=callback_function)
results = job.result(timeout=600)
print(results)
```
## Common Algorithms
### QKA
It is wholly possible to run a Quantum Kernel Alignment implementation on the Qiskit Dell Runtime. Below is an example that utilizes the directory upload feature (the bundle uploaded is located in `../examples/programs/qkad`) to execute an instance of QKA.
The inputs for this version are already part of the bundle, though it is possible to manipulate the files so that inputs are generated as part of the client code and provided to the bundle upon initiating a job.
```
from dell_runtime import DellRuntimeProvider
from qiskit import QuantumCircuit
import pandas as pd
from time import sleep
import os
import base64
import shutil
import json
provider = DellRuntimeProvider()
RUNTIME_PROGRAM_METADATA = {
"max_execution_time": 600,
"description": "Qiskit test program"
}
provider.remote(os.getenv("SERVER_URL"))
here = os.path.dirname(os.path.realpath(__file__))
program_id = provider.runtime.upload_program(os.path.join(here, "..", "examples", "programs", "qkad"), metadata=RUNTIME_PROGRAM_METADATA)
job = provider.runtime.run(program_id, options=None, inputs={})
res = job.result(timeout=1000)
print(res)
```
### VQE
It is also possible to run Variational Quantum Eigensolver algorithms using the Qiskit Dell Runtime. An example of the client code is visible below (adapted from the IBM Qiskit Textbook):
Note that any inputs you need in your program can be placed inside the same dictionary - they will be contained in `kwargs` in your program's `main` function.
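For example, a custom input can be read back inside the program roughly like this (a sketch - `my_option` and the exact `main` signature of your uploaded program are placeholders):
```
# inside the uploaded program
def main(backend, user_messenger, **kwargs):
    my_option = kwargs.get('my_option', 'default')
    ...

# on the client side, added to the same inputs dictionary
inputs = {'my_option': 42}
job = provider.runtime.run(program_id, inputs=inputs, options=None)
```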
The Qiskit Terra implementation of the VQE algorithm also provides an opportunity to experience quantum emulation (as discussed in the [introduction](intro.md#emulation-vs-simulation)). The Terra implementation provides the `include_custom` parameter, which guarantees an ideal outcome with no shot noise (as in Qiskit's statevector simulator). This parameter can therefore be used to emulate ideal results instead of simulate shots to converge on a non-ideal outcome.
You can read more about advanced VQE options in the [Qiskit Terra documentation](https://qiskit.org/documentation/tutorials/algorithms/04_vqe_advanced.html)
```
from qiskit.opflow import Z, I
from qiskit.circuit.library import EfficientSU2
import numpy as np
from qiskit.algorithms.optimizers import SPSA
from dell_runtime import DellRuntimeProvider
import os
from time import sleep
from datetime import datetime, timedelta
num_qubits = 4
hamiltonian = (Z ^ Z) ^ (I ^ (num_qubits - 2))
target_energy = -1
# the rotation gates are chosen randomly, so we set a seed for reproducibility
ansatz = EfficientSU2(num_qubits, reps=1, entanglement='linear', insert_barriers=True)
# ansatz.draw('mpl', style='iqx')
optimizer = SPSA(maxiter=50)
np.random.seed(10) # seed for reproducibility
initial_point = np.random.random(ansatz.num_parameters)
intermediate_info = {
'nfev': [],
'parameters': [],
'energy': [],
'stddev': []
}
timestamps = []
def raw_callback(*args):
(nfev, parameters, energy, stddev) = args[0]
intermediate_info['nfev'].append(nfev)
intermediate_info['parameters'].append(parameters)
intermediate_info['energy'].append(energy)
intermediate_info['stddev'].append(stddev)
vqe_inputs = {
'ansatz': ansatz,
'operator': hamiltonian,
'optimizer': {'name': 'SPSA', 'maxiter': 15}, # let's only do a few iterations!
'initial_point': initial_point,
'measurement_error_mitigation': True,
'shots': 1024,
# Include this parameter to use the snapshot instruction and return the ideal outcome
# that has no shot noise and avoids using the statevector simulator.
# 'include_custom': True
}
provider = DellRuntimeProvider()
provider.remote(os.getenv("SERVER_URL"))
program_id = provider.runtime.upload_program("vqe.py", description="Variational Quantum Eigensolver Program")
job = provider.runtime.run(
program_id=program_id,
inputs=vqe_inputs,
options=None,
callback=raw_callback
)
print('Job ID:', job.job_id)
result = job.result()
while not result:
print('no result yet.')
sleep(0.5)
result = job.result()
print(f"Intermediate Results: {intermediate_info}")
print(f'Reached {result["optimal_value"]} after {result["optimizer_evals"]} evaluations.')
print('Available keys:', list(result.keys()))
```
| PypiClean |
/odin-ai-1.2.0.tar.gz/odin-ai-1.2.0/odin/networks/mixture_density_network.py | from __future__ import absolute_import, division, print_function
import collections
import numpy as np
import tensorflow as tf
from sklearn.mixture import GaussianMixture
from tensorflow.python import keras
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.layers import Dense
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.layers.distribution_layer import (
DistributionLambda, _get_convert_to_tensor_fn, _serialize,
_serialize_function)
from tensorflow_probability.python.layers.internal import \
distribution_tensor_coercible as dtc
from tensorflow_probability.python.layers.internal import \
tensor_tuple as tensor_tuple
__all__ = ['MixtureDensityNetwork']
_COV_TYPES = ('none', 'diag', 'full', 'tril')
class MixtureDensityNetwork(Dense):
"""A mixture of Gaussian Keras layer.
Parameters
----------
units : `int`
number of output features for each component.
n_components : `int` (default=`2`)
The number of mixture components.
covariance_type : {'none', 'diag', 'full', 'tril'}
String describing the type of covariance parameters to use.
Must be one of:
'none' (each component has its own single variance).
'diag' (each component has its own diagonal covariance matrix),
'tril' (lower triangle matrix),
'full' (each component has its own general covariance matrix),
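  Example
  -------
  A minimal sketch of attaching the layer to a Keras model. The loss wiring
  below follows the usual TensorFlow-Probability ``DistributionLambda``
  pattern and is illustrative only, not part of this module::

    mdn = MixtureDensityNetwork(units=1, n_components=3, covariance_type='diag')
    inputs = tf.keras.Input(shape=(8,))
    outputs = mdn(inputs)  # a tensor-coercible mixture distribution
    model = tf.keras.Model(inputs, outputs)
    model.compile(optimizer='adam',
                  loss=lambda y, rv_y: -rv_y.log_prob(y))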
"""
def __init__(self,
units,
n_components=2,
covariance_type='none',
convert_to_tensor_fn=tfd.Distribution.sample,
softplus_scale=True,
validate_args=False,
activation='linear',
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
covariance_type = str(covariance_type).lower()
assert covariance_type in _COV_TYPES, \
"No support for covariance_type: '%s', the support value are: %s" % \
(covariance_type, ', '.join(_COV_TYPES))
self._covariance_type = covariance_type
self._n_components = int(n_components)
self._validate_args = bool(validate_args)
self._convert_to_tensor_fn = _get_convert_to_tensor_fn(convert_to_tensor_fn)
self._softplus_scale = bool(softplus_scale)
# We'll need to keep track of who's calling who since the functional
# API has a different way of injecting `_keras_history` than the
# `keras.Sequential` way.
self._enter_dunder_call = False
# ====== calculating the number of parameters ====== #
if covariance_type == 'none':
component_params_size = 2 * units
elif covariance_type == 'diag': # only the diagonal
component_params_size = units + units
elif covariance_type == 'tril': # lower triangle
component_params_size = units + units * (units + 1) // 2
elif covariance_type == 'full': # full matrix
component_params_size = units + units * units
else:
raise NotImplementedError
self._component_params_size = component_params_size
params_size = self.n_components + self.n_components * component_params_size
self._event_size = units
super(MixtureDensityNetwork,
self).__init__(units=params_size,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
@property
def event_size(self):
return self._event_size
@property
def covariance_type(self):
return self._covariance_type
@property
def n_components(self):
return self._n_components
@property
def component_params_size(self):
return self._component_params_size
def __call__(self, inputs, *args, **kwargs):
self._enter_dunder_call = True
distribution, _ = super(MixtureDensityNetwork,
self).__call__(inputs, *args, **kwargs)
self._enter_dunder_call = False
return distribution
def call(self, inputs, *args, **kwargs):
dense_kwargs = dict(kwargs)
dense_kwargs.pop('training', None)
params = super(MixtureDensityNetwork, self).call(inputs, *args,
**dense_kwargs)
n_components = tf.convert_to_tensor(value=self.n_components,
name='n_components',
dtype_hint=tf.int32)
# ====== mixture weights ====== #
mixture_coefficients = params[..., :n_components]
mixture_dist = tfd.Categorical(logits=mixture_coefficients,
validate_args=self._validate_args,
name="MixtureWeights")
# ====== initialize the components ====== #
params = tf.reshape(
params[..., n_components:],
tf.concat([tf.shape(input=params)[:-1], [n_components, -1]], axis=0))
if bool(self._softplus_scale):
scale_fn = lambda x: tf.math.softplus(x) + tfd.softplus_inverse(1.0)
else:
scale_fn = lambda x: x
if self.covariance_type == 'none':
cov = 'IndependentNormal'
loc_params, scale_params = tf.split(params, 2, axis=-1)
scale_params = scale_params
components_dist = tfd.Independent(tfd.Normal(
loc=loc_params,
scale=scale_fn(scale_params),
validate_args=self._validate_args),
reinterpreted_batch_ndims=1)
#
elif self.covariance_type == 'diag':
cov = 'MultivariateNormalDiag'
loc_params, scale_params = tf.split(params, 2, axis=-1)
components_dist = tfd.MultivariateNormalDiag(
loc=loc_params,
scale_diag=scale_fn(scale_params),
validate_args=self._validate_args)
#
elif self.covariance_type == 'tril':
cov = 'MultivariateNormalTriL'
loc_params = params[..., :self.event_size]
scale_params = scale_fn(params[..., self.event_size:])
scale_tril = tfb.ScaleTriL(diag_shift=np.array(
1e-5, params.dtype.as_numpy_dtype()),
validate_args=self._validate_args)
components_dist = tfd.MultivariateNormalTriL(
loc=loc_params,
scale_tril=scale_tril(scale_params),
validate_args=self._validate_args)
#
elif self.covariance_type == 'full':
cov = 'MultivariateNormalFull'
loc_params = params[..., :self.event_size]
scale_params = tf.reshape(
scale_fn(params[..., self.event_size:]),
tf.concat(
[tf.shape(input=params)[:-1], (self.event_size, self.event_size)],
axis=0))
components_dist = tfd.MultivariateNormalFullCovariance(
loc=loc_params,
covariance_matrix=scale_params,
validate_args=self._validate_args)
else:
raise NotImplementedError
# ====== finally the mixture ====== #
d = tfd.MixtureSameFamily(mixture_distribution=mixture_dist,
components_distribution=components_dist,
validate_args=False,
name="Mixture%s" % cov)
# Wraps the distribution to return both dist and concrete value."""
value_is_seq = isinstance(d.dtype, collections.Sequence)
maybe_composite_convert_to_tensor_fn = (
(lambda d: tensor_tuple.TensorTuple(self._convert_to_tensor_fn(d)))
if value_is_seq else self._convert_to_tensor_fn)
distribution = dtc._TensorCoercible( # pylint: disable=protected-access
distribution=d,
convert_to_tensor_fn=maybe_composite_convert_to_tensor_fn)
value = distribution._value() # pylint: disable=protected-access
value._tfp_distribution = distribution # pylint: disable=protected-access
if value_is_seq:
value.shape = value[-1].shape
value.get_shape = value[-1].get_shape
value.dtype = value[-1].dtype
distribution.shape = value[-1].shape
distribution.get_shape = value[-1].get_shape
else:
distribution.shape = value.shape
distribution.get_shape = value.get_shape
if self._enter_dunder_call:
# Its critical to return both distribution and concretization
# so Keras can inject `_keras_history` to both. This is what enables
# either to be used as an input to another Keras `Model`.
return distribution, value
return distribution
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if tensor_shape.dimension_value(input_shape[-1]) is None:
raise ValueError(
'The innermost dimension of input_shape must be defined, but saw: %s'
% input_shape)
# the number of output units is equal to event_size, not number of
# hidden units
return input_shape[:-1].concatenate(self.event_size)
def get_config(self):
"""Returns the config of this layer. """
config = {
'convert_to_tensor_fn': _serialize(self._convert_to_tensor_fn),
'covariance_type': self._covariance_type,
'validate_args': self._validate_args,
'n_components': self._n_components,
'softplus_scale': self._softplus_scale,
}
base_config = super(MixtureDensityNetwork, self).get_config()
base_config.update(config)
return base_config | PypiClean |
/sardana-3.3.6.tar.gz/sardana-3.3.6/doc/source/devel/api/api_1D.rst | .. currentmodule:: sardana.pool.poolonedexpchannel
.. _sardana-1d-api:
=============================
1D channel API reference
=============================
A 1D represents an experimental channel whose acquisition result is a spectrum
value.
A 1D has ``state`` and ``value`` attributes. The state indicates at any
time if the 1D is stopped, in alarm or moving. The value indicates the
current 1D value.
The other attributes are:
data source
Unique identifier for the 1D data (value attribute)
timer
name of the timer channel (proceeding from the same controller) to be used
when the channel is acquired independently
special values:
* __default - controller's default timer
* __self - the same channel acts like a timer
* None - independent acquisition is disabled
integration time
integration time (in seconds) to be used when the channel is acquired
independently
The available operations are:
start acquisition
starts to acquire the 1D
:meth:`~PoolCounterTimer.start_acquisition`
stop
stops the 1D acquisition in an orderly fashion
abort
stops the 1D acquisition as fast as possible
release
    Release a hung acquisition, e.g. when the hardware controller has hung.
    You should first try stop/abort.
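A minimal acquisition sketch over Tango (the device name, attribute and
command names below are assumptions based on the descriptions above, not a
verified interface)::

    import PyTango

    oned = PyTango.DeviceProxy("expchan/onedctrl/1")
    oned.write_attribute("IntegrationTime", 1.0)   # seconds
    oned.Start()                                   # start acquisition
    spectrum = oned.read_attribute("Value").value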
.. seealso::
:ref:`sardana-1d-overview`
the 1D experiment channel overview
:class:`~sardana.tango.pool.OneDExpChannel.OneDExpChannel`
the 1D experiment channel tango device :term:`API`
.. :class:`~sardana.pool.poolonedexpchannel.Pool1DExpChannel`
.. the 1D experiment channel class :term:`API`
| PypiClean |
/theatre_of_spud-0.6.0.tar.gz/theatre_of_spud-0.6.0/build/lib/tos/dlg/act03/message.rst | .. |VERSION| property:: tos.story.version
:author: D E Haynes
:made_at: 2021-02-02
:project: Theatre of Spud
:version: |VERSION|
.. entity:: PLAYER
:types: tos.mixins.types.Character
:states: tos.mixins.types.Mode.playing
.. entity:: EDWARD
:types: tos.mixins.types.Character
:states: tos.types.Motivation.leader
tos.mixins.types.Proximity.present
.. entity:: SPUD
:types: tos.mixins.types.Character
:states: tos.types.Motivation.acting
.. entity:: PHONE
:types: tos.mixins.types.Artifact
:states: tos.map.Map.Location.office
tos.mixins.types.Significance.suppress
.. entity:: STORY
:types: tos.story.Story
.. entity:: DRAMA
:types: turberfield.catchphrase.drama.Drama
.. entity:: SETTINGS
:types: turberfield.catchphrase.render.Settings
.. |EDWARD| property:: EDWARD.name
.. |PLAYER| property:: PLAYER.name
Message
=======
{0}
Danny
-----
.. condition:: STORY.bookmark.drama.messengers[0].messages[0].tags[1] Danny
[EDWARD]_
|PLAYER|, was that the telephone ringing earlier?
[PLAYER]_
That was Danny's mother. They want him to go to the football tonight instead.
[EDWARD]_
Hmm. Oh well.
We'll need someone to stand in for the Officer.
That's fine. It's only a few lines.
[PLAYER]_
What about Spud? He's already here.
[EDWARD]_
Yes, why not? Would you go and let him know please |PLAYER|?
.. property:: SPUD.state tos.mixins.types.Significance.indicate
Mikey
-----
.. condition:: STORY.bookmark.drama.messengers[0].messages[0].tags[1] Mikey
[EDWARD]_
Ah, |PLAYER|, what now?
[PLAYER]_
It's Mikey. He's going to the football as well.
[EDWARD]_
That's disappointing. Very disappointing.
Of all the people to lose. Bluntschli.
[PLAYER]_
Should we call it off do you think?
[EDWARD]_
Never. Where's Spud?
[PLAYER]_
Bluntschli and the Officer. It's a lot to ask.
[EDWARD]_
But, as 'tis, we cannot miss him.
He serves in offices that profit us.
Go now, |PLAYER| and tell him to get ready.
.. property:: SPUD.state tos.mixins.types.Significance.indicate
Hayley
------
.. condition:: STORY.bookmark.drama.messengers[0].messages[0].tags[1] Hayley
[EDWARD]_
Ah, |PLAYER|, what now?
[PLAYER]_
    I don't know how to say this, but...
[EDWARD]_
Which one is it?
[PLAYER]_
Hayley.
[EDWARD]_
Aaah! How they mock me! But I will not be defeated.
Go and warn Spud he has more lines to learn.
[PLAYER]_
    Are you sure this will work?
[EDWARD]_
It will have to work.
*Aside*
These three have robbed me.
And this demi-devil; for he's a bastard one,
has plotted with them to take my life.
.. property:: SPUD.state tos.mixins.types.Significance.indicate
| PypiClean |
/django-suit-ckeditor-custom-1.0.11.tar.gz/django-suit-ckeditor-custom-1.0.11/suit_ckeditor/static/suit-ckeditor/ckeditor/plugins/specialchar/dialogs/lang/pt-br.js | /*
Copyright (c) 2003-2021, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/
CKEDITOR.plugins.setLang("specialchar","pt-br",{euro:"Euro",lsquo:"Aspas simples esquerda",rsquo:"Aspas simples direita",ldquo:"Aspas duplas esquerda",rdquo:"Aspas duplas direita",ndash:"Traço",mdash:"Travessão",iexcl:"Ponto de exclamação invertido",cent:"Cent",pound:"Cerquilha",curren:"Dinheiro",yen:"Yen",brvbar:"Bara interrompida",sect:"Símbolo de Parágrafo",uml:"Trema",copy:"Direito de Cópia",ordf:"Indicador ordinal feminino",laquo:"Aspas duplas angulares esquerda",not:"Negação",reg:"Marca Registrada",
macr:"Mácron",deg:"Grau",sup2:"2 Superscrito",sup3:"3 Superscrito",acute:"Acento agudo",micro:"Micro",para:"Pé de mosca",middot:"Ponto mediano",cedil:"Cedilha",sup1:"1 Superscrito",ordm:"Indicador ordinal masculino",raquo:"Aspas duplas angulares direita",frac14:"Um quarto",frac12:"Um meio",frac34:"Três quartos",iquest:"Interrogação invertida",Agrave:"A maiúsculo com acento grave",Aacute:"A maiúsculo com acento agudo",Acirc:"A maiúsculo com acento circunflexo",Atilde:"A maiúsculo com til",Auml:"A maiúsculo com trema",
Aring:"A maiúsculo com anel acima",AElig:"Æ maiúsculo",Ccedil:"Ç maiúlculo",Egrave:"E maiúsculo com acento grave",Eacute:"E maiúsculo com acento agudo",Ecirc:"E maiúsculo com acento circumflexo",Euml:"E maiúsculo com trema",Igrave:"I maiúsculo com acento grave",Iacute:"I maiúsculo com acento agudo",Icirc:"I maiúsculo com acento circunflexo",Iuml:"I maiúsculo com crase",ETH:"Eth maiúsculo",Ntilde:"N maiúsculo com til",Ograve:"O maiúsculo com acento grave",Oacute:"O maiúsculo com acento agudo",Ocirc:"O maiúsculo com acento circunflexo",
Otilde:"O maiúsculo com til",Ouml:"O maiúsculo com trema",times:"Multiplicação",Oslash:"Diâmetro",Ugrave:"U maiúsculo com acento grave",Uacute:"U maiúsculo com acento agudo",Ucirc:"U maiúsculo com acento circunflexo",Uuml:"U maiúsculo com trema",Yacute:"Y maiúsculo com acento agudo",THORN:"Thorn maiúsculo",szlig:"Eszett minúsculo",agrave:"a minúsculo com acento grave",aacute:"a minúsculo com acento agudo",acirc:"a minúsculo com acento circunflexo",atilde:"a minúsculo com til",auml:"a minúsculo com trema",
aring:"a minúsculo com anel acima",aelig:"æ minúsculo",ccedil:"ç minúsculo",egrave:"e minúsculo com acento grave",eacute:"e minúsculo com acento agudo",ecirc:"e minúsculo com acento circunflexo",euml:"e minúsculo com trema",igrave:"i minúsculo com acento grave",iacute:"i minúsculo com acento agudo",icirc:"i minúsculo com acento circunflexo",iuml:"i minúsculo com trema",eth:"eth minúsculo",ntilde:"n minúsculo com til",ograve:"o minúsculo com acento grave",oacute:"o minúsculo com acento agudo",ocirc:"o minúsculo com acento circunflexo",
otilde:"o minúsculo com til",ouml:"o minúsculo com trema",divide:"Divisão",oslash:"o minúsculo com cortado ou diâmetro",ugrave:"u minúsculo com acento grave",uacute:"u minúsculo com acento agudo",ucirc:"u minúsculo com acento circunflexo",uuml:"u minúsculo com trema",yacute:"y minúsculo com acento agudo",thorn:"thorn minúsculo",yuml:"y minúsculo com trema",OElig:"Ligação tipográfica OE maiúscula",oelig:"Ligação tipográfica oe minúscula",372:"W maiúsculo com acento circunflexo",374:"Y maiúsculo com acento circunflexo",
373:"w minúsculo com acento circunflexo",375:"y minúsculo com acento circunflexo",sbquo:"Aspas simples inferior direita",8219:"Aspas simples superior esquerda",bdquo:"Aspas duplas inferior direita",hellip:"Reticências",trade:"Trade mark",9658:"Ponta de seta preta para direita",bull:"Ponto lista",rarr:"Seta para direita",rArr:"Seta dupla para direita",hArr:"Seta dupla direita e esquerda",diams:"Ouros",asymp:"Aproximadamente"}); | PypiClean |
/SQLAlchemy_mmeyer724-1.4.0b1-cp27-cp27m-win_amd64.whl/SQLAlchemy_mmeyer724-1.4.0b1.data/purelib/sqlalchemy/dialects/sqlite/json.py | from ... import types as sqltypes
class JSON(sqltypes.JSON):
"""SQLite JSON type.
SQLite supports JSON as of version 3.9 through its JSON1_ extension. Note
that JSON1_ is a
`loadable extension <https://www.sqlite.org/loadext.html>`_ and as such
may not be available, or may require run-time loading.
The :class:`_sqlite.JSON` type supports persistence of JSON values
as well as the core index operations provided by :class:`_types.JSON`
datatype, by adapting the operations to render the ``JSON_EXTRACT``
function wrapped in the ``JSON_QUOTE`` function at the database level.
Extracted values are quoted in order to ensure that the results are
always JSON string values.
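    A short declarative sketch (the table and column names are illustrative
    only)::

        from sqlalchemy import Column, Integer, MetaData, Table
        from sqlalchemy.dialects.sqlite import JSON

        data = Table(
            "data",
            MetaData(),
            Column("id", Integer, primary_key=True),
            Column("payload", JSON),
        )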
.. versionadded:: 1.3
.. seealso::
JSON1_
.. _JSON1: https://www.sqlite.org/json1.html
"""
# Note: these objects currently match exactly those of MySQL, however since
# these are not generalizable to all JSON implementations, remain separately
# implemented for each dialect.
class _FormatTypeMixin(object):
def _format_value(self, value):
raise NotImplementedError()
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
def _format_value(self, value):
if isinstance(value, int):
value = "$[%s]" % value
else:
value = '$."%s"' % value
return value
class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
def _format_value(self, value):
return "$%s" % (
"".join(
[
"[%s]" % elem if isinstance(elem, int) else '."%s"' % elem
for elem in value
]
)
) | PypiClean |
/qinling-5.0.0.tar.gz/qinling-5.0.0/doc/source/contributor/contributing.rst | ============================
So You Want to Contribute...
============================
For general information on contributing to OpenStack, please check out the
`contributor guide <https://docs.openstack.org/contributors/>`_ to get started.
It covers all the basics that are common to all OpenStack projects: the accounts
you need, the basics of interacting with our Gerrit review system, how we
communicate as a community, etc.
Below will cover the more project specific information you need to get started
with Qinling.
Communication
~~~~~~~~~~~~~~
.. This would be a good place to put the channel you chat in as a project; when/
where your meeting is, the tags you prepend to your ML threads, etc.
- IRC channel: #openstack-qinling
- Mailing list's prefix: [qinling]
- Currently, we don't have a team meeting, given our small group of core
  reviewers and their timezones; the situation may change in the future.
Contacting the Core Team
~~~~~~~~~~~~~~~~~~~~~~~~~
.. This section should list the core team, their irc nicks, emails, timezones etc. If
all this info is maintained elsewhere (i.e. a wiki), you can link to that instead of
enumerating everyone here.
The list of current Qinling core reviewers is available on `gerrit
<https://review.opendev.org/#/admin/groups/1765,members>`_.
New Feature Planning
~~~~~~~~~~~~~~~~~~~~
.. This section is for talking about the process to get a new feature in. Some
projects use blueprints, some want specs, some want both! Some projects
stick to a strict schedule when selecting what new features will be reviewed
for a release.
Qinling doesn't use Launchpad or a separate specs repo for feature requirements.
You only need to create a task in `Storyboard
<https://storyboard.openstack.org/#!/project/openstack/qinling>`_.
Task Tracking
~~~~~~~~~~~~~~
.. This section is about where you track tasks- launchpad? storyboard? is there more
than one launchpad project? what's the name of the project group in storyboard?
We track our tasks in `Storyboard
<https://storyboard.openstack.org/#!/project/openstack/qinling>`_
If you're looking for some smaller, easier work item to pick up and get started
on, search for the 'low-hanging-fruit' tag.
Reporting a Bug
~~~~~~~~~~~~~~~
.. Pretty self explanatory section, link directly to where people should report bugs for
your project.
You found an issue and want to make sure we are aware of it? You can do so
on `Storyboard <https://storyboard.openstack.org/#!/project/openstack/qinling>`_.
Getting Your Patch Merged
~~~~~~~~~~~~~~~~~~~~~~~~~
.. This section should have info about what it takes to get something merged. Do
you require one or two +2's before +W? Do some of your repos require unit test
changes with all patches? etc.
Due to the small number of core reviewers of the Qinling project, we only need
one +2 before ``Workflow +1``.
Project Team Lead Duties
------------------------
.. this section is where you can put PTL specific duties not already listed in
the common PTL guide (linked below) or if you already have them written
up elsewhere, you can link to that doc here.
All common PTL duties are enumerated here in the `PTL guide
<https://docs.openstack.org/project-team-guide/ptl.html>`_. | PypiClean |
/love_course_2016_2019-2023.3.1.0-py3-none-any.whl/LoveCourse20162019/docs/an-xiao-yao/13-安小妖《好男人,你也要学会撩妹》:151-200:173.框架如何维持?我来教你.md | # 13 - 安小妖, "Good Men, You Too Should Learn to Flirt": 151-200: 173. How Do You Maintain the Frame? Let Me Teach You
Teaching you the most artful ways of speaking, to win over a woman's inner world. I'm 夏洛 — welcome, and please tap the subscribe button below the progress bar to subscribe to my album. We've often mentioned a term called the "frame". It means that when we spend time with a girl we should hold our own attitude: don't let her set the pace on everything, and above all don't let your emotions and your state be swayed by her too easily. Instead, we should do more to guide her into going along with our attitude and our arrangements.

So a lot of guys ask me: how do I maintain my frame? My ideas rarely win the girl's approval — what then? The simplest case: I want to take her out, but she always says she has to study or keep her friends company; what am I supposed to do? That is actually a very good question. How do you maintain a frame? Is maintaining a frame really nothing more than leading, controlling, and putting on a pretended coldness? She won't listen to me, won't come out to meet me, and inside I'm angry enough to hit someone, yet in front of her I'm supposed to act calm and unbothered, to pretend I don't care? And when she finally comes looking for me, I deliberately ignore her — is that maintaining a frame? That reading of the frame is a little too simple, so today I want to talk about what the frame essentially is.

It's a question I often discuss with colleagues, and the more we talked about it, the more we converged on a single answer. What is the frame? The frame is simply your own self-development: your appearance, dressing well, being particular about the details of your life. Once your sense of your own worth is strong enough, you have a frame as a matter of course. Why? I have a friend who illustrates the point perfectly. He's not a colleague of mine, and not a student — just someone with very firm ideas of his own. When he broke up with his last girlfriend he was devastated, coming to me every day to drink and pour his heart out about how much he loved her, how he couldn't live without her, how good he had been to her. But the girl simply didn't love him any more. Honestly, to those of us who knew them, the ex was a fairly ordinary person — nothing especially outstanding: looks were fine, family was fine, education was fine. So my friend was miserable for a while, cried himself dry, and finally, slowly, got better. After that we each got busy with our own lives and saw less of each other, apart from the occasional like or comment on each other's posts.

Then over the May Day holiday this year we went to Xiamen together, met up, and I discovered the guy had changed enormously. He used to be slightly chubby; when I saw him over May Day he was wearing a tight little t-shirt with a cardigan over it and suddenly looked really sharp, clearly in great form, head up and chest out, sunglasses on, shoes with a bit of lift. In group photos he always used to stand at the edge; this time we all unconsciously drifted over to stand next to him, because he suddenly seemed lit up, with a real aura about him. We asked him: how did you change this much without any of us noticing? You've stunned us all. And what did he say? He said that after the break-up he had been wretched for a long time, until one day it finally clicked: there was no need to make such a mess of himself. Why not treat himself better and turn himself into the best version of himself — then he would never have to fear being abandoned, with so many people liking him. And once you hear that, the mindset really is different: from the weak mindset of someone who feels looked down on all day, to the mindset of someone strong. Sure enough, once his state improved, his nerve grew. In the five days we spent in Xiamen, on the second evening he picked up a girl on the beach, and by the third day he had left the group, telling us he was in a relationship now — go have fun without me. Which, you have to admit, is remarkable. And to this day they are still together: one in Tianjin, one in Shandong, yet the long-distance relationship is going steadily and I've never heard of any problems. When I chatted with him the other day I told him: that was impressive — there was some luck in it, sure, but to a large extent it was also inevitable, and I would have to tell his story to my listeners.

You see, that is the frame I'm talking about today: it is self-development. Once you yourself are strong, your whole presence changes. With wealth comes confidence; with a reputation you naturally dare to share your experience with others, even to teach them. When you've cultivated yourself outside and in and your overall score has gone up, girls naturally start gathering around you, and you naturally stop being afraid of losing this one or that one.

So today let's go over a few good habits for improving yourself. The first: develop the habit of recording your life. Whether you keep a diary or an electronic document makes no difference. Review your life regularly and manage yourself well: where did your time go each day, what did you do, are you gaining value as time passes? Set goals for yourself, so that you can quantify your life and see your own growth. Don't let yourself depreciate with time — growing uglier, duller, gloomier and more irritable; that clearly won't do, because you would be wasting your life. So manage yourself well. Self-management and self-discipline are hard things; let your attention slip a little and you let yourself go to waste, which is why you must build the habit of recording and reviewing.

The second habit: express yourself more. Everyone has hard times, times when they are dissatisfied with themselves in every way. Never try to carry that alone, because you simply can't — you will lose control, you will choose to run away. So talk to your friends and your parents in good time; the deeper and more frequent the communication, the better your chances of getting back to normal. Why? Because when people run into hardship and setbacks, reason and a clear head are one part of what they need, but what they need even more is care and warmth, companionship and love. So don't carry it alone — speak it out, and don't be afraid of troubling others. Who doesn't have their gloomy days? Look after one another, lean on one another a little, and don't worry that others won't understand you: the more you communicate, the better they can come to know you.

The third: act on your ideas promptly. Whatever you think of, do it right away. Everyone shares one fault — perfectionism. Leave one task dragging and your whole day feels off, and then you do everything else badly too. But if you finish each thing as soon as it occurs to you, you get a very good kind of positive feedback: you feel you did that thing well, you feel accomplished and comfortable, clear-headed, everything flowing — and then you think, I mustn't spoil this perfect feeling, so you try hard to do the next thing well too. So build the habit of efficiently executing the ideas in your head.

All right, that's where we'll stop for today. What we talked about, put plainly, is about living with a bit more polish and a bit more dignity; then you'll find you have pulled ahead of 80% of the crowd, and at that point you naturally become a rare and precious item — you no longer have to cling to anyone every day, because people will come and compete for you. That logic feels good and lifts you up just hearing it, doesn't it? So get your life properly in order, and I'll see you next episode. If you like my content, please tap the subscribe button below the progress bar and subscribe to my album. I'm 夏洛 — bye-bye. | PypiClean
/PyStaticConfiguration-0.11.1-py3-none-any.whl/staticconf/config.py | from collections import namedtuple
import hashlib
import logging
import os
import sys
import time
import weakref
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Type
from typing import Union
from staticconf import errors
from staticconf.proxy import ValueProxy
from staticconf.proxy import UndefToken
from staticconf.validation import Validator
if sys.version_info >= (3, 10):
from typing import Protocol
else:
from typing_extensions import Protocol
log = logging.getLogger(__name__)
# Name for the default namespace
DEFAULT = 'DEFAULT'
def remove_by_keys(
dictionary: Dict[str, Any],
keys: Set[str]
) -> List[Tuple[str, Any]]:
def filter_by_keys(item: Tuple[str, Any]) -> bool:
k, _ = item
return k not in keys
return list(filter(filter_by_keys, dictionary.items()))
class ConfigMap:
"""A ConfigMap can be used to wrap a dictionary in your configuration.
It will allow you to retain your mapping structure (and prevent it
from being flattened).
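    A small usage sketch (the keys and values are placeholders)::

        cmap = ConfigMap(host='localhost', port=8080)
        cmap['port']               # 8080
        cmap.get('missing', 42)    # 42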
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.data = dict(*args, **kwargs)
def __getitem__(self, item: str) -> Any:
return self.data[item]
def get(self, item: str, default: Any = None) -> Any:
return self.data.get(item, default)
def __contains__(self, item: str) -> bool:
return item in self.data
def __len__(self) -> int:
return len(self.data)
class ConfigNamespace:
"""A container for related configuration values. Values are stored
using flattened keys which map to values.
Values are added to this container using :mod:`staticconf.loader`. When a
:class:`ConfigNamespace` is created, it persists for the entire life of the
process. Values will stay in the namespace until :func:`clear` is called
to remove them.
To retrieve a namespace, use :func:`get_namespace`.
To access values stored in this namespace use :mod:`staticconf.readers`
or :mod:`staticconf.schema`.
"""
def __init__(self, name: str) -> None:
self.name = name
self.configuration_values: Dict[str, Any] = {}
self.value_proxies: weakref.WeakValueDictionary[int, ValueProxy] = \
weakref.WeakValueDictionary()
def get_name(self) -> str:
return self.name
def get_value_proxies(self) -> List[ValueProxy]:
return list(self.value_proxies.values())
def register_proxy(self, proxy: ValueProxy) -> None:
self.value_proxies[id(proxy)] = proxy
def apply_config_data(
self,
config_data: Dict[str, Any],
error_on_unknown: bool,
error_on_dupe: bool,
log_keys_only: bool = False,
) -> None:
self.validate_keys(
config_data,
error_on_unknown,
log_keys_only=log_keys_only,
)
self.has_duplicate_keys(config_data, error_on_dupe)
self.update_values(config_data)
def update_values(self, *args: Any, **kwargs: Any) -> None:
self.configuration_values.update(*args, **kwargs)
def get_config_values(self) -> Dict[str, Any]:
"""Return all configuration stored in this object as a dict.
"""
return self.configuration_values
def get_config_dict(self) -> Dict[str, Any]:
"""Reconstruct the nested structure of this object's configuration
and return it as a dict.
"""
config_dict: Dict[str, Any] = {}
for dotted_key, value in self.get_config_values().items():
subkeys = dotted_key.split('.')
d = config_dict
for key in subkeys:
d = d.setdefault(key, value if key == subkeys[-1] else {})
return config_dict
def get_known_keys(self) -> Set[str]:
return {vproxy.config_key for vproxy in self.get_value_proxies()}
def validate_keys(
self,
config_data: Dict[str, Any],
error_on_unknown: bool,
log_keys_only: bool = False,
) -> None:
unknown = remove_by_keys(config_data, self.get_known_keys())
if not unknown:
return
if log_keys_only:
unknown_keys = [k for k, _ in unknown]
msg = f"Unexpected value in {self.name} configuration: {unknown_keys}"
else:
msg = f"Unexpected value in {self.name} configuration: {unknown}"
if error_on_unknown:
raise errors.ConfigurationError(msg)
log.info(msg)
def has_duplicate_keys(
self,
config_data: Dict[str, Any], error_on_duplicate: bool
) -> bool:
args = config_data, self.configuration_values, error_on_duplicate
return has_duplicate_keys(*args)
def get(self, item: str, default: Any = None) -> Any:
return self.configuration_values.get(item, default)
def __getitem__(self, item: str) -> Any:
return self.configuration_values[item]
def __setitem__(self, key: str, value: Any) -> None:
self.configuration_values[key] = value
def __contains__(self, item: str) -> bool:
return item in self.configuration_values
def clear(self) -> None:
"""Remove all values from the namespace."""
self.configuration_values.clear()
def _reset(self) -> None:
self.clear()
self.value_proxies.clear()
def __str__(self) -> str:
return f"{type(self).__name__}({self.name})"
configuration_namespaces = {DEFAULT: ConfigNamespace(DEFAULT)}
KeyDescription = namedtuple('KeyDescription', 'name validator default help')
def get_namespaces_from_names(
name: str,
all_names: bool
) -> Iterator[ConfigNamespace]:
"""Return a generator which yields namespace objects."""
names = configuration_namespaces.keys() if all_names else [name]
for name in names:
yield get_namespace(name)
def get_namespace(name: str) -> ConfigNamespace:
"""Return a :class:`ConfigNamespace` by name, creating the
namespace if it does not exist.
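    A small usage sketch (the namespace name and keys are placeholders)::

        ns = get_namespace('my_app')
        ns.update_values({'db.host': 'localhost'})
        ns.get('db.host')          # 'localhost'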
"""
if name not in configuration_namespaces:
configuration_namespaces[name] = ConfigNamespace(name)
return configuration_namespaces[name]
def reload(name: str = DEFAULT, all_names: bool = False) -> None:
"""Reload one or all :class:`ConfigNamespace`. Reload clears the cache of
:mod:`staticconf.schema` and :mod:`staticconf.getters`, allowing them to
pickup the latest values in the namespace.
Defaults to reloading just the DEFAULT namespace.
:param name: the name of the :class:`ConfigNamespace` to reload
:param all_names: If True, reload all namespaces, and ignore `name`
"""
for namespace in get_namespaces_from_names(name, all_names):
for value_proxy in namespace.get_value_proxies():
value_proxy.reset()
def validate(name: str = DEFAULT, all_names: bool = False) -> None:
"""Validate all registered keys after loading configuration.
Missing values or values which do not pass validation raise
:class:`staticconf.errors.ConfigurationError`. By default only validates
the `DEFAULT` namespace.
:param name: the namespace to validate
:type name: string
:param all_names: if True validates all namespaces and ignores `name`
:type all_names: boolean
"""
for namespace in get_namespaces_from_names(name, all_names):
all(value_proxy.get_value() for value_proxy in namespace.get_value_proxies())
class ConfigHelp:
"""Register and display help messages about config keys."""
def __init__(self) -> None:
self.descriptions: Dict[str, List[KeyDescription]] = {}
def add(
self,
name: str,
validator: Validator,
default: Any,
namespace: str,
help: Optional[str]
) -> None:
desc = KeyDescription(name, validator, default, help)
self.descriptions.setdefault(namespace, []).append(desc)
def view_help(self) -> str:
"""Return a help message describing all the statically configured keys.
"""
def format_desc(desc: KeyDescription) -> str:
return "{} (Type: {}, Default: {})\n{}".format(
desc.name,
desc.validator.__name__.replace('validate_', ''),
desc.default,
desc.help or '')
def format_namespace(key: str, desc_list: List[KeyDescription]) -> str:
return "\nNamespace: {}\n{}".format(
key,
'\n'.join(sorted(format_desc(desc) for desc in desc_list)))
def namespace_cmp(item: Tuple[str, Any]) -> str:
name, _ = item
return chr(0) if name == DEFAULT else name
return '\n'.join(format_namespace(*desc) for desc in
sorted(self.descriptions.items(),
key=namespace_cmp))
def clear(self) -> None:
self.descriptions.clear()
config_help = ConfigHelp()
view_help = config_help.view_help
def _reset() -> None:
"""Used for internal testing."""
for namespace in configuration_namespaces.values():
namespace._reset()
config_help.clear()
def has_duplicate_keys(
config_data: Dict[str, Any],
base_conf: Dict[str, Any],
raise_error: bool
) -> bool:
"""Compare two dictionaries for duplicate keys. if raise_error is True
then raise on exception, otherwise log return True."""
duplicate_keys = set(base_conf) & set(config_data)
if not duplicate_keys:
return False
msg = "Duplicate keys in config: %s" % duplicate_keys
if raise_error:
raise errors.ConfigurationError(msg)
log.info(msg)
return True
class ConfigurationWatcher:
"""Watches a file for modification and reloads the configuration
when it's modified. Accepts a min_interval to throttle checks.
The default :func:`reload()` operation is to reload all namespaces. To
only reload a specific namespace use a :class:`ReloadCallbackChain`
for the `reloader`.
.. seealso::
:func:`ConfigFacade.load` which provides a more concise interface
for the common case.
Usage:
.. code-block:: python
import staticconf
from staticconf import config
def build_configuration(filename, namespace):
config_loader = partial(staticconf.YamlConfiguration,
filename, namespace=namespace)
reloader = config.ReloadCallbackChain(namespace)
return config.ConfigurationWatcher(
config_loader, filename, min_interval=2, reloader=reloader)
config_watcher = build_configuration('config.yaml', 'my_namespace')
# Load the initial configuration
config_watcher.config_loader()
# Do some work
for item in work:
config_watcher.reload_if_changed()
...
:param config_loader: a function which takes no arguments. It is called
by :func:`reload_if_changed` if the file has been modified
:param filenames: a filename or list of filenames to watch for modifications
:param min_interval: minimum number of seconds to wait between calls to
:func:`os.path.getmtime` to check if a file has been modified.
:param reloader: a function which is called after `config_loader` when a
file has been modified. Defaults to an empty
:class:`ReloadCallbackChain`
:param comparators: a list of classes which support the
:class:`IComparator` interface which are used to determine if a config
file has been modified. Defaults to :class:`MTimeComparator`.
"""
def __init__(
self,
config_loader: Callable[[], Dict[str, Any]],
filenames: Union[str, List[str]],
min_interval: float = 0,
reloader: Optional["Reloader"] = None,
comparators: Optional[List[Type["IComparator"]]] = None,
) -> None:
self.config_loader = config_loader
self.filenames = self.get_filename_list(filenames)
self.min_interval = min_interval
self.last_check = time.time()
self.reloader = reloader or ReloadCallbackChain(all_names=True)
comparators = comparators or [MTimeComparator]
self.comparators = [comp(self.filenames) for comp in comparators]
def get_filename_list(self, filenames: Union[str, List[str]]) -> List[str]:
if isinstance(filenames, str):
filenames = [filenames]
filenames = sorted(os.path.abspath(name) for name in filenames)
if not filenames:
raise ValueError(
"ConfigurationWatcher requires at least one filename to watch")
return filenames
@property
def should_check(self) -> bool:
return self.last_check + self.min_interval <= time.time()
def reload_if_changed(self, force: bool = False) -> Optional[Dict[str, Any]]:
"""If the file(s) being watched by this object have changed,
their configuration will be loaded again using `config_loader`.
Otherwise this is a noop.
:param force: If True ignore the `min_interval` and proceed to
file modified comparisons. To force a reload use
:func:`reload` directly.
"""
if (force or self.should_check) and self.file_modified():
return self.reload()
return None
def file_modified(self) -> bool:
self.last_check = time.time()
return any(comp.has_changed() for comp in self.comparators)
def reload(self) -> Dict[str, Any]:
config_dict = self.config_loader()
self.reloader()
return config_dict
def get_reloader(self) -> "Reloader":
return self.reloader
def load_config(self) -> Dict[str, Any]:
return self.config_loader()
class IComparator:
"""Interface for a comparator which is used by :class:`ConfigurationWatcher`
to determine if a file has been modified since the last check. A comparator
is used to reduce the work required to reload configuration. Comparators
should implement a mechanism that is relatively efficient (and scalable),
so it can be performed frequently.
:param filenames: A list of absolute paths to configuration files.
"""
def __init__(self, filenames: List[str]) -> None:
pass
def has_changed(self) -> bool:
"""Returns True if any of the files have been modified since the last
call to :func:`has_changed`. Returns False otherwise.
"""
pass
class InodeComparator(IComparator):
"""Compare files by inode and device number. This is a good comparator to
use when your files can change multiple times per second.
"""
def __init__(self, filenames: List[str]) -> None:
self.filenames = filenames
self.inodes = self.get_inodes()
def get_inodes(self) -> List[Tuple[int, int]]:
def get_inode(stbuf: os.stat_result) -> Tuple[int, int]:
return stbuf.st_dev, stbuf.st_ino
return [get_inode(os.stat(filename)) for filename in self.filenames]
def has_changed(self) -> bool:
last_inodes, self.inodes = self.inodes, self.get_inodes()
return last_inodes != self.inodes
def build_compare_func(
err_logger: Optional[Callable[[str], None]] = None
) -> Callable[[str], float]:
"""Returns a compare_func that can be passed to MTimeComparator.
The returned compare_func first tries os.path.getmtime(filename),
then calls err_logger(filename) if that fails. If err_logger is None,
then it does nothing. err_logger is always called within the context of
an OSError raised by os.path.getmtime(filename). Information on this
error can be retrieved by calling sys.exc_info inside of err_logger."""
def compare_func(filename: str) -> float:
try:
return os.path.getmtime(filename)
except OSError:
if err_logger is not None:
err_logger(filename)
return -1
return compare_func
class MTimeComparator(IComparator):
"""Compare files by modified time, or using compare_func,
if it is not None.
.. note::
        Most filesystems only store modified time with second granularity
so multiple changes within the same second can be ignored.
"""
def __init__(
self,
filenames: List[str],
compare_func: Optional[Callable[[str], bool]] = None
) -> None:
self.compare_func = (os.path.getmtime if compare_func is None
else compare_func)
self.filenames_mtimes = {
filename: self.compare_func(filename) for filename in filenames
}
def has_changed(self) -> bool:
for filename, compare_val in self.filenames_mtimes.items():
current_compare_val = self.compare_func(filename)
if compare_val != current_compare_val:
self.filenames_mtimes[filename] = current_compare_val
return True
return False
class MD5Comparator(IComparator):
"""Compare files by md5 hash of their contents. This comparator will be
slower for larger files, but is more resilient to modifications which only
change mtime, but not the files contents.
"""
    def __init__(self, filenames: List[str]) -> None:
self.filenames = filenames
self.hashes = self.get_hashes()
def get_hashes(self) -> List[bytes]:
def build_hash(filename: str) -> bytes:
hasher = hashlib.md5()
with open(filename, 'rb') as fh:
hasher.update(fh.read())
return hasher.digest()
return [build_hash(filename) for filename in self.filenames]
def has_changed(self) -> bool:
last_hashes, self.hashes = self.hashes, self.get_hashes()
return last_hashes != self.hashes
class Reloader(Protocol):
def __init__(
self,
namespace: str = DEFAULT,
all_names: bool = False,
        callbacks: Optional[List[Tuple[str, Callable[[], None]]]] = None,
):
...
def add(self, identifier: str, callback: Callable[[], None]) -> None:
...
def remove(self, identifier: str) -> None:
...
def __call__(self) -> None:
...
class ReloadCallbackChain:
"""A chain of callbacks which will be triggered after configuration is
reloaded. Designed to work with :class:`ConfigurationWatcher`.
When this class is called it performs two operations:
* calls :func:`reload` on the `namespace`
* calls all attached callbacks
Usage:
.. code-block:: python
chain = ReloadCallbackChain()
chain.add('some_id', callback_foo)
chain.add('other_id', other_callback)
...
# some time later
chain.remove('some_id')
:param namespace: the name of the namespace to :func:`reload`
:param all_names: if True :func:`reload` all namespaces and ignore the
`namespace` param. Defaults to False
:param callbacks: initial list of tuples to add to the callback chain
"""
def __init__(
self,
namespace: str = DEFAULT,
all_names: bool = False,
        callbacks: Optional[List[Tuple[str, Callable[[], None]]]] = None,
):
self.namespace = namespace
self.all_names = all_names
self.callbacks = dict(callbacks or ())
def add(self, identifier: str, callback: Callable[[], None]) -> None:
self.callbacks[identifier] = callback
def remove(self, identifier: str) -> None:
del self.callbacks[identifier]
def __call__(self) -> None:
reload(name=self.namespace, all_names=self.all_names)
for callback in self.callbacks.values():
callback()
class LoadFunc(Protocol):
def __call__(self, filename: str, namespace: str) -> Dict[str, Any]:
...
def build_loader_callable(
load_func: LoadFunc,
filename: str,
namespace: str
) -> Callable[[], Dict[str, Any]]:
def load_configuration() -> Dict[str, Any]:
get_namespace(namespace).clear()
return load_func(filename, namespace=namespace)
return load_configuration
class ConfigFacade:
"""A facade around a :class:`ConfigurationWatcher` and a
:class:`ReloadCallbackChain`. See :func:`ConfigFacade.load`.
When a :class:`ConfigFacade` is loaded it will clear the namespace of
all configuration and load the file into the namespace. If this is not
the behaviour you want, use a :class:`ConfigurationWatcher` instead.
Usage:
.. code-block:: python
import staticconf
watcher = staticconf.ConfigFacade.load(
'config.yaml', # Filename or list of filenames to watch
'my_namespace',
staticconf.YamlConfiguration, # Callable which takes the filename
min_interval=3 # Wait at least 3 seconds before checking modified time
)
watcher.add_callback('identifier', do_this_after_reload)
watcher.reload_if_changed()
"""
def __init__(self, watcher: ConfigurationWatcher) -> None:
self.watcher = watcher
self.callback_chain = watcher.get_reloader()
@classmethod
def load(
cls,
filename: str,
namespace: str,
loader_func: LoadFunc,
min_interval: float = 0,
comparators: Optional[List[Type[IComparator]]] = None,
) -> "ConfigFacade":
"""Create a new :class:`ConfigurationWatcher` and load the initial
configuration by calling `loader_func`.
:param filename: a filename or list of filenames to monitor for changes
:param namespace: the name of a namespace to use when loading
configuration. All config data from `filename` will
end up in a :class:`ConfigNamespace` with this name
:param loader_func: a function which accepts two arguments and uses
loader functions from :mod:`staticconf.loader` to
load configuration data into a namespace. The
arguments are `filename` and `namespace`
:param min_interval: minimum number of seconds to wait between calls to
:func:`os.path.getmtime` to check if a file has
been modified.
:param comparators: a list of classes which support the
:class:`IComparator` interface which are used to determine if a config
file has been modified. See ConfigurationWatcher::__init__.
:returns: a :class:`ConfigFacade`
"""
watcher = ConfigurationWatcher(
build_loader_callable(loader_func, filename, namespace=namespace),
filename,
min_interval=min_interval,
reloader=ReloadCallbackChain(namespace=namespace),
comparators=comparators,
)
watcher.load_config()
return cls(watcher)
def add_callback(self, identifier: str, callback: Callable[[], None]) -> None:
self.callback_chain.add(identifier, callback)
def reload_if_changed(self, force: bool = False) -> None:
"""See :func:`ConfigurationWatcher.reload_if_changed` """
self.watcher.reload_if_changed(force=force)
class ConfigGetValue(Protocol):
def __call__(
self,
key_name: str,
default: Any = UndefToken,
help_or_namespace: Optional[str] = None,
namespace_or_unused: Optional[str] = None
) -> ValueProxy:
...
class NameFactory(Protocol):
@staticmethod
def get_name(name: str) -> str:
...
@staticmethod
def get_list_of_name(validator_name: str) -> str:
... | PypiClean |
/masstransitpython_byQ96-0.0.5-py3-none-any.whl/masstransitpython/RabbitMQReceiver.py | from pika import BlockingConnection
from pika import ConnectionParameters
import logging
class MetaClass(type):
_instance = {}
def __call__(cls, *args, **kwargs):
""" Singleton Pattern """
if cls not in cls._instance:
cls._instance[cls] = super(MetaClass, cls).__call__(*args, **kwargs)
return cls._instance[cls]
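# Because of the metaclass above, constructing RabbitMQReceiver more than once
# returns the same cached instance (illustrative sketch; `config` is assumed to
# be a RabbitMQConfiguration object):
#
#     receiver_a = RabbitMQReceiver(config, 'my-exchange')
#     receiver_b = RabbitMQReceiver(config, 'other-exchange')
#     assert receiver_a is receiver_b  # second call reuses the first instance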
class RabbitMQReceiver(metaclass=MetaClass):
__slots__ = ["_configuration", "_connection", "_channel", "_queue", "_routing_key", "_exchange",
"_on_message_callback"]
def __init__(self, configuration, exchange, routing_key=''):
"""
        Create RabbitMQ Receiver
        :param configuration: RabbitMQConfiguration object
        :param exchange: name of the exchange the queue is bound to
        :param routing_key: optional routing key used when binding the queue
"""
self._configuration = configuration
self._connection = BlockingConnection(ConnectionParameters(host=self._configuration.host,
port=self._configuration.port,
virtual_host=self._configuration.virtual_host,
credentials=self._configuration.credentials))
self._channel = self._connection.channel()
self._queue = self._configuration.queue
self._routing_key = routing_key
self._exchange = exchange
self._channel.queue_declare(queue=self._queue)
self._channel.exchange_declare(exchange=exchange,
exchange_type='fanout',
durable=True)
self._channel.queue_bind(queue=self._queue,
exchange=self._exchange,
routing_key=self._routing_key)
self._on_message_callback = None
def add_on_message_callback(self, on_message_callback):
"""
        Register the callback invoked for each consumed message
        :param on_message_callback: function where the message is consumed
:return: None
"""
self._on_message_callback = on_message_callback
def start_consuming(self):
""" Start consumer with earlier defined callback """
logging.info(f"Listening to {self._queue} queue\n")
self._channel.basic_consume(queue=self._queue,
on_message_callback=self._on_message_callback,
auto_ack=True)
self._channel.start_consuming() | PypiClean |
/Modhex-0.1.tar.gz/Modhex-0.1/README.txt | modhex
======
The Yubikey is a one-time password device that acts as a USB
keyboard, emitting a unique sequence of keycodes each time the button
is pressed. These codes produce different characters depending on
your keyboard layout. This can be frustrating if your keyboard layout
is incompatible with the Yubico server.
`modhex.translate(otp)` compares the unicode output of a Yubikey
against the characters the Yubikey would emit in a variety of keyboard
layouts, returning the set of possible translations. In the likely
case `len(set(otp)) == 16`, almost every keyboard layout has an
unambiguous translation into Yubico-compatible modhex, i.e. what the
Yubikey types under the QWERTY keyboard layout.
>>> import modhex
>>> modhex.translate(u"jjjjjjjjnhe.ngcgjeiuujjjdtgihjuecyixinxunkhj")
set([u'ccccccccljdeluiucdgffccchkugjcfditgbglbflvjc'])
>>> modhex.translate(u"jjjjjjjjnhe.ngcgjeiuujjjdtgihjuecyixinxunkhj",
... modhex.HEX)
set([u'00000000a823ae7e0254400069e580427d515a14af80'])
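If the OTP uses fewer than 16 distinct characters the translation can be
ambiguous, in which case the returned set holds more than one candidate.
A simple guard (illustrative only, not part of the package):
>>> candidates = modhex.translate(otp)
>>> if len(candidates) == 1:
...     decoded = candidates.pop()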
| PypiClean |
/PGPy-0.6.0.tar.gz/PGPy-0.6.0/pgpy/constants.py | import bz2
import hashlib
import imghdr
import os
import zlib
import warnings
from collections import namedtuple
from enum import Enum
from enum import IntEnum
from enum import IntFlag
from pyasn1.type.univ import ObjectIdentifier
from cryptography.hazmat.backends import openssl
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.ciphers import algorithms
from .types import FlagEnum
from .decorators import classproperty
from ._curves import BrainpoolP256R1, BrainpoolP384R1, BrainpoolP512R1, X25519, Ed25519
__all__ = [
'Backend',
'EllipticCurveOID',
'ECPointFormat',
'PacketTag',
'SymmetricKeyAlgorithm',
'PubKeyAlgorithm',
'CompressionAlgorithm',
'HashAlgorithm',
'RevocationReason',
'ImageEncoding',
'SignatureType',
'KeyServerPreferences',
'S2KGNUExtension',
'SecurityIssues',
'String2KeyType',
'TrustLevel',
'KeyFlags',
'Features',
'RevocationKeyClass',
'NotationDataFlags',
'TrustFlags',
]
# this is 50 KiB
_hashtunedata = bytearray([10, 11, 12, 13, 14, 15, 16, 17] * 128 * 50)
class Backend(Enum):
OpenSSL = openssl.backend
class EllipticCurveOID(Enum):
"""OIDs for supported elliptic curves."""
# these are specified as:
# id = (oid, curve)
Invalid = ('', )
#: DJB's fast elliptic curve
Curve25519 = ('1.3.6.1.4.1.3029.1.5.1', X25519)
#: Twisted Edwards variant of Curve25519
Ed25519 = ('1.3.6.1.4.1.11591.15.1', Ed25519)
#: NIST P-256, also known as SECG curve secp256r1
NIST_P256 = ('1.2.840.10045.3.1.7', ec.SECP256R1)
#: NIST P-384, also known as SECG curve secp384r1
NIST_P384 = ('1.3.132.0.34', ec.SECP384R1)
#: NIST P-521, also known as SECG curve secp521r1
NIST_P521 = ('1.3.132.0.35', ec.SECP521R1)
#: Brainpool Standard Curve, 256-bit
#:
#: .. note::
#: Requires OpenSSL >= 1.0.2
Brainpool_P256 = ('1.3.36.3.3.2.8.1.1.7', BrainpoolP256R1)
#: Brainpool Standard Curve, 384-bit
#:
#: .. note::
#: Requires OpenSSL >= 1.0.2
Brainpool_P384 = ('1.3.36.3.3.2.8.1.1.11', BrainpoolP384R1)
#: Brainpool Standard Curve, 512-bit
#:
#: .. note::
#: Requires OpenSSL >= 1.0.2
Brainpool_P512 = ('1.3.36.3.3.2.8.1.1.13', BrainpoolP512R1)
#: SECG curve secp256k1
SECP256K1 = ('1.3.132.0.10', ec.SECP256K1)
def __new__(cls, oid, curve=None):
# preprocessing stage for enum members:
# - set enum_member.value to ObjectIdentifier(oid)
# - if curve is not None and curve.name is in ec._CURVE_TYPES, set enum_member.curve to curve
# - otherwise, set enum_member.curve to None
obj = object.__new__(cls)
obj._value_ = ObjectIdentifier(oid)
obj.curve = None
if curve is not None and curve.name in ec._CURVE_TYPES:
obj.curve = curve
return obj
@property
def can_gen(self):
return self.curve is not None
@property
def key_size(self):
if self.curve is not None:
return self.curve.key_size
@property
def kdf_halg(self):
# return the hash algorithm to specify in the KDF fields when generating a key
algs = {256: HashAlgorithm.SHA256,
384: HashAlgorithm.SHA384,
512: HashAlgorithm.SHA512,
521: HashAlgorithm.SHA512}
return algs.get(self.key_size, None)
@property
def kek_alg(self):
# return the AES algorithm to specify in the KDF fields when generating a key
algs = {256: SymmetricKeyAlgorithm.AES128,
384: SymmetricKeyAlgorithm.AES192,
512: SymmetricKeyAlgorithm.AES256,
521: SymmetricKeyAlgorithm.AES256}
return algs.get(self.key_size, None)
class ECPointFormat(IntEnum):
# https://tools.ietf.org/html/draft-ietf-openpgp-rfc4880bis-07#appendix-B
Standard = 0x04
Native = 0x40
OnlyX = 0x41
OnlyY = 0x42
class PacketTag(IntEnum):
Invalid = 0
PublicKeyEncryptedSessionKey = 1
Signature = 2
SymmetricKeyEncryptedSessionKey = 3
OnePassSignature = 4
SecretKey = 5
PublicKey = 6
SecretSubKey = 7
CompressedData = 8
SymmetricallyEncryptedData = 9
Marker = 10
LiteralData = 11
Trust = 12
UserID = 13
PublicSubKey = 14
UserAttribute = 17
SymmetricallyEncryptedIntegrityProtectedData = 18
ModificationDetectionCode = 19
class SymmetricKeyAlgorithm(IntEnum):
"""Supported symmetric key algorithms."""
Plaintext = 0x00
#: .. warning:: IDEA is insecure. PGPy only allows it to be used for decryption, not encryption!
IDEA = 0x01
#: Triple-DES with 168-bit key derived from 192
TripleDES = 0x02
#: CAST5 (or CAST-128) with 128-bit key
CAST5 = 0x03
#: Blowfish with 128-bit key and 16 rounds
Blowfish = 0x04
#: AES with 128-bit key
AES128 = 0x07
#: AES with 192-bit key
AES192 = 0x08
#: AES with 256-bit key
AES256 = 0x09
# Twofish with 256-bit key - not currently supported
Twofish256 = 0x0A
#: Camellia with 128-bit key
Camellia128 = 0x0B
#: Camellia with 192-bit key
Camellia192 = 0x0C
#: Camellia with 256-bit key
Camellia256 = 0x0D
@property
def cipher(self):
bs = {SymmetricKeyAlgorithm.IDEA: algorithms.IDEA,
SymmetricKeyAlgorithm.TripleDES: algorithms.TripleDES,
SymmetricKeyAlgorithm.CAST5: algorithms.CAST5,
SymmetricKeyAlgorithm.Blowfish: algorithms.Blowfish,
SymmetricKeyAlgorithm.AES128: algorithms.AES,
SymmetricKeyAlgorithm.AES192: algorithms.AES,
SymmetricKeyAlgorithm.AES256: algorithms.AES,
SymmetricKeyAlgorithm.Twofish256: namedtuple('Twofish256', ['block_size'])(block_size=128),
SymmetricKeyAlgorithm.Camellia128: algorithms.Camellia,
SymmetricKeyAlgorithm.Camellia192: algorithms.Camellia,
SymmetricKeyAlgorithm.Camellia256: algorithms.Camellia}
if self in bs:
return bs[self]
raise NotImplementedError(repr(self))
@property
def is_supported(self):
return callable(self.cipher)
@property
def is_insecure(self):
insecure_ciphers = {SymmetricKeyAlgorithm.IDEA}
return self in insecure_ciphers
@property
def block_size(self):
return self.cipher.block_size
@property
def key_size(self):
ks = {SymmetricKeyAlgorithm.IDEA: 128,
SymmetricKeyAlgorithm.TripleDES: 192,
SymmetricKeyAlgorithm.CAST5: 128,
SymmetricKeyAlgorithm.Blowfish: 128,
SymmetricKeyAlgorithm.AES128: 128,
SymmetricKeyAlgorithm.AES192: 192,
SymmetricKeyAlgorithm.AES256: 256,
SymmetricKeyAlgorithm.Twofish256: 256,
SymmetricKeyAlgorithm.Camellia128: 128,
SymmetricKeyAlgorithm.Camellia192: 192,
SymmetricKeyAlgorithm.Camellia256: 256}
if self in ks:
return ks[self]
raise NotImplementedError(repr(self))
def gen_iv(self):
return os.urandom(self.block_size // 8)
def gen_key(self):
return os.urandom(self.key_size // 8)
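    # Illustrative use of the two helpers above (a sketch, not part of the API docs):
    #
    #     alg = SymmetricKeyAlgorithm.AES256
    #     key = alg.gen_key()  # 32 random bytes (256-bit key)
    #     iv = alg.gen_iv()    # 16 random bytes (AES block size is 128 bits)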
class PubKeyAlgorithm(IntEnum):
"""Supported public key algorithms."""
Invalid = 0x00
#: Signifies that a key is an RSA key.
RSAEncryptOrSign = 0x01
RSAEncrypt = 0x02 # deprecated
RSASign = 0x03 # deprecated
#: Signifies that a key is an ElGamal key.
ElGamal = 0x10
#: Signifies that a key is a DSA key.
DSA = 0x11
#: Signifies that a key is an ECDH key.
ECDH = 0x12
#: Signifies that a key is an ECDSA key.
ECDSA = 0x13
FormerlyElGamalEncryptOrSign = 0x14 # deprecated - do not generate
DiffieHellman = 0x15 # X9.42
EdDSA = 0x16 # https://tools.ietf.org/html/draft-koch-eddsa-for-openpgp-04
@property
def can_gen(self):
return self in {PubKeyAlgorithm.RSAEncryptOrSign,
PubKeyAlgorithm.DSA,
PubKeyAlgorithm.ECDSA,
PubKeyAlgorithm.ECDH,
PubKeyAlgorithm.EdDSA}
@property
def can_encrypt(self): # pragma: no cover
return self in {PubKeyAlgorithm.RSAEncryptOrSign, PubKeyAlgorithm.ElGamal, PubKeyAlgorithm.ECDH}
@property
def can_sign(self):
return self in {PubKeyAlgorithm.RSAEncryptOrSign, PubKeyAlgorithm.DSA, PubKeyAlgorithm.ECDSA, PubKeyAlgorithm.EdDSA}
@property
def deprecated(self):
return self in {PubKeyAlgorithm.RSAEncrypt,
PubKeyAlgorithm.RSASign,
PubKeyAlgorithm.FormerlyElGamalEncryptOrSign}
def validate_params(self, size):
min_size = MINIMUM_ASYMMETRIC_KEY_LENGTHS.get(self)
if min_size is not None:
if isinstance(min_size, set):
# ECC
curve = size
safe_curves = min_size
if curve in safe_curves:
return SecurityIssues.OK
else:
return SecurityIssues.InsecureCurve
else:
# not ECC
if size >= min_size:
return SecurityIssues.OK
else:
return SecurityIssues.AsymmetricKeyLengthIsTooShort
# min_size is None
return SecurityIssues.BrokenAsymmetricFunc
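# Sketch of how validate_params resolves the two parameter kinds (illustrative only):
#
#     PubKeyAlgorithm.RSAEncryptOrSign.validate_params(1024)
#         # -> SecurityIssues.AsymmetricKeyLengthIsTooShort (below the 2048-bit minimum)
#     PubKeyAlgorithm.EdDSA.validate_params(EllipticCurveOID.Ed25519)
#         # -> SecurityIssues.OK (curve is listed in SAFE_CURVES)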
class CompressionAlgorithm(IntEnum):
"""Supported compression algorithms."""
#: No compression
Uncompressed = 0x00
#: ZIP DEFLATE
ZIP = 0x01
#: ZIP DEFLATE with zlib headers
ZLIB = 0x02
#: Bzip2
BZ2 = 0x03
def compress(self, data):
if self is CompressionAlgorithm.Uncompressed:
return data
if self is CompressionAlgorithm.ZIP:
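            # zlib.compress returns a zlib stream; strip the 2-byte header and the
            # 4-byte adler32 trailer to get the raw DEFLATE data OpenPGP expects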
return zlib.compress(data)[2:-4]
if self is CompressionAlgorithm.ZLIB:
return zlib.compress(data)
if self is CompressionAlgorithm.BZ2:
return bz2.compress(data)
raise NotImplementedError(self)
def decompress(self, data):
if self is CompressionAlgorithm.Uncompressed:
return data
if self is CompressionAlgorithm.ZIP:
return zlib.decompress(data, -15)
if self is CompressionAlgorithm.ZLIB:
return zlib.decompress(data)
if self is CompressionAlgorithm.BZ2:
return bz2.decompress(data)
raise NotImplementedError(self)
class HashAlgorithm(IntEnum):
"""Supported hash algorithms."""
Invalid = 0x00
MD5 = 0x01
SHA1 = 0x02
RIPEMD160 = 0x03
_reserved_1 = 0x04
_reserved_2 = 0x05
_reserved_3 = 0x06
_reserved_4 = 0x07
SHA256 = 0x08
SHA384 = 0x09
SHA512 = 0x0A
SHA224 = 0x0B
#SHA3_256 = 13
#SHA3_384 = 14
#SHA3_512 = 15
def __init__(self, *args):
super(self.__class__, self).__init__()
self._tuned_count = 255
@property
def hasher(self):
return hashlib.new(self.name)
@property
def digest_size(self):
return self.hasher.digest_size
@property
def tuned_count(self):
return self._tuned_count
@property
def is_supported(self):
return True
@property
def is_second_preimage_resistant(self):
return self in {HashAlgorithm.SHA1}
@property
def is_collision_resistant(self):
return self in {HashAlgorithm.SHA256, HashAlgorithm.SHA384, HashAlgorithm.SHA512}
@property
def is_considered_secure(self):
if self.is_collision_resistant:
return SecurityIssues.OK
warnings.warn('Hash function {hash} is not considered collision resistant'.format(hash=repr(self)))
issues = SecurityIssues.HashFunctionNotCollisionResistant
if not self.is_second_preimage_resistant:
issues |= SecurityIssues.HashFunctionNotSecondPreimageResistant
return issues
class RevocationReason(IntEnum):
"""Reasons explaining why a key or certificate was revoked."""
#: No reason was specified. This is the default reason.
NotSpecified = 0x00
#: The key was superseded by a new key. Only meaningful when revoking a key.
Superseded = 0x01
#: Key material has been compromised. Only meaningful when revoking a key.
Compromised = 0x02
#: Key is retired and no longer used. Only meaningful when revoking a key.
Retired = 0x03
#: User ID information is no longer valid. Only meaningful when revoking a certification of a user id.
UserID = 0x20
class ImageEncoding(IntEnum):
Unknown = 0x00
JPEG = 0x01
@classmethod
def encodingof(cls, imagebytes):
type = imghdr.what(None, h=imagebytes)
if type == 'jpeg':
return ImageEncoding.JPEG
return ImageEncoding.Unknown # pragma: no cover
class SignatureType(IntEnum):
"""Types of signatures that can be found in a Signature packet."""
#: The signer either owns this document, created it, or certifies that it
#: has not been modified.
BinaryDocument = 0x00
#: The signer either owns this document, created it, or certifies that it
#: has not been modified. The signature is calculated over the text
#: data with its line endings converted to ``<CR><LF>``.
CanonicalDocument = 0x01
#: This signature is a signature of only its own subpacket contents.
#: It is calculated identically to a signature over a zero-length
#: ``BinaryDocument``.
Standalone = 0x02
#: The issuer of this certification does not make any particular
#: claim as to how well the certifier has checked that the owner
#: of the key is in fact the person described by the User ID.
Generic_Cert = 0x10
#: The issuer of this certification has not done any verification of
#: the claim that the owner of this key is the User ID specified.
Persona_Cert = 0x11
#: The issuer of this certification has done some casual
#: verification of the claim of identity.
Casual_Cert = 0x12
#: The issuer of this certification has done substantial
#: verification of the claim of identity.
Positive_Cert = 0x13
#: This signature is issued by the primary key over itself and its user ID (or user attribute).
#: See `draft-ietf-openpgp-rfc4880bis-08 <https://tools.ietf.org/html/draft-ietf-openpgp-rfc4880bis-08#section-5.2.1>`_
Attestation = 0x16
#: This signature is a statement by the top-level signing key that
#: indicates that it owns the subkey. This signature is calculated
#: directly on the primary key and subkey, and not on any User ID or
#: other packets.
Subkey_Binding = 0x18
#: This signature is a statement by a signing subkey, indicating
#: that it is owned by the primary key and subkey. This signature
#: is calculated the same way as a ``Subkey_Binding`` signature.
PrimaryKey_Binding = 0x19
#: A signature calculated directly on a key. It binds the
#: information in the Signature subpackets to the key, and is
#: appropriate to be used for subpackets that provide information
#: about the key, such as the Revocation Key subpacket. It is also
#: appropriate for statements that non-self certifiers want to make
#: about the key itself, rather than the binding between a key and a
#: name.
DirectlyOnKey = 0x1F
#: A signature calculated directly on the key being revoked.
#: Only revocation signatures by the key being revoked, or by an
#: authorized revocation key, should be considered valid revocation signatures.
KeyRevocation = 0x20
#: A signature calculated directly on the subkey being revoked.
#: Only revocation signatures by the top-level signature key that is bound to this subkey,
#: or by an authorized revocation key, should be considered valid revocation signatures.
SubkeyRevocation = 0x28
#: This signature revokes an earlier User ID certification signature or direct-key signature.
#: It should be issued by the same key that issued the revoked signature or an authorized revocation key.
#: The signature is computed over the same data as the certificate that it revokes.
CertRevocation = 0x30
#: This signature is only meaningful for the timestamp contained in it.
Timestamp = 0x40
#: This signature is a signature over some other OpenPGP Signature
#: packet(s). It is analogous to a notary seal on the signed data.
ThirdParty_Confirmation = 0x50
class KeyServerPreferences(FlagEnum):
NoModify = 0x80
class String2KeyType(IntEnum):
Simple = 0
Salted = 1
Reserved = 2
Iterated = 3
GNUExtension = 101
class S2KGNUExtension(IntEnum):
NoSecret = 1
Smartcard = 2
class TrustLevel(IntEnum):
Unknown = 0
Expired = 1
Undefined = 2
Never = 3
Marginal = 4
Fully = 5
Ultimate = 6
class KeyFlags(FlagEnum):
"""Flags that determine a key's capabilities."""
#: Signifies that a key may be used to certify keys and user ids. Primary keys always have this, even if it is not specified.
Certify = 0x01
#: Signifies that a key may be used to sign messages and documents.
Sign = 0x02
#: Signifies that a key may be used to encrypt messages.
EncryptCommunications = 0x04
#: Signifies that a key may be used to encrypt storage. Currently equivalent to :py:obj:`~pgpy.constants.EncryptCommunications`.
EncryptStorage = 0x08
#: Signifies that the private component of a given key may have been split by a secret-sharing mechanism. Split
#: keys are not currently supported by PGPy.
Split = 0x10
#: Signifies that a key may be used for authentication.
Authentication = 0x20
#: Signifies that the private component of a key may be in the possession of more than one person.
MultiPerson = 0x80
class Features(FlagEnum):
ModificationDetection = 0x01
@classproperty
def pgpy_features(cls):
return Features.ModificationDetection
class RevocationKeyClass(FlagEnum):
Sensitive = 0x40
Normal = 0x80
class NotationDataFlags(FlagEnum):
HumanReadable = 0x80
class TrustFlags(FlagEnum):
Revoked = 0x20
SubRevoked = 0x40
Disabled = 0x80
PendingCheck = 0x100
class SecurityIssues(IntFlag):
OK = 0
WrongSig = (1 << 0)
Expired = (1 << 1)
Disabled = (1 << 2)
Revoked = (1 << 3)
Invalid = (1 << 4)
BrokenAsymmetricFunc = (1 << 5)
HashFunctionNotCollisionResistant = (1 << 6)
HashFunctionNotSecondPreimageResistant = (1 << 7)
AsymmetricKeyLengthIsTooShort = (1 << 8)
InsecureCurve = (1 << 9)
NoSelfSignature = (1 << 10)
@property
def causes_signature_verify_to_fail(self):
return self in {
SecurityIssues.WrongSig,
SecurityIssues.Expired,
SecurityIssues.Disabled,
SecurityIssues.Invalid,
SecurityIssues.NoSelfSignature,
}
# https://safecurves.cr.yp.to/
SAFE_CURVES = {
EllipticCurveOID.Curve25519,
EllipticCurveOID.Ed25519,
}
MINIMUM_ASYMMETRIC_KEY_LENGTHS = {
PubKeyAlgorithm.RSAEncryptOrSign: 2048,
PubKeyAlgorithm.RSASign: 2048,
PubKeyAlgorithm.ElGamal: 2048,
PubKeyAlgorithm.DSA: 2048,
##
PubKeyAlgorithm.ECDSA: SAFE_CURVES,
PubKeyAlgorithm.EdDSA: SAFE_CURVES,
PubKeyAlgorithm.ECDH: SAFE_CURVES,
} | PypiClean |
/PICviewer-1.3.0.tar.gz/PICviewer-1.3.0/picviewer/dataplotter/makeplot.py | import matplotlib
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import LogNorm
from .cic_histogram import histogram_cic_2d
import matplotlib.patches as patches
from picviewer.dataloader.load_warpx import LoadWarpx
class MakePlot():
"""
Plot Data class
"""
def __init__(self,Mainwindow):
self.main = Mainwindow
#if self.main.dataformat == 'WarpX':
# from picviewer.dataloader.load_warpx import LoadWarpx
def plotfield2D(self,
x1min,
x1max,
x2min,
x2max,
iloc1,
iloc2,
jloc1,
jloc2):
figure = self.main.figure
nrow = self.main.nrow
ncolumn = self.main.ncolumn
panelselect = self.main.panelselect
field = self.main.field_panel[panelselect-1]
tstep = self.main.tstep_panel[panelselect-1]
time = self.main.taxis[tstep-1]
contrast = self.main.contrast_panel[panelselect-1]
aspect = self.main.aspect_panel[panelselect-1]
amrlevel = self.main.amrlevel_panel[self.main.panelselect-1]
axis = self.main.axes[panelselect-1]
cbar = self.main.cbars[panelselect-1]
axis.remove()
interpolation = 'nearest'
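        # Font size and colorbar width scale linearly with the panel count:
        # a single panel uses fontmax/barmax, a 30-panel grid uses fontmin/barmin.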
fontmax = 10; fontmin = 5.
barmax = 0.12; barmin = 0.05
matplotlib.rc('xtick', labelsize=int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax))
matplotlib.rc('ytick', labelsize=int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax))
fontsize = int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax)
cbarwidth = (barmin-barmax)/(30-1)*(nrow*ncolumn-1)+barmax
xtitle = r'z ($\mu$m)'; ytitle = r'x ($\mu$m)'
ax1 = figure.add_subplot(nrow,ncolumn,panelselect)
if cbar:
cbar.remove()
fdata = self.main.fdata_container[(field,tstep,amrlevel)][jloc1:jloc2,iloc1:iloc2]
vmin = fdata.min()*contrast/100.
vmax = fdata.max()*contrast/100.
im = ax1.imshow(fdata,
interpolation=interpolation, cmap='jet',
origin='lower', vmin = vmin, vmax = vmax,
extent=[x1min,x1max,x2min,x2max],
aspect=aspect)
ax1.axes.set_xlim([x1min,x1max])
ax1.axes.set_ylim([x2min,x2max])
ax1.set_title(field+' (%6.1f fs)'%(time), x=0.3, fontsize=fontsize)
ax1.set_xlabel(xtitle, fontsize=fontsize)
ax1.set_ylabel(ytitle, fontsize=fontsize)
ax = figure.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size=cbarwidth, pad=0.)
cb = figure.colorbar(im, cax=cax)
if amrlevel !=0:
            # Loading AMR boundaries takes time; ideally the boundaries for all
            # time steps would be loaded up front.
x1leftedge, x1rightedge, x2leftedge, x2rightedge = \
LoadWarpx().getAMRboundaries(
self.main.filepath,
self.main.dim,
'dummy',
self.main.iterations[tstep-1],
amrlevel)
ax1.plot([x1leftedge,x1leftedge],[x2leftedge,x2rightedge],':', linewidth=0.6, color='black')
ax1.plot([x1leftedge,x1rightedge],[x2leftedge,x2leftedge],':', linewidth=0.6, color='black')
ax1.plot([x1leftedge,x1rightedge],[x2rightedge,x2rightedge],':', linewidth=0.6, color='black')
ax1.plot([x1rightedge,x1rightedge],[x2leftedge,x2rightedge],':', linewidth=0.6, color='black')
if nrow < 4 and ncolumn < 4:
ax1.axes.get_figure().tight_layout()
return ax1, cb
def plotfield3D(self,
x1min,
x1max,
x2min,
x2max,
iloc1,
iloc2,
jloc1,
jloc2,
kloc1,
kloc2):
figure = self.main.figure
nrow = self.main.nrow
ncolumn = self.main.ncolumn
panelselect = self.main.panelselect
field = self.main.field_panel[panelselect-1]
tstep = self.main.tstep_panel[panelselect-1]
time = self.main.taxis[tstep-1]
sliceplane = self.main.sliceplane_panel[panelselect-1]
contrast = self.main.contrast_panel[panelselect-1]
aspect = self.main.aspect_panel[panelselect-1]
amrlevel = self.main.amrlevel_panel[self.main.panelselect-1]
axis = self.main.axes[panelselect-1]
cbar = self.main.cbars[panelselect-1]
axis.remove()
interpolation = 'nearest'
fontmax = 11; fontmin = 5.
barmax = 0.12; barmin = 0.05
matplotlib.rc('xtick', labelsize=int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax))
matplotlib.rc('ytick', labelsize=int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax))
fontsize = int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax)
cbarwidth = (barmin-barmax)/(30-1)*(nrow*ncolumn-1)+barmax
fdata = self.main.fdata_container[(field,tstep,amrlevel)][iloc1:iloc2,jloc1:jloc2,kloc1:kloc2]
if sliceplane == 'yx':
xtitle = r'x ($\mu$m)'; ytitle = r'y ($\mu$m)'
fdata = fdata[:,:,0].T
if sliceplane == 'xz':
xtitle = r'z ($\mu$m)'; ytitle = r'x ($\mu$m)'
fdata = fdata[:,0,:]
if sliceplane == 'yz':
xtitle = r'z ($\mu$m)'; ytitle = r'y ($\mu$m)'
fdata = fdata[0,:,:]
ax1 = figure.add_subplot(nrow,ncolumn, panelselect)
if cbar:
cbar.remove()
vmin = fdata.min()*contrast/100.
vmax = fdata.max()*contrast/100.
im = ax1.imshow(fdata,
interpolation=interpolation, cmap='jet',
origin='lower', vmin = vmin, vmax = vmax,
extent=[x1min,x1max,x2min,x2max],
aspect=aspect)
ax1.axes.set_xlim([x1min,x1max])
ax1.axes.set_ylim([x2min,x2max])
ax1.set_title(field+' (%6.1f fs)'%(time), x=0.3, fontsize=fontsize)
ax1.set_xlabel(xtitle, fontsize=fontsize)
ax1.set_ylabel(ytitle, fontsize=fontsize)
ax = figure.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size=cbarwidth, pad=0.)
cb = figure.colorbar(im, cax=cax)
if amrlevel !=0:
            # Loading AMR boundaries takes time; ideally the boundaries for all
            # time steps would be loaded up front.
x1leftedge, x1rightedge, x2leftedge, x2rightedge = \
LoadWarpx().getAMRboundaries(
self.main.filepath,
self.main.dim,
sliceplane,
self.main.iterations[tstep-1],
amrlevel)
ax1.plot([x1leftedge,x1leftedge],[x2leftedge,x2rightedge],':', linewidth=0.6, color='black')
ax1.plot([x1leftedge,x1rightedge],[x2leftedge,x2leftedge],':', linewidth=0.6, color='black')
ax1.plot([x1leftedge,x1rightedge],[x2rightedge,x2rightedge],':', linewidth=0.6, color='black')
ax1.plot([x1rightedge,x1rightedge],[x2leftedge,x2rightedge],':', linewidth=0.6, color='black')
#rect = patches.Rectangle((x1leftedge, x2leftedge),
# x1rightedge-x1leftedge, x2rightedge-x2leftedge,
# linestyle=':', linewidth=0.6,edgecolor='black',facecolor='none')
#ax1.add_patch(rect)
if nrow < 4 and ncolumn < 4:
ax1.axes.get_figure().tight_layout()
return ax1, cb
def plotparticle(self,
x1min,
x1max,
x2min,
x2max,
local):
figure = self.main.figure
nrow = self.main.nrow
ncolumn = self.main.ncolumn
panelselect = self.main.panelselect
species = self.main.species_panel[panelselect-1]
phase = self.main.phase_panel[panelselect-1]
tstep = self.main.tstep_panel[panelselect-1]
time = self.main.taxis[tstep-1]
#contrast = self.main.contrast_panel[panelselect-1]
aspect = self.main.aspect_panel[panelselect-1]
cbar = self.main.cbars[(panelselect-1)]
axis = self.main.axes[panelselect-1]
dim = self.main.dim
axis.remove()
interpolation = 'nearest'
fontmax = 10; fontmin = 5.
barmax = 0.12; barmin = 0.05
matplotlib.rc('xtick', labelsize=int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax))
matplotlib.rc('ytick', labelsize=int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax))
fontsize = int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax)
cbarwidth = (barmin-barmax)/(30-1)*(nrow*ncolumn-1)+barmax
title = species+' '+phase[0]+'-'+phase[1]
xtitle = r'z ($\mu$m)'; ytitle = r'x ($\mu$m)'
nbin = 300
ax1 = figure.add_subplot(nrow,ncolumn,panelselect)
#self.plot.cla()
if cbar:
cbar.remove()
if len(self.main.pdata_container[(species,'w',tstep)]) > 0 :
###################
# x-axis
###################
if phase[1] in ['px','py','pz']:
if dim == 2:
p1min = np.min(self.main.pdata_container[(species,phase[1],tstep)])
p1max = np.max(self.main.pdata_container[(species,phase[1],tstep)])
else:
p1min = np.min(self.main.pdata_container[(species,phase[1],tstep)][local])
p1max = np.max(self.main.pdata_container[(species,phase[1],tstep)][local])
xtitle = r'%s ($c$)'%(phase[1])
                # If the minimum and maximum values are the same,
                # an error occurs in the histogram.
elif phase[1] in ['ene']:
if dim == 2:
p1min = np.min(self.main.pdata_container[(species,phase[1],tstep)])
p1max = np.max(self.main.pdata_container[(species,phase[1],tstep)])
else:
p1min = np.min(self.main.pdata_container[(species,phase[1],tstep)][local])
p1max = np.max(self.main.pdata_container[(species,phase[1],tstep)][local])
xtitle = r'%s ($\gamma$-1)'%(phase[1])
elif phase[1] in ['x','y','z']:
p1min = x1min
p1max = x1max
xtitle = r'%s ($\mu$m)'%(phase[1])
###################
# y-axis
###################
if phase[0] in ['px','py','pz']:
if dim == 2:
p2min = np.min(self.main.pdata_container[(species,phase[0],tstep)])
p2max = np.max(self.main.pdata_container[(species,phase[0],tstep)])
else:
p2min = np.min(self.main.pdata_container[(species,phase[0],tstep)][local])
p2max = np.max(self.main.pdata_container[(species,phase[0],tstep)][local])
ytitle = r'%s ($c$)'%(phase[0])
elif phase[0] in ['ene']:
if dim == 2:
p2min = np.min(self.main.pdata_container[(species,phase[0],tstep)])
p2max = np.max(self.main.pdata_container[(species,phase[0],tstep)])
else:
p2min = np.min(self.main.pdata_container[(species,phase[0],tstep)][local])
p2max = np.max(self.main.pdata_container[(species,phase[0],tstep)][local])
ytitle = r'%s ($\gamma$-1)'%(phase[0])
elif phase[0] in ['x','y','z']:
p2min = x2min
p2max = x2max
ytitle = r'%s ($\mu$m)'%(phase[0])
if p1min == p1max:
if p1min != 0:
p1min = p1min*.5
p1max = p1min*3.
else:
p1max = 1.0
if p2min == p2max:
if p2min != 0:
p2min = p2min*.5
p2max = p2min*3.
else:
p2max = 1.0
if dim == 2:
histogram = histogram_cic_2d( self.main.pdata_container[(species,phase[1],tstep)],
self.main.pdata_container[(species,phase[0],tstep)],
self.main.pdata_container[(species,'w',tstep)],
nbin, p1min, p1max, nbin, p2min, p2max)
else:
histogram = histogram_cic_2d( self.main.pdata_container[(species,phase[1],tstep)][local],
self.main.pdata_container[(species,phase[0],tstep)][local],
self.main.pdata_container[(species,'w',tstep)][local],
nbin, p1min, p1max, nbin, p2min, p2max)
vmax=np.max(histogram)
vmin = vmax*1.e-4
#vmax *= contrast/100.
#vmin *= 100./contrast
logthresh=-np.log10(vmin)
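            # Logarithmic color scale spanning four decades below the histogram maximum
            # (vmin = 1e-4 * vmax), so low-density phase-space regions stay visible.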
im = ax1.imshow( histogram.T,
origin='lower', extent=[ p1min,p1max,p2min,p2max],
aspect=aspect, interpolation=interpolation, cmap='jet',
vmin=vmin, vmax=vmax,
norm=matplotlib.colors.LogNorm(10**-logthresh))
ax1.axes.set_xlim([p1min,p1max])
ax1.axes.set_ylim([p2min,p2max])
ax1.set_title(title+' (%6.1f fs)'%(time), x=0.3, fontsize=fontsize)
ax1.set_xlabel(xtitle, fontsize=fontsize)
ax1.set_ylabel(ytitle, fontsize=fontsize)
ax = figure.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size=cbarwidth, pad=0.)
cb = figure.colorbar(im, cax=cax)
else:
if phase[1] in ['px','py','pz']:
xtitle = r'%s ($c$)'%(phase[1])
elif phase[1] in ['ene']:
xtitle = r'%s ($\gamma$-1)'%(phase[1])
elif phase[1] in ['x','y','z']:
xtitle = r'%s ($\mu$m)'%(phase[1])
if phase[0] in ['px','py','pz']:
ytitle = r'%s ($c$)'%(phase[0])
elif phase[0] in ['ene']:
ytitle = r'%s ($\gamma$-1)'%(phase[0])
elif phase[0] in ['x','y','z']:
ytitle = r'%s ($\mu$m)'%(phase[0])
#self.plot.axes.set_xlim([x1min,x1max])
#self.plot.axes.set_ylim([x2min,x2max])
ax1.set_title(title+' (%6.1f fs)'%(time), x=0.3, fontsize=fontsize)
ax1.set_xlabel(xtitle, fontsize=fontsize)
ax1.set_ylabel(ytitle, fontsize=fontsize)
cb = []
#if nrow < 4 or ncolumn < 4:
ax1.axes.get_figure().tight_layout()
return ax1, cb
def makeplotsync2D(self):
figure = self.main.figure
nrow = self.main.nrow
ncolumn = self.main.ncolumn
field_select_panel = self.main.field_select_panel
field_panel = self.main.field_panel
species_panel = self.main.species_panel
phase_panel = self.main.phase_panel
tstep_panel = self.main.tstep_panel
taxis = self.main.taxis
xaxis_dic = self.main.xaxis_dic
zaxis_dic = self.main.zaxis_dic
xminloc_panel = self.main.xminloc_panel
xmaxloc_panel = self.main.xmaxloc_panel
zminloc_panel = self.main.zminloc_panel
zmaxloc_panel = self.main.zmaxloc_panel
contrast_panel = self.main.contrast_panel
aspect_panel = self.main.aspect_panel
amrlevel_panel = self.main.amrlevel_panel
figure.clear()
interpolation = 'nearest'
fontmax = 10; fontmin = 5.
barmax = 0.12; barmin = 0.05
matplotlib.rc('xtick', labelsize=int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax))
matplotlib.rc('ytick', labelsize=int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax))
fontsize = int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax)
cbarwidth = (barmin-barmax)/(30-1)*(nrow*ncolumn-1)+barmax
axes={}
cbars={}
for l in np.arange(nrow*ncolumn):
tstep = tstep_panel[l]
time = taxis[tstep-1]
xaxis = xaxis_dic[tstep-1]
zaxis = zaxis_dic[tstep-1]
amrlevel = amrlevel_panel[l]
dim_factor = 2**amrlevel
contrast = contrast_panel[l]
aspect = aspect_panel[l]
axes[l] = figure.add_subplot(nrow,ncolumn,l+1)
x1min = zaxis[0]+(zaxis[-1]-zaxis[0])*zminloc_panel[l]/100.
x1max = zaxis[0]+(zaxis[-1]-zaxis[0])*zmaxloc_panel[l]/100.
x2min = xaxis[0]+(xaxis[-1]-xaxis[0])*xminloc_panel[l]/100.
x2max = xaxis[0]+(xaxis[-1]-xaxis[0])*xmaxloc_panel[l]/100.
if field_select_panel[l]:
# field plot
field=field_panel[l]
xtitle = r'z ($\mu$m)'; ytitle = r'x ($\mu$m)'
iloc1 = dim_factor*int(len(zaxis)*zminloc_panel[l]/100.)
iloc2 = dim_factor*int(len(zaxis)*zmaxloc_panel[l]/100.)
jloc1 = dim_factor*int(len(xaxis)*xminloc_panel[l]/100.)
jloc2 = dim_factor*int(len(xaxis)*xmaxloc_panel[l]/100.)
vmin = self.main.fdata_container[(field, tstep, amrlevel)][jloc1:jloc2,iloc1:iloc2].min()
vmax = self.main.fdata_container[(field, tstep, amrlevel)][jloc1:jloc2,iloc1:iloc2].max()
vmin = vmin*contrast/100.
vmax = vmax*contrast/100.
im = axes[l].imshow(self.main.fdata_container[(field, tstep, amrlevel)][jloc1:jloc2,iloc1:iloc2],
interpolation=interpolation, cmap='jet',
origin='lower', vmin = vmin, vmax = vmax, extent=[x1min,x1max,x2min,x2max], aspect=aspect)
axes[l].axes.set_xlim([x1min,x1max])
axes[l].axes.set_ylim([x2min,x2max])
axes[l].set_title(field+' (%6.1f fs)'%(time), x=0.3, fontsize=fontsize)
axes[l].set_xlabel(xtitle, fontsize=fontsize)
axes[l].set_ylabel(ytitle, fontsize=fontsize)
ax = figure.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size=cbarwidth, pad=0.)
cb = figure.colorbar(im, cax=cax)
else: # particle plot
species = species_panel[l]
                # phase is a tuple, e.g. (px, x): px is the x2 axis (y-axis), x is the x1 axis (x-axis)
phase = phase_panel[l]
title = species+' '+phase[0]+'-'+phase[1]
nbin = 300
if len(self.main.pdata_container[(species, 'w', tstep)]) > 0:
# x1-axis (or x-axis) variables
if phase[1] in ['px','py','pz']:
p1min = np.min(self.main.pdata_container[(species, phase[1], tstep)])
p1max = np.max(self.main.pdata_container[(species, phase[1], tstep)])
xtitle = r'%s ($c$)'%(phase[1])
elif phase[1] in ['ene']:
p1min = np.min(self.main.pdata_container[(species, phase[1], tstep)])
p1max = np.max(self.main.pdata_container[(species, phase[1], tstep)])
xtitle = r'%s ($\gamma$-1)'%(phase[1])
elif phase[1] in ['x','y','z']:
p1min = x1min
p1max = x1max
xtitle = r'%s ($\mu$m)'%(phase[1])
# x2-axis (y-axis) variables
if phase[0] in ['px','py','pz']:
p2min = np.min(self.main.pdata_container[(species, phase[0], tstep)])
p2max = np.max(self.main.pdata_container[(species, phase[0], tstep)])
ytitle = r'%s ($c$)'%(phase[0])
elif phase[0] in ['ene']:
p2min = np.min(self.main.pdata_container[(species, phase[0], tstep)])
p2max = np.max(self.main.pdata_container[(species, phase[0], tstep)])
ytitle = r'%s ($\gamma$-1)'%(phase[0])
elif phase[0] in ['x','y','z']:
p2min = x2min
p2max = x2max
ytitle = r'%s ($\mu$m)'%(phase[0])
if p1min == p1max:
if p1min != 0:
p1min = p1min*.5
p1max = p1min*3.
else:
p1max = 1.0
if p2min == p2max:
if p2min != 0:
p2min = p2min*.5
p2max = p2min*3.
else:
p2max = 1.0
histogram = histogram_cic_2d(
self.main.pdata_container[(species, phase[1], tstep)],
self.main.pdata_container[(species, phase[0], tstep)],
self.main.pdata_container[(species, 'w', tstep)],
nbin, p1min, p1max, nbin, p2min, p2max)
vmax=np.max(histogram)
vmin = vmax*1.e-4
#vmax *= contrast/100.
#vmin *= 100./contrast
logthresh=-np.log10(vmin)
im = axes[l].imshow( histogram.T,
origin='lower', extent=[ p1min,p1max,p2min,p2max ],
aspect=aspect, interpolation=interpolation, cmap='jet',
vmin=vmin, vmax=vmax,
norm=matplotlib.colors.LogNorm(10**-logthresh))
axes[l].axes.set_xlim([p1min,p1max])
axes[l].axes.set_ylim([p2min,p2max])
axes[l].set_title(title+' (%6.1f fs)'%(time), x=0.3, fontsize=fontsize)
axes[l].set_xlabel(xtitle, fontsize=fontsize)
axes[l].set_ylabel(ytitle, fontsize=fontsize)
ax = figure.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size=cbarwidth, pad=0.)
cb = figure.colorbar(im, cax=cax)
else:
# x1-axis (or x-axis) variables
if phase[1] in ['px','py','pz']:
xtitle = r'%s ($c$)'%(phase[1])
elif phase[1] in ['ene']:
xtitle = r'%s ($\gamma$-1)'%(phase[1])
elif phase[1] in ['x','y','z']:
xtitle = r'%s ($\mu$m)'%(phase[1])
# x2-axis (y-axis) variables
if phase[0] in ['px','py','pz']:
ytitle = r'%s ($c$)'%(phase[0])
elif phase[0] in ['ene']:
ytitle = r'%s ($\gamma$-1)'%(phase[0])
elif phase[0] in ['x','y','z']:
ytitle = r'%s ($\mu$m)'%(phase[0])
#self.plot.axes.set_xlim([p1min,p1max])
#self.plot.axes.set_ylim([p2min,p2max])
axes[l].set_title(title+' (%6.1f fs)'%(time), x=0.3, fontsize=fontsize)
axes[l].set_xlabel(xtitle, fontsize=fontsize)
axes[l].set_ylabel(ytitle, fontsize=fontsize)
cb=[]
cbars[l]= cb
if nrow < 4 and ncolumn < 4:
axes[l].axes.get_figure().tight_layout()
return axes, cbars
def makeplotsync3D(self, loc_container):
figure = self.main.figure
nrow = self.main.nrow
ncolumn = self.main.ncolumn
field_select_panel = self.main.field_select_panel
field_panel = self.main.field_panel
species_panel = self.main.species_panel
phase_panel = self.main.phase_panel
tstep_panel = self.main.tstep_panel
taxis = self.main.taxis
sliceplane_panel = self.main.sliceplane_panel
slicevalue_panel = self.main.slicevalue_panel
xaxis_dic = self.main.xaxis_dic
yaxis_dic = self.main.yaxis_dic
zaxis_dic = self.main.zaxis_dic
xminloc_panel = self.main.xminloc_panel
xmaxloc_panel = self.main.xmaxloc_panel
yminloc_panel = self.main.yminloc_panel
ymaxloc_panel = self.main.ymaxloc_panel
zminloc_panel = self.main.zminloc_panel
zmaxloc_panel = self.main.zmaxloc_panel
amrlevel_panel = self.main.amrlevel_panel
contrast_panel = self.main.contrast_panel
aspect_panel = self.main.aspect_panel
figure.clear()
interpolation = 'nearest'
fontmax = 10; fontmin = 5.
barmax = 0.12; barmin = 0.05
matplotlib.rc('xtick', labelsize=int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax))
matplotlib.rc('ytick', labelsize=int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax))
fontsize = int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax)
cbarwidth = (barmin-barmax)/(30-1)*(nrow*ncolumn-1)+barmax
axes={}
cbars={}
for l in np.arange(nrow*ncolumn):
axes[l] = figure.add_subplot(nrow,ncolumn,l+1)
amrlevel = amrlevel_panel[l]
dim_factor = 2**amrlevel
contrast = contrast_panel[l]
aspect = aspect_panel[l]
tstep = tstep_panel[l]
time = taxis[tstep-1]
xaxis = xaxis_dic[tstep-1]
yaxis = yaxis_dic[tstep-1]
zaxis = zaxis_dic[tstep-1]
sliceplane = sliceplane_panel[l]
slicevalue = slicevalue_panel[l]
xminloc = xminloc_panel[l]
xmaxloc = xmaxloc_panel[l]
yminloc = yminloc_panel[l]
ymaxloc = ymaxloc_panel[l]
zminloc = zminloc_panel[l]
zmaxloc = zmaxloc_panel[l]
if sliceplane == 'yx':
x1min = xaxis[0]+(xaxis[-1]-xaxis[0])*xminloc/100.
x1max = xaxis[0]+(xaxis[-1]-xaxis[0])*xmaxloc/100.
x2min = yaxis[0]+(yaxis[-1]-yaxis[0])*yminloc/100.
x2max = yaxis[0]+(yaxis[-1]-yaxis[0])*ymaxloc/100.
if sliceplane == 'xz':
xtitle = r'z ($\mu$m)'; ytitle = r'x ($\mu$m)'
x1min = zaxis[0]+(zaxis[-1]-zaxis[0])*zminloc/100.
x1max = zaxis[0]+(zaxis[-1]-zaxis[0])*zmaxloc/100.
x2min = xaxis[0]+(xaxis[-1]-xaxis[0])*xminloc/100.
x2max = xaxis[0]+(xaxis[-1]-xaxis[0])*xmaxloc/100.
if sliceplane == 'yz':
xtitle = r'z ($\mu$m)'; ytitle = r'y ($\mu$m)'
x1min = zaxis[0]+(zaxis[-1]-zaxis[0])*zminloc/100.
x1max = zaxis[0]+(zaxis[-1]-zaxis[0])*zmaxloc/100.
x2min = yaxis[0]+(yaxis[-1]-yaxis[0])*yminloc/100.
x2max = yaxis[0]+(yaxis[-1]-yaxis[0])*ymaxloc/100.
if field_select_panel[l]:
# field plot
field=field_panel[l]
if sliceplane == 'yx':
xtitle = r'x ($\mu$m)'; ytitle = r'y ($\mu$m)'
iloc1 = dim_factor*int(len(xaxis)*xminloc/100.)
iloc2 = dim_factor*int(len(xaxis)*xmaxloc/100.)
jloc1 = dim_factor*int(len(yaxis)*yminloc/100.)
jloc2 = dim_factor*int(len(yaxis)*ymaxloc/100.)
kloc = int(dim_factor*len(zaxis)*slicevalue/50.)
vmin = self.main.fdata_container[(field, tstep, amrlevel)][iloc1:iloc2,jloc1:jloc2,kloc].min()
vmax = self.main.fdata_container[(field, tstep, amrlevel)][iloc1:iloc2,jloc1:jloc2,kloc].max()
vmin = vmin*contrast/100.
vmax = vmax*contrast/100.
im = axes[l].imshow(self.main.fdata_container[(field, tstep, amrlevel)][iloc1:iloc2,jloc1:jloc2,kloc].T,
interpolation=interpolation, cmap='jet',
origin='lower', vmin=vmin, vmax=vmax, extent=[x1min,x1max,x2min,x2max], aspect=aspect)
if sliceplane == 'xz':
xtitle = r'z ($\mu$m)'; ytitle = r'x ($\mu$m)'
iloc1 = int(dim_factor*len(zaxis)*zminloc/100.)
iloc2 = int(dim_factor*len(zaxis)*zmaxloc/100.)
jloc1 = int(dim_factor*len(xaxis)*xminloc/100.)
jloc2 = int(dim_factor*len(xaxis)*xmaxloc/100.)
kloc = int(dim_factor*len(yaxis)*slicevalue/50)
im = axes[l].imshow(self.main.fdata_container[(field, tstep, amrlevel)][jloc1:jloc2,kloc,iloc1:iloc2],
interpolation=interpolation, cmap='jet',
origin='lower', extent=[x1min,x1max,x2min,x2max], aspect=aspect)
if sliceplane == 'yz':
xtitle = r'z ($\mu$m)'; ytitle = r'y ($\mu$m)'
iloc1 = int(dim_factor*len(zaxis)*zminloc/100.)
iloc2 = int(dim_factor*len(zaxis)*zmaxloc/100.)
jloc1 = int(dim_factor*len(yaxis)*yminloc/100.)
jloc2 = int(dim_factor*len(yaxis)*ymaxloc/100.)
kloc = int(dim_factor*len(xaxis)*slicevalue/50)
im = axes[l].imshow(self.main.fdata_container[(field, tstep, amrlevel)][kloc,jloc1:jloc2,iloc1:iloc2],
interpolation=interpolation, cmap='jet',
origin='lower', extent=[x1min,x1max,x2min,x2max], aspect=aspect)
axes[l].axes.set_xlim([x1min,x1max])
axes[l].axes.set_ylim([x2min,x2max])
axes[l].set_title(field+' (%6.1f fs)'%(time), x=0.3, fontsize=fontsize)
axes[l].set_xlabel(xtitle, fontsize=fontsize)
axes[l].set_ylabel(ytitle, fontsize=fontsize)
ax = figure.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size=cbarwidth, pad=0.)
cb = figure.colorbar(im, cax=cax)
else: # particle plot
species = species_panel[l]
phase = phase_panel[l]
title = species+' '+phase[0]+'-'+phase[1]
nbin = 300
loc = loc_container[l]
# there must be at least one particle for histogram
if len(loc) > 0:
if phase[1] in ['px','py','pz']:
p1min = np.min(self.main.pdata_container[(species, phase[1], tstep)][loc])
p1max = np.max(self.main.pdata_container[(species, phase[1], tstep)][loc])
xtitle = r'%s ($c$)'%(phase[1])
elif phase[1] in ['ene']:
p1min = np.min(self.main.pdata_container[(species, phase[1], tstep)][loc])
p1max = np.max(self.main.pdata_container[(species, phase[1], tstep)][loc])
xtitle = r'%s ($\gamma$-1)'%(phase[1])
elif phase[1] in ['x','y','z']:
p1min = x1min
p1max = x1max
xtitle = r'%s ($\mu$m)'%(phase[1])
if phase[0] in ['px','py','pz']:
p2min = np.min(self.main.pdata_container[(species, phase[0], tstep)][loc])
p2max = np.max(self.main.pdata_container[(species, phase[0], tstep)][loc])
ytitle = r'%s ($c$)'%(phase[0])
elif phase[0] in ['ene']:
p2min = np.min(self.main.pdata_container[(species, phase[0], tstep)][loc])
p2max = np.max(self.main.pdata_container[(species, phase[0], tstep)][loc])
ytitle = r'%s ($\gamma$-1)'%(phase[0])
elif phase[0] in ['x','y','z']:
p2min = x2min
p2max = x2max
ytitle = r'%s ($\mu$m)'%(phase[0])
if p1min == p1max:
if p1min != 0:
p1min = p1min*.5
p1max = p1min*3.
else:
p1max = 1.0
if p2min == p2max:
if p2min != 0:
p2min = p2min*.5
p2max = p2min*3.
else:
p2max = 1.0
histogram = histogram_cic_2d(
self.main.pdata_container[(species, phase[1], tstep)][loc],
self.main.pdata_container[(species, phase[0], tstep)][loc],
self.main.pdata_container[(species, 'w', tstep)][loc], nbin, p1min, p1max, nbin, p2min, p2max)
vmax=np.max(histogram)
vmin = vmax*1.e-4
#vmax *= contrast/100.
#vmin *= 100./contrast
logthresh=-np.log10(vmin)
im = axes[l].imshow( histogram.T,
origin='lower', extent=[ p1min,p1max,p2min,p2max ],
aspect=aspect, interpolation=interpolation, cmap='jet',
vmin=vmin, vmax=vmax,
norm=matplotlib.colors.LogNorm(10**-logthresh))
axes[l].axes.set_xlim([p1min,p1max])
axes[l].axes.set_ylim([p2min,p2max])
axes[l].set_title(title+' (%6.1f fs)'%(time), x=0.3, fontsize=fontsize)
axes[l].set_xlabel(xtitle, fontsize=fontsize)
axes[l].set_ylabel(ytitle, fontsize=fontsize)
ax = figure.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size=cbarwidth, pad=0.)
cb = figure.colorbar(im, cax=cax)
else:
if phase[1] in ['px','py','pz']:
xtitle = r'%s ($c$)'%(phase[1])
elif phase[1] in ['ene']:
xtitle = r'%s ($\gamma$-1)'%(phase[1])
elif phase[1] in ['x','y','z']:
xtitle = r'%s ($\mu$m)'%(phase[1])
if phase[0] in ['px','py','pz']:
ytitle = r'%s ($c$)'%(phase[0])
elif phase[0] in ['ene']:
ytitle = r'%s ($\gamma$-1)'%(phase[0])
elif phase[0] in ['x','y','z']:
ytitle = r'%s ($\mu$m)'%(phase[0])
#self.plot.axes.set_xlim([p1min,p1max])
#self.plot.axes.set_ylim([p2min,p2max])
axes[l].set_title(title+' (%6.1f fs)'%(time), x=0.3, fontsize=fontsize)
axes[l].set_xlabel(xtitle, fontsize=fontsize)
axes[l].set_ylabel(ytitle, fontsize=fontsize)
cb=[]
cbars[l]= cb
if nrow < 4 and ncolumn < 4:
axes[l].axes.get_figure().tight_layout()
return axes, cbars
def locallineplot2D(self,
figure,
nrow,
ncolumn,
field,
panel_select,
time,
laxis,
ldata):
interpolation = 'spline16'
fontmax = 10; fontmin = 5.
barmax = 0.12; barmin = 0.05
matplotlib.rc('xtick', labelsize=int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax))
matplotlib.rc('ytick', labelsize=int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax))
fontsize = int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax)
cbarwidth = (barmin-barmax)/(30-1)*(nrow*ncolumn-1)+barmax
xtitle = r'l ($\mu$m)'; ytitle = field
self.plot = figure.add_subplot(nrow,ncolumn,panel_select)
self.plot.cla()
self.plot.plot(laxis, ldata)
self.plot.set_title(field+' (%6.1f fs)'%(time), x=0.3, fontsize=fontsize)
self.plot.set_xlabel(xtitle, fontsize=fontsize)
self.plot.set_ylabel(ytitle, fontsize=fontsize)
#if nrow < 4 or ncolumn < 4:
# self.plot.axes.get_figure().tight_layout()
def localcontourplot2D(self,
figure,
fdata,
nrow,
ncolumn,
field,
panel_select,
time,
x1min,
x1max,
x2min,
x2max,
aspect):
interpolation = 'nearest'
fontmax = 11; fontmin = 5.
barmax = 0.12; barmin = 0.05
matplotlib.rc('xtick', labelsize=int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax))
matplotlib.rc('ytick', labelsize=int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax))
fontsize = int((fontmin-fontmax)/(30-1)*(nrow*ncolumn-1)+fontmax)
cbarwidth = (barmin-barmax)/(30-1)*(nrow*ncolumn-1)+barmax
xtitle = r'z ($\mu$m)'; ytitle = r'x ($\mu$m)'
self.plot = figure.add_subplot(nrow,ncolumn,panel_select)
self.plot.cla()
im = self.plot.imshow(fdata, interpolation=interpolation, cmap='jet',
origin='lower', extent=[x1min,x1max,x2min,x2max], aspect=aspect)
self.plot.axes.set_xlim([x1min,x1max])
self.plot.axes.set_ylim([x2min,x2max])
self.plot.set_title(field+' (%6.1f fs)'%(time), x=0.3, fontsize=fontsize)
self.plot.set_xlabel(xtitle, fontsize=fontsize)
self.plot.set_ylabel(ytitle, fontsize=fontsize)
ax = figure.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size=cbarwidth, pad=0.)
cb = figure.colorbar(im, cax=cax)
#if nrow < 4 or ncolumn < 4:
# self.plot.axes.get_figure().tight_layout() | PypiClean |
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/device_management/reports/get_configuration_policy_settings_device_summary_report/get_configuration_policy_settings_device_summary_report_post_request_body.py | from __future__ import annotations
from kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
class GetConfigurationPolicySettingsDeviceSummaryReportPostRequestBody(AdditionalDataHolder, Parsable):
def __init__(self,) -> None:
"""
Instantiates a new getConfigurationPolicySettingsDeviceSummaryReportPostRequestBody and sets the default values.
"""
# Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
self._additional_data: Dict[str, Any] = {}
# The filter property
self._filter: Optional[str] = None
# The groupBy property
self._group_by: Optional[List[str]] = None
# The name property
self._name: Optional[str] = None
# The orderBy property
self._order_by: Optional[List[str]] = None
# The search property
self._search: Optional[str] = None
# The select property
self._select: Optional[List[str]] = None
# The sessionId property
self._session_id: Optional[str] = None
# The skip property
self._skip: Optional[int] = None
# The top property
self._top: Optional[int] = None
@property
def additional_data(self,) -> Dict[str, Any]:
"""
Gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
Returns: Dict[str, Any]
"""
return self._additional_data
@additional_data.setter
def additional_data(self,value: Dict[str, Any]) -> None:
"""
Sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
Args:
value: Value to set for the AdditionalData property.
"""
self._additional_data = value
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> GetConfigurationPolicySettingsDeviceSummaryReportPostRequestBody:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parseNode: The parse node to use to read the discriminator value and create the object
Returns: GetConfigurationPolicySettingsDeviceSummaryReportPostRequestBody
"""
if parse_node is None:
raise Exception("parse_node cannot be undefined")
return GetConfigurationPolicySettingsDeviceSummaryReportPostRequestBody()
@property
def filter(self,) -> Optional[str]:
"""
Gets the filter property value. The filter property
Returns: Optional[str]
"""
return self._filter
@filter.setter
def filter(self,value: Optional[str] = None) -> None:
"""
Sets the filter property value. The filter property
Args:
value: Value to set for the filter property.
"""
self._filter = value
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
fields: Dict[str, Callable[[Any], None]] = {
"filter": lambda n : setattr(self, 'filter', n.get_str_value()),
"groupBy": lambda n : setattr(self, 'group_by', n.get_collection_of_primitive_values(str)),
"name": lambda n : setattr(self, 'name', n.get_str_value()),
"orderBy": lambda n : setattr(self, 'order_by', n.get_collection_of_primitive_values(str)),
"search": lambda n : setattr(self, 'search', n.get_str_value()),
"select": lambda n : setattr(self, 'select', n.get_collection_of_primitive_values(str)),
"sessionId": lambda n : setattr(self, 'session_id', n.get_str_value()),
"skip": lambda n : setattr(self, 'skip', n.get_int_value()),
"top": lambda n : setattr(self, 'top', n.get_int_value()),
}
return fields
@property
def group_by(self,) -> Optional[List[str]]:
"""
Gets the groupBy property value. The groupBy property
Returns: Optional[List[str]]
"""
return self._group_by
@group_by.setter
def group_by(self,value: Optional[List[str]] = None) -> None:
"""
Sets the groupBy property value. The groupBy property
Args:
value: Value to set for the group_by property.
"""
self._group_by = value
@property
def name(self,) -> Optional[str]:
"""
Gets the name property value. The name property
Returns: Optional[str]
"""
return self._name
@name.setter
def name(self,value: Optional[str] = None) -> None:
"""
Sets the name property value. The name property
Args:
value: Value to set for the name property.
"""
self._name = value
@property
def order_by(self,) -> Optional[List[str]]:
"""
Gets the orderBy property value. The orderBy property
Returns: Optional[List[str]]
"""
return self._order_by
@order_by.setter
def order_by(self,value: Optional[List[str]] = None) -> None:
"""
Sets the orderBy property value. The orderBy property
Args:
value: Value to set for the order_by property.
"""
self._order_by = value
@property
def search(self,) -> Optional[str]:
"""
Gets the search property value. The search property
Returns: Optional[str]
"""
return self._search
@search.setter
def search(self,value: Optional[str] = None) -> None:
"""
Sets the search property value. The search property
Args:
value: Value to set for the search property.
"""
self._search = value
@property
def select(self,) -> Optional[List[str]]:
"""
Gets the select property value. The select property
Returns: Optional[List[str]]
"""
return self._select
@select.setter
def select(self,value: Optional[List[str]] = None) -> None:
"""
Sets the select property value. The select property
Args:
value: Value to set for the select property.
"""
self._select = value
def serialize(self,writer: SerializationWriter) -> None:
"""
Serializes information the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if writer is None:
raise Exception("writer cannot be undefined")
writer.write_str_value("filter", self.filter)
writer.write_collection_of_primitive_values("groupBy", self.group_by)
writer.write_str_value("name", self.name)
writer.write_collection_of_primitive_values("orderBy", self.order_by)
writer.write_str_value("search", self.search)
writer.write_collection_of_primitive_values("select", self.select)
writer.write_str_value("sessionId", self.session_id)
writer.write_int_value("skip", self.skip)
writer.write_int_value("top", self.top)
writer.write_additional_data_value(self.additional_data)
@property
def session_id(self,) -> Optional[str]:
"""
Gets the sessionId property value. The sessionId property
Returns: Optional[str]
"""
return self._session_id
@session_id.setter
def session_id(self,value: Optional[str] = None) -> None:
"""
Sets the sessionId property value. The sessionId property
Args:
value: Value to set for the session_id property.
"""
self._session_id = value
@property
def skip(self,) -> Optional[int]:
"""
Gets the skip property value. The skip property
Returns: Optional[int]
"""
return self._skip
@skip.setter
def skip(self,value: Optional[int] = None) -> None:
"""
Sets the skip property value. The skip property
Args:
value: Value to set for the skip property.
"""
self._skip = value
@property
def top(self,) -> Optional[int]:
"""
Gets the top property value. The top property
Returns: Optional[int]
"""
return self._top
@top.setter
def top(self,value: Optional[int] = None) -> None:
"""
Sets the top property value. The top property
Args:
value: Value to set for the top property.
"""
self._top = value | PypiClean |
/dyneusr-0.3.11.tar.gz/dyneusr-0.3.11/docs/demo/haxby-umap-supervised/haxby_umap_supervised.py | import numpy as np
import pandas as pd
import networkx as nx
from nilearn.datasets import fetch_haxby
from nilearn.input_data import NiftiMasker
from kmapper import KeplerMapper, Cover
from sklearn.cluster import DBSCAN
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from umap.umap_ import UMAP
from dyneusr import DyNeuGraph
from dyneusr.tools import visualize_mapper_stages
from dyneusr.mapper.utils import optimize_dbscan
# Fetch dataset, extract time-series from ventral temporal (VT) mask
dataset = fetch_haxby()
masker = NiftiMasker(
dataset.mask_vt[0],
standardize=True, detrend=True, smoothing_fwhm=4.0,
low_pass=0.09, high_pass=0.008, t_r=2.5,
memory="nilearn_cache")
X = masker.fit_transform(dataset.func[0])
# Encode labels as integers
df = pd.read_csv(dataset.session_target[0], sep=" ")
target, labels = pd.factorize(df.labels.values)
y = pd.DataFrame({l:(target==i).astype(int) for i,l in enumerate(labels)})
# Extract sessions 4-5
mask_sessions = df.chunks.add(1).isin([4, 5])
X = X[mask_sessions]
y = y.loc[mask_sessions, :]
target = target[mask_sessions]
# Generate a shape graph using KeplerMapper
mapper = KeplerMapper(verbose=1)
# Configure projection
pca = PCA(2, random_state=1)
umap = UMAP(n_components=2, init=pca.fit_transform(X))
# Construct lens and generate the shape graph
lens = mapper.fit_transform(
umap.fit_transform(X, y=target),
projection=[0, 1])
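# The lens is a 2-D supervised UMAP embedding (class labels passed via y=target);
# projection=[0, 1] keeps both embedding components as the Mapper filter.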
graph = mapper.map(
lens, X=X,
cover=Cover(20, 0.5),
clusterer=optimize_dbscan(X, k=3, p=100.0), )
# Convert to a DyNeuGraph
dG = DyNeuGraph(G=graph, y=y)
# Define some custom_layouts
dG.add_custom_layout(lens, name='lens')
dG.add_custom_layout(nx.spring_layout, name='nx.spring')
dG.add_custom_layout(nx.kamada_kawai_layout, name='nx.kamada_kawai')
dG.add_custom_layout(nx.spectral_layout, name='nx.spectral')
dG.add_custom_layout(nx.circular_layout, name='nx.circular')
# Configure some projections
pca = PCA(2, random_state=1)
tsne = TSNE(2, init='pca', random_state=1)
umap = UMAP(n_components=2, init=pca.fit_transform(X))
# Add projections as custom_layouts
dG.add_custom_layout(pca.fit_transform(X), name='PCA')
dG.add_custom_layout(tsne.fit_transform(X), name='TSNE')
dG.add_custom_layout(umap.fit_transform(X, y=None), name='UMAP')
dG.add_custom_layout(umap.fit_transform(X, y=target), name='Supervised UMAP')
# Visualize
dG.visualize(static=True, show=True) | PypiClean |
/openbb-3.2.2-py3-none-any.whl/openbb_terminal/cryptocurrency/defi/llama_view.py | __docformat__ = "numpy"
import logging
import os
from typing import Optional, Union
import pandas as pd
from openbb_terminal import OpenBBFigure, theme
from openbb_terminal.cryptocurrency.cryptocurrency_helpers import read_data_file
from openbb_terminal.cryptocurrency.defi import llama_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
print_rich_table,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_grouped_defi_protocols(
limit: int = 50,
export: str = "",
sheet_name: Optional[str] = None,
external_axes: bool = False,
) -> Union[OpenBBFigure, None]:
"""Plots top dApps (in terms of TVL) grouped by chain.
[Source: https://docs.llama.fi/api]
Parameters
----------
limit: int
Number of top dApps to display
export : str
Export dataframe data to csv,json,xlsx file
external_axes : bool, optional
Whether to return the figure object or not, by default False
"""
df = llama_model.get_defi_protocols(limit, drop_chain=False)
df["TVL ($)"] = df["TVL ($)"].apply(lambda x: lambda_long_number_format(x))
chains = llama_model.get_grouped_defi_protocols(limit)
fig = OpenBBFigure(
xaxis_title="Total Value Locked ($)",
yaxis_title="Decentralized Application Name",
)
fig.set_title(f"Top {limit} dApp TVL grouped by chain")
colors = iter(theme.get_colors(reverse=True))
for chain in chains:
chain_filter = df.loc[df.Chain == chain]
fig.add_bar(
y=chain_filter.index,
x=chain_filter["TVL ($)"],
name=chain,
orientation="h",
marker_color=next(colors, "#B6A9CB"),
)
fig.update_layout(
margin=dict(l=150),
yaxis=dict(side="left", tickfont=dict(size=8)),
legend=dict(yanchor="bottom", y=0, xanchor="right", x=1),
)
fig.update_xaxes(tickvals=list(range(0, 40)), ticktext=list(range(0, 40)))
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"gdapps",
chains,
sheet_name,
fig,
)
return fig.show(external=external_axes)
@log_start_end(log=logger)
def display_defi_protocols(
sortby: str,
limit: int = 20,
ascend: bool = False,
description: bool = False,
export: str = "",
sheet_name: Optional[str] = None,
) -> None:
"""Prints table showing information about listed DeFi protocols, their current TVL and changes to it in
the last hour/day/week. [Source: https://docs.llama.fi/api]
Parameters
----------
sortby: str
Key by which to sort data
limit: int
Number of records to display
ascend: bool
Flag to sort data in ascending order
description: bool
Flag to display description of protocol
export : str
Export dataframe data to csv,json,xlsx file
"""
df = llama_model.get_defi_protocols(limit, sortby, ascend, description)
print_rich_table(
df,
headers=list(df.columns),
show_index=False,
export=bool(export),
limit=limit,
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"ldapps",
df,
sheet_name,
)
@log_start_end(log=logger)
def display_historical_tvl(
dapps: str,
export: Optional[str] = "",
sheet_name: Optional[str] = None,
external_axes: bool = False,
) -> Union[OpenBBFigure, None]:
"""Plots historical TVL of different dApps
[Source: https://docs.llama.fi/api]
Parameters
----------
dapps: str
dApps to search historical TVL. Should be split by , e.g.: anchor,sushiswap,pancakeswap
export : str
Export dataframe data to csv,json,xlsx file
external_axes : bool, optional
Whether to return the figure object or not, by default False
"""
fig = OpenBBFigure(yaxis_title="Total Value Locked ($)")
fig.set_title("TVL in dApps")
available_protocols = read_data_file("defillama_dapps.json")
dapp: str = ""
if isinstance(available_protocols, dict):
for dapp in dapps.split(","):
if dapp in available_protocols:
df = llama_model.get_defi_protocol(dapp)
df = df.query("`totalLiquidityUSD` > 0")
df.index = pd.DatetimeIndex(df.index.strftime("%Y-%m-%d"))
if not df.empty:
fig.add_scatter(
x=df.index,
y=df["totalLiquidityUSD"].values,
name=available_protocols[dapp],
)
else:
console.print(f"{dapp} not found\n")
if export and export != "":
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
f"dtvl_{dapp}",
df,
sheet_name,
fig,
)
return None
return fig.show(external=external_axes)
return None
@log_start_end(log=logger)
def display_defi_tvl(
limit: int = 5,
export: Optional[str] = "",
sheet_name: Optional[str] = None,
external_axes: bool = False,
) -> Union[OpenBBFigure, None]:
"""Plots historical values of the total sum of TVLs from all listed protocols.
[Source: https://docs.llama.fi/api]
Parameters
----------
limit: int
Number of records to display, by default 5
export : str
Export dataframe data to csv,json,xlsx file
external_axes : bool, optional
Whether to return the figure object or not, by default False
"""
fig = OpenBBFigure(yaxis_title="Total Value Locked ($)")
fig.set_title("Total Value Locked in DeFi")
df = llama_model.get_defi_tvl()
df_data = df.copy()
df = df.tail(limit)
if export and export != "":
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"stvl",
df_data,
sheet_name,
fig,
)
return None
fig.add_scatter(x=df["date"], y=df["totalLiquidityUSD"], name="TVL")
return fig.show(external=external_axes) | PypiClean |
/mattermost_api_reference_client-4.0.0.post1.tar.gz/mattermost_api_reference_client-4.0.0.post1/mattermost_api_reference_client/models/bot.py | from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..types import UNSET, Unset
T = TypeVar("T", bound="Bot")
@attr.s(auto_attribs=True)
class Bot:
"""A bot account
Attributes:
user_id (Union[Unset, str]): The user id of the associated user entry.
create_at (Union[Unset, int]): The time in milliseconds a bot was created
update_at (Union[Unset, int]): The time in milliseconds a bot was last updated
delete_at (Union[Unset, int]): The time in milliseconds a bot was deleted
username (Union[Unset, str]):
display_name (Union[Unset, str]):
description (Union[Unset, str]):
owner_id (Union[Unset, str]): The user id of the user that currently owns this bot.
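Example (an illustrative sketch; the field values are hypothetical):
    bot = Bot.from_dict({"user_id": "abc123", "username": "reminder-bot"})
    data = bot.to_dict()  # round-trips known fields plus any additional properties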
"""
user_id: Union[Unset, str] = UNSET
create_at: Union[Unset, int] = UNSET
update_at: Union[Unset, int] = UNSET
delete_at: Union[Unset, int] = UNSET
username: Union[Unset, str] = UNSET
display_name: Union[Unset, str] = UNSET
description: Union[Unset, str] = UNSET
owner_id: Union[Unset, str] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
user_id = self.user_id
create_at = self.create_at
update_at = self.update_at
delete_at = self.delete_at
username = self.username
display_name = self.display_name
description = self.description
owner_id = self.owner_id
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if user_id is not UNSET:
field_dict["user_id"] = user_id
if create_at is not UNSET:
field_dict["create_at"] = create_at
if update_at is not UNSET:
field_dict["update_at"] = update_at
if delete_at is not UNSET:
field_dict["delete_at"] = delete_at
if username is not UNSET:
field_dict["username"] = username
if display_name is not UNSET:
field_dict["display_name"] = display_name
if description is not UNSET:
field_dict["description"] = description
if owner_id is not UNSET:
field_dict["owner_id"] = owner_id
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
user_id = d.pop("user_id", UNSET)
create_at = d.pop("create_at", UNSET)
update_at = d.pop("update_at", UNSET)
delete_at = d.pop("delete_at", UNSET)
username = d.pop("username", UNSET)
display_name = d.pop("display_name", UNSET)
description = d.pop("description", UNSET)
owner_id = d.pop("owner_id", UNSET)
bot = cls(
user_id=user_id,
create_at=create_at,
update_at=update_at,
delete_at=delete_at,
username=username,
display_name=display_name,
description=description,
owner_id=owner_id,
)
bot.additional_properties = d
return bot
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties | PypiClean |
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/models/windows_updates/content_approval.py | from __future__ import annotations
from kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from . import compliance_change, deployable_content, deployment, deployment_settings
from . import compliance_change
class ContentApproval(compliance_change.ComplianceChange):
def __init__(self,) -> None:
"""
Instantiates a new ContentApproval and sets the default values.
"""
super().__init__()
self.odata_type = "#microsoft.graph.windowsUpdates.contentApproval"
# The content property
self._content: Optional[deployable_content.DeployableContent] = None
# Settings for governing how to deploy content.
self._deployment_settings: Optional[deployment_settings.DeploymentSettings] = None
# Deployments created as a result of applying the approval.
self._deployments: Optional[List[deployment.Deployment]] = None
@property
def content(self,) -> Optional[deployable_content.DeployableContent]:
"""
Gets the content property value. The content property
Returns: Optional[deployable_content.DeployableContent]
"""
return self._content
@content.setter
def content(self,value: Optional[deployable_content.DeployableContent] = None) -> None:
"""
Sets the content property value. The content property
Args:
value: Value to set for the content property.
"""
self._content = value
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> ContentApproval:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parseNode: The parse node to use to read the discriminator value and create the object
Returns: ContentApproval
"""
if parse_node is None:
raise Exception("parse_node cannot be undefined")
return ContentApproval()
@property
def deployment_settings(self,) -> Optional[deployment_settings.DeploymentSettings]:
"""
Gets the deploymentSettings property value. Settings for governing how to deploy content.
Returns: Optional[deployment_settings.DeploymentSettings]
"""
return self._deployment_settings
@deployment_settings.setter
def deployment_settings(self,value: Optional[deployment_settings.DeploymentSettings] = None) -> None:
"""
Sets the deploymentSettings property value. Settings for governing how to deploy content.
Args:
value: Value to set for the deployment_settings property.
"""
self._deployment_settings = value
@property
def deployments(self,) -> Optional[List[deployment.Deployment]]:
"""
Gets the deployments property value. Deployments created as a result of applying the approval.
Returns: Optional[List[deployment.Deployment]]
"""
return self._deployments
@deployments.setter
def deployments(self,value: Optional[List[deployment.Deployment]] = None) -> None:
"""
Sets the deployments property value. Deployments created as a result of applying the approval.
Args:
value: Value to set for the deployments property.
"""
self._deployments = value
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
from . import compliance_change, deployable_content, deployment, deployment_settings
fields: Dict[str, Callable[[Any], None]] = {
"content": lambda n : setattr(self, 'content', n.get_object_value(deployable_content.DeployableContent)),
"deployments": lambda n : setattr(self, 'deployments', n.get_collection_of_object_values(deployment.Deployment)),
"deploymentSettings": lambda n : setattr(self, 'deployment_settings', n.get_object_value(deployment_settings.DeploymentSettings)),
}
super_fields = super().get_field_deserializers()
fields.update(super_fields)
return fields
def serialize(self,writer: SerializationWriter) -> None:
"""
Serializes information the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if writer is None:
raise Exception("writer cannot be undefined")
super().serialize(writer)
writer.write_object_value("content", self.content)
writer.write_collection_of_object_values("deployments", self.deployments)
writer.write_object_value("deploymentSettings", self.deployment_settings) | PypiClean |
/scf-0.2.8.tar.gz/scf-0.2.8/tcfcli/cmds/local/libs/apigw/api_service.py |
import os
import logging
from tcfcli.libs.apis.provider import ApiProvider
from tcfcli.cmds.local.libs.apigw.local_service import LocalService, Route
from tcfcli.common.user_exceptions import NoApiDefinition
logger = logging.getLogger(__name__)
class LocalApiService(object):
_DEFAULT_PORT = 3000
_DEFAULT_HOST = '127.0.0.1'
def __init__(self, invoke_context, port=None, host=None, static_dir=None):
self._invoke_context = invoke_context
self._stderr = invoke_context.stderr
self._port = port or self._DEFAULT_PORT
self._host = host or self._DEFAULT_HOST
self._static_dir = static_dir
self._local_runtime_manager = invoke_context.local_runtime_manager
self._api_provider = ApiProvider(invoke_context.template)
def start(self):
routes_list = self._get_routes()
if not routes_list:
raise NoApiDefinition('There is no api definition in template')
static_dir_path = self._get_static_dir_path()
svc = LocalService(routes_list=routes_list,
runtime_manager=self._local_runtime_manager,
static_dir=static_dir_path,
port=self._port,
host=self._host,
stderr=self._stderr)
self._show_routes(routes_list, port=self._port, host=self._host)
logger.info(
'Mounting finished. You can browse to the above endpoints to invoke functions. Function code can be modified online; scf only needs to be restarted when the template changes.')
svc.listen()
def _get_routes(self):
routes = []
for api in self._api_provider.get_all():
route = Route(method=[api.method], path=api.path, func_name=api.func_name)
routes.append(route)
return routes
def _show_routes(self, routes_list, port, host):
for route in routes_list:
if route.method[0] == 'ANY':
logger.info(
'Mounting {} at http://{}:{}{} {}'.format(route.func_name, host, port, route.path, route.method))
def _get_static_dir_path(self):
if not self._static_dir:
return None
cwd = self._invoke_context.get_cwd()
static_dir_path = os.path.join(cwd, self._static_dir)
if os.path.exists(static_dir_path):
return static_dir_path
return None | PypiClean |
/requests-toolbelt-1.0.0.tar.gz/requests-toolbelt-1.0.0/requests_toolbelt/adapters/ssl.py | import requests
from requests.adapters import HTTPAdapter
from .._compat import poolmanager
class SSLAdapter(HTTPAdapter):
"""
An HTTPS Adapter for Python Requests that allows the choice of the SSL/TLS
version negotiated by Requests. This can be used either to enforce the
choice of high-security TLS versions (where supported), or to work around
misbehaving servers that fail to correctly negotiate the default TLS
version being offered.
Example usage:
>>> import requests
>>> import ssl
>>> from requests_toolbelt import SSLAdapter
>>> s = requests.Session()
>>> s.mount('https://', SSLAdapter(ssl.PROTOCOL_TLSv1))
You can replace the chosen protocol with any that are available in the
default Python SSL module. All subsequent requests that match the adapter
prefix will use the chosen SSL version instead of the default.
This adapter will also attempt to change the SSL/TLS version negotiated by
Requests when using a proxy. However, this may not always be possible:
prior to Requests v2.4.0 the adapter did not have access to the proxy setup
code. In earlier versions of Requests, this adapter will not function
properly when used with proxies.
"""
__attrs__ = HTTPAdapter.__attrs__ + ['ssl_version']
def __init__(self, ssl_version=None, **kwargs):
self.ssl_version = ssl_version
super(SSLAdapter, self).__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
self.poolmanager = poolmanager.PoolManager(
num_pools=connections,
maxsize=maxsize,
block=block,
ssl_version=self.ssl_version)
if requests.__build__ >= 0x020400:
# Earlier versions of requests either don't have this method or, worse,
# don't allow passing arbitrary keyword arguments. As a result, only
# conditionally define this method.
def proxy_manager_for(self, *args, **kwargs):
kwargs['ssl_version'] = self.ssl_version
return super(SSLAdapter, self).proxy_manager_for(*args, **kwargs) | PypiClean |
/laion_clap-1.1.4-py3-none-any.whl/laion_clap/evaluate/eval_linear_probe.py | import logging
import os
import random
from datetime import datetime
import copy
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch.cuda.amp import GradScaler
import glob
try:
import wandb
except ImportError:
wandb = None
try:
import torch.utils.tensorboard as tensorboard
except ImportError:
tensorboard = None
try:
import horovod.torch as hvd
except ImportError:
hvd = None
from clap_module import create_model_and_transforms, trace_model, create_model
from training.data import get_data
from training.params import parse_args
from training.distributed import is_master, init_distributed_device, world_info_from_env
from training.logger import setup_logging
from training.scheduler import cosine_lr
from training.lp_main import config_lp_optimizer
from training.lp_train import train_one_epoch, evaluate
from clap_module.utils import get_tar_path_from_dataset_name, dataset_split
from clap_module.utils import load_p, load_class_label
from clap_module.linear_probe import LinearProbe
def maintain_ckpts(args, startidx, all_idx_len):
for i in reversed(range(startidx, all_idx_len)):
if os.path.exists(os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt")):
os.rename(
os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt"),
os.path.join(args.checkpoint_path, f"epoch_top_{i+1}.pt"),
)
if os.path.exists(
os.path.join(args.checkpoint_path, f"epoch_top_{all_idx_len}.pt")
):
os.remove(os.path.join(args.checkpoint_path, f"epoch_top_{all_idx_len}.pt"))
return
def update_top_k_performance(
new_metrics_inputs, current_top_k_ckpt_metrics, args, ckpt, bignumbetter=True, pretrain_epoch=0
):
"""
Record the top-k performance of the current epoch.
current_top_k_ckpt_metrics is a dictionary of the form: {0: top_1_ckpt_measure, 1: top_2_ckpt_measure, ...}
"""
if isinstance(new_metrics_inputs, (list, tuple)):
new_metrics_inputs = np.mean(new_metrics_inputs)
return update_top_k_performance(
new_metrics_inputs,
current_top_k_ckpt_metrics,
args=args,
ckpt=ckpt,
bignumbetter=bignumbetter,
pretrain_epoch=pretrain_epoch
)
elif isinstance(new_metrics_inputs, dict):
new_metrics_inputs = np.mean(list(new_metrics_inputs.values()))
return update_top_k_performance(
new_metrics_inputs,
current_top_k_ckpt_metrics,
args=args,
ckpt=ckpt,
bignumbetter=bignumbetter,
pretrain_epoch=pretrain_epoch
)
elif isinstance(new_metrics_inputs, (float, int)):
update_flag = {k: False for k in current_top_k_ckpt_metrics.keys()}
sorted_keys = sorted(current_top_k_ckpt_metrics.keys())
sorted_values = sorted(
current_top_k_ckpt_metrics.values(), reverse=bignumbetter
)
sorted_values_ = copy.deepcopy(sorted_values)
sorted_values.append(new_metrics_inputs)
sorted_values = sorted(sorted_values, reverse=bignumbetter)
sorted_values = sorted_values[:-1]
if sorted_values == sorted_values_:
return current_top_k_ckpt_metrics, new_metrics_inputs
else:
for i in range(len(sorted_keys)):
if current_top_k_ckpt_metrics[sorted_keys[i]] != sorted_values[i]:
current_top_k_ckpt_metrics[sorted_keys[i]] = sorted_values[i]
update_flag[sorted_keys[i]] = True
for i in range(len(update_flag)):
if update_flag[i]:
maintain_ckpts(args, i, len(sorted_keys))
torch.save(
ckpt,
os.path.join(args.checkpoint_path, f"pretrain_epoch_{pretrain_epoch}_lp_epoch_top_{i}.pt"),
)
break
return current_top_k_ckpt_metrics, new_metrics_inputs
# def updateifNone(a, b):
# a = b if None else a
# return a
def is_pretrained_params(n):
return (
n.startswith("clap_model.transformer")
or n in ["clap_model.positional_embedding", "clap_model.text_projection"]
or n.startswith("clap_model.token_embedding")
or n.startswith("clap_model.ln_final")
or n.startswith("clap_model.logit_scale_t")
)
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
random.seed(seed + rank)
def main():
args = parse_args()
# sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule?
args.amodel = args.amodel.replace("/", "-")
pretrained_ckpts = sorted(glob.glob(os.path.join(args.pretrained, "*.pt")), key=os.path.getmtime)
if args.name is None:
args.name = "-".join(
[
datetime.now().strftime("%Y_%m_%d-%H_%M_%S"),
f"linear_probe"
f"model_{args.amodel}",
f"lr_{args.lr}",
f"b_{args.batch_size}",
f"j_{args.workers}",
f"p_{args.precision}",
]
)
# discover initial world args early so we can log properly
args.distributed = False
args.local_rank, args.rank, args.world_size = world_info_from_env()
if args.remotedata and is_master(args):
for dataset_name in args.datasetnames:
for split in dataset_split[dataset_name]:
if not os.path.exists(f"./json_files/{dataset_name}/{split}"):
os.makedirs(f"./json_files/{dataset_name}/{split}")
os.system(
f"aws s3 cp s3://s-laion-audio/webdataset_tar/{dataset_name}/{split}/sizes.json ./json_files/{dataset_name}/{split}/sizes.json"
)
args.log_path = None
if is_master(args, local=args.log_local):
log_base_path = os.path.join(args.logs, args.name)
os.makedirs(log_base_path, exist_ok=True)
log_filename = f"out-{args.rank}" if args.log_local else "out.log"
args.log_path = os.path.join(log_base_path, log_filename)
# avoid log dir in same name:
postfix = 0
while os.path.exists(args.log_path):
postfix += 1
log_base_path_new = log_base_path+'-'+str(postfix)
os.makedirs(log_base_path_new, exist_ok=True)
log_filename = f"out-{args.rank}" if args.log_local else "out.log"
args.log_path = os.path.join(log_base_path_new, log_filename)
# print(
# "Error. Experiment already exists. Use --name {} to specify a new experiment."
# )
# return -1
# Set logger
args.log_level = logging.DEBUG if args.debug else logging.INFO
setup_logging(args.log_path, args.log_level)
# fully initialize distributed device environment
device = init_distributed_device(args)
args.wandb = "wandb" in args.report_to or "all" in args.report_to
args.tensorboard = "tensorboard" in args.report_to or "all" in args.report_to
if is_master(args):
args.tensorboard_path = (
os.path.join(args.logs, args.name, "tensorboard")
if args.tensorboard
else ""
)
args.checkpoint_path = os.path.join(args.logs, args.name, "checkpoints")
for dirname in [args.tensorboard_path, args.checkpoint_path]:
if dirname:
os.makedirs(dirname, exist_ok=True)
else:
args.tensorboard_path = ""
args.checkpoint_path = ""
if args.copy_codebase:
copy_codebase(args)
assert args.precision in ["amp", "fp16", "fp32"]
if args.precision == "fp16":
logging.warning(
"It is recommended to use AMP mixed-precision instead of FP16. "
"FP16 support needs further verification and tuning, especially for train."
)
if args.horovod:
logging.info(
f"Running in horovod mode with multiple processes / nodes. Device: {args.device}."
f"Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}."
)
elif args.distributed:
logging.info(
f"Running in distributed mode with multiple processes. Device: {args.device}."
f"Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}."
)
else:
logging.info(f"Running with a single process. Device {args.device}.")
logging.info(f'openai cache dir: {os.path.expanduser(args.openai_model_cache_dir)}')
# determine if this worker should save logs and checkpoints. only do so if it is rank == 0
args.save_logs = args.logs and args.logs.lower() != "none" and is_master(args)
writer = None
if args.save_logs and args.tensorboard:
assert tensorboard is not None, "Please install tensorboard."
writer = tensorboard.SummaryWriter(args.tensorboard_path)
if args.wandb and is_master(args):
assert wandb is not None, "Please install wandb."
logging.debug("Starting wandb.")
# you will have to configure this for your project!
wandb.init(
project="clap",
notes=args.wandb_notes,
name=args.wandb_notes,
tags=[],
config=vars(args),
)
logging.debug("Finished loading wandb.")
for idx, f in enumerate(pretrained_ckpts):
logging.info(f"pretrained on {f}")
args.pretrained = f
ckpt = torch.load(f, map_location='cpu')
pretrain_epoch = 0
if 'epoch' in ckpt:
pretrain_epoch = ckpt['epoch']
# train
best_metrics = lp_main(args, device, writer, pretrain_epoch, idx)
if args.wandb and is_master(args):
assert wandb is not None, "Please install wandb."
for name, val in best_metrics.items():
wandb.log({f"val/summary/{name}": val, "epoch": pretrain_epoch})
if args.wandb and is_master(args):
wandb.finish()
def update_metric(best_metric, new_metric):
for key in new_metric:
if key not in best_metric:
best_metric[key] = new_metric[key]
else:
best_metric[key] = max(best_metric[key], new_metric[key])
return best_metric
def lp_main(args, device, writer, pretrain_epoch, idx):
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
args.class_index_dict = load_class_label(args.class_label_path)
# Create CLAP model
clap_model, clap_model_cfg = create_model(
args.amodel,
args.tmodel,
args.pretrained,
precision=args.precision,
device=device,
jit=args.torchscript,
force_quick_gelu=args.force_quick_gelu,
openai_model_cache_dir=os.path.expanduser(args.openai_model_cache_dir),
skip_params=False,
enable_fusion=args.enable_fusion,
fusion_type=args.fusion_type
)
args.lp_out_ch = len(list(args.class_index_dict.keys()))
# Linear Probe
if idx == 0:
logging.info(f"linear probe using mlp: {args.lp_mlp}")
logging.info(f"linear probe using freeze: {args.lp_freeze}")
logging.info(f"linear probe act layer: {args.lp_act}")
logging.info(f"linear probe out ch: {args.lp_out_ch}")
logging.info(f"linear probe learning rate (if applicable): {args.lp_lr}")
logging.info(f"linear probe loss func: {args.lp_loss}")
logging.info(f"linear probe lp_metrics: {args.lp_metrics}")
model = LinearProbe(
clap_model,
mlp=args.lp_mlp, freeze=args.lp_freeze,
in_ch=512, out_ch=args.lp_out_ch,
act=args.lp_act
) # in_ch is fixed (i.e., 512)
model = model.to(device)
if args.horovod:
with torch.no_grad():
for param in model.parameters():
param.set_(param.contiguous())
if args.trace:
model = trace_model(model, batch_size=args.batch_size, device=device)
if is_master(args) and idx == 0:
logging.info("Linear Probe CLAP Model:")
logging.info(f"{str(clap_model)}")
logging.info("Params:")
params_file = os.path.join(args.logs, args.name, "params.txt")
with open(params_file, "w") as f:
for name in sorted(vars(args)):
val = getattr(args, name)
logging.info(f" {name}: {val}")
f.write(f"{name}: {val}\n")
if args.distributed and not args.horovod:
if args.use_bn_sync:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
ddp_args = {}
if args.ddp_static_graph:
# this doesn't exist in older PyTorch, arg only added if enabled
ddp_args["static_graph"] = True
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[device], find_unused_parameters=True, **ddp_args
)
data = get_data(args, clap_model_cfg)
assert len(data), "At least one train or eval dataset must be specified."
if args.trace:
assert "train" not in data, "Cannot train with traced model"
optimizer, scheduler, text_freeze_parameters = config_lp_optimizer(model, data, args)
scaler = GradScaler() if args.precision == "amp" else None
# optionally resume from a checkpoint
start_epoch = 0
if args.resume is not None:
if os.path.isfile(args.resume):
checkpoint = torch.load(args.resume, map_location=device)
if "epoch" in checkpoint:
# resuming a train checkpoint w/ epoch and optimizer state
start_epoch = checkpoint["epoch"]
sd = checkpoint["state_dict"]
if not args.distributed and next(iter(sd.items()))[0].startswith(
"module"
):
sd = {k[len("module.") :]: v for k, v in sd.items()}
model.load_state_dict(sd)
if args.split_opt:
if optimizer is not None:
for k, o_ in optimizer.items():
o_.load_state_dict(checkpoint[k + "_" + "optimizer"])
if optimizer is not None:
optimizer.load_state_dict(checkpoint["optimizer"])
if scaler is not None and "scaler" in checkpoint:
scaler.load_state_dict(checkpoint["scaler"])
logging.info(
f"=> resuming checkpoint '{args.resume}' (epoch {start_epoch})"
)
else:
# loading a bare (model only) checkpoint for fine-tune or evaluation
model.load_state_dict(checkpoint)
logging.info(
f"=> loaded checkpoint '{args.resume}' (epoch {start_epoch})"
)
if args.freeze_text:
print("Freeze Text!!!!")
for k in text_freeze_parameters:
k.requires_grad = False
else:
logging.info("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
cudnn.deterministic = False
if args.wandb and is_master(args):
args.train_sz = data["train"].dataloader.num_samples
if args.val_data is not None:
args.val_sz = data["val"].dataloader.num_samples
if args.debug:
wandb.watch(model, log="all")
if idx == 0:
wandb.save(params_file)
best_metrics = {}
if "train" not in data:
metric = evaluate(model, data, start_epoch, args, writer, extra_suffix="_pe@" + str(pretrain_epoch))
if is_master(args):
best_metrics = update_metric(best_metrics, metric)
return
elif start_epoch == 0 and "val" in data and not args.no_eval:
metric = evaluate(model, data, 0, args, writer, extra_suffix="_pe@" + str(pretrain_epoch))
if is_master(args):
best_metrics = update_metric(best_metrics, metric)
if args.save_top_performance:
current_top_k_ckpt_metrics = {
i: 0 for i in range(args.save_top_performance)
} # initialize the top-k metric for ckpts to 0
for epoch in range(start_epoch, args.epochs):
# freeze the text params at epoch args.freeze_text_after; this is -1 (disabled) by default
if epoch == args.freeze_text_after:
print("Text pretrained parameters are freezed since this epoch.")
for k in text_freeze_parameters:
k.requires_grad = False
if is_master(args):
logging.info(f"Start epoch {epoch}")
train_one_epoch(model, data, epoch, optimizer, scaler, scheduler, args, writer, extra_suffix="_pe@" + str(pretrain_epoch))
completed_epoch = epoch + 1
if any(v in data for v in ("val", "imagenet-val", "imagenet-v2")) and not args.no_eval:
metric = evaluate(model, data, completed_epoch, args, writer, extra_suffix="_pe@" + str(pretrain_epoch))
if is_master(args):
best_metrics = update_metric(best_metrics, metric)
if args.save_top_performance:
top_k_dataset = args.top_k_checkpoint_select_dataset
top_k_metric = args.top_k_checkpoint_select_metric
filtered_metrics = [
v
for k, v in metric.items()
if top_k_metric in k and top_k_dataset in k
] # check all R@10 metrics (all dataset) and use it to update the ckpt
# Saving checkpoints.
if args.save_logs:
opt_dict = {
k + "_" + "optimizer": v.state_dict() for k, v in optimizer.items()
}
checkpoint_dict = {
"epoch": completed_epoch,
"pretrain_epoch": pretrain_epoch,
"name": args.name,
"state_dict": model.state_dict(),
}
checkpoint_dict.update(opt_dict)
if scaler is not None:
checkpoint_dict["scaler"] = scaler.state_dict()
if completed_epoch == args.epochs or (
args.save_frequency > 0 and (completed_epoch % args.save_frequency) == 0
):
torch.save(
checkpoint_dict,
os.path.join(args.checkpoint_path, f"pretrain_epoch_{pretrain_epoch}_lp_epoch_{completed_epoch}.pt"),
)
if args.save_most_recent:
torch.save(
checkpoint_dict,
os.path.join(args.checkpoint_path, f"pretrain_epoch_{pretrain_epoch}_lp_epoch_latest.pt"),
)
if args.save_top_performance and not args.no_eval:
update_top_k_performance(
filtered_metrics,
current_top_k_ckpt_metrics,
args,
checkpoint_dict,
bignumbetter=True,
pretrain_epoch=pretrain_epoch
)
del clap_model
return best_metrics
def copy_codebase(args):
from shutil import copytree, ignore_patterns
new_code_path = os.path.join(args.logs, args.name, "code")
if os.path.exists(new_code_path):
print(
f"Error. Experiment already exists at {new_code_path}. Use --name to specify a new experiment."
)
return -1
print(f"Copying codebase to {new_code_path}")
current_code_path = os.path.realpath(__file__)
for _ in range(3):
current_code_path = os.path.dirname(current_code_path)
copytree(
current_code_path, new_code_path, ignore=ignore_patterns("log", "logs", "wandb")
)
print("Done copying code.")
return 1
if __name__ == "__main__":
main() | PypiClean |
/pyglet_desper-0.9.0-py3-none-any.whl/pyglet_desper/model.py | import json
import os.path as pt
from typing import Union, Optional, Callable
import desper
import pyglet
from pyglet.graphics import Group
from pyglet.image import Animation, AnimationFrame, AbstractImage
from pyglet.media.codecs import MediaDecoder
from pyglet.image.codecs import ImageDecoder
from pyglet.image.atlas import TextureBin
from pyglet_desper.logic import CameraProcessor, Camera
default_texture_bin = pyglet.image.atlas.TextureBin()
"""Default texture atlas for :class:`ImageFileHandle`.
All images loaded with said handle class will by default be added
to an atlas in this bin, which will result in optimized batching
and hence rendering.
Before loading any images, it is possible to modify the bin's
:attr:`TextureBin.texture_width` and :attr:`TextureBin.texture_height`
in order to alter the size of generated atlases (defaults to 2048x2048).
Replacing the bin entirely with a new instance will have no effect.
Specify a bin manually as parameter for :class:`ImageFileHandle`
in that case.
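For example, an illustrative tweak (run before any image is loaded)::
    default_texture_bin.texture_width = 4096
    default_texture_bin.texture_height = 4096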
"""
_image_cache: dict[str, pyglet.image.AbstractImage] = {}
"""Cache for internal use.
Map absolute filenames to pyglet images. Mainly populated by
:class:`ImageFileHandle` to prevent reloading the same image multiple
times.
"""
GRAPHIC_BASE_CLASSES = (pyglet.sprite.Sprite,
pyglet.text.layout.TextLayout)
# pyglet.shapes.ShapeBase is currently excluded as it does not support
# batch and group
# Default populator
MEDIA_DIRECTORY = 'media'
MEDIA_STREAMING_DIRECTORY = pt.join('media', 'streaming')
FONT_DIRECTORY = 'font'
IMAGE_DIRECTORY = 'image'
WORLD_DIRECTORY = 'world'
def clear_image_cache():
"""Clear module level image cache.
Texture bins/atlases (e.g. :attr:`default_texture_bin`) will not
get cleared. Based on the user's implementation, manual
intervention might be necessary.
"""
_image_cache.clear()
class MediaFileHandle(desper.Handle[pyglet.media.Source]):
"""Specialized handle for pyglet's :class:`pyglet.media.Source`.
Given a filename (path string), the :meth:`load` implementation
tries to load given file as a :class:`pyglet.media.Source`
object, i.e. an audio or video resource.
Optionally, the source can be set to be streamed from disk
through the ``streaming`` parameter (defaults to: not streamed).
A decoder can be specified. Available
decoders can be inspected through
:func:`pyglet.media.codecs.get_decoders`.
If not specified, the first available codec that supports the given
file format will be used.
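Example (an illustrative sketch; ``media/click.wav`` is a hypothetical file)::
    handle = MediaFileHandle('media/click.wav')
    source = handle.load()  # a non-streaming pyglet.media.Source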
"""
def __init__(self, filename: str, streaming=False,
decoder: MediaDecoder = None):
self.filename = filename
self.streaming = streaming
self.decoder = decoder
def load(self) -> pyglet.media.Source:
"""Load file with given parameters."""
return pyglet.media.load(self.filename, streaming=self.streaming,
decoder=self.decoder)
class ImageFileHandle(desper.Handle[pyglet.image.AbstractImage]):
"""Specialized handle for :class:`pyglet.image.AbstractImage`.
Given a filename (path string), the :meth:`load` implementation
tries to load given file as a :class:`pyglet.image.AbstractImage`
object.
By default images are cached and loaded into atlases
(:class:`pyglet.image.atlas.TextureAtlas`). This behaviour can be
altered through ``atlas``, ``border`` and ``texture_bin``
parameters.
Note that such atlas related parameters are ignored if the image
is found in the local cache, as the cached value will be
directly returned independently from the given parameters.
A decoder can be specified. Available
decoders can be inspected through
:func:`pyglet.image.codecs.get_decoders`.
If not specified, the first available codec that supports the given
file format will be used.
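Example (an illustrative sketch; ``image/hero.png`` is a hypothetical file)::
    texture = ImageFileHandle('image/hero.png').load()  # cached and packed into the default atlas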
"""
def __init__(self, filename: str,
atlas=True, border: int = 1,
texture_bin: TextureBin = default_texture_bin,
decoder: ImageDecoder = None):
self.filename = filename
self.atlas = atlas
self.border = border
self.texture_bin = texture_bin
self.decoder = decoder
def load(self) -> pyglet.image.AbstractImage:
"""Load file with given parameters."""
abs_filename = pt.abspath(self.filename)
if abs_filename in _image_cache:
return _image_cache[abs_filename]
image = pyglet.image.load(abs_filename, decoder=self.decoder)
if (self.atlas
and image.width + self.border <= self.texture_bin.texture_width
and image.height + self.border
<= self.texture_bin.texture_height):
image = self.texture_bin.add(image)
_image_cache[abs_filename] = image
return image
def parse_spritesheet(sheet: pyglet.image.AbstractImage,
metadata: dict) -> Union[AbstractImage, Animation]:
"""Setup image or animation from a source image and a dictionary.
The dictionary must be in the following format:::
{
"frames": [
{
"frame": {"x": ..., "y": ..., "w": ..., "h": ...},
"duration": ...
},
...
],
"meta": {
"origin": {"x": ..., "y": ...}
}
}
Durations are in milliseconds.
All fields are optional. In particular, here is how the decoder
reacts to missing values:
- If ``frames`` list is present and contains more than one frame,
a :class:`pyglet.image.Animation` is built. Otherwise, a single
:class:`pyglet.image.AbstractImage` is returned. If the
``frames`` list is missing or empty, the same input ``sheet`` is
returned (eventually its origin will be changed).
- ``origin`` is used to set the origin (i.e. ``anchor_x``
and ``anchor_y``) of all animation frames.
The user is then encouraged to have an animation where all
frames have the same size (or deal with the consequences).
if and only if the ``frames`` list is missing or empty, the
origin is set directly to the input image ``sheet``, which is
then returned.
- ``x`` and ``y`` coordinates (for frames, or for origin) are
assumed to be ``0`` if unspecified.
- ``w`` and ``h`` coordinates are assumed to be respectively equal
to :attr:`sheet.width` and :attr:`sheet.height` if unspecified.
- ``duration`` values are set to one second (``1000`` ms) if
unspecified.
Be aware that according to ``pyglet``'s coordinate system, the
origin of an image is considered to be the bottom-left corner (
as opposed to common top-left based systems).
The format is compatible with `Aseprite <https://aseprite.com/>`_'s
export spritesheet option (in the output tab, json data must be
enabled and set to ``array`` type, not ``hash``). Keep in mind that
Aseprite uses a top-left origin in its format. The format is
enriched with various other properties which are ignored by this
function.
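A minimal illustrative sketch (the frame sizes and durations are hypothetical)::
    metadata = {
        "frames": [
            {"frame": {"x": 0, "y": 0, "w": 32, "h": 32}, "duration": 100},
            {"frame": {"x": 32, "y": 0, "w": 32, "h": 32}, "duration": 100},
        ],
        "meta": {"origin": {"x": 16, "y": 0}},
    }
    animation = parse_spritesheet(sheet, metadata)  # two frames -> Animation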
"""
# Extract origin
meta = metadata.get('meta', {})
origin = meta.get('origin', {})
origin_x = origin.get('x', 0)
origin_y = origin.get('y', 0)
frames: list[dict] = metadata.get('frames', [])
# Empty list of frames, fallback to the input sheet
if not frames:
sheet.anchor_x = origin_x
sheet.anchor_y = origin_y
return sheet
# Otherwise, start building frames
regions = []
durations = []
for frame in frames:
region = frame.get('frame', {})
region_x = region.get('x', 0)
region_y = region.get('y', 0)
region_w = region.get('w', sheet.width)
region_h = region.get('h', sheet.height)
image_region = sheet.get_region(region_x, region_y, region_w,
region_h)
image_region.anchor_x = origin_x
image_region.anchor_y = origin_y
regions.append(image_region)
durations.append(frame.get('duration', 1000))
# If single frame, return the region itself
if len(regions) == 1:
return regions[0]
# Finally, assemble frames and return animation
return Animation([AnimationFrame(region, duration)
for region, duration in zip(regions, durations)])
def load_spritesheet(filename: str) -> Union[AbstractImage, Animation]:
"""Load an animation or image from a metadata file.
The file must be a json in the following format:::
{
"frames": [
{
"frame": {"x": ..., "y": ..., "w": ..., "h": ...},
"duration": ...
},
...
],
"meta": {
"origin": {"x": ..., "y": ...},
"image": "path_to_spritesheet.png"
}
}
The only mandatory field is ``image`` (hence ``meta``, as it
contains it), which shall contain the path to the actual referenced
image file of the spritesheet.
To further inspect the meaning of other fields, see
:func:`parse_spritesheet`, which is internally used.
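A minimal sketch (the metadata file path is hypothetical)::
    animation = load_spritesheet('image/walk.json')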
"""
with open(filename) as file:
metadata = json.load(file)
meta = metadata.get('meta', {})
image_filename = pt.join(pt.dirname(filename), meta['image'])
return parse_spritesheet(ImageFileHandle(image_filename).load(), metadata)
class RichImageFileHandle(desper.Handle[Union[Animation, AbstractImage]]):
"""Specialized handle for image and animation formats.
Given a filename (path string), the :meth:`load` implementation
tries to load given file, in order, as one of the following:
- As a spritesheet animation/image (see
:func:`load_spritesheet` and :func:`parse_spritesheet`)
- As a :class:`pyglet.image.Animation` (for the supported formats
see :class:`pyglet.image.codecs.get_animation_decoders`)
- As a :class:`pyglet.image.AbstractImage` (same behaviour of
:class:`ImageFileHandle`).
"""
def __init__(self, filename: str):
self.filename = filename
def load(self) -> Union[Animation, AbstractImage]:
"""Load designated file.
Try loading it as a spritesheet (see
:func:`load_spritesheet` and :func:`parse_spritesheet`).
If not a spritesheet, try loading it as a
:class:`pyglet.image.Animation`. If not an animation,
load it as a standard image (same behaviour as
:class:`ImageFileHandle`)
"""
# Try decoding it as json metadata
try:
return load_spritesheet(self.filename)
except (json.JSONDecodeError, UnicodeDecodeError):
pass
# Try decoding it as animation
try:
return pyglet.image.load_animation(self.filename)
# Since pyglet decoders do not reliably raise DecodeExceptions,
# a generic catch is necessary. DecodeException is left as
# reminder.
except (Exception, pyglet.util.DecodeException):
pass
# Otherwise, it is likely an image
return ImageFileHandle(self.filename).load()
class FontFileHandle(desper.Handle[None]):
"""Specialized handle for font loading.
This is a thin wrapper over font files. No resource is
actually returned by calling the handle, the font data is simply
loaded into memory and can then be used with pyglet text
classes by specifying its family name (see pyglet's docs on
`Loading custom fonts <https://bit.ly/3gPjJnD>`_).
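Example (an illustrative sketch; the font file and family name are hypothetical)::
    FontFileHandle('font/alcubierre.otf').load()
    label = pyglet.text.Label('hello', font_name='Alcubierre')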
"""
def __init__(self, filename: str):
self.filename = filename
def load(self) -> None:
"""Add file as font."""
pyglet.font.add_file(self.filename)
def default_processors_transformer(world_handle: desper.WorldHandle,
world: desper.World):
"""World transformer, use with :class:`WorldHandle`.
Populate ``world`` with default pyglet based processors, i.e.:
- :class:`pyglet-desper.CameraProcessor`
Note that despite the similarity, this does not substitute
desper's :func:`desper.default_processors_transformer`, as it
simply adds a different set of processors. In a typical scenario,
both desper's original transformer and this one shall be used.
"""
world.add_processor(CameraProcessor())
def retrieve_batch(world: Optional[desper.World] = None
) -> pyglet.graphics.Batch:
"""Retrieve a batch from the given world.
A batch will be found by either:
- directly querying for a batch via
``world.get(pyglet.graphics.Batch)``
- querying for an existing camera via
``world.get(pyglet_desper.Camera)``
If no batch or camera can be found, a new
:class:`pyglet.graphics.Batch` is constructed and added to the
``world`` (creating an entity with just the batch as component),
and then returned.
No camera is created by default. This batch can be queried later by
the user and included in a camera if desired.
If omitted, ``world`` will default to
:attr:`desper.default_loop.current_world`.
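A minimal sketch (assumes a ``desper.World`` instance named ``world``)::
    batch = retrieve_batch(world)  # creates and stores a new Batch if none exists
    assert retrieve_batch(world) is batch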
"""
world = world or desper.default_loop.current_world
assert world is not None, ('Could not find current world, '
'desper.default_loop is uninitialized or is '
'not being used. Pass a proper World instance '
'as parameter.')
batch_query = world.get(pyglet.graphics.Batch)
if batch_query:
return batch_query[0][1]
camera_query = world.get(Camera)
if camera_query:
return camera_query[0][1].batch
# Otherwise, create a new batch
batch = pyglet.graphics.Batch()
world.create_entity(batch)
return batch
class WantsGroupBatch:
"""Simple component used to mark graphical components.
Does nothing on its own, but is used by
:func:`init_graphics_transformer` to identify which components to
act on (see its docstring for more info). A Graphical component will
be populated with a :class:`pyglet.graphics.Batch` and a
:class:`pyglet.graphics.Group`, based on the given ``order``.
Designed mostly to be used by export/import scripts for
:class:`desper.World` instances.
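A minimal sketch (hypothetical sprite entity; assumes ``world.create_entity``
accepts multiple components). :func:`init_graphics_transformer` later assigns
a group and batch to the sprite and removes this component::
    world.create_entity(pyglet.sprite.Sprite(image), WantsGroupBatch(order=1))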
"""
def __init__(self, order: int = 0,
group_factory: Callable[[...], Group] = Group):
self.order = order
self.group_factory = group_factory
def build_group(self) -> Group:
return self.group_factory(self.order)
def init_graphics_transformer(world_handle: desper.WorldHandle,
world: desper.World):
"""World transformer, use with :class:`WorldHandle`.
Designed to be placed after a
:class:`desper.WorldFromFileTransformer`, in order to correctly
finalize graphical components in a world created from file.
In particular, all entities that have both a graphical component
(from pyglet) and a :class:`WantsGroupBatch` will retrieve a
:class:`pyglet.graphics.Batch` using
:func:`retrieve_batch`. This batch can be queried later by
the user and included in a camera if desired. This also means that
all found graphical components will be assigned to the same batch
(usually the best option anyway).
The :class:`WantsGroupBatch` class is also used to build a
:class:`pyglet.graphics.Group`. Note that this approach is mainly
there in order to correctly initialize graphics in worlds loaded
from files. Standard approach would be creating pyglet components
directly, assigning the desired group and batch (eventually
retrieving it with :func:`retrieve_batch`).
Graphical components are discovered by querying the following
class hierarchies:
- :class:`pyglet.text.layout.TextLayout`, base class for all text
related classes
- :class:`pyglet.sprite.Sprite`, base class for all sprites
Shapes are not evaluated (shall be manually managed by the user)
since pyglet shapes do not currently support properties
``group`` and ``batch``.
"""
for graphics_type in GRAPHIC_BASE_CLASSES:
for entity, graphics in world.get(graphics_type):
# If WantsGroupBatch is found, it means that the
# associated pyglet component needs a batch and a group
wants = world.get_component(entity, WantsGroupBatch)
if wants is not None:
graphics.group = wants.build_group()
graphics.batch = retrieve_batch(world)
# Cleanup unneeded components
for entity, _ in world.get(WantsGroupBatch):
world.remove_component(entity, WantsGroupBatch)
def world_from_file_handle(filename: str) -> desper.WorldFromFileHandle:
"""Construct a world handle for pyglet based worlds.
All the transformers present in :class:`desper.WorldFromFileHandle`
are kept, but pyglet specific ones are added. In particular:
- :func:`default_processors_transformer`, for pyglet specific
:class:`desper.Processor`s
- :func:`init_graphics_transformer`, for the correct initialization
of pyglet components instantiated from file
"""
handle = desper.WorldFromFileHandle(filename)
handle.transform_functions.appendleft(default_processors_transformer)
handle.transform_functions.append(init_graphics_transformer)
return handle
resource_populator = desper.DirectoryResourcePopulator()
"""Default directory resource populator.
Enables populating a :class:`desper.ResourceMap` from a directory
tree having the following structure:::
resources
├── media
│ └── streaming
├── font
├── image
└── world
The used :class:`Handle` factories are:
- :class:`MediaFileHandle` for media resources
- :class:`FontFileHandle` for font resources
- :class:`RichImageFileHandle` for image and animation resources
- :class:`world_from_file_handle` for world resources
"""
resource_populator.add_rule(MEDIA_DIRECTORY, MediaFileHandle)
resource_populator.add_rule(MEDIA_STREAMING_DIRECTORY, MediaFileHandle,
streaming=True)
resource_populator.add_rule(FONT_DIRECTORY, FontFileHandle)
resource_populator.add_rule(IMAGE_DIRECTORY, RichImageFileHandle)
resource_populator.add_rule(WORLD_DIRECTORY, world_from_file_handle) | PypiClean |
/moser_pylint-2.7.2-py3-none-any.whl/pylint/reporters/ureports/text_writer.py |
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Text formatting drivers for ureports"""
from pylint.reporters.ureports import BaseWriter
TITLE_UNDERLINES = ["", "=", "-", "`", ".", "~", "^"]
BULLETS = ["*", "-"]
class TextWriter(BaseWriter):
"""format layouts as text
(ReStructured inspiration but not totally handled yet)
"""
def begin_format(self):
super().begin_format()
self.list_level = 0
def visit_section(self, layout):
"""display a section as text
"""
self.section += 1
self.writeln()
self.format_children(layout)
self.section -= 1
self.writeln()
def visit_evaluationsection(self, layout):
"""Display an evaluation section as a text."""
self.section += 1
self.format_children(layout)
self.section -= 1
self.writeln()
def visit_title(self, layout):
title = "".join(list(self.compute_content(layout)))
self.writeln(title)
try:
self.writeln(TITLE_UNDERLINES[self.section] * len(title))
except IndexError:
print("FIXME TITLE TOO DEEP. TURNING TITLE INTO TEXT")
def visit_paragraph(self, layout):
"""enter a paragraph"""
self.format_children(layout)
self.writeln()
def visit_table(self, layout):
"""display a table as text"""
table_content = self.get_table_content(layout)
# get columns width
cols_width = [0] * len(table_content[0])
for row in table_content:
for index, col in enumerate(row):
cols_width[index] = max(cols_width[index], len(col))
self.default_table(layout, table_content, cols_width)
self.writeln()
def default_table(self, layout, table_content, cols_width):
"""format a table"""
cols_width = [size + 1 for size in cols_width]
format_strings = " ".join(["%%-%ss"] * len(cols_width))
format_strings = format_strings % tuple(cols_width)
format_strings = format_strings.split(" ")
table_linesep = "\n+" + "+".join(["-" * w for w in cols_width]) + "+\n"
headsep = "\n+" + "+".join(["=" * w for w in cols_width]) + "+\n"
self.write(table_linesep)
for index, line in enumerate(table_content):
self.write("|")
for line_index, at_index in enumerate(line):
self.write(format_strings[line_index] % at_index)
self.write("|")
if index == 0 and layout.rheaders:
self.write(headsep)
else:
self.write(table_linesep)
def visit_verbatimtext(self, layout):
"""display a verbatim layout as text (so difficult ;)
"""
self.writeln("::\n")
for line in layout.data.splitlines():
self.writeln(" " + line)
self.writeln()
def visit_text(self, layout):
"""add some text"""
self.write("%s" % layout.data) | PypiClean |
/mis_modulos-0.1.tar.gz/mis_modulos-0.1/virtualenv/seed/wheels/util.py | from operator import attrgetter
from zipfile import ZipFile
class Wheel:
def __init__(self, path):
# https://www.python.org/dev/peps/pep-0427/#file-name-convention
# The wheel filename is {distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl
self.path = path
self._parts = path.stem.split("-")
@classmethod
def from_path(cls, path):
if path is not None and path.suffix == ".whl" and len(path.stem.split("-")) >= 5:
return cls(path)
return None
@property
def distribution(self):
return self._parts[0]
@property
def version(self):
return self._parts[1]
@property
def version_tuple(self):
return self.as_version_tuple(self.version)
@staticmethod
def as_version_tuple(version):
result = []
for part in version.split(".")[0:3]:
try:
result.append(int(part))
except ValueError:
break
if not result:
raise ValueError(version)
return tuple(result)
@property
def name(self):
return self.path.name
def support_py(self, py_version):
name = f"{'-'.join(self.path.stem.split('-')[0:2])}.dist-info/METADATA"
with ZipFile(str(self.path), "r") as zip_file:
metadata = zip_file.read(name).decode("utf-8")
marker = "Requires-Python:"
requires = next((i[len(marker) :] for i in metadata.splitlines() if i.startswith(marker)), None)
if requires is None:  # if it does not specify a python requirement, assume it is compatible
return True
py_version_int = tuple(int(i) for i in py_version.split("."))
for require in (i.strip() for i in requires.split(",")):
# https://www.python.org/dev/peps/pep-0345/#version-specifiers
for operator, check in [
("!=", lambda v: py_version_int != v),
("==", lambda v: py_version_int == v),
("<=", lambda v: py_version_int <= v),
(">=", lambda v: py_version_int >= v),
("<", lambda v: py_version_int < v),
(">", lambda v: py_version_int > v),
]:
if require.startswith(operator):
ver_str = require[len(operator) :].strip()
version = tuple((int(i) if i != "*" else None) for i in ver_str.split("."))[0:2]
if not check(version):
return False
break
return True
def __repr__(self):
return f"{self.__class__.__name__}({self.path})"
def __str__(self):
return str(self.path)
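# Illustrative sketch (hypothetical wheel filename following PEP 427; ``Path``
# comes from ``pathlib``):
#     wheel = Wheel.from_path(Path("pip-21.0-py3-none-any.whl"))
#     wheel.distribution   # "pip"
#     wheel.version_tuple  # (21, 0)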
def discover_wheels(from_folder, distribution, version, for_py_version):
wheels = []
for filename in from_folder.iterdir():
wheel = Wheel.from_path(filename)
if wheel and wheel.distribution == distribution:
if version is None or wheel.version == version:
if wheel.support_py(for_py_version):
wheels.append(wheel)
return sorted(wheels, key=attrgetter("version_tuple", "distribution"), reverse=True)
class Version:
#: the version bundled with virtualenv
bundle = "bundle"
embed = "embed"
#: custom version handlers
non_version = (bundle, embed)
@staticmethod
def of_version(value):
return None if value in Version.non_version else value
@staticmethod
def as_pip_req(distribution, version):
return f"{distribution}{Version.as_version_spec(version)}"
@staticmethod
def as_version_spec(version):
of_version = Version.of_version(version)
return "" if of_version is None else f"=={of_version}"
__all__ = [
"discover_wheels",
"Version",
"Wheel",
] | PypiClean |
/livecellx-0.0.2.tar.gz/livecellx-0.0.2/notebooks/traj_feature.ipynb | ```
from pathlib import Path
import cv2
import sys
import numpy as np
from skimage import feature, measure
import livecell_tracker
from livecell_tracker.core import datasets
from livecell_tracker.core.datasets import LiveCellImageDataset
import livecell_tracker.segment
from livecell_tracker import core
import livecell_tracker.core.utils
from tqdm import tqdm
import json
from livecell_tracker.core import (
SingleCellTrajectory,
SingleCellStatic,
SingleCellTrajectoryCollection,
)
import livecell_tracker.trajectory.contour.contour_class
import matplotlib.pyplot as plt
from livecell_tracker.trajectory import feature_extractors
traj_collection_json_path = "../datasets/test_data/traj_analysis/track_singleCellTrajectoryCollection.json"
traj_collection_json = json.load(open(traj_collection_json_path, "r"))
trajectory_collection = SingleCellTrajectoryCollection().load_from_json_dict(traj_collection_json)
traj = trajectory_collection.get_trajectory(1)
sc_obj = traj.get_single_cell(0)
trajectory_collection.get_trajectory(1).raw_img_dataset, sc_obj.img_dataset
contour_mask = sc_obj.get_contour_mask()
plt.imshow(contour_mask)
plt.axis("off")
plt.imshow(sc_obj.get_contour_img())
feature_extractors.compute_haralick_features(sc_obj)
sc_obj.add_feature("haralick", feature_extractors.compute_haralick_features(sc_obj))
sc_obj.get_contour_mask().shape, sc_obj.get_img().shape
skimage_features = feature_extractors.compute_skimage_regionprops(sc_obj)
sc_obj.add_feature("skimage", skimage_features)
import pandas as pd
from traitlets import Callable
traj.compute_features("haralick", feature_extractors.compute_haralick_features)
traj.compute_features("skimage", feature_extractors.compute_skimage_regionprops)
df = traj.get_sc_feature_table()
```
| PypiClean |
/CCC-2.0.1.tar.gz/CCC-2.0.1/ccc/click_to_call/views.py | import json
from collections import OrderedDict, deque
from math import ceil
import xlwt
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.http.response import HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import TemplateView, View
from phonenumber_field.phonenumber import to_python
from rest_framework import generics
from twilio.jwt.client import ClientCapabilityToken
from twilio.twiml.voice_response import VoiceResponse
from ccc.campaigns.models import Campaign
from ccc.click_to_call.cloud_tasks import Dialer, DialListRow, save_dialer_list
from ccc.click_to_call.models import AssociateMasterList, AutoDialerList
from ccc.click_to_call.serializers import (AutoDialerListSerializer,
AutoDialerMasterListSerializer)
from ccc.contacts.models import ContactGroup
from ccc.packages.models import TwilioNumber
@login_required
def get_token(request):
"""Returns a Twilio Client token"""
# Create a TwilioCapability token with our Twilio API credentials
ACCOUNT_SID = settings.TWILIO_SID
AUTH_TOKEN = settings.TWILIO_TOKEN
capability = ClientCapabilityToken(
ACCOUNT_SID,
AUTH_TOKEN)
TWILIO_CLIENT_OUTGOING = settings.TWILIO_CLIENT_OUTGOING
# Allow our users to make outgoing calls with Twilio Client
capability.allow_client_outgoing(TWILIO_CLIENT_OUTGOING)
# If the user is on the support dashboard page, we allow them to accept
# incoming calls to "support_agent"
# (in a real app we would also require the user to be authenticated)
if request.GET.get('forPage') == "asdsad":
capability.allow_client_incoming('support_agent')
else:
# Otherwise we give them a name of "customer"
capability.allow_client_incoming('customer')
# Generate the capability token
token = capability.to_jwt()
return HttpResponse(json.dumps({'token': token.decode('utf-8')}), content_type="application/json")
# @login_required
@csrf_exempt
def call(request):
"""Returns TwiML instructions to Twilio's POST requests"""
# If the browser sent a phoneNumber param, we know this request
# is a support agent trying to call a customer's phone
dest_number = ''
if 'PhoneNumber' in request.GET:
dest_number = request.GET["PhoneNumber"]
else:
        # This will raise an error on Twilio itself
pass
resp = VoiceResponse()
from_no = request.GET["from_no"] if request.GET.get(
"from_no") else "+441242305348"
phones = TwilioNumber.objects.filter(twilio_number=from_no)
if phones.exists():
user = phones[0].user
if user.balance.get('talktime', 0) <= 0:
return HttpResponse("Error")
max_duration = user.balance.get('talktime', 0) * 60
callback_url = reverse("dial_callback",
kwargs={"twilio_number": from_no})
with resp.dial(dest_number, caller_id=from_no, action=callback_url,
method="GET", time_limit=max_duration) as r:
pass
return HttpResponse(str(resp))
class ContactDialView(TemplateView):
template_name = "ccc/click_to_call/call_contacts.html"
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
campaigns = Campaign.objects.filter(user=request.user, active=True)
groups = ContactGroup.objects.filter(user=request.user)
phones = TwilioNumber.objects.filter(user=request.user).exclude(
twilio_number__isnull=True,
twilio_number='')
context["phones"] = phones
context["campaigns"] = campaigns
context["groups"] = groups
return self.render_to_response(context)
class DialPageView(TemplateView):
template_name = "ccc/click_to_call/click_to_call.html"
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
context["master_list_id"] = self.kwargs.get("master_list_id")
phones = TwilioNumber.objects.filter(user=request.user)
context["phones"] = phones
return self.render_to_response(context)
class OptionPageView(TemplateView):
template_name = "ccc/click_to_call/select_options.html"
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
class UploadedListView(TemplateView):
template_name = "ccc/click_to_call/master_list.html"
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
class UploadListView(TemplateView):
"""
"""
template_name = "ccc/click_to_call/upload_excel.html"
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
class UploadListOutputView(TemplateView):
template_name = "ccc/click_to_call/upload_excel_output.html"
def get_context_data(self, **kwargs):
master_list = get_object_or_404(AssociateMasterList, id=kwargs['master_list_id'],
user=self.request.user)
kwargs['master_list'] = master_list
valid_numbers = AutoDialerList.objects.filter(is_valid=True, associated_to=master_list)
kwargs['total_numbers'] = valid_numbers.count()
kwargs['total_landline'] = valid_numbers.filter(phone_type='landline').count()
kwargs['total_cellphone'] = valid_numbers.filter(phone_type='cell-phone').count()
kwargs['total_voip'] = valid_numbers.filter(phone_type='voip').count()
if master_list.has_errors:
unprocessed_numbers = AutoDialerList.objects.filter(is_valid=False, associated_to=master_list)
kwargs['unprocessed_numbers'] = unprocessed_numbers
kwargs['total_unprocessed_numbers'] = unprocessed_numbers.count()
return kwargs
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
def validate_international_phonenumber(value):
phone_number = to_python(value)
if phone_number and not phone_number.is_valid():
return False, u'The phone number entered is not valid.'
return True, ''
class SaveUploadedListView(View):
"""
"""
template_name = "ccc/click_to_call/upload_excel.html"
    def initialise_status_style(self):
"""Initialize excel styles
"""
self.red_status_style = xlwt.XFStyle()
self.red_status_style.font.bold = True
self.red_pattern = xlwt.Pattern()
self.red_status_style.font.colour_index = xlwt.Style.colour_map['red']
self.red_status_style.pattern = self.red_pattern
self.green_status_style = xlwt.XFStyle()
self.green_status_style.font.bold = True
self.green_pattern = xlwt.Pattern()
self.green_status_style.font.colour_index = xlwt.Style.colour_map[
'green']
self.green_status_style.pattern = self.green_pattern
self.orange_status_style = xlwt.XFStyle()
self.orange_status_style.font.bold = True
self.orange_pattern = xlwt.Pattern()
self.orange_status_style.font.colour_index = xlwt.Style.colour_map[
'orange']
self.orange_status_style.pattern = self.orange_pattern
self.grey_style = xlwt.XFStyle()
self.grey_style.font.bold = True
self.grey_style.font.colour_index = xlwt.Style.colour_map[
'white']
pattern = xlwt.Pattern()
pattern.pattern = xlwt.Pattern.SOLID_PATTERN
pattern.pattern_background_colour = xlwt.Style.colour_map['gray50']
self.grey_style.pattern = pattern
self.grey_style.alignment.horz = xlwt.Alignment.HORZ_CENTER_ACROSS_SEL
def write_header(self, sheet_obj, header_data, row_position):
"""Method to set/update column names and width
"""
column_position = 0
row_position = 0
for col_value, col_width in header_data.items():
sheet_obj.col(column_position).width = col_width
sheet_obj.write(row_position, column_position,
col_value, self.grey_style)
column_position += 1
def write_data_to_excel(self, sheet, row_data, row_no):
"""Method to write data to spreadsheet
"""
style_map = {"fail": self.red_status_style,
}
style = style_map["fail"]
sheet.write(row_no, 0, row_data[0])
sheet.write(row_no, 1, row_data[1])
sheet.write(row_no, 2, row_data[2], style=style)
def generate_excel(self, final_data):
"""Method to generate in excel format
"""
spreadsheet = xlwt.Workbook(encoding="utf-8")
sheet_obj = spreadsheet.add_sheet(
"Sheet1", cell_overwrite_ok=True)
header_data = OrderedDict([
('Phone', 256 * 25),
('Name(Optional)', 256 * 25),
('Error', 256 * 100),
])
self.initialise_status_style()
self.write_header(sheet_obj=sheet_obj,
header_data=header_data,
row_position=0)
row_no = 1
for data in final_data:
self.write_data_to_excel(
sheet=sheet_obj, row_data=data, row_no=row_no)
row_no += 1
fname = "contacts"
response = HttpResponse(content_type="application/ms-excel")
response[
            'Content-Disposition'] = 'attachment; filename="{}.xls"'.format(fname.strip())
spreadsheet.save(response)
return response
def save_auto_dialer(self, name, final_data):
"""Save Autodialer Numbers
"""
obj = AssociateMasterList.objects.create(
name=name, user=self.request.user)
for data in final_data:
AutoDialerList.objects.create(number=data[0],
name=data[1],
associated_to=obj
)
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
import xlrd
file_obj = request.FILES.get("upload_file")
s1 = xlrd.open_workbook(file_contents=file_obj.read())
name = request.POST.get("list_name")
sheet = s1.sheet_by_index(0)
list_dialer = list()
        for row_index in range(sheet.nrows):
            # if it's the first row... continue (sample row).
            if row_index == 0:
                continue
            row = DialListRow(sheet.row_values(row_index))
            dialer = Dialer(row.first_name, row.last_name, row.phone_number, row.city, row.state)
list_dialer.append(dialer)
m_list = AssociateMasterList.objects.create(name=name, user=request.user)
# Async task
task = save_dialer_list(m_list_id=m_list.id, dialers=list_dialer).execute()
return HttpResponseRedirect(reverse('process_excel', args=[m_list.id, task.task_id]))
class ProcessUploadedFileView(TemplateView):
"""Check the status of task that process file uploaded"""
template_name = "ccc/click_to_call/process_excel.html"
def get_context_data(self, **kwargs):
kwargs['ping_url'] = reverse('process_excel_status', args=[kwargs['task_id'], ])
return kwargs
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
        # Async tasks  # TODO: check status via gcloud  # FIXME: implement status in gcloud library.
# task_status = save_dialer_list(m_list_id=)
#task_status = save_dialer_list.AsyncResult(context['task_id']).status
# if task_status == 'SUCCESS':
# master_list = AssociateMasterList.objects.get(id=kwargs['master_list_id'])
# return HttpResponseRedirect(reverse('output_upload_excel', kwargs={'master_list_id': master_list.pk}))
return self.render_to_response(context)
@login_required
def uploaded_file_status(request, task_id):
    # FIXME/TODO: check if gcloud returns a task status
return HttpResponse(json.dumps({
'status': save_dialer_list.AsyncResult(task_id).status
}), content_type="application/json")
class AutoDialNumberListView(generics.ListAPIView):
"""
    Lists the auto-dialer master lists that belong to the requesting user.
"""
queryset = AssociateMasterList.objects.all()
serializer_class = AutoDialerMasterListSerializer
paginate_by = 10
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(AutoDialNumberListView, self).dispatch(
request, *args, **kwargs)
def get_queryset(self, *args, **kwargs):
return AssociateMasterList.objects.filter(
user=self.request.user)
class AutoDialNumberView(generics.ListAPIView):
"""
    Lists the valid auto-dialer numbers in a master list belonging to the requesting user.
"""
queryset = AutoDialerList.objects.all()
serializer_class = AutoDialerListSerializer
paginate_by = 10
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(AutoDialNumberView, self).dispatch(
request, *args, **kwargs)
def get_queryset(self, *args, **kwargs):
self.dailer_list_id = self.kwargs["dailer_list_id"]
return AutoDialerList.objects.filter(associated_to=self.dailer_list_id,
is_valid=True,
associated_to__user=self.request.user)
class DialCallBackView(View):
"""Class to handle auto dial callback from twilio
"""
def get(self, *args, **kwargs):
"""
"""
call_duration = self.request.GET.get("DialCallDuration", 0)
call_duration_in_mins = ceil(float(call_duration) / 60.0)
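        # Talk time is billed in whole minutes rounded up, e.g. a 95-second call counts as ceil(95 / 60) = 2 minutes.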
twilio_number = kwargs["twilio_number"]
queryset = TwilioNumber.objects.filter(twilio_number=twilio_number)
if queryset.exists():
twilio_obj = queryset[0]
user = twilio_obj.user
from ccc.packages.models import Credit
credits_qs = Credit.objects.filter(package__user=user)
if credits_qs.exists():
credit_obj = credits_qs.latest("id")
credit_obj.talktime = credit_obj.talktime - call_duration_in_mins
credit_obj.save()
return HttpResponse("Done") | PypiClean |
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flasharray/FA_2_13/models/array_performance.py | import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_13 import models
class ArrayPerformance(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bytes_per_mirrored_write': 'int',
'bytes_per_op': 'int',
'bytes_per_read': 'int',
'bytes_per_write': 'int',
'mirrored_write_bytes_per_sec': 'int',
'mirrored_writes_per_sec': 'int',
'qos_rate_limit_usec_per_mirrored_write_op': 'int',
'qos_rate_limit_usec_per_read_op': 'int',
'qos_rate_limit_usec_per_write_op': 'int',
'queue_usec_per_mirrored_write_op': 'int',
'queue_usec_per_read_op': 'int',
'queue_usec_per_write_op': 'int',
'read_bytes_per_sec': 'int',
'reads_per_sec': 'int',
'san_usec_per_mirrored_write_op': 'int',
'san_usec_per_read_op': 'int',
'san_usec_per_write_op': 'int',
'service_usec_per_mirrored_write_op': 'int',
'service_usec_per_read_op': 'int',
'service_usec_per_write_op': 'int',
'time': 'int',
'usec_per_mirrored_write_op': 'int',
'usec_per_read_op': 'int',
'usec_per_write_op': 'int',
'write_bytes_per_sec': 'int',
'writes_per_sec': 'int',
'service_usec_per_read_op_cache_reduction': 'float',
'id': 'str',
'name': 'str',
'queue_depth': 'int',
'local_queue_usec_per_op': 'int',
'usec_per_other_op': 'int',
'others_per_sec': 'int'
}
attribute_map = {
'bytes_per_mirrored_write': 'bytes_per_mirrored_write',
'bytes_per_op': 'bytes_per_op',
'bytes_per_read': 'bytes_per_read',
'bytes_per_write': 'bytes_per_write',
'mirrored_write_bytes_per_sec': 'mirrored_write_bytes_per_sec',
'mirrored_writes_per_sec': 'mirrored_writes_per_sec',
'qos_rate_limit_usec_per_mirrored_write_op': 'qos_rate_limit_usec_per_mirrored_write_op',
'qos_rate_limit_usec_per_read_op': 'qos_rate_limit_usec_per_read_op',
'qos_rate_limit_usec_per_write_op': 'qos_rate_limit_usec_per_write_op',
'queue_usec_per_mirrored_write_op': 'queue_usec_per_mirrored_write_op',
'queue_usec_per_read_op': 'queue_usec_per_read_op',
'queue_usec_per_write_op': 'queue_usec_per_write_op',
'read_bytes_per_sec': 'read_bytes_per_sec',
'reads_per_sec': 'reads_per_sec',
'san_usec_per_mirrored_write_op': 'san_usec_per_mirrored_write_op',
'san_usec_per_read_op': 'san_usec_per_read_op',
'san_usec_per_write_op': 'san_usec_per_write_op',
'service_usec_per_mirrored_write_op': 'service_usec_per_mirrored_write_op',
'service_usec_per_read_op': 'service_usec_per_read_op',
'service_usec_per_write_op': 'service_usec_per_write_op',
'time': 'time',
'usec_per_mirrored_write_op': 'usec_per_mirrored_write_op',
'usec_per_read_op': 'usec_per_read_op',
'usec_per_write_op': 'usec_per_write_op',
'write_bytes_per_sec': 'write_bytes_per_sec',
'writes_per_sec': 'writes_per_sec',
'service_usec_per_read_op_cache_reduction': 'service_usec_per_read_op_cache_reduction',
'id': 'id',
'name': 'name',
'queue_depth': 'queue_depth',
'local_queue_usec_per_op': 'local_queue_usec_per_op',
'usec_per_other_op': 'usec_per_other_op',
'others_per_sec': 'others_per_sec'
}
required_args = {
}
def __init__(
self,
bytes_per_mirrored_write=None, # type: int
bytes_per_op=None, # type: int
bytes_per_read=None, # type: int
bytes_per_write=None, # type: int
mirrored_write_bytes_per_sec=None, # type: int
mirrored_writes_per_sec=None, # type: int
qos_rate_limit_usec_per_mirrored_write_op=None, # type: int
qos_rate_limit_usec_per_read_op=None, # type: int
qos_rate_limit_usec_per_write_op=None, # type: int
queue_usec_per_mirrored_write_op=None, # type: int
queue_usec_per_read_op=None, # type: int
queue_usec_per_write_op=None, # type: int
read_bytes_per_sec=None, # type: int
reads_per_sec=None, # type: int
san_usec_per_mirrored_write_op=None, # type: int
san_usec_per_read_op=None, # type: int
san_usec_per_write_op=None, # type: int
service_usec_per_mirrored_write_op=None, # type: int
service_usec_per_read_op=None, # type: int
service_usec_per_write_op=None, # type: int
time=None, # type: int
usec_per_mirrored_write_op=None, # type: int
usec_per_read_op=None, # type: int
usec_per_write_op=None, # type: int
write_bytes_per_sec=None, # type: int
writes_per_sec=None, # type: int
service_usec_per_read_op_cache_reduction=None, # type: float
id=None, # type: str
name=None, # type: str
queue_depth=None, # type: int
local_queue_usec_per_op=None, # type: int
usec_per_other_op=None, # type: int
others_per_sec=None, # type: int
):
"""
Keyword args:
bytes_per_mirrored_write (int): The average I/O size per mirrored write. Measured in bytes.
bytes_per_op (int): The average I/O size for both read and write (all) operations.
bytes_per_read (int): The average I/O size per read. Measured in bytes.
bytes_per_write (int): The average I/O size per write. Measured in bytes.
mirrored_write_bytes_per_sec (int): The number of mirrored bytes written per second.
mirrored_writes_per_sec (int): The number of mirrored writes per second.
qos_rate_limit_usec_per_mirrored_write_op (int): The average time it takes the array to process a mirrored I/O write request. Measured in microseconds.
qos_rate_limit_usec_per_read_op (int): The average time spent waiting due to QoS rate limiting for a read request. Measured in microseconds.
qos_rate_limit_usec_per_write_op (int): The average time that a write I/O request spends waiting as a result of the volume reaching its QoS bandwidth limit. Measured in microseconds.
queue_usec_per_mirrored_write_op (int): The average time that a mirrored write I/O request spends in the array waiting to be served. Measured in microseconds.
queue_usec_per_read_op (int): The average time that a read I/O request spends in the array waiting to be served. Measured in microseconds.
queue_usec_per_write_op (int): The average time that a write I/O request spends in the array waiting to be served. Measured in microseconds.
read_bytes_per_sec (int): The number of bytes read per second.
reads_per_sec (int): The number of read requests processed per second.
san_usec_per_mirrored_write_op (int): The average time required to transfer data from the initiator to the array for a mirrored write request. Measured in microseconds.
san_usec_per_read_op (int): The average time required to transfer data from the array to the initiator for a read request. Measured in microseconds.
san_usec_per_write_op (int): The average time required to transfer data from the initiator to the array for a write request. Measured in microseconds.
service_usec_per_mirrored_write_op (int): The average time required for the array to service a mirrored write request. Measured in microseconds.
service_usec_per_read_op (int): The average time required for the array to service a read request. Measured in microseconds.
service_usec_per_write_op (int): The average time required for the array to service a write request. Measured in microseconds.
time (int): The time when the sample performance data was taken. Measured in milliseconds since the UNIX epoch.
usec_per_mirrored_write_op (int): The average time it takes the array to process a mirrored I/O write request. Measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
usec_per_read_op (int): The average time it takes the array to process an I/O read request. Measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
usec_per_write_op (int): The average time it takes the array to process an I/O write request. Measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
write_bytes_per_sec (int): The number of bytes written per second.
writes_per_sec (int): The number of write requests processed per second.
service_usec_per_read_op_cache_reduction (float): The percentage reduction in `service_usec_per_read_op` due to data cache hits. For example, a value of 0.25 indicates that the value of `service_usec_per_read_op` is 25% lower than it would have been without any data cache hits.
id (str): A globally unique, system-generated ID. The ID cannot be modified and cannot refer to another resource.
name (str): A locally unique, system-generated name. The name cannot be modified.
queue_depth (int): Deprecated. The number displayed here may not be accurate and in later versions of the product this field will always display `null`. Instead, use `queue_usec_per_mirrored_write_op`, `queue_usec_per_read_op`, and `queue_usec_per_write_op` fields to measure IO queuing.
local_queue_usec_per_op (int): Average local queue time for both read and write operations, measured in microseconds.
usec_per_other_op (int): The average time it takes the array to process an I/O other request, measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
others_per_sec (int): The number of other requests processed per second.
"""
if bytes_per_mirrored_write is not None:
self.bytes_per_mirrored_write = bytes_per_mirrored_write
if bytes_per_op is not None:
self.bytes_per_op = bytes_per_op
if bytes_per_read is not None:
self.bytes_per_read = bytes_per_read
if bytes_per_write is not None:
self.bytes_per_write = bytes_per_write
if mirrored_write_bytes_per_sec is not None:
self.mirrored_write_bytes_per_sec = mirrored_write_bytes_per_sec
if mirrored_writes_per_sec is not None:
self.mirrored_writes_per_sec = mirrored_writes_per_sec
if qos_rate_limit_usec_per_mirrored_write_op is not None:
self.qos_rate_limit_usec_per_mirrored_write_op = qos_rate_limit_usec_per_mirrored_write_op
if qos_rate_limit_usec_per_read_op is not None:
self.qos_rate_limit_usec_per_read_op = qos_rate_limit_usec_per_read_op
if qos_rate_limit_usec_per_write_op is not None:
self.qos_rate_limit_usec_per_write_op = qos_rate_limit_usec_per_write_op
if queue_usec_per_mirrored_write_op is not None:
self.queue_usec_per_mirrored_write_op = queue_usec_per_mirrored_write_op
if queue_usec_per_read_op is not None:
self.queue_usec_per_read_op = queue_usec_per_read_op
if queue_usec_per_write_op is not None:
self.queue_usec_per_write_op = queue_usec_per_write_op
if read_bytes_per_sec is not None:
self.read_bytes_per_sec = read_bytes_per_sec
if reads_per_sec is not None:
self.reads_per_sec = reads_per_sec
if san_usec_per_mirrored_write_op is not None:
self.san_usec_per_mirrored_write_op = san_usec_per_mirrored_write_op
if san_usec_per_read_op is not None:
self.san_usec_per_read_op = san_usec_per_read_op
if san_usec_per_write_op is not None:
self.san_usec_per_write_op = san_usec_per_write_op
if service_usec_per_mirrored_write_op is not None:
self.service_usec_per_mirrored_write_op = service_usec_per_mirrored_write_op
if service_usec_per_read_op is not None:
self.service_usec_per_read_op = service_usec_per_read_op
if service_usec_per_write_op is not None:
self.service_usec_per_write_op = service_usec_per_write_op
if time is not None:
self.time = time
if usec_per_mirrored_write_op is not None:
self.usec_per_mirrored_write_op = usec_per_mirrored_write_op
if usec_per_read_op is not None:
self.usec_per_read_op = usec_per_read_op
if usec_per_write_op is not None:
self.usec_per_write_op = usec_per_write_op
if write_bytes_per_sec is not None:
self.write_bytes_per_sec = write_bytes_per_sec
if writes_per_sec is not None:
self.writes_per_sec = writes_per_sec
if service_usec_per_read_op_cache_reduction is not None:
self.service_usec_per_read_op_cache_reduction = service_usec_per_read_op_cache_reduction
if id is not None:
self.id = id
if name is not None:
self.name = name
if queue_depth is not None:
self.queue_depth = queue_depth
if local_queue_usec_per_op is not None:
self.local_queue_usec_per_op = local_queue_usec_per_op
if usec_per_other_op is not None:
self.usec_per_other_op = usec_per_other_op
if others_per_sec is not None:
self.others_per_sec = others_per_sec
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ArrayPerformance`".format(key))
if key == "bytes_per_mirrored_write" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_mirrored_write`, must be a value greater than or equal to `0`")
if key == "bytes_per_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_op`, must be a value greater than or equal to `0`")
if key == "bytes_per_read" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_read`, must be a value greater than or equal to `0`")
if key == "bytes_per_write" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_write`, must be a value greater than or equal to `0`")
if key == "mirrored_write_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `mirrored_write_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "mirrored_writes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `mirrored_writes_per_sec`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "read_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `read_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "reads_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `reads_per_sec`, must be a value greater than or equal to `0`")
if key == "san_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "san_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "san_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "write_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `write_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "writes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `writes_per_sec`, must be a value greater than or equal to `0`")
if key == "service_usec_per_read_op_cache_reduction" and value is not None:
if value > 1.0:
raise ValueError("Invalid value for `service_usec_per_read_op_cache_reduction`, value must be less than or equal to `1.0`")
if value < 0.0:
raise ValueError("Invalid value for `service_usec_per_read_op_cache_reduction`, must be a value greater than or equal to `0.0`")
if key == "queue_depth" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_depth`, must be a value greater than or equal to `0`")
if key == "local_queue_usec_per_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `local_queue_usec_per_op`, must be a value greater than or equal to `0`")
if key == "usec_per_other_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_other_op`, must be a value greater than or equal to `0`")
if key == "others_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `others_per_sec`, must be a value greater than or equal to `0`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ArrayPerformance`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ArrayPerformance`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ArrayPerformance`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ArrayPerformance, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ArrayPerformance):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | PypiClean |
/ladybug-comfort-0.16.46.tar.gz/ladybug-comfort-0.16.46/ladybug_comfort/humidex.py | """Utility functions for calculating the Humidex."""
from __future__ import division
import math
def humidex(ta, tdp):
"""Calculate Humidex from air temperature and the Dew Point.
The Humidex is a Canadian innovation first used in 1965.
It combines the temperature and humidity into one number to reflect the
perceived temperature.
Because it takes into account the two most important
factors that affect summer comfort, it can be a better
measure of how stifling the air feels than either temperature or
humidity alone. [1]
    Air temperatures below 20 C will give a generally meaningless result, as the Humidex
only describes perceived heat.
The Humidex is a "nominally dimensionless quantity" but is generally
recognized by the public as equivalent to the degree Celsius [2]
Note:
[1] Environment Canada (October 2017). "Warm Season Weather Hazards".
https://www.canada.ca/en/environment-climate-change/services/seasonal\
-weather-hazards/warm-season-weather-hazards.html#toc7
[2] https://en.wikipedia.org/wiki/Humidex
Args:
ta: Air temperature [C]
tdp: The Dew Point [C]
Returns:
float -- Humidex
"""
dew_point_k = tdp + 273.15 # celsius to kelvin
e = 6.11 * math.exp(5417.7530 * ((1 / 273.15) - (1 / dew_point_k)))
h = 0.5555 * (e - 10.0)
humidex_value = float(ta + h)
return humidex_value
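# Worked example (illustrative, not part of the original module): for an air temperature of
# 30 C and a dew point of 24 C, humidex(30, 24) evaluates to roughly 41.3, which
# humidex_degree_of_comfort() below classifies as 3 ("great discomfort").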
def humidex_degree_of_comfort(humidex):
"""Get the degree of comfort associated with a given Humidex value.
Degrees of comfort are provided by the Government of Canada and are indicated
here with the following integer values:
* 0 = No discomfort (Humidex of 19 and below)
* 1 = Little discomfort (Humidex between 20 - 29)
* 2 = Some discomfort (Humidex between 30 - 39)
* 3 = Great discomfort; avoid exertion (Humidex between 40 - 45)
    * 4 = Dangerous; heat stroke possible (Humidex of 46 and above)
See: https://www.canada.ca/en/environment-climate-change/services/seasonal-\
weather-hazards/warm-season-weather-hazards.html#toc7
Args:
humidex: Humidex
Returns:
int -- Degree of Comfort
"""
if humidex < 20.0:
return 0
elif humidex < 30.0:
return 1
elif humidex < 40.0:
return 2
elif humidex < 46.0:
return 3
return 4 | PypiClean |
/python-cpl-0.7.4.tar.gz/python-cpl-0.7.4/cpl/frames.py | from __future__ import absolute_import
import os
from astropy.io import fits
from . import md5sum
class FrameConfig(object):
'''Frame configuration.
Each :class:`FrameConfig` object stores information about one the data
type a recipe can process. They are used for defining the calibration
files. However, since this information is not generally provided by CPL
recipes, it contains only dummy information, except for the MUSE recipes.
The objects stores a frame tag, a unique identifier for a certain kind of
frame, the minimum and maximum number of frames needed.
Attributes:
.. attribute:: tag
Category tag name. The tag name is used to distinguish between
different types of files. An examples of tag names is 'MASTER_BIAS'
which specifies the master bias calibration file(s).
.. attribute:: min
Minimal number of frames, or :obj:`None` if not specified. A frame is
required if the :attr:`min` is set to a value greater than 0.
.. attribute:: max
Maximal number of frames, or :obj:`None` if not specified
.. attribute:: frames
List of frames (file names or :class:`astropy.io.fits.HDUList` objects)
that are assigned to this frame type.
'''
def __init__(self, tag, min_frames = 0, max_frames = 0, frames = None):
self.tag = tag
self.min = min_frames if min_frames > 0 else None
self.max = max_frames if max_frames > 0 else None
self.frames = frames
self.__doc__ = self._doc()
def extend_range(self, min_frames, max_frames):
if self.min is not None:
self.min = min(self.min, min_frames) if min_frames is not None \
else None
if self.max is not None:
self.max = max(self.max, max_frames) if max_frames is not None \
else None
def set_range(self, min_frames, max_frames):
self.min = min_frames
self.max = max_frames
def __str__(self):
return str(self.frames)
def __repr__(self):
return 'FrameDef(%s, frames=%s)' % (repr(self.tag), repr(self.frames))
def _doc(self):
if self.max is None or self.min is None:
r = ' one frame or list of frames'
elif self.max == 1:
r = ' one frame'
elif self.min > 1 and self.max > self.min:
r = ' list of %i-%i frames' % (self.min, self.max)
elif self.max > 1:
r = ' one frame or list of max. %i frames' % self.max
elif self.min > 1:
r = ' list of min. %i frames' % self.max
else:
r = ' one frame or list of frames'
if not self.min:
r += ' (optional)'
return r
def __getitem__(self, i):
return (self.tag, self.frames)[i]
class FrameList(object):
def __init__(self, recipe, other = None):
self._recipe = recipe
self._values = dict()
if isinstance(other, self.__class__):
self._set_items((o.tag, o.frames) for o in other)
elif isinstance(other, dict):
self._set_items(other.items())
elif other:
self._set_items(other)
def _set_items(self, l):
for o in l:
self[o[0]] = o[1]
@property
def _cpl_dict(self):
cpl_frameconfigs = self._recipe._recipe.frameConfig()
if cpl_frameconfigs is None:
return None
s = dict()
for configs in cpl_frameconfigs:
c_cfg = configs[1]
for f in c_cfg:
if f[0] in s:
s[f[0]].extend_range(f[1], f[2])
elif f[0] in self._values:
s[f[0]] = self._values[f[0]]
s[f[0]].set_range(f[1], f[2])
else:
s[f[0]] = FrameConfig(f[0], f[1], f[2])
self._values[f[0]] = s[f[0]]
return s
@property
def _dict(self):
return self._cpl_dict or self._values
def __iter__(self):
return iter(self._dict.values())
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, value):
d = self._cpl_dict
if d is not None:
d[key].frames = value
else:
self._values.setdefault(key, FrameConfig(key)).frames = value
def __delitem__(self, key):
self._dict[key].frames = None
def __contains__(self, key):
return key in self._dict
def __len__(self):
return len(self._dict)
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
if key.startswith('_'):
super(FrameList, self).__setattr__(key, value)
else:
self[key] = value
def __delattr__(self, key):
del self[key]
def __dir__(self):
return self._dict.keys()
def __repr__(self):
return repr(dict(iter(self)))
def __str__(self):
return str(dict(iter(self)))
def __eq__(self, other):
return dict(iter(self)) == other
@property
def __doc__(self):
r = 'Frames for recipe %s.\n\nAttributes:\n' % (
self._recipe.name)
for s in self:
r += '%s: %s\n' % (self._key(s), s.__doc__)
return r
def _aslist(self, frames):
flist = FrameList(self._recipe, self)
if frames is not None:
flist._set_items(frames.items())
return [(f.tag, f.frames) for f in flist]
def mkabspath(frames, tmpdir):
'''Convert all filenames in the frames list into absolute paths.
:class:`astropy.io.fits.HDUList`s will be converted to temporary files
located in the temporary directory tmpdir.
The replacement is done in-place. The function will return the list of
temporary files.
    :param frames: :class:`list` of (tag, frame) tuples with frame being either
        a file name or an HDU list.
    :param tmpdir: directory where the temporary files are created.
'''
tmpfiles = list()
for i, frame in enumerate(frames):
if isinstance(frame[1], fits.HDUList):
md5 = md5sum.update_md5(frame[1])
filename = os.path.abspath(os.path.join(tmpdir, '%s_%s.fits'
% (frame[0], md5[:8])))
try:
os.remove(filename)
except:
pass
frames[i] = ( frame[0], filename )
tmpfiles.append(filename)
frame[1].writeto(filename)
else:
frames[i] = ( frame[0], os.path.abspath(frame[1]) )
return tmpfiles
def expandframelist(frames):
    '''Convert a list of (tag, frames) pairs into a flat frame list where each
    frame gets its own entry in the form (tag, frame)
'''
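    # For example (illustrative): [("BIAS", ["b1.fits", "b2.fits"]), ("FLAT", "f.fits")]
    # expands to [("BIAS", "b1.fits"), ("BIAS", "b2.fits"), ("FLAT", "f.fits")].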
framelist = list()
for tag, f in frames:
if isinstance(f, list) and not isinstance(f, fits.HDUList):
framelist += [ (tag, frame) for frame in f ]
elif f is not None:
framelist.append((tag, f))
return framelist | PypiClean |
/relative-addons-system-2.5.4.tar.gz/relative-addons-system-2.5.4/RelativeAddonsSystem/utils/cache.py | import os
from pathlib import Path
from .storage import Storage
class RelativeAddonsSystemCache(Storage):
_instance = None
DATA_KEY = "addons_data"
STATES_KEY = "addons_states"
def __init__(self, path: Path):
super().__init__(path)
self.initialize({self.DATA_KEY: {}, self.STATES_KEY: {}})
def __new__(cls, *args, **kwargs):
if cls._instance is not None:
return cls._instance
inst = object.__new__(cls)
inst.__init__(*args, **kwargs)
cls._instance = inst
return inst
@classmethod
def get_instance(cls) -> "RelativeAddonsSystemCache":
return cls._instance
def get_addon_data(self, addon: "Addon"):
return self.get(self.DATA_KEY, {}).get(addon.meta.name, {})
def update_addon_data(self, data: dict, addon: "Addon"):
if self.DATA_KEY not in self:
self.set(self.DATA_KEY, {})
addons_data = self.get(self.DATA_KEY, {})
addons_data.update({addon.meta.name: data})
self.save()
def addon_updated(self, addon: "Addon", update_state: bool = False):
addons_states: dict[str, dict] = self.get(self.STATES_KEY, {})
system_addon_modified_time = os.path.getmtime(addon.path)
addon_state = addons_states.get(addon.meta.name, None)
saved_addon_modified_time = addon_state["last_modified"] if addon_state else None
if update_state:
self.update_addon_state(addon)
return saved_addon_modified_time != system_addon_modified_time
def update_addon_state(self, addon: "Addon"):
addons_states: dict[str, dict] = self.get(self.STATES_KEY, {})
system_addon_modified_time = os.path.getmtime(addon.path)
addon_state = addons_states.get(addon.meta.name, None)
if not addon_state:
addons_states[addon.meta.name] = dict(
last_modified=system_addon_modified_time
)
else:
addon_state["last_modified"] = system_addon_modified_time
self.save()
def remove_addons(self, *addons: "Addon"):
cached_states = self.get(self.STATES_KEY, {})
cached_data = self.get(self.DATA_KEY, {})
for addon in addons:
if addon.meta.name in cached_data:
del cached_data[addon.meta.name]
if addon.meta.name in cached_states:
del cached_states[addon.meta.name]
self.save() | PypiClean |
/custom-awscli-1.27.51.tar.gz/custom-awscli-1.27.51/awscli/examples/securityhub/list-members.rst | **To retrieve a list of member accounts**
The following ``list-members`` example returns the list of member accounts for the requesting administrator account. ::
aws securityhub list-members
Output::
{
"Members": [
{
"AccountId": "123456789111",
"AdministratorId": "123456789012",
"InvitedAt": 2020-06-01T20:15:15.289000+00:00,
"MasterId": "123456789012",
"MemberStatus": "ASSOCIATED",
"UpdatedAt": 2020-06-01T20:15:15.289000+00:00
},
{
"AccountId": "123456789222",
"AdministratorId": "123456789012",
"InvitedAt": 2020-06-01T20:15:15.289000+00:00,
"MasterId": "123456789012",
"MemberStatus": "ASSOCIATED",
"UpdatedAt": 2020-06-01T20:15:15.289000+00:00
}
],
}
For more information, see `Managing administrator and member accounts <https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-accounts.html>`__ in the *AWS Security Hub User Guide*.
| PypiClean |
/py_hcl-0.1.2.tar.gz/py_hcl-0.1.2/py_hcl/firrtl_ir/expr/prim_ops.py | from . import Expression
from ..utils import serialize_num
class Add(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"add(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class Sub(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"sub(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class Mul(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"mul(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class Div(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"div(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class Rem(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"rem(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class Lt(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"lt(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class Leq(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"leq(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class Gt(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"gt(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class Geq(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"geq(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class Eq(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"eq(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class Neq(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"neq(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class And(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"and(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class Or(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"or(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class Xor(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"xor(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class Not(Expression):
def __init__(self, arg, tpe):
self.arg = arg
self.tpe = tpe
def serialize(self, output):
output.write(b"not(")
self.arg.serialize(output)
output.write(b")")
class Neg(Expression):
def __init__(self, arg, tpe):
self.arg = arg
self.tpe = tpe
def serialize(self, output):
output.write(b"neg(")
self.arg.serialize(output)
output.write(b")")
class Cat(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"cat(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class Bits(Expression):
def __init__(self, ir_arg, const_args, tpe):
self.ir_arg = ir_arg
self.const_args = const_args
self.tpe = tpe
def serialize(self, output):
output.write(b"bits(")
self.ir_arg.serialize(output)
output.write(b", ")
output.write(serialize_num(self.const_args[0]))
output.write(b", ")
output.write(serialize_num(self.const_args[1]))
output.write(b")")
class AsUInt(Expression):
def __init__(self, arg, tpe):
self.arg = arg
self.tpe = tpe
def serialize(self, output):
output.write(b"asUInt(")
self.arg.serialize(output)
output.write(b")")
class AsSInt(Expression):
def __init__(self, arg, tpe):
self.arg = arg
self.tpe = tpe
def serialize(self, output):
output.write(b"asSInt(")
self.arg.serialize(output)
output.write(b")")
class Shl(Expression):
def __init__(self, ir_arg, const_arg, tpe):
self.ir_arg = ir_arg
self.const_arg = const_arg
self.tpe = tpe
def serialize(self, output):
output.write(b"shl(")
self.ir_arg.serialize(output)
output.write(b", ")
output.write(serialize_num(self.const_arg))
output.write(b")")
class Shr(Expression):
def __init__(self, ir_arg, const_arg, tpe):
self.ir_arg = ir_arg
self.const_arg = const_arg
self.tpe = tpe
def serialize(self, output):
output.write(b"shr(")
self.ir_arg.serialize(output)
output.write(b", ")
output.write(serialize_num(self.const_arg))
output.write(b")")
class Dshl(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"dshl(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")")
class Dshr(Expression):
def __init__(self, args, tpe):
self.args = args
self.tpe = tpe
def serialize(self, output):
output.write(b"dshr(")
self.args[0].serialize(output)
output.write(b", ")
self.args[1].serialize(output)
output.write(b")") | PypiClean |
/material_zui-0.1.6-py3-none-any.whl/material_zui/image/colorization.py | from cv2 import Mat
import numpy as np
import cv2
import os
"""
Credits:
1. https://github.com/opencv/opencv/blob/master/samples/dnn/colorization.py
2. http://richzhang.github.io/colorization/
3. https://github.com/richzhang/colorization/
Download the model files:
1. colorization_deploy_v2.prototxt: https://github.com/richzhang/colorization/tree/caffe/colorization/models
2. pts_in_hull.npy: https://github.com/richzhang/colorization/blob/caffe/colorization/resources/pts_in_hull.npy
3. colorization_release_v2.caffemodel: https://www.dropbox.com/s/dx0qvhhp5hbcx7z/colorization_release_v2.caffemodel?dl=1
"""
# Paths to load the models
DIR = f"{os.path.dirname(os.path.realpath(__file__))}/model"
PROTOTXT = os.path.join(DIR, "colorization_deploy_v2.prototxt")
POINTS = os.path.join(DIR, "pts_in_hull.npy")
MODEL = os.path.join(DIR, "colorization_release_v2.caffemodel")
def colorization(image: Mat) -> Mat:
# Load the Model
net = cv2.dnn.readNetFromCaffe(PROTOTXT, MODEL)
pts = np.load(POINTS)
# Load centers for ab channel quantization used for rebalancing.
class8 = net.getLayerId("class8_ab")
conv8 = net.getLayerId("conv8_313_rh")
pts = pts.transpose().reshape(2, 313, 1, 1)
net.getLayer(class8).blobs = [pts.astype("float32")]
net.getLayer(conv8).blobs = [np.full([1, 313], 2.606, dtype="float32")]
# Load the input image
scaled = image.astype("float32") / 255.0
lab = cv2.cvtColor(scaled, cv2.COLOR_BGR2LAB)
resized = cv2.resize(lab, (224, 224))
L = cv2.split(resized)[0]
L -= 50
# Colorizing the image
net.setInput(cv2.dnn.blobFromImage(L))
ab = net.forward()[0, :, :, :].transpose((1, 2, 0))
ab = cv2.resize(ab, (image.shape[1], image.shape[0]))
L = cv2.split(lab)[0]
colorized = np.concatenate((L[:, :, np.newaxis], ab), axis=2)
colorized = cv2.cvtColor(colorized, cv2.COLOR_LAB2BGR)
colorized = np.clip(colorized, 0, 1)
colorized = (255 * colorized).astype("uint8") # type: ignore
return colorized
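# Example usage (illustrative; the file names are placeholders):
#   gray = cv2.imread("old_photo.jpg")      # loaded as 3-channel BGR by default
#   color = colorization(gray)              # uint8 BGR image of the same size
#   cv2.imwrite("old_photo_color.jpg", color)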
def save_colorization(image_path: str, output_path: str):
image = cv2.imread(image_path)
colorized = colorization(image)
cv2.imwrite(output_path, colorized) | PypiClean |
/sincfold-0.12.tar.gz/sincfold-0.12/README.md | # **sincFold**
This is the repository for sincFold, a new RNA folding prediction tool based on deep learning.
<p align="center">
<img src="abstract.png" alt="abstract">
</p>
SincFold is a fast and accurate RNA secondary structure prediction method. It is an end-to-end approach that predicts the contact matrix using only the sequence of nucleotides as input. The model is based on a residual neural network that can learn short and long context interactions. Extensive experiments on several benchmark datasets were made, comparing sincFold against classical methods and new models based on deep learning. We demonstrate that sincFold achieves the best performance in comparison with state-of-the-art methods.
A summary of results can be seen in [this notebook](results/summary.ipynb).
## Folding RNA sequences
We have a [webserver](https://sinc.unl.edu.ar/web-demo/sincfold/) running with the latest version. This server accepts one sequence at a time. We provide a model pre-trained with validated RNA datasets (ArchiveII, RNAstralign, URS-PDB). Please follow the instructions below if you want to run the model locally.
## Install
This is a Python package. It is recommended to use virtualenv or conda to create a new environment. To install the package, run:
pip install sincfold
Alternativelly, you can clone the repository with:
git clone https://github.com/sinc-lab/sincFold
cd sincFold/
and install with:
pip install .
on Windows, you will probably need to add the python scripts folder to the PATH.
## Predicting sequences
To predict the secondary structure of a sequence using the pretrained weights:
sincFold pred AACCGGGUCAGGUCCGGAAGGAAGCAGCCCUAA
This will display the predicted dot-bracket in the console.
SincFold also supports files with multiple sequences in .csv and .fasta formats as input, and can write the predictions to .csv or .ct files.
    echo -e ">seq1\\nAACCGGGUCAGGUCCGGAAGGAAGCAGCCCUAA" > sample.fasta
    echo -e ">seq2\\nGUAGUCGUGGCCGAGUGGUUAAGGCGAUGGACUAGAAAUCCAUUGGGGUCUCCCCGCGCAGGUUCGAAUCCUGCCGACUACGCCA" >> sample.fasta
sincFold pred sample.fasta -o pred_ct_files/
We also provide [this notebook](https://colab.research.google.com/github/sinc-lab/sincFold/blob/main/demo.ipynb) to run the sincFold functions.
## Training and testing models
A new model can be trained using the `train` option. For example, download this training set:
wget "https://raw.githubusercontent.com/sinc-lab/sincFold/main/sample/train.csv"
and then run sincFold with:
sincFold -d cuda train train.csv -n 10 -o output_path
The option "-d cuda" requires a GPU (otherwise remove it), and -n limits the maximum number of epochs to get a quick result. The output log and trained model will be saved in the directory `output_path`.
Then, a different test set can be evaluated with the `test` option. You can download this sample file form:
wget "https://raw.githubusercontent.com/sinc-lab/sincFold/main/sample/test.csv"
and test the model with:
sincFold test test.csv -w output_path/weights.pmt
The model path (-w) is optional, if omitted the pretrained weights are used.
## Reproducible research
You can run the complete train and test scheme using the following code (in this case, using the benchmarkII dataset and the fold 0 data partition).
```python
import os
import pandas as pd
out_path = f"working_path/"
os.mkdir(out_path)
# read dataset and predefined partitions (the files are available in this repository)
dataset = pd.read_csv("data/benchmarkII.csv", index_col="id")
partitions = pd.read_csv("data/benchmarkII_splits.csv")
dataset.loc[partitions[(partitions.fold_number==0) & (partitions.partition=="train")].id].to_csv(out_path + "train.csv")
dataset.loc[partitions[(partitions.fold_number==0) & (partitions.partition=="valid")].id].to_csv(out_path + "valid.csv")
dataset.loc[partitions[(partitions.fold_number==0) & (partitions.partition=="test")].id].to_csv(out_path + "test.csv")
```
Then call the training and testing functions:
sincFold -d cuda train working_path/train.csv --valid_file working_path/valid.csv -o working_path/output/
sincFold -d cuda test working_path/test.csv -w working_path/output/weights.pmt
Using a GPU for training is recommended (with the option '-d cuda'). The complete process may take about 3 hours using an RTX A5000.
```bibtex
@article{sincFold2023,
title={sincFold: end-to-end learning of short- and long-range interactions for RNA folding},
author={Leandro A. Bugnon and Leandro Di Persia and Matias Gerard and Jonathan Raad and
Santiago Prochetto and Emilio Fenoy and Uciel Chorostecki and Federico Ariel and
Georgina Stegmayer and Diego H. Milone},
journal={under review},
year={2023}
}
```
| PypiClean |
/torch_yolo3-0.1.1.tar.gz/torch_yolo3-0.1.1/config/create_custom_model.sh |
#!/bin/bash

NUM_CLASSES=$1
echo "
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
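# 3 anchor boxes per grid cell, each predicting 4 box coordinates + 1 objectness score
# + NUM_CLASSES class scores, hence filters = 3 * (NUM_CLASSES + 5) before every yolo layer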
filters=$(expr 3 \* $(expr $NUM_CLASSES \+ 5))
activation=linear
[yolo]
mask = 6,7,8
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=$NUM_CLASSES
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=$(expr 3 \* $(expr $NUM_CLASSES \+ 5))
activation=linear
[yolo]
mask = 3,4,5
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=$NUM_CLASSES
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=$(expr 3 \* $(expr $NUM_CLASSES \+ 5))
activation=linear
[yolo]
mask = 0,1,2
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=$NUM_CLASSES
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
" >> yolov3-custom.cfg | PypiClean |
/coinbase4py-0.3.2.tar.gz/coinbase4py-0.3.2/README.md |
coinbase4py
===========
A Python & Django project, meant to be included in your own project, for anybody who wants to use coinbase.com
A small Django app that provides a client to coinbase.com
# Installation
- Get a git clone of the source tree:
```
git clone https://github.com/claytantor/coinbase4py.git
```
Then you'll need the "lib" subdir on your PYTHONPATH; the simplest way is to install the package:
```
cd coinbase4py
python setup.py install
```
# Django project setup
1. Add `coinbase4py` to `INSTALLED_APPS` in your project's "settings.py".
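For reference, a minimal sketch of that step (only the `coinbase4py` entry comes from the instructions above; the surrounding apps are illustrative):

```python
# settings.py
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    # ... your other apps ...
    'coinbase4py',
)
```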
| PypiClean |
/tencentcloud_iac_pulumi-0.1.5.tar.gz/tencentcloud_iac_pulumi-0.1.5/tencentcloud_iac_pulumi/dbbrain/db_diag_report_task.py |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['DbDiagReportTaskArgs', 'DbDiagReportTask']
@pulumi.input_type
class DbDiagReportTaskArgs:
def __init__(__self__, *,
end_time: pulumi.Input[str],
instance_id: pulumi.Input[str],
product: pulumi.Input[str],
send_mail_flag: pulumi.Input[int],
start_time: pulumi.Input[str],
contact_groups: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
contact_peoples: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None):
"""
The set of arguments for constructing a DbDiagReportTask resource.
:param pulumi.Input[str] end_time: End time, such as 2020-11-09T14:00:00+08:00.
:param pulumi.Input[str] instance_id: instance id.
:param pulumi.Input[str] product: Service product type, supported values include: mysql - cloud database MySQL, cynosdb - cloud database CynosDB for MySQL.
:param pulumi.Input[int] send_mail_flag: Whether to send mail: 0 - no, 1 - yes.
:param pulumi.Input[str] start_time: Start time, such as 2020-11-08T14:00:00+08:00.
:param pulumi.Input[Sequence[pulumi.Input[int]]] contact_groups: An array of contact group IDs to receive mail from.
:param pulumi.Input[Sequence[pulumi.Input[int]]] contact_peoples: An array of contact IDs to receive emails from.
"""
pulumi.set(__self__, "end_time", end_time)
pulumi.set(__self__, "instance_id", instance_id)
pulumi.set(__self__, "product", product)
pulumi.set(__self__, "send_mail_flag", send_mail_flag)
pulumi.set(__self__, "start_time", start_time)
if contact_groups is not None:
pulumi.set(__self__, "contact_groups", contact_groups)
if contact_peoples is not None:
pulumi.set(__self__, "contact_peoples", contact_peoples)
@property
@pulumi.getter(name="endTime")
def end_time(self) -> pulumi.Input[str]:
"""
End time, such as 2020-11-09T14:00:00+08:00.
"""
return pulumi.get(self, "end_time")
@end_time.setter
def end_time(self, value: pulumi.Input[str]):
pulumi.set(self, "end_time", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Input[str]:
"""
instance id.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: pulumi.Input[str]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter
def product(self) -> pulumi.Input[str]:
"""
Service product type, supported values include: mysql - cloud database MySQL, cynosdb - cloud database CynosDB for MySQL.
"""
return pulumi.get(self, "product")
@product.setter
def product(self, value: pulumi.Input[str]):
pulumi.set(self, "product", value)
@property
@pulumi.getter(name="sendMailFlag")
def send_mail_flag(self) -> pulumi.Input[int]:
"""
Whether to send mail: 0 - no, 1 - yes.
"""
return pulumi.get(self, "send_mail_flag")
@send_mail_flag.setter
def send_mail_flag(self, value: pulumi.Input[int]):
pulumi.set(self, "send_mail_flag", value)
@property
@pulumi.getter(name="startTime")
def start_time(self) -> pulumi.Input[str]:
"""
Start time, such as 2020-11-08T14:00:00+08:00.
"""
return pulumi.get(self, "start_time")
@start_time.setter
def start_time(self, value: pulumi.Input[str]):
pulumi.set(self, "start_time", value)
@property
@pulumi.getter(name="contactGroups")
def contact_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
"""
An array of contact group IDs to receive mail from.
"""
return pulumi.get(self, "contact_groups")
@contact_groups.setter
def contact_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
pulumi.set(self, "contact_groups", value)
@property
@pulumi.getter(name="contactPeoples")
def contact_peoples(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
"""
An array of contact IDs to receive emails from.
"""
return pulumi.get(self, "contact_peoples")
@contact_peoples.setter
def contact_peoples(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
pulumi.set(self, "contact_peoples", value)
@pulumi.input_type
class _DbDiagReportTaskState:
def __init__(__self__, *,
contact_groups: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
contact_peoples: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
end_time: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
product: Optional[pulumi.Input[str]] = None,
send_mail_flag: Optional[pulumi.Input[int]] = None,
start_time: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering DbDiagReportTask resources.
:param pulumi.Input[Sequence[pulumi.Input[int]]] contact_groups: An array of contact group IDs to receive mail from.
:param pulumi.Input[Sequence[pulumi.Input[int]]] contact_peoples: An array of contact IDs to receive emails from.
:param pulumi.Input[str] end_time: End time, such as 2020-11-09T14:00:00+08:00.
:param pulumi.Input[str] instance_id: instance id.
:param pulumi.Input[str] product: Service product type, supported values include: mysql - cloud database MySQL, cynosdb - cloud database CynosDB for MySQL.
:param pulumi.Input[int] send_mail_flag: Whether to send mail: 0 - no, 1 - yes.
:param pulumi.Input[str] start_time: Start time, such as 2020-11-08T14:00:00+08:00.
"""
if contact_groups is not None:
pulumi.set(__self__, "contact_groups", contact_groups)
if contact_peoples is not None:
pulumi.set(__self__, "contact_peoples", contact_peoples)
if end_time is not None:
pulumi.set(__self__, "end_time", end_time)
if instance_id is not None:
pulumi.set(__self__, "instance_id", instance_id)
if product is not None:
pulumi.set(__self__, "product", product)
if send_mail_flag is not None:
pulumi.set(__self__, "send_mail_flag", send_mail_flag)
if start_time is not None:
pulumi.set(__self__, "start_time", start_time)
@property
@pulumi.getter(name="contactGroups")
def contact_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
"""
An array of contact group IDs to receive mail from.
"""
return pulumi.get(self, "contact_groups")
@contact_groups.setter
def contact_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
pulumi.set(self, "contact_groups", value)
@property
@pulumi.getter(name="contactPeoples")
def contact_peoples(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
"""
An array of contact IDs to receive emails from.
"""
return pulumi.get(self, "contact_peoples")
@contact_peoples.setter
def contact_peoples(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
pulumi.set(self, "contact_peoples", value)
@property
@pulumi.getter(name="endTime")
def end_time(self) -> Optional[pulumi.Input[str]]:
"""
End time, such as 2020-11-09T14:00:00+08:00.
"""
return pulumi.get(self, "end_time")
@end_time.setter
def end_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "end_time", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> Optional[pulumi.Input[str]]:
"""
instance id.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter
def product(self) -> Optional[pulumi.Input[str]]:
"""
Service product type, supported values include: mysql - cloud database MySQL, cynosdb - cloud database CynosDB for MySQL.
"""
return pulumi.get(self, "product")
@product.setter
def product(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "product", value)
@property
@pulumi.getter(name="sendMailFlag")
def send_mail_flag(self) -> Optional[pulumi.Input[int]]:
"""
Whether to send mail: 0 - no, 1 - yes.
"""
return pulumi.get(self, "send_mail_flag")
@send_mail_flag.setter
def send_mail_flag(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "send_mail_flag", value)
@property
@pulumi.getter(name="startTime")
def start_time(self) -> Optional[pulumi.Input[str]]:
"""
Start time, such as 2020-11-08T14:00:00+08:00.
"""
return pulumi.get(self, "start_time")
@start_time.setter
def start_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "start_time", value)
class DbDiagReportTask(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
contact_groups: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
contact_peoples: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
end_time: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
product: Optional[pulumi.Input[str]] = None,
send_mail_flag: Optional[pulumi.Input[int]] = None,
start_time: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a resource to create a dbbrain db_diag_report_task
## Example Usage
```python
import pulumi
import tencentcloud_iac_pulumi as tencentcloud
db_diag_report_task = tencentcloud.dbbrain.DbDiagReportTask("dbDiagReportTask",
end_time="%s",
instance_id="%s",
product="mysql",
send_mail_flag=0,
start_time="%s")
```
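The same call with the timestamp format taken from the parameter documentation below; the instance id is a placeholder to replace with a real one:

```python
import pulumi
import tencentcloud_iac_pulumi as tencentcloud

db_diag_report_task = tencentcloud.dbbrain.DbDiagReportTask("dbDiagReportTask",
    end_time="2020-11-09T14:00:00+08:00",
    instance_id="<your-instance-id>",
    product="mysql",
    send_mail_flag=0,
    start_time="2020-11-08T14:00:00+08:00")
```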
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[int]]] contact_groups: An array of contact group IDs to receive mail from.
:param pulumi.Input[Sequence[pulumi.Input[int]]] contact_peoples: An array of contact IDs to receive emails from.
:param pulumi.Input[str] end_time: End time, such as 2020-11-09T14:00:00+08:00.
:param pulumi.Input[str] instance_id: instance id.
:param pulumi.Input[str] product: Service product type, supported values include: mysql - cloud database MySQL, cynosdb - cloud database CynosDB for MySQL.
:param pulumi.Input[int] send_mail_flag: Whether to send mail: 0 - no, 1 - yes.
:param pulumi.Input[str] start_time: Start time, such as 2020-11-08T14:00:00+08:00.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DbDiagReportTaskArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a resource to create a dbbrain db_diag_report_task
## Example Usage
```python
import pulumi
import tencentcloud_iac_pulumi as tencentcloud
db_diag_report_task = tencentcloud.dbbrain.DbDiagReportTask("dbDiagReportTask",
end_time="%s",
instance_id="%s",
product="mysql",
send_mail_flag=0,
start_time="%s")
```
:param str resource_name: The name of the resource.
:param DbDiagReportTaskArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DbDiagReportTaskArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
contact_groups: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
contact_peoples: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
end_time: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
product: Optional[pulumi.Input[str]] = None,
send_mail_flag: Optional[pulumi.Input[int]] = None,
start_time: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.plugin_download_url is None:
opts.plugin_download_url = _utilities.get_plugin_download_url()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DbDiagReportTaskArgs.__new__(DbDiagReportTaskArgs)
__props__.__dict__["contact_groups"] = contact_groups
__props__.__dict__["contact_peoples"] = contact_peoples
if end_time is None and not opts.urn:
raise TypeError("Missing required property 'end_time'")
__props__.__dict__["end_time"] = end_time
if instance_id is None and not opts.urn:
raise TypeError("Missing required property 'instance_id'")
__props__.__dict__["instance_id"] = instance_id
if product is None and not opts.urn:
raise TypeError("Missing required property 'product'")
__props__.__dict__["product"] = product
if send_mail_flag is None and not opts.urn:
raise TypeError("Missing required property 'send_mail_flag'")
__props__.__dict__["send_mail_flag"] = send_mail_flag
if start_time is None and not opts.urn:
raise TypeError("Missing required property 'start_time'")
__props__.__dict__["start_time"] = start_time
super(DbDiagReportTask, __self__).__init__(
'tencentcloud:Dbbrain/dbDiagReportTask:DbDiagReportTask',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
contact_groups: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
contact_peoples: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
end_time: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
product: Optional[pulumi.Input[str]] = None,
send_mail_flag: Optional[pulumi.Input[int]] = None,
start_time: Optional[pulumi.Input[str]] = None) -> 'DbDiagReportTask':
"""
Get an existing DbDiagReportTask resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[int]]] contact_groups: An array of contact group IDs to receive mail from.
:param pulumi.Input[Sequence[pulumi.Input[int]]] contact_peoples: An array of contact IDs to receive emails from.
:param pulumi.Input[str] end_time: End time, such as 2020-11-09T14:00:00+08:00.
:param pulumi.Input[str] instance_id: instance id.
:param pulumi.Input[str] product: Service product type, supported values include: mysql - cloud database MySQL, cynosdb - cloud database CynosDB for MySQL.
:param pulumi.Input[int] send_mail_flag: Whether to send mail: 0 - no, 1 - yes.
:param pulumi.Input[str] start_time: Start time, such as 2020-11-08T14:00:00+08:00.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DbDiagReportTaskState.__new__(_DbDiagReportTaskState)
__props__.__dict__["contact_groups"] = contact_groups
__props__.__dict__["contact_peoples"] = contact_peoples
__props__.__dict__["end_time"] = end_time
__props__.__dict__["instance_id"] = instance_id
__props__.__dict__["product"] = product
__props__.__dict__["send_mail_flag"] = send_mail_flag
__props__.__dict__["start_time"] = start_time
return DbDiagReportTask(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="contactGroups")
def contact_groups(self) -> pulumi.Output[Optional[Sequence[int]]]:
"""
An array of contact group IDs to receive mail from.
"""
return pulumi.get(self, "contact_groups")
@property
@pulumi.getter(name="contactPeoples")
def contact_peoples(self) -> pulumi.Output[Optional[Sequence[int]]]:
"""
An array of contact IDs to receive emails from.
"""
return pulumi.get(self, "contact_peoples")
@property
@pulumi.getter(name="endTime")
def end_time(self) -> pulumi.Output[str]:
"""
End time, such as 2020-11-09T14:00:00+08:00.
"""
return pulumi.get(self, "end_time")
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Output[str]:
"""
instance id.
"""
return pulumi.get(self, "instance_id")
@property
@pulumi.getter
def product(self) -> pulumi.Output[str]:
"""
Service product type, supported values include: mysql - cloud database MySQL, cynosdb - cloud database CynosDB for MySQL.
"""
return pulumi.get(self, "product")
@property
@pulumi.getter(name="sendMailFlag")
def send_mail_flag(self) -> pulumi.Output[int]:
"""
Whether to send mail: 0 - no, 1 - yes.
"""
return pulumi.get(self, "send_mail_flag")
@property
@pulumi.getter(name="startTime")
def start_time(self) -> pulumi.Output[str]:
"""
Start time, such as 2020-11-08T14:00:00+08:00.
"""
return pulumi.get(self, "start_time") | PypiClean |
/pyf.services-2.0.2.tar.gz/pyf.services-2.0.2/pyf/services/public/scripts/wireit/lib/inputex/build/inputex-min.js | (function(){var B=YAHOO.lang;YAHOO.inputEx=function(C){var D=null;if(C.type){D=YAHOO.inputEx.getFieldClass(C.type);if(D===null){D=YAHOO.inputEx.StringField}}else{D=C.fieldClass?C.fieldClass:A.StringField}var E=new D(C.inputParams);return E};var A=YAHOO.inputEx;B.augmentObject(A,{VERSION:"0.2.1",spacerUrl:"images/space.gif",stateEmpty:"empty",stateRequired:"required",stateValid:"valid",stateInvalid:"invalid",messages:{required:"This field is required",invalid:"This field is invalid",valid:"This field is valid",defaultDateFormat:"m/d/Y",months:["January","February","March","April","May","June","July","August","September","October","November","December"]},widget:{},regexps:{email:/^(([^<>()[\]\\.,;:\s@\"]+(\.[^<>()[\]\\.,;:\s@\"]+)*)|(\".+\"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$/,url:/^(http|https):\/\/[a-z0-9]+([\-\.]{1}[a-z0-9]+)*\.[a-z]{2,5}(([0-9]{1,5})?\/.*)?$/i,password:/^[0-9a-zA-Z\x20-\x7E]*$/},typeClasses:{},registerType:function(C,D){if(!B.isString(C)){throw new Error("inputEx.registerType: first argument must be a string")}if(!B.isFunction(D)){throw new Error("inputEx.registerType: second argument must be a function")}this.typeClasses[C]=D},getFieldClass:function(C){return B.isFunction(this.typeClasses[C])?this.typeClasses[C]:null},getType:function(C){for(var D in this.typeClasses){if(this.typeClasses.hasOwnProperty(D)){if(this.typeClasses[D]==C){return D}}}return null},buildField:function(C){return A(C)},sn:function(F,E,C){if(!F){return }if(E){for(var D in E){var H=E[D];if(B.isFunction(H)){continue}if(D=="className"){D="class";F.className=H}if(H!==F.getAttribute(D)){try{if(H===false){F.removeAttribute(D)}else{F.setAttribute(D,H)}}catch(G){}}}}if(C){for(var D in C){if(B.isFunction(C[D])){continue}if(F.style[D]!=C[D]){F.style[D]=C[D]}}}},cn:function(C,G,D,I){if(C=="input"&&YAHOO.env.ua.ie){var F="<"+C;if(G!=="undefined"){for(var E in G){F+=" "+E+'="'+G[E]+'"'}}F+="/>";return document.createElement(F)}else{var H=document.createElement(C);this.sn(H,G,D);if(I){H.innerHTML=I}return H}},indexOf:function(F,C){var D=C.length,E;for(E=0;E<D;E++){if(C[E]==F){return E}}return -1},compactArray:function(C){var F=[],D=C.length,E;for(E=0;E<D;E++){if(!B.isNull(C[E])&&!B.isUndefined(C[E])){F.push(C[E])}}return F}})})();var inputEx=YAHOO.inputEx;(function(){var A=YAHOO.inputEx,B=YAHOO.lang;A.visus={trimpath:function(E,F){if(!TrimPath){alert("TrimPath is not on the page. 
Please load inputex/lib/trimpath-template.js");return }var D=TrimPath.parseTemplate(E.template);var C=D.process(F);return C},func:function(C,D){return C.func(D)},dump:function(C,D){return B.dump(D)}};A.renderVisu=function(I,E,F){var C=I||{};var J=C.visuType||"dump";if(!A.visus.hasOwnProperty(J)){throw new Error("inputEx: no visu for visuType: "+J)}var G=A.visus[J];if(!B.isFunction(G)){throw new Error("inputEx: no visu for visuType: "+J)}var K=null;try{K=G(C,E)}catch(H){throw new Error("inputEx: error while running visu "+J+" : "+H.message);return }var D=null;if(F){if(B.isString(F)){D=YAHOO.util.Dom.get(F)}else{D=F}}if(D){if(YAHOO.lang.isObject(K)&&K.tagName){D.innerHTML="";D.appendChild(K)}else{D.innerHTML=K}}return K}})();(function(){var A=YAHOO.inputEx,B=YAHOO.lang;A.JsonSchema={inputExToSchema:function(C){}};A.JsonSchema.Builder=function(C){var C=C||{};this.options=C;this.schemaToParamMap=C.schemaToParamMap||{title:"label",description:"description",_inputex:null};this.referenceResolver=C.referenceResolver||null;this.defaultOptions=C.defaultOptions||{};this.schemaIdentifierMap=C.schemaIdentifierMap||{}};A.JsonSchema.Builder.prototype={defaultReferenceResolver:function(C){return this.schemaIdentifierMap[C]||null},schemaToInputEx:function(E,N){var H={inputParams:{label:N,name:N}};var O=this.schemaToParamMap;var G=E["$ref"];if(G){var L=null;if(this.referenceResolver){L=this.referenceResolver(G)}if(L===null){L=this.defaultReferenceResolver(G)}if(L===null){throw"Schema for property :"+N+" $references "+G+", not found"}L=B.merge(L);for(var I in E){if(E.hasOwnProperty(I)&&B.isUndefined(L[I])&&I!="$ref"){L[I]=E[I]}}E=L}if(!E.optional){H.inputParams.required=true}for(var R in O){if(O.hasOwnProperty(R)){var K=O[R];var Q=E[R];if(!B.isUndefined(Q)){if(K===null){if(B.isObject(Q)){for(var C in Q){if(Q.hasOwnProperty(C)){H.inputParams[C]=Q[C]}}}}else{H.inputParams[K]=Q}}}}if(E.type){var P=E.type;if(B.isArray(P)){if(P.length===0||(P.length==1&&P[0]=="any")){P="array"}else{P=P[0]}}else{if(B.isObject(P)){}}H.type=P;if(!B.isUndefined(E["default"])){H.inputParams.value=E["default"]}if(P=="array"){H.type="list";if(B.isObject(E.items)&&!B.isArray(E.items)){H.inputParams.elementType=this.schemaToInputEx(E.items,N)}}else{if(P=="object"){H.type="group";if(E.title&&B.isUndefined(H.inputParams.legend)){H.inputParams.legend=E.title}var M=[];if(N){H.inputParams.name=N}for(var R in E.properties){if(E.properties.hasOwnProperty(R)){M.push(this.schemaToInputEx(E.properties[R],R))}}H.inputParams.fields=M}else{if(P=="string"&&E["enum"]){H.type="select";if(E.options){H.inputParams.selectOptions=[];H.inputParams.selectValues=[];for(var J=0;J<E.options.length;J++){var F=E.options[J];H.inputParams.selectOptions[J]=F.label;H.inputParams.selectValues[J]=F.value}}else{H.inputParams.selectValues=E["enum"]}}else{if(P=="string"){if(!B.isUndefined(E.pattern)&&B.isUndefined(H.inputParams.regexp)){if(B.isString(E.pattern)){H.inputParams.regexp=new 
RegExp(E.pattern)}else{H.inputParams.regexp=E.pattern}}if(!B.isUndefined(E.maxLength)&&B.isUndefined(H.inputParams.maxLength)){H.inputParams.maxLength=E.maxLength}if(!B.isUndefined(E.minLength)&&B.isUndefined(H.inputParams.minLength)){H.inputParams.minLength=E.minLength}if(!B.isUndefined(E.readonly)&&B.isUndefined(H.inputParams.readonly)){H.inputParams.readonly=E.readonly}if(E.format){if(E.format=="html"){H.type="html"}else{if(E.format=="date"){H.type="date";H.inputParams.tooltipIcon=true}else{if(E.format=="url"){H.type="url"}else{if(E.format=="email"){H.type="email"}else{if(E.format=="text"){H.type="text"}else{if(E.format=="time"){H.type="time"}else{if(E.format=="ip-address"){H.type="IPv4"}else{if(E.format=="color"){H.type="color"}}}}}}}}}}}}}}for(var D in this.defaultOptions){if(this.defaultOptions.hasOwnProperty(D)&&B.isUndefined(H.inputParams[D])){H.inputParams[D]=this.defaultOptions[D]}}return H},formFromInstance:function(F){if(!F||!F["$schema"]){throw new Error("Invalid json schema instance object. Object must have a '$schema' property.")}var C=this.schemaToInputEx(F["$schema"]);for(var D=0;D<C.fields.length;D++){var E=C.fields[D].inputParams.name;C.fields[D].inputParams.value=F[E]}return C}}})();(function(){var C=YAHOO.inputEx,B=YAHOO.util.Dom,D=YAHOO.lang,A=YAHOO.util;C.Field=function(E){if(!E){var E={}}this.setOptions(E);this.render();this.updatedEvt=new A.CustomEvent("updated",this);this.initEvents();if(!D.isUndefined(this.options.value)){this.setValue(this.options.value,false)}if(E.parentEl){if(D.isString(E.parentEl)){B.get(E.parentEl).appendChild(this.getEl())}else{E.parentEl.appendChild(this.getEl())}}};C.Field.prototype={setOptions:function(E){this.options={};this.options.name=E.name;this.options.value=E.value;this.options.id=E.id||B.generateId();this.options.label=E.label;this.options.description=E.description;this.options.messages={};this.options.messages.required=(E.messages&&E.messages.required)?E.messages.required:C.messages.required;this.options.messages.invalid=(E.messages&&E.messages.invalid)?E.messages.invalid:C.messages.invalid;this.options.className=E.className?E.className:"inputEx-Field";this.options.required=D.isUndefined(E.required)?false:E.required;this.options.showMsg=D.isUndefined(E.showMsg)?false:E.showMsg},render:function(){this.divEl=C.cn("div",{className:"inputEx-fieldWrapper"});if(this.options.id){this.divEl.id=this.options.id}if(this.options.required){B.addClass(this.divEl,"inputEx-required")}if(this.options.label){this.labelDiv=C.cn("div",{id:this.divEl.id+"-label",className:"inputEx-label","for":this.divEl.id+"-field"});this.labelEl=C.cn("label");this.labelEl.appendChild(document.createTextNode(this.options.label));this.labelDiv.appendChild(this.labelEl);this.divEl.appendChild(this.labelDiv)}this.fieldContainer=C.cn("div",{className:this.options.className});this.renderComponent();if(this.options.description){this.fieldContainer.appendChild(C.cn("div",{id:this.divEl.id+"-desc",className:"inputEx-description"},null,this.options.description))}this.divEl.appendChild(this.fieldContainer);this.divEl.appendChild(C.cn("div",null,{clear:"both"}," "))},fireUpdatedEvt:function(){var E=this;setTimeout(function(){E.updatedEvt.fire(E.getValue(),E)},50)},renderComponent:function(){},getEl:function(){return this.divEl},initEvents:function(){},getValue:function(){},setValue:function(F,E){this.setClassFromState();if(E!==false){this.fireUpdatedEvt()}},setClassFromState:function(){if(this.previousState){var 
E="inputEx-"+((this.previousState==C.stateRequired)?C.stateInvalid:this.previousState);B.removeClass(this.divEl,E)}var F=this.getState();if(!(F==C.stateEmpty&&B.hasClass(this.divEl,"inputEx-focused"))){var E="inputEx-"+((F==C.stateRequired)?C.stateInvalid:F);B.addClass(this.divEl,E)}if(this.options.showMsg){this.displayMessage(this.getStateString(F))}this.previousState=F},getStateString:function(E){if(E==C.stateRequired){return this.options.messages.required}else{if(E==C.stateInvalid){return this.options.messages.invalid}else{return""}}},getState:function(){if(this.isEmpty()){return this.options.required?C.stateRequired:C.stateEmpty}return this.validate()?C.stateValid:C.stateInvalid},validate:function(){return true},onFocus:function(F){var E=this.getEl();B.removeClass(E,"inputEx-empty");B.addClass(E,"inputEx-focused")},onBlur:function(E){B.removeClass(this.getEl(),"inputEx-focused");this.setClassFromState()},onChange:function(E){this.fireUpdatedEvt()},close:function(){},disable:function(){},enable:function(){},focus:function(){},destroy:function(){var E=this.getEl();this.updatedEvt.unsubscribeAll();if(B.inDocument(E)){E.parentNode.removeChild(E)}A.Event.purgeElement(E,true)},displayMessage:function(G){if(!this.fieldContainer){return }if(!this.msgEl){this.msgEl=C.cn("div",{className:"inputEx-message"});try{var E=this.divEl.getElementsByTagName("div");this.divEl.insertBefore(this.msgEl,E[(E.length-1>=0)?E.length-1:0])}catch(F){alert(F)}}this.msgEl.innerHTML=G},show:function(){this.divEl.style.display=""},hide:function(){this.divEl.style.display="none"},clear:function(E){this.setValue(D.isUndefined(this.options.value)?"":this.options.value,E)},isEmpty:function(){return this.getValue()===""}}})();(function(){var C=YAHOO.inputEx,D=YAHOO.lang,B=YAHOO.util.Dom,A=YAHOO.util.Event;C.Group=function(E){C.Group.superclass.constructor.call(this,E);if(this.hasInteractions){for(var F=0;F<this.inputs.length;F++){this.runInteractions(this.inputs[F],this.inputs[F].getValue())}}};D.extend(C.Group,C.Field,{setOptions:function(E){this.options={};this.options.className=E.className||"inputEx-Group";this.options.fields=E.fields;this.options.id=E.id;this.options.name=E.name;this.options.value=E.value;this.options.flatten=E.flatten;this.options.legend=E.legend||"";this.inputConfigs=E.fields;this.options.collapsible=D.isUndefined(E.collapsible)?false:E.collapsible;this.options.collapsed=D.isUndefined(E.collapsed)?false:E.collapsed;this.options.disabled=D.isUndefined(E.disabled)?false:E.disabled;this.inputs=[];this.inputsNames={}},render:function(){this.divEl=C.cn("div",{className:this.options.className});if(this.options.id){this.divEl.id=this.options.id}this.renderFields(this.divEl);if(this.options.disabled){this.disable()}},renderFields:function(G){this.fieldset=C.cn("fieldset");this.legend=C.cn("legend",{className:"inputEx-Group-legend"});if(this.options.collapsible){var I=C.cn("div",{className:"inputEx-Group-collapseImg"},null," ");this.legend.appendChild(I);C.sn(this.fieldset,{className:"inputEx-Expanded"})}if(!D.isUndefined(this.options.legend)&&this.options.legend!==""){this.legend.appendChild(document.createTextNode(" "+this.options.legend))}if(this.options.collapsible||(!D.isUndefined(this.options.legend)&&this.options.legend!=="")){this.fieldset.appendChild(this.legend)}for(var F=0;F<this.options.fields.length;F++){var E=this.options.fields[F];var 
H=this.renderField(E);this.fieldset.appendChild(H.getEl())}if(this.options.collapsed){this.toggleCollapse()}G.appendChild(this.fieldset)},renderField:function(F){var E=C.buildField(F);this.inputs.push(E);if(E.options.name){this.inputsNames[E.options.name]=E}if(!this.hasInteractions&&F.interactions){this.hasInteractions=true}E.updatedEvt.subscribe(this.onChange,this,true);return E},initEvents:function(){if(this.options.collapsible){A.addListener(this.legend,"click",this.toggleCollapse,this,true)}},toggleCollapse:function(){if(B.hasClass(this.fieldset,"inputEx-Expanded")){B.replaceClass(this.fieldset,"inputEx-Expanded","inputEx-Collapsed")}else{B.replaceClass(this.fieldset,"inputEx-Collapsed","inputEx-Expanded")}},validate:function(){var F=true;for(var G=0;G<this.inputs.length;G++){var E=this.inputs[G];E.setClassFromState();var H=E.getState();if(H==C.stateRequired||H==C.stateInvalid){F=false}}return F},enable:function(){for(var E=0;E<this.inputs.length;E++){this.inputs[E].enable()}},disable:function(){for(var E=0;E<this.inputs.length;E++){this.inputs[E].disable()}},setValue:function(H,F){if(!H){return }for(var G=0;G<this.inputs.length;G++){var I=this.inputs[G];var E=I.options.name;if(E&&!D.isUndefined(H[E])){I.setValue(H[E],false)}else{I.clear(false)}}if(F!==false){this.fireUpdatedEvt()}},getValue:function(){var G={};for(var F=0;F<this.inputs.length;F++){var E=this.inputs[F].getValue();if(this.inputs[F].options.name){if(this.inputs[F].options.flatten&&D.isObject(E)){D.augmentObject(G,E)}else{G[this.inputs[F].options.name]=E}}}return G},close:function(){for(var E=0;E<this.inputs.length;E++){this.inputs[E].close()}},focus:function(){if(this.inputs.length>0){this.inputs[0].focus()}},getFieldByName:function(E){if(!this.inputsNames.hasOwnProperty(E)){return null}return this.inputsNames[E]},onChange:function(F,G){var H=G[0];var E=G[1];this.runInteractions(E,H);this.fireUpdatedEvt()},runAction:function(E,G){var F=this.getFieldByName(E.name);if(YAHOO.lang.isFunction(F[E.action])){F[E.action].call(F)}else{if(YAHOO.lang.isFunction(E.action)){E.action.call(F,G)}else{throw new Error("action "+E.action+" is not a valid action for field "+E.name)}}},runInteractions:function(F,K){var H=C.indexOf(F,this.inputs);var J=this.options.fields[H];if(YAHOO.lang.isUndefined(J.interactions)){return }var L=J.interactions;for(var I=0;I<L.length;I++){var E=L[I];if(E.valueTrigger===K){for(var G=0;G<E.actions.length;G++){this.runAction(E.actions[G],K)}}}},clear:function(E){for(var F=0;F<this.inputs.length;F++){this.inputs[F].clear(false)}if(E!==false){this.fireUpdatedEvt()}}});C.registerType("group",C.Group)})();(function(){var 
B=YAHOO.util,E=YAHOO.lang,A=YAHOO.util.Event,D=YAHOO.inputEx,C=B.Dom;D.Form=function(F){D.Form.superclass.constructor.call(this,F)};E.extend(D.Form,D.Group,{setOptions:function(F){D.Form.superclass.setOptions.call(this,F);this.buttons=[];this.options.buttons=F.buttons||[];this.options.action=F.action;this.options.method=F.method;if(F.ajax){this.options.ajax={};this.options.ajax.method=F.ajax.method||"POST";this.options.ajax.uri=F.ajax.uri||"default.php";this.options.ajax.callback=F.ajax.callback||{};this.options.ajax.callback.scope=F.ajax.callback.scope||this;this.options.ajax.showMask=E.isUndefined(F.ajax.showMask)?false:F.ajax.showMask}if(E.isFunction(F.onSubmit)){this.options.onSubmit=F.onSubmit}},render:function(){this.divEl=D.cn("div",{className:this.options.className});if(this.options.id){this.divEl.id=this.options.id}this.form=D.cn("form",{method:this.options.method||"POST",action:this.options.action||"",className:this.options.className||"inputEx-Form"});this.divEl.appendChild(this.form);this.form.setAttribute("autocomplete","off");if(this.options.formName){this.form.name=this.options.formName}this.renderFields(this.form);this.renderButtons();if(this.options.disabled){this.disable()}},renderButtons:function(){this.buttonDiv=D.cn("div",{className:"inputEx-Form-buttonBar"});var H,F;for(var G=0;G<this.options.buttons.length;G++){H=this.options.buttons[G];F=D.cn("input",{type:H.type,value:H.value});if(H.onClick){F.onclick=H.onClick}this.buttons.push(F);this.buttonDiv.appendChild(F)}this.form.appendChild(this.buttonDiv)},initEvents:function(){D.Form.superclass.initEvents.call(this);A.addListener(this.form,"submit",this.options.onSubmit||this.onSubmit,this,true)},onSubmit:function(F){if(!this.validate()){A.stopEvent(F);return }if(this.options.ajax){A.stopEvent(F);this.asyncRequest()}},asyncRequest:function(){if(this.options.ajax.showMask){this.showMask()}var F="value="+E.JSON.stringify(this.getValue());B.Connect.asyncRequest(this.options.ajax.method,this.options.ajax.uri,{success:function(G){if(this.options.ajax.showMask){this.hideMask()}if(E.isFunction(this.options.ajax.callback.success)){this.options.ajax.callback.success.call(this.options.ajax.callback.scope,G)}},failure:function(G){if(this.options.ajax.showMask){this.hideMask()}if(E.isFunction(this.options.ajax.callback.failure)){this.options.ajax.callback.failure.call(this.options.ajax.callback.scope,G)}},scope:this},F)},renderMask:function(){if(this.maskRendered){return }C.setStyle(this.divEl,"position","relative");if(YAHOO.env.ua.ie){C.setStyle(this.divEl,"zoom",1)}this.formMask=D.cn("div",{className:"inputEx-Form-Mask"},{display:"none",width:this.divEl.offsetWidth+"px",height:this.divEl.offsetHeight+"px"},"<div class='inputEx-Form-Mask-bg'/><center><br/><div class='inputEx-Form-Mask-spinner'></div><br /><span>"+D.messages.ajaxWait+"</span></div>");this.divEl.appendChild(this.formMask);this.maskRendered=true},showMask:function(){this.renderMask();this.toggleSelectsInIE(false);this.formMask.style.display=""},hideMask:function(){this.toggleSelectsInIE(true);this.formMask.style.display="none"},toggleSelectsInIE:function(F){if(!!YAHOO.env.ua.ie&&YAHOO.env.ua.ie<7){var H=!!F?YAHOO.util.Dom.removeClass:YAHOO.util.Dom.addClass;var G=this;YAHOO.util.Dom.getElementsBy(function(){return true},"select",this.divEl,function(I){H.call(G,I,"inputEx-hidden")})}},enable:function(){D.Form.superclass.enable.call(this);for(var 
F=0;F<this.buttons.length;F++){this.buttons[F].disabled=false}},disable:function(){D.Form.superclass.disable.call(this);for(var F=0;F<this.buttons.length;F++){this.buttons[F].disabled=true}}});D.messages.ajaxWait="Please wait...";D.registerType("form",D.Form)})();(function(){var B=YAHOO.inputEx,C=YAHOO.lang,A=YAHOO.util.Dom;B.CombineField=function(D){B.CombineField.superclass.constructor.call(this,D)};C.extend(B.CombineField,B.Field,{setOptions:function(D){B.CombineField.superclass.setOptions.call(this,D);this.options.className=D.className?D.className:"inputEx-Field inputEx-CombineField";this.options.separators=D.separators;this.options.fields=D.fields},renderComponent:function(){this.inputs=[];this.appendSeparator(0);if(!this.options.fields){return }for(var D=0;D<this.options.fields.length;D++){if(this.options.required){this.options.fields[D].required=true}var E=this.renderField(this.options.fields[D]);E.divEl.removeChild(E.divEl.childNodes[E.divEl.childNodes.length-1]);YAHOO.util.Dom.setStyle(E.getEl(),"float","left");this.fieldContainer.appendChild(E.getEl());this.appendSeparator(D+1)}},appendSeparator:function(E){if(this.options.separators&&this.options.separators[E]){var D=B.cn("div",{className:"inputEx-CombineField-separator"},null,this.options.separators[E]);this.fieldContainer.appendChild(D)}},renderField:function(E){if(this.options.required){if(!E.inputParams){E.inputParams={}}E.inputParams.required=true}var D=B(E);this.inputs.push(D);D.updatedEvt.subscribe(this.onChange,this,true);YAHOO.util.Event.addBlurListener(D.getEl(),this.onBlur,this,true);return D},validate:function(){for(var E=0;E<this.inputs.length;E++){var D=this.inputs[E];var F=D.getState();if(F==B.stateRequired||F==B.stateInvalid){return false}}return true},setValue:function(D,E){for(var F=0;F<this.inputs.length;F++){this.inputs[F].setValue(D[F],false)}B.CombineField.superclass.setValue.call(this,D,E)},getValue:function(){var D=[];for(var E=0;E<this.inputs.length;E++){D.push(this.inputs[E].getValue())}return D},setClassFromState:function(){B.CombineField.superclass.setClassFromState.call(this);for(var D=0;D<this.inputs.length;D++){this.inputs[D].setClassFromState()}},clear:function(D){for(var E=0;E<this.inputs.length;E++){this.inputs[E].clear(false)}this.setClassFromState();if(D!==false){this.fireUpdatedEvt()}},isEmpty:function(){for(var D=0;D<this.inputs.length;D++){if(!this.inputs[D].isEmpty()){return false}}return true}});B.registerType("combine",B.CombineField)})();(function(){var C=YAHOO.inputEx,D=YAHOO.lang,A=YAHOO.util.Event,B=YAHOO.util.Dom;C.StringField=function(E){C.StringField.superclass.constructor.call(this,E);if(this.options.typeInvite){this.updateTypeInvite()}};D.extend(C.StringField,C.Field,{setOptions:function(E){C.StringField.superclass.setOptions.call(this,E);this.options.regexp=E.regexp;this.options.size=E.size;this.options.maxLength=E.maxLength;this.options.minLength=E.minLength;this.options.typeInvite=E.typeInvite;this.options.readonly=E.readonly},renderComponent:function(){this.wrapEl=C.cn("div",{className:"inputEx-StringField-wrapper"});var 
E={};E.type="text";E.id=this.divEl.id?this.divEl.id+"-field":YAHOO.util.Dom.generateId();if(this.options.size){E.size=this.options.size}if(this.options.name){E.name=this.options.name}if(this.options.readonly){E.readonly="readonly"}if(this.options.maxLength){E.maxLength=this.options.maxLength}this.el=C.cn("input",E);this.wrapEl.appendChild(this.el);this.fieldContainer.appendChild(this.wrapEl)},initEvents:function(){A.addListener(this.el,"change",this.onChange,this,true);if(YAHOO.env.ua.ie){var E=this.el;new YAHOO.util.KeyListener(this.el,{keys:[13]},{fn:function(){E.blur();E.focus()}}).enable()}A.addFocusListener(this.el,this.onFocus,this,true);A.addBlurListener(this.el,this.onBlur,this,true);A.addListener(this.el,"keypress",this.onKeyPress,this,true);A.addListener(this.el,"keyup",this.onKeyUp,this,true)},getValue:function(){return(this.options.typeInvite&&this.el.value==this.options.typeInvite)?"":this.el.value},setValue:function(F,E){this.el.value=F;C.StringField.superclass.setValue.call(this,F,E)},validate:function(){var F=this.getValue();if(F==""){return !this.options.required}var E=true;if(this.options.regexp){E=E&&F.match(this.options.regexp)}if(this.options.minLength){E=E&&F.length>=this.options.minLength}return E},disable:function(){this.el.disabled=true},enable:function(){this.el.disabled=false},focus:function(){if(!!this.el&&!D.isUndefined(this.el.focus)){this.el.focus()}},getStateString:function(E){if(E==C.stateInvalid&&this.options.minLength&&this.el.value.length<this.options.minLength){return C.messages.stringTooShort[0]+this.options.minLength+C.messages.stringTooShort[1]}return C.StringField.superclass.getStateString.call(this,E)},setClassFromState:function(){C.StringField.superclass.setClassFromState.call(this);if(this.options.typeInvite){this.updateTypeInvite()}},updateTypeInvite:function(){if(!B.hasClass(this.divEl,"inputEx-focused")){if(this.isEmpty()){B.addClass(this.divEl,"inputEx-typeInvite");this.el.value=this.options.typeInvite}else{B.removeClass(this.divEl,"inputEx-typeInvite")}}else{if(B.hasClass(this.divEl,"inputEx-typeInvite")){this.el.value="";this.previousState=null;B.removeClass(this.divEl,"inputEx-typeInvite")}}},onFocus:function(E){C.StringField.superclass.onFocus.call(this,E);if(this.options.typeInvite){this.updateTypeInvite()}},onKeyPress:function(E){},onKeyUp:function(E){}});C.messages.stringTooShort=["This field should contain at least "," numbers or characters"];C.registerType("string",C.StringField)})();(function(){var C=YAHOO.inputEx,D=YAHOO.lang,A=YAHOO.util.Event,B=YAHOO.util.Dom;C.AutoComplete=function(E){C.AutoComplete.superclass.constructor.call(this,E)};D.extend(C.AutoComplete,C.StringField,{setOptions:function(E){C.AutoComplete.superclass.setOptions.call(this,E);this.options.className=E.className?E.className:"inputEx-Field inputEx-AutoComplete";this.options.datasource=E.datasource;this.options.autoComp=E.autoComp;this.options.returnValue=E.returnValue},initEvents:function(){C.AutoComplete.superclass.initEvents.call(this);A.removeBlurListener(this.el,this.onBlur)},renderComponent:function(){this.wrapEl=C.cn("div",{className:"inputEx-StringField-wrapper"});var E={type:"text",id:YAHOO.util.Dom.generateId()};if(this.options.size){E.size=this.options.size}if(this.options.readonly){E.readonly="readonly"}if(this.options.maxLength){E.maxLength=this.options.maxLength}this.el=C.cn("input",E);var 
F={type:"hidden",value:""};if(this.options.name){F.name=this.options.name}this.hiddenEl=C.cn("input",F);this.wrapEl.appendChild(this.el);this.wrapEl.appendChild(this.hiddenEl);this.fieldContainer.appendChild(this.wrapEl);this.listEl=C.cn("div",{id:B.generateId()});this.fieldContainer.appendChild(this.listEl);A.onAvailable([this.el,this.listEl],this.buildAutocomplete,this,true)},buildAutocomplete:function(){if(!this._nElementsReady){this._nElementsReady=0}this._nElementsReady++;if(this._nElementsReady!=2){return }this.oAutoComp=new YAHOO.widget.AutoComplete(this.el.id,this.listEl.id,this.options.datasource,this.options.autoComp);this.oAutoComp.itemSelectEvent.subscribe(this.itemSelectHandler,this,true);this.oAutoComp.textboxBlurEvent.subscribe(this.onBlur,this,true)},itemSelectHandler:function(G,F){var E=F[2];this.setValue(this.options.returnValue?this.options.returnValue(E):E[0])},onChange:function(E){this.setClassFromState();YAHOO.lang.later(50,this,function(){if(this.el.value==""){this.setValue("")}});this.fireUpdatedEvt()},setValue:function(F,E){this.hiddenEl.value=F;this.setClassFromState();if(E!==false){this.fireUpdatedEvt()}},getValue:function(){return this.hiddenEl.value}});C.registerType("autocomplete",C.AutoComplete)})();(function(){var C=YAHOO.inputEx,D=YAHOO.lang,A=YAHOO.util.Event,B=YAHOO.util.Dom;C.CheckBox=function(E){C.CheckBox.superclass.constructor.call(this,E)};D.extend(C.CheckBox,C.Field,{setOptions:function(E){C.CheckBox.superclass.setOptions.call(this,E);this.options.className=E.className?E.className:"inputEx-Field inputEx-CheckBox";this.options.rightLabel=E.rightLabel||"";this.sentValues=E.sentValues||[true,false];this.options.sentValues=this.sentValues;this.checkedValue=this.sentValues[0];this.uncheckedValue=this.sentValues[1]},renderComponent:function(){var E=this.divEl.id?this.divEl.id+"-field":YAHOO.util.Dom.generateId();this.el=C.cn("input",{id:E,type:"checkbox",checked:(this.options.checked===false)?false:true});this.fieldContainer.appendChild(this.el);this.rightLabelEl=C.cn("label",{"for":E,className:"inputEx-CheckBox-rightLabel"},null,this.options.rightLabel);this.fieldContainer.appendChild(this.rightLabelEl);this.hiddenEl=C.cn("input",{type:"hidden",name:this.options.name||"",value:this.el.checked?this.checkedValue:this.uncheckedValue});this.fieldContainer.appendChild(this.hiddenEl)},initEvents:function(){A.addListener(this.el,"change",this.onChange,this,true);if(YAHOO.env.ua.ie){A.addListener(this.el,"click",function(){YAHOO.lang.later(10,this,this.fireUpdatedEvt)},this,true)}A.addFocusListener(this.el,this.onFocus,this,true);A.addBlurListener(this.el,this.onBlur,this,true)},onChange:function(E){this.hiddenEl.value=this.el.checked?this.checkedValue:this.uncheckedValue;if(!YAHOO.env.ua.ie){C.CheckBox.superclass.onChange.call(this,E)}},getValue:function(){return this.el.checked?this.checkedValue:this.uncheckedValue},setValue:function(F,E){if(F===this.checkedValue){this.hiddenEl.value=F;this.el.checked=true}else{this.hiddenEl.value=F;this.el.checked=false}C.CheckBox.superclass.setValue.call(this,F,E)},disable:function(){this.el.disabled=true},enable:function(){this.el.disabled=false}});C.registerType("boolean",C.CheckBox)})();(function(){var C=YAHOO.inputEx,D=YAHOO.lang,A=YAHOO.util.Event,B=YAHOO.util.Dom;C.ColorField=function(E){C.ColorField.superclass.constructor.call(this,E)};D.extend(C.ColorField,C.Field,{setOptions:function(E){C.ColorField.superclass.setOptions.call(this,E);this.options.className=E.className?E.className:"inputEx-Field inputEx-ColorField 
inputEx-PickerField";this.options.palette=E.palette;this.options.colors=E.colors;if(E.ratio){this.options.ratio=E.ratio}if(E.cellPerLine){this.options.cellPerLine=E.cellPerLine}if(E.overlayPadding){this.options.overlayPadding=E.overlayPadding}if(E.cellHeight){this.options.cellHeight=E.cellHeight}if(E.cellWidth){this.options.cellWidth=E.cellWidth}if(E.cellMargin){this.options.cellMargin=E.cellMargin}},renderComponent:function(){this.el=C.cn("input",{type:"hidden",name:this.options.name||"",value:this.options.value||"#DD7870"});this.colorEl=C.cn("div",{className:"inputEx-ColorField-button"},{backgroundColor:this.el.value});this.wrapEl=C.cn("div",{className:"inputEx-PickerField-wrapper"});this.wrapEl.appendChild(this.el);this.wrapEl.appendChild(this.colorEl);this.oOverlay=new YAHOO.widget.Overlay(B.generateId(),{visible:false});this.oOverlay.setBody(" ");this.oOverlay.body.id=B.generateId();this.button=new YAHOO.widget.Button({type:"menu",menu:this.oOverlay,label:" "});this.button.appendTo(this.wrapEl);this.oOverlay.render(this.wrapEl);B.setStyle(this.oOverlay.body.parentNode,"position","absolute");A.addListener(this.colorEl,"mousedown",function(E){if(!this.oOverlay.cfg.getProperty("visible")){A.stopEvent(E);this.renderPalette();this.button._showMenu()}},this,true);this.button.on("mousedown",this.renderPalette,this,true);this.fieldContainer.appendChild(this.wrapEl)},renderPalette:function(){if(this.paletteRendered){return }var I=this.options.palette||1;this.colors=this.options.colors||this.setDefaultColors(I);this.length=this.colors.length;this.ratio=this.options.ratio||[16,9];this.cellPerLine=this.options.cellPerLine||Math.ceil(Math.sqrt(this.length*this.ratio[0]/this.ratio[1]));this.cellPerColumn=Math.ceil(this.length/this.cellPerLine);this.overlayPadding=this.options.overlayPadding||7;this.cellWidth=this.options.cellWidth||17;this.cellHeight=this.options.cellHeight||17;this.cellMargin=this.options.cellMargin||4;var H=document.getElementById(this.oOverlay.body.id);var F=this.renderColorGrid();H.appendChild(F);var G=(this.cellWidth+2*this.cellMargin)*this.cellPerLine+(YAHOO.env.ua.ie==6?3*this.overlayPadding:0);var E=(this.cellHeight+2*this.cellMargin)*this.cellPerColumn+(YAHOO.env.ua.ie==6?3*this.overlayPadding:0);B.setStyle(H,"width",G+"px");B.setStyle(H,"height",E+"px");B.setStyle(H,"padding",this.overlayPadding+"px");this.button.unsubscribe("mousedown",this.renderPalette);this.paletteRendered=true},setDefaultColors:function(E){return C.ColorField.palettes[E-1]},renderColorGrid:function(){var F=C.cn("div");for(var E=0;E<this.length;E++){var G=C.cn("div",{className:"inputEx-ColorField-square"},{backgroundColor:this.colors[E],width:this.cellWidth+"px",height:this.cellHeight+"px",margin:this.cellMargin+"px"});A.addListener(G,"mousedown",this.onColorClick,this,true);F.appendChild(G)}return F},onColorClick:function(H){var G=A.getTarget(H);A.stopEvent(H);this.oOverlay.hide();var F=B.getStyle(G,"background-color");var E=C.ColorField.ensureHexa(F);this.setValue(E)},setValue:function(F,E){this.el.value=F;B.setStyle(this.colorEl,"background-color",this.el.value);C.ColorField.superclass.setValue.call(this,F,E)},getValue:function(){return this.el.value},close:function(){this.oOverlay.hide()}});C.messages.selectColor="Select a color 
:";C.ColorField.palettes=[["#FFEA99","#FFFF66","#FFCC99","#FFCAB2","#FF99AD","#FFD6FF","#FF6666","#E8EEF7","#ADC2FF","#ADADFF","#CCFFFF","#D6EAAD","#B5EDBC","#CCFF99"],["#DEDFDE","#FFFF6B","#EFCB7B","#FFBE94","#FFB6B5","#A5E3FF","#A5CBFF","#99ABEF","#EFB2E7","#FF9AAD","#94E7C6","#A5FFD6","#CEFFA5","#E7EF9C","#FFE38C"],["#000000","#993300","#333300","#003300","#003366","#000080","#333399","#333333","#800000","#FF6600","#808000","#008000","#008080","#0000FF","#666699","#808080","#FF0000","#FF9900","#99CC00","#339966","#33CCCC","#3366FF","#800080","#969696","#FF00FF","#FFCC00","#FFFF00","#00FF00","#00FFFF","#00CCFF","#993366","#C0C0C0","#FF99CC","#FFCC99","#FFFF99","#CCFFCC","#CCFFFF","#99CCFF","#CC99FF","#F0F0F0"],["#FFFFCC","#FFFF99","#CCFFCC","#CCFF66","#99FFCC","#CCFFFF","#66CCCC","#CCCCFF","#99CCFF","#9999FF","#6666CC","#9966CC","#CC99FF","#FFCCFF","#FF99FF","#CC66CC","#FFCCCC","#FF99CC","#FFCCCC","#CC6699","#FF9999","#FF9966","#FFCC99","#FFFFCC","#FFCC66","#FFFF99","#CCCC66"],["#D0D0D0","#31A8FA","#8EC1E5","#58D7CF","#89E2BB","#A7F7F8","#F6B77C","#FE993F","#FE6440","#F56572","#FA9AA3","#F7B1CA","#E584AF","#D1C3EF","#AB77B8","#C69FE7","#90D28A","#C2F175","#EDEA9A","#F3DF70","#F8D1AE","#F98064","#F54F5E","#EC9099","#F0B5BA","#EDA0BB","#D375AC","#BC8DBE","#8C77B8"],["#EEEEEE","#84CBFC","#BCDAF0","#9BE7E3","#B9EED7","#CBFBFB","#FAD4B1","#FFC28C","#FFA28D","#F9A3AB","#FCC3C8","#FBD1E0","#F0B6CF","#E4DBF6","#CDAED5","#DDC6F1","#BDE4B9","#DBF7AD","#F5F3C3","#F8ECAA","#FBE4CF","#FCB3A2","#F9969F","#F4BDC2","#F6D3D6","#F5C6D7","#E5ADCE","#D7BBD8","#BAAED5"]];C.ColorField.ensureHexa=function(F){var G,E;F=F.replace(/\s/g,"");if(!!F.match(/^rgb\((?:\d{1,3},){2}\d{1,3}\)$/)){var H=function(J){var I=parseInt(J,10).toString(16);if(I.length==1){I="0"+I}return I};G=F.split(/([(,)])/);E="#"+H(G[2])+H(G[4])+H(G[6])}else{if(!!F.match(/^#[\da-fA-F]{6}$/)){E=F}else{E="#FFFFFF"}}return E};C.registerType("color",C.ColorField)})();(function(){var C=YAHOO.inputEx,D=YAHOO.lang,A=YAHOO.util.Event,B=YAHOO.util.Dom;C.DateField=function(E){C.DateField.superclass.constructor.call(this,E)};D.extend(C.DateField,C.StringField,{setOptions:function(E){C.DateField.superclass.setOptions.call(this,E);this.options.className=E.className?E.className:"inputEx-Field inputEx-DateField";this.options.messages.invalid=C.messages.invalidDate;this.options.dateFormat=E.dateFormat||C.messages.defaultDateFormat},validate:function(){var L=this.el.value;var M=L.split("/");if(M.length!=3){return false}if(isNaN(parseInt(M[0],10))||isNaN(parseInt(M[1],10))||isNaN(parseInt(M[2],10))){return false}var I=this.options.dateFormat.split("/");var E=C.indexOf("Y",I);if(M[E].length!=4){return false}var K=parseInt(M[C.indexOf("d",I)],10);var F=parseInt(M[E],10);var H=parseInt(M[C.indexOf("m",I)],10)-1;var J=new Date(F,H,K);var G=J.getFullYear();return((J.getDate()==K)&&(J.getMonth()==H)&&(G==F))},setValue:function(I,F){if(I===""){C.DateField.superclass.setValue.call(this,"",F);return }var H="";if(I instanceof Date){H=this.options.dateFormat.replace("Y",I.getFullYear());var E=I.getMonth()+1;H=H.replace("m",((E<10)?"0":"")+E);var G=I.getDate();H=H.replace("d",((G<10)?"0":"")+G)}else{H=I}C.DateField.superclass.setValue.call(this,H,F)},getValue:function(){var G=C.DateField.superclass.getValue.call(this);if(G===""){return""}var F=G.split("/");var H=this.options.dateFormat.split("/");var J=parseInt(F[C.indexOf("d",H)],10);var I=parseInt(F[C.indexOf("Y",H)],10);var E=parseInt(F[C.indexOf("m",H)],10)-1;return(new Date(I,E,J))}});C.messages.invalidDate="Invalid 
date, ex: 03/27/2008";C.registerType("date",C.DateField)})();(function(){var B=YAHOO.inputEx,C=YAHOO.lang,A=YAHOO.util.Event;B.DateSplitField=function(D){if(!D.dateFormat){D.dateFormat=B.messages.defaultDateFormat}var F=D.dateFormat.split("/");this.yearIndex=B.indexOf("Y",F);this.monthIndex=B.indexOf("m",F);this.dayIndex=B.indexOf("d",F);D.fields=[];for(var E=0;E<3;E++){if(E==this.dayIndex){D.fields.push({type:"integer",inputParams:{typeInvite:B.messages.dayTypeInvite,size:2}})}else{if(E==this.yearIndex){D.fields.push({type:"integer",inputParams:{typeInvite:B.messages.yearTypeInvite,size:4}})}else{D.fields.push({type:"integer",inputParams:{typeInvite:B.messages.monthTypeInvite,size:2}})}}}D.separators=D.separators||[false," "," ",false];B.DateSplitField.superclass.constructor.call(this,D);this.initAutoTab()};C.extend(B.DateSplitField,B.CombineField,{setValue:function(G,E){var D=[];if(!G||!C.isFunction(G.getTime)||!C.isNumber(G.getTime())){D[this.monthIndex]="";D[this.yearIndex]="";D[this.dayIndex]=""}else{for(var F=0;F<3;F++){D.push(F==this.dayIndex?G.getDate():(F==this.yearIndex?G.getFullYear():G.getMonth()+1))}}B.DateSplitField.superclass.setValue.call(this,D,E)},getValue:function(){if(this.isEmpty()){return""}var D=B.DateSplitField.superclass.getValue.call(this);return new Date(D[this.yearIndex],D[this.monthIndex]-1,D[this.dayIndex])},validate:function(){var F=B.DateSplitField.superclass.validate.call(this);if(!F){return false}var E=B.DateSplitField.superclass.getValue.call(this);var D=E[this.dayIndex];var H=E[this.monthIndex];var G=E[this.yearIndex];var I=this.getValue();if(I==""){return true}if(D==""||H==""||G==""){return false}if(G<0||G>9999||D<1||D>31||H<1||H>12){return false}return(I!="Invalid Date")},isEmpty:function(){var D=B.DateSplitField.superclass.getValue.call(this);return(D[this.monthIndex]==""&&D[this.yearIndex]==""&&D[this.dayIndex]=="")},initAutoTab:function(){var D=[48,49,50,51,52,53,54,55,56,57];var G=function(H){for(var I=0,J=D.length;I<J;I++){if(H==D[I]){return true}}return false};var F=this;var E=function(H){C.later(0,F,function(){var I=F.inputs[H];if(I.el.value.length==I.options.size){F.inputs[H+1].focus()}})};A.addListener(this.inputs[0].el,"keypress",function(H){if(G(A.getCharCode(H))){E(0)}},this,true);A.addListener(this.inputs[1].el,"keypress",function(H){if(G(A.getCharCode(H))){E(1)}},this,true)}});B.messages.monthTypeInvite="Month";B.messages.dayTypeInvite="Day";B.messages.yearTypeInvite="Year";B.registerType("datesplit",B.DateSplitField)})();(function(){var C=YAHOO.inputEx,D=YAHOO.lang,A=YAHOO.util.Event,B=YAHOO.util.Dom;C.DatePickerField=function(E){C.DatePickerField.superclass.constructor.call(this,E)};D.extend(C.DatePickerField,C.DateField,{setOptions:function(E){C.DatePickerField.superclass.setOptions.call(this,E);this.options.className=E.className?E.className:"inputEx-Field inputEx-DateField inputEx-PickerField inputEx-DatePickerField";this.options.readonly=true;this.options.calendar=E.calendar||C.messages.defautCalendarOpts},renderComponent:function(){C.DatePickerField.superclass.renderComponent.call(this);this.oOverlay=new YAHOO.widget.Overlay(B.generateId(),{visible:false});this.oOverlay.setBody(" ");this.oOverlay.body.id=B.generateId();this.button=new YAHOO.widget.Button({type:"menu",menu:this.oOverlay,label:" 
"});this.button.appendTo(this.wrapEl);this.oOverlay.render(this.wrapEl);B.setStyle(this.oOverlay.body.parentNode,"position","absolute");A.addListener(this.el,"click",function(){this.renderCalendar();if(!this.oOverlay.justHidden){this.button._showMenu()}},this,true);this.oOverlay.hideEvent.subscribe(function(){this.oOverlay.justHidden=true;YAHOO.lang.later(250,this,function(){this.oOverlay.justHidden=false})},this,true);this.button.on("click",this.renderCalendar,this,true)},renderCalendar:function(){if(!!this.calendarRendered){return }var F=B.generateId();this.calendar=new YAHOO.widget.Calendar(F,this.oOverlay.body.id,this.options.calendar);if(C.messages.shortMonths){this.calendar.cfg.setProperty("MONTHS_SHORT",C.messages.shortMonths)}if(C.messages.months){this.calendar.cfg.setProperty("MONTHS_LONG",C.messages.months)}if(C.messages.weekdays1char){this.calendar.cfg.setProperty("WEEKDAYS_1CHAR",C.messages.weekdays1char)}if(C.messages.shortWeekdays){this.calendar.cfg.setProperty("WEEKDAYS_SHORT",C.messages.shortWeekdays)}var E=function(){var I=B.get(F).tBodies[0],H=I.getElementsByTagName("a"),G;if(H.length>0){B.batch(H,function(J){if(B.hasClass(J.parentNode,"today")){G=J}});if(!G){G=H[0]}D.later(0,G,function(){try{G.focus()}catch(J){}})}};this.calendar.renderEvent.subscribe(E,this.calendar,true);this.oOverlay.beforeShowEvent.subscribe(this.beforeShowOverlay,this,true);this.beforeShowOverlay();this.calendar.selectEvent.subscribe(function(K,I,M){if(!!this.ignoreNextSelectEvent){this.ignoreNextSelectEvent=false;return }this.oOverlay.hide();var H=I[0][0];var J=H[0],L=H[1],G=H[2];this.setValue(new Date(J,L-1,G))},this,true);this.button.unsubscribe("click",this.renderCalendar);this.calendarRendered=true},beforeShowOverlay:function(){var E=this.getValue();if(!!E&&!!this.calendar){this.ignoreNextSelectEvent=true;this.calendar.select(E);this.calendar.cfg.setProperty("pagedate",(E.getMonth()+1)+"/"+E.getFullYear());this.calendar.render()}}});C.messages.defautCalendarOpts={navigator:true};C.registerType("datepicker",C.DatePickerField)})();(function(){var A=YAHOO.inputEx;A.EmailField=function(B){A.EmailField.superclass.constructor.call(this,B)};YAHOO.lang.extend(A.EmailField,A.StringField,{setOptions:function(B){A.EmailField.superclass.setOptions.call(this,B);this.options.messages.invalid=A.messages.invalidEmail;this.options.regexp=A.regexps.email},getValue:function(){return this.el.value.toLowerCase()}});A.messages.invalidEmail="Invalid email, ex: [email protected]";A.registerType("email",A.EmailField)})();(function(){var A=YAHOO.inputEx;A.HiddenField=function(B){A.HiddenField.superclass.constructor.call(this,B)};YAHOO.lang.extend(A.HiddenField,A.Field,{render:function(){this.type=A.HiddenField;this.divEl=A.cn("div",null,{display:"none"});this.el=A.cn("input",{type:"hidden"});if(this.options.name){this.el.name=this.options.name}this.divEl.appendChild(this.el)},setValue:function(C,B){this.el.value=C;A.HiddenField.superclass.setValue.call(this,C,B)},getValue:function(){return this.el.value}});A.registerType("hidden",A.HiddenField)})();(function(){var 
C=YAHOO.inputEx,D=YAHOO.lang,A=YAHOO.util.Event,B=YAHOO.util.Dom;C.InPlaceEdit=function(E){C.InPlaceEdit.superclass.constructor.call(this,E)};D.extend(C.InPlaceEdit,C.Field,{setOptions:function(E){C.InPlaceEdit.superclass.setOptions.call(this,E);this.options.animColors=E.animColors||{from:"#ffff99",to:"#ffffff"};this.options.visu=E.visu;this.options.editorField=E.editorField},renderComponent:function(){this.renderVisuDiv();this.renderEditor()},renderEditor:function(){this.editorContainer=C.cn("div",{className:"inputEx-InPlaceEdit-editor"},{display:"none"});this.editorField=C.buildField(this.options.editorField);this.editorContainer.appendChild(this.editorField.getEl());B.setStyle(this.editorField.getEl(),"float","left");this.okButton=C.cn("input",{type:"button",value:C.messages.okEditor,className:"inputEx-InPlaceEdit-OkButton"});B.setStyle(this.okButton,"float","left");this.editorContainer.appendChild(this.okButton);this.cancelLink=C.cn("a",{className:"inputEx-InPlaceEdit-CancelLink"},null,C.messages.cancelEditor);this.cancelLink.href="";B.setStyle(this.cancelLink,"float","left");this.editorContainer.appendChild(this.cancelLink);this.editorContainer.appendChild(C.cn("div",null,{clear:"both"}));this.fieldContainer.appendChild(this.editorContainer)},onVisuMouseOver:function(E){if(this.colorAnim){this.colorAnim.stop(true)}C.sn(this.formattedContainer,null,{backgroundColor:this.options.animColors.from})},onVisuMouseOut:function(E){if(this.colorAnim){this.colorAnim.stop(true)}this.colorAnim=new YAHOO.util.ColorAnim(this.formattedContainer,{backgroundColor:this.options.animColors},1);this.colorAnim.onComplete.subscribe(function(){B.setStyle(this.formattedContainer,"background-color","")},this,true);this.colorAnim.animate()},renderVisuDiv:function(){this.formattedContainer=C.cn("div",{className:"inputEx-InPlaceEdit-visu"});if(D.isFunction(this.options.formatDom)){this.formattedContainer.appendChild(this.options.formatDom(this.options.value))}else{if(D.isFunction(this.options.formatValue)){this.formattedContainer.innerHTML=this.options.formatValue(this.options.value)}else{this.formattedContainer.innerHTML=D.isUndefined(this.options.value)?C.messages.emptyInPlaceEdit:this.options.value}}this.fieldContainer.appendChild(this.formattedContainer)},initEvents:function(){A.addListener(this.formattedContainer,"click",this.openEditor,this,true);A.addListener(this.formattedContainer,"mouseover",this.onVisuMouseOver,this,true);A.addListener(this.formattedContainer,"mouseout",this.onVisuMouseOut,this,true);A.addListener(this.okButton,"click",this.onOkEditor,this,true);A.addListener(this.cancelLink,"click",this.onCancelEditor,this,true);if(this.editorField.el){A.addListener(this.editorField.el,"keyup",this.onKeyUp,this,true);A.addListener(this.editorField.el,"keydown",this.onKeyDown,this,true)}},onKeyUp:function(E){if(E.keyCode==13){this.onOkEditor()}if(E.keyCode==27){this.onCancelEditor(E)}},onKeyDown:function(E){if(E.keyCode==9){this.onOkEditor()}},onOkEditor:function(){var F=this.editorField.getValue();this.setValue(F);this.editorContainer.style.display="none";this.formattedContainer.style.display="";var E=this;setTimeout(function(){E.updatedEvt.fire(F)},50)},onCancelEditor:function(E){A.stopEvent(E);this.editorContainer.style.display="none";this.formattedContainer.style.display=""},openEditor:function(){var 
E=this.getValue();this.editorContainer.style.display="";this.formattedContainer.style.display="none";if(!D.isUndefined(E)){this.editorField.setValue(E)}this.editorField.focus();if(this.editorField.el&&D.isFunction(this.editorField.el.setSelectionRange)&&(!!E&&!!E.length)){this.editorField.el.setSelectionRange(0,E.length)}},getValue:function(){var E=(this.editorContainer.style.display=="");return E?this.editorField.getValue():this.value},setValue:function(F,E){this.value=F;if(D.isUndefined(F)||F==""){C.renderVisu(this.options.visu,C.messages.emptyInPlaceEdit,this.formattedContainer)}else{C.renderVisu(this.options.visu,this.value,this.formattedContainer)}if(this.editorContainer.style.display==""){this.editorField.setValue(F)}C.InPlaceEdit.superclass.setValue.call(this,F,E)},close:function(){this.editorContainer.style.display="none";this.formattedContainer.style.display=""}});C.messages.emptyInPlaceEdit="(click to edit)";C.messages.cancelEditor="cancel";C.messages.okEditor="Ok";C.registerType("inplaceedit",C.InPlaceEdit)})();(function(){var B=YAHOO.inputEx,C=YAHOO.lang,A=YAHOO.util.Event;B.IntegerField=function(D){B.IntegerField.superclass.constructor.call(this,D)};YAHOO.lang.extend(B.IntegerField,B.StringField,{setOptions:function(D){B.IntegerField.superclass.setOptions.call(this,D);this.options.negative=C.isUndefined(D.negative)?false:D.negative},getValue:function(){if((this.options.typeInvite&&this.el.value==this.options.typeInvite)||this.el.value==""){return""}return parseInt(this.el.value,10)},validate:function(){var D=this.getValue();if(D==""){return true}if(isNaN(D)){return false}return !!this.el.value.match(new RegExp(this.options.negative?"^[+-]?[0-9]*$":"^\\+?[0-9]*$"))}});B.registerType("integer",B.IntegerField)})();(function(){var C=YAHOO.inputEx,D=YAHOO.lang,A=YAHOO.util.Event,B=YAHOO.util.Dom;C.ListField=function(E){this.subFields=[];C.ListField.superclass.constructor.call(this,E)};D.extend(C.ListField,C.Field,{setOptions:function(E){C.ListField.superclass.setOptions.call(this,E);this.options.className=E.className?E.className:"inputEx-Field inputEx-ListField";this.options.sortable=D.isUndefined(E.sortable)?false:E.sortable;this.options.elementType=E.elementType||{type:"string"};this.options.useButtons=D.isUndefined(E.useButtons)?false:E.useButtons;this.options.unique=D.isUndefined(E.unique)?false:E.unique;this.options.listAddLabel=E.listAddLabel||C.messages.listAddLink;this.options.listRemoveLabel=E.listRemoveLabel||C.messages.listRemoveLink},renderComponent:function(){if(this.options.useButtons){this.addButton=C.cn("img",{src:C.spacerUrl,className:"inputEx-ListField-addButton"});this.fieldContainer.appendChild(this.addButton)}this.fieldContainer.appendChild(C.cn("span",null,{marginLeft:"4px"},this.options.listLabel));this.childContainer=C.cn("div",{className:"inputEx-ListField-childContainer"});this.fieldContainer.appendChild(this.childContainer);if(!this.options.useButtons){this.addButton=C.cn("a",{className:"inputEx-List-link"},null,this.options.listAddLabel);this.fieldContainer.appendChild(this.addButton)}},initEvents:function(){A.addListener(this.addButton,"click",this.onAddButton,this,true)},validate:function(){var F=true;var J={};for(var G=0;G<this.subFields.length&&F;G++){var E=this.subFields[G];E.setClassFromState();var H=E.getState();if(H==C.stateRequired||H==C.stateInvalid){F=false}if(this.options.unique){var I=D.dump(E.getValue());if(J[I]){F=false}else{J[I]=true}}}return F},setValue:function(H,E){if(!D.isArray(H)){return }for(var 
G=0;G<H.length;G++){if(G==this.subFields.length){this.addElement(H[G])}else{this.subFields[G].setValue(H[G],false)}}var F=this.subFields.length-H.length;if(F>0){for(var G=0;G<F;G++){this.removeElement(H.length)}}C.ListField.superclass.setValue.call(this,H,E)},getValue:function(){var E=[];for(var F=0;F<this.subFields.length;F++){E[F]=this.subFields[F].getValue()}return E},addElement:function(F){var E=this.renderSubField(F);this.subFields.push(E);return E},onAddButton:function(F){A.stopEvent(F);var E=this.addElement();E.focus();this.fireUpdatedEvt()},renderSubField:function(L){var H=C.cn("div");if(this.options.useButtons){var F=C.cn("img",{src:C.spacerUrl,className:"inputEx-ListField-delButton"});A.addListener(F,"click",this.onDelete,this,true);H.appendChild(F)}var K=D.merge({},this.options.elementType);if(!K.inputParams){K.inputParams={}}if(!D.isUndefined(L)){K.inputParams.value=L}var I=C.buildField(K);var G=I.getEl();B.setStyle(G,"margin-left","4px");B.setStyle(G,"float","left");H.appendChild(G);I.updatedEvt.subscribe(this.onChange,this,true);if(this.options.sortable){var J=C.cn("div",{className:"inputEx-ListField-Arrow inputEx-ListField-ArrowUp"});A.addListener(J,"click",this.onArrowUp,this,true);var E=C.cn("div",{className:"inputEx-ListField-Arrow inputEx-ListField-ArrowDown"});A.addListener(E,"click",this.onArrowDown,this,true);H.appendChild(J);H.appendChild(E)}if(!this.options.useButtons){var F=C.cn("a",{className:"inputEx-List-link"},null,this.options.listRemoveLabel);A.addListener(F,"click",this.onDelete,this,true);H.appendChild(F)}H.appendChild(C.cn("div",null,{clear:"both"}));this.childContainer.appendChild(H);return I},onArrowUp:function(K){var H=A.getTarget(K).parentNode;var F=null;var G=-1;for(var I=1;I<H.parentNode.childNodes.length;I++){var E=H.parentNode.childNodes[I];if(E==H){F=H.parentNode.childNodes[I-1];G=I;break}}if(F){var L=this.childContainer.removeChild(H);var J=this.childContainer.insertBefore(L,F);var M=this.subFields[G];this.subFields[G]=this.subFields[G-1];this.subFields[G-1]=M;if(this.arrowAnim){this.arrowAnim.stop(true)}this.arrowAnim=new YAHOO.util.ColorAnim(J,{backgroundColor:{from:"#eeee33",to:"#eeeeee"}},0.4);this.arrowAnim.onComplete.subscribe(function(){B.setStyle(J,"background-color","")});this.arrowAnim.animate();this.fireUpdatedEvt()}},onArrowDown:function(K){var G=A.getTarget(K).parentNode;var F=-1;var J=null;for(var H=0;H<G.parentNode.childNodes.length;H++){var E=G.parentNode.childNodes[H];if(E==G){J=G.parentNode.childNodes[H+1];F=H;break}}if(J){var L=this.childContainer.removeChild(G);var I=B.insertAfter(L,J);var M=this.subFields[F];this.subFields[F]=this.subFields[F+1];this.subFields[F+1]=M;if(this.arrowAnim){this.arrowAnim.stop(true)}this.arrowAnim=new YAHOO.util.ColorAnim(I,{backgroundColor:{from:"#eeee33",to:"#eeeeee"}},1);this.arrowAnim.onComplete.subscribe(function(){B.setStyle(I,"background-color","")});this.arrowAnim.animate();this.fireUpdatedEvt()}},onDelete:function(I){A.stopEvent(I);var F=A.getTarget(I).parentNode;var E=-1;var H=F.childNodes[this.options.useButtons?1:0];for(var G=0;G<this.subFields.length;G++){if(this.subFields[G].getEl()==H){E=G;break}}if(E!=-1){this.removeElement(E)}this.fireUpdatedEvt()},removeElement:function(F){var E=this.subFields[F].getEl().parentNode;this.subFields[F]=undefined;this.subFields=C.compactArray(this.subFields);E.parentNode.removeChild(E)}});C.registerType("list",C.ListField);C.messages.listAddLink="Add";C.messages.listRemoveLink="remove"})();(function(){var 
B=YAHOO.inputEx,A=YAHOO.util.Event,C=YAHOO.lang;B.NumberField=function(D){B.NumberField.superclass.constructor.call(this,D)};YAHOO.lang.extend(B.NumberField,B.StringField,{getValue:function(){if((this.options.typeInvite&&this.el.value==this.options.typeInvite)||this.el.value==""){return""}return parseFloat(this.el.value)},validate:function(){var D=this.getValue();if(D==""){return true}if(isNaN(D)){return false}return !!this.el.value.match(/^([\+\-]?((([0-9]+(\.)?)|([0-9]*\.[0-9]+))([eE][+-]?[0-9]+)?))$/)}});B.registerType("number",B.NumberField)})();(function(){var A=YAHOO.inputEx;A.PairField=function(B){B.fields=[B.leftFieldOptions||{},B.rightFieldOptions||{}];B.separators=[false," : ",false];A.PairField.superclass.constructor.call(this,B)};YAHOO.lang.extend(A.PairField,A.CombineField);A.registerType("pair",A.PairField)})();(function(){var B=YAHOO.inputEx,A=YAHOO.util.Event,C=YAHOO.lang;B.PasswordField=function(D){B.PasswordField.superclass.constructor.call(this,D)};C.extend(B.PasswordField,B.StringField,{setOptions:function(D){B.PasswordField.superclass.setOptions.call(this,D);this.options.className=D.className?D.className:"inputEx-Field inputEx-PasswordField";this.options.regexp=B.regexps.password;this.options.strengthIndicator=YAHOO.lang.isUndefined(D.strengthIndicator)?false:D.strengthIndicator;this.options.capsLockWarning=YAHOO.lang.isUndefined(D.capsLockWarning)?false:D.capsLockWarning},renderComponent:function(){this.wrapEl=B.cn("div",{className:"inputEx-StringField-wrapper"});var D={};D.type="password";D.size=this.options.size;if(this.options.name){D.name=this.options.name}this.el=B.cn("input",D);this.wrapEl.appendChild(this.el);this.fieldContainer.appendChild(this.wrapEl);if(this.options.capsLockWarning){this.capsLockWarning=B.cn("div",{className:"capsLockWarning"},{display:"none"},B.messages.capslockWarning);this.wrapEl.appendChild(this.capsLockWarning)}if(this.options.strengthIndicator){this.strengthEl=B.cn("div",{className:"inputEx-Password-StrengthIndicator"},null,B.messages.passwordStrength);this.strengthBlocks=[];for(var E=0;E<4;E++){this.strengthBlocks[E]=B.cn("div",{className:"inputEx-Password-StrengthIndicatorBlock"});this.strengthEl.appendChild(this.strengthBlocks[E])}this.wrapEl.appendChild(this.strengthEl)}},setConfirmationField:function(D){this.options.confirmPasswordField=D;this.options.messages.invalid=B.messages.invalidPasswordConfirmation;this.options.confirmPasswordField.options.confirmationPasswordField=this},validate:function(){if(this.options.confirmPasswordField){if(this.options.confirmPasswordField.getValue()!=this.getValue()){return false}}return B.PasswordField.superclass.validate.call(this)},getStateString:function(D){if(D==B.stateInvalid&&this.options.minLength&&this.el.value.length<this.options.minLength){return B.messages.invalidPassword[0]+this.options.minLength+B.messages.invalidPassword[1]}return B.StringField.superclass.getStateString.call(this,D)},onInput:function(D){B.PasswordField.superclass.onInput.call(this,D);if(this.options.confirmationPasswordField){this.options.confirmationPasswordField.setClassFromState()}},onKeyPress:function(H){B.PasswordField.superclass.onKeyPress.call(this,H);if(this.options.capsLockWarning){var G=H?H:window.event;if(!G){return }var E=G.target?G.target:G.srcElement;var I=-1;if(G.which){I=G.which}else{if(G.keyCode){I=G.keyCode}}var F=false;if(G.shiftKey){F=G.shiftKey}else{if(G.modifiers){F=!!(G.modifiers&4)}}var 
D=((I>=65&&I<=90)&&!F)||((I>=97&&I<=122)&&F);this.setCapsLockWarning(D)}},onKeyUp:function(D){B.PasswordField.superclass.onKeyUp.call(this,D);if(this.options.strengthIndicator){C.later(0,this,this.updateStrengthIndicator)}},setCapsLockWarning:function(D){this.capsLockWarning.style.display=D?"":"none"},updateStrengthIndicator:function(){var F=B.PasswordField.getPasswordStrength(this.getValue());for(var E=0;E<4;E++){var D=(F>=E*25)&&(F>0);YAHOO.util.Dom.setStyle(this.strengthBlocks[E],"background-color",D?"#4AE817":"#FFFFFF")}}});B.PasswordField.getPasswordStrength=function(K){var G=(K.length);if(G>7){G=7}var I=K.replace(/[0-9]/g,"");var J=(K.length-I.length);if(J>3){J=3}var D=K.replace(/\W/g,"");var F=(K.length-D.length);if(F>3){F=3}var E=K.replace(/[A-Z]/g,"");var L=(K.length-E.length);if(L>3){L=3}var H=((G*10)-20)+(J*10)+(F*20)+(L*10);if(H<0){H=0}if(H>100){H=100}return H};B.messages.invalidPassword=["The password schould contain at least "," numbers or characters"];B.messages.invalidPasswordConfirmation="Passwords are different !";B.messages.capslockWarning="Warning: CapsLock is on";B.messages.passwordStrength="Password Strength";B.registerType("password",B.PasswordField)})();(function(){var C=YAHOO.inputEx,D=YAHOO.lang,A=YAHOO.util.Event,B=YAHOO.util.Dom;C.RadioField=function(E){C.RadioField.superclass.constructor.call(this,E)};D.extend(C.RadioField,C.Field,{setOptions:function(E){C.RadioField.superclass.setOptions.call(this,E);this.options.className=E.className?E.className:"inputEx-Field inputEx-RadioField";if(D.isUndefined(E.allowAny)||E.allowAny===false){this.options.allowAny=false}else{this.options.allowAny={};if(D.isArray(E.allowAny.separators)){this.options.allowAny.separators=E.allowAny.separators}this.options.allowAny.validator=(D.isFunction(E.allowAny.validator))?E.allowAny.validator:function(F){return true};this.options.allowAny.value=(!D.isUndefined(E.allowAny.value))?E.allowAny.value:""}this.options.choices=E.choices;this.options.values=D.isArray(E.values)?E.values:E.choices},renderComponent:function(){this.optionEls=[];for(var I=0;I<this.options.choices.length;I++){var J=C.cn("div",{className:"inputEx-RadioField-choice"});var H=this.divEl.id?this.divEl.id+"-field-opt"+I:YAHOO.util.Dom.generateId();var G=C.cn("input",{id:H,type:"radio",name:this.options.name,value:this.options.values[I]});J.appendChild(G);var F=C.cn("label",{"for":H,className:"inputEx-RadioField-rightLabel"},null,""+this.options.choices[I]);J.appendChild(F);this.fieldContainer.appendChild(J);this.optionEls.push(G)}if(this.options.allowAny){var J=C.cn("div",{className:"inputEx-RadioField-choice"});if(YAHOO.env.ua.ie){this.radioAny=document.createElement("<input type='radio' name='"+this.options.name+"'>")}else{this.radioAny=C.cn("input",{type:"radio",name:this.options.name})}J.appendChild(this.radioAny);this.anyField=new C.StringField({value:this.options.allowAny.value});B.setStyle(this.radioAny,"float","left");B.setStyle(this.anyField.getEl(),"float","left");this.anyField.disable();if(this.options.allowAny.separators){var E=C.cn("div",null,{margin:"3px"},this.options.allowAny.separators[0]||"");B.setStyle(E,"float","left");J.appendChild(E)}J.appendChild(this.anyField.getEl());if(this.options.allowAny.separators){var 
E=C.cn("div",null,{margin:"3px"},this.options.allowAny.separators[1]||"");B.setStyle(E,"float","left");J.appendChild(E)}this.fieldContainer.appendChild(J);this.optionEls.push(this.radioAny)}},initEvents:function(){A.addListener(this.optionEls,"change",this.onChange,this,true);A.addFocusListener(this.optionEls,this.onFocus,this,true);A.addBlurListener(this.optionEls,this.onBlur,this,true);if(YAHOO.env.ua.ie){A.addListener(this.optionEls,"click",function(){YAHOO.lang.later(10,this,this.fireUpdatedEvt)},this,true)}if(this.anyField){this.anyField.updatedEvt.subscribe(function(E){C.RadioField.superclass.onChange.call(this,E)},this,true);A.addBlurListener(this.anyField.el,this.onBlur,this,true)}},onChange:function(E){if(this.radioAny){if(this.radioAny==A.getTarget(E)){this.anyField.enable();D.later(50,this.anyField,"focus")}else{this.anyField.disable()}}if(!YAHOO.env.ua.ie){C.RadioField.superclass.onChange.call(this,E)}},getValue:function(){for(var E=0;E<this.optionEls.length;E++){if(this.optionEls[E].checked){if(this.radioAny&&this.radioAny==this.optionEls[E]){var F=this.anyField.getValue();return F}return this.options.values[E]}}return""},setValue:function(I,E){var G=true,H;for(var F=0;F<this.optionEls.length;F++){if(I==this.options.values[F]){this.optionEls[F].checked=true;G=false}else{this.optionEls[F].checked=false}if(this.radioAny&&this.radioAny==this.optionEls[F]){H=this.optionEls[F]}}if(this.radioAny&&G){H.checked=true;this.anyField.enable();this.anyField.setValue(I,false)}C.StringField.superclass.setValue.call(this,I,E)},validate:function(){if(this.options.allowAny){for(var E=0;E<this.optionEls.length;E++){if(this.optionEls[E].checked){if(this.radioAny&&this.radioAny==this.optionEls[E]){var F=this.anyField.getValue();return this.options.allowAny.validator(F)}}}}return true}});C.registerType("radio",C.RadioField)})();(function(){var A=YAHOO.inputEx,B=YAHOO.lang;A.RTEField=function(C){A.RTEField.superclass.constructor.call(this,C)};B.extend(A.RTEField,A.Field,{setOptions:function(C){A.RTEField.superclass.setOptions.call(this,C);this.options.opts=C.opts||{};this.options.type=C.type},renderComponent:function(){if(!A.RTEfieldsNumber){A.RTEfieldsNumber=0}var H="inputEx-RTEField-"+A.RTEfieldsNumber;var D={id:H};if(this.options.name){D.name=this.options.name}this.el=A.cn("textarea",D);A.RTEfieldsNumber+=1;this.fieldContainer.appendChild(this.el);var F={height:"300px",width:"580px",dompath:true};var G=this.options.opts;for(var E in G){if(B.hasOwnProperty(G,E)){F[E]=G[E]}}var C=((this.options.type&&(this.options.type=="simple"))?YAHOO.widget.SimpleEditor:YAHOO.widget.Editor);if(C){this.editor=new C(H,F);this.editor.render()}else{alert("Editor is not on the page")}},setValue:function(D,C){if(this.editor){var E=this.el.id+"_editor";if(!YAHOO.util.Dom.get(E)){this.el.value=D}else{this.editor.setEditorHTML(D)}}if(C!==false){this.fireUpdatedEvt()}},getValue:function(){try{this.editor.saveHTML();return this.el.value}catch(C){}}});A.registerType("html",A.RTEField)})();(function(){var B=YAHOO.inputEx,A=YAHOO.util.Event,C=YAHOO.lang;B.SelectField=function(D){B.SelectField.superclass.constructor.call(this,D)};C.extend(B.SelectField,B.Field,{setOptions:function(D){B.SelectField.superclass.setOptions.call(this,D);this.options.multiple=C.isUndefined(D.multiple)?false:D.multiple;this.options.selectValues=[];this.options.selectOptions=[];for(var 
E=0,F=D.selectValues.length;E<F;E++){this.options.selectValues.push(D.selectValues[E]);this.options.selectOptions.push(""+((D.selectOptions&&!C.isUndefined(D.selectOptions[E]))?D.selectOptions[E]:D.selectValues[E]))}},renderComponent:function(){this.el=B.cn("select",{id:this.divEl.id?this.divEl.id+"-field":YAHOO.util.Dom.generateId(),name:this.options.name||""});if(this.options.multiple){this.el.multiple=true;this.el.size=this.options.selectValues.length}this.optionEls={};var D;for(var E=0;E<this.options.selectValues.length;E++){D=B.cn("option",{value:this.options.selectValues[E]},null,this.options.selectOptions[E]);this.optionEls[this.options.selectOptions[E]]=D;this.el.appendChild(D)}this.fieldContainer.appendChild(this.el)},initEvents:function(){A.addListener(this.el,"change",this.onChange,this,true);A.addFocusListener(this.el,this.onFocus,this,true);A.addBlurListener(this.el,this.onBlur,this,true)},setValue:function(H,E){var D=0;var G;for(var F=0;F<this.options.selectValues.length;F++){if(H===this.options.selectValues[F]){G=this.el.childNodes[F];G.selected="selected"}}B.SelectField.superclass.setValue.call(this,H,E)},getValue:function(){return this.options.selectValues[this.el.selectedIndex]},disable:function(){this.el.disabled=true},enable:function(){this.el.disabled=false},addOption:function(E){var J=E.value;var G=""+(!C.isUndefined(E.option)?E.option:E.value);var K=this.options.selectOptions.length;var D=K;if(C.isNumber(E.position)&&E.position>=0&&E.position<=D){D=parseInt(E.position,10)}else{if(C.isString(E.before)){for(var F=0;F<K;F++){if(this.options.selectOptions[F]===E.before){D=F;break}}}else{if(C.isString(E.after)){for(var F=0;F<K;F++){if(this.options.selectOptions[F]===E.after){D=F+1;break}}}}}this.options.selectValues=this.options.selectValues.slice(0,D).concat([J]).concat(this.options.selectValues.slice(D,K));this.options.selectOptions=this.options.selectOptions.slice(0,D).concat([G]).concat(this.options.selectOptions.slice(D,K));var I=B.cn("option",{value:J},null,G);this.optionEls[G]=I;if(D<K){YAHOO.util.Dom.insertBefore(I,this.el.childNodes[D])}else{this.el.appendChild(I)}if(!!E.selected){var H=this;setTimeout(function(){H.setValue(J)},0)}},removeOption:function(G){var F;var I=this.options.selectOptions.length;var E=this.el.selectedIndex;if(C.isNumber(G.position)&&G.position>=0&&G.position<=I){F=parseInt(G.position,10)}else{if(C.isString(G.option)){for(var H=0;H<I;H++){if(this.options.selectOptions[H]===G.option){F=H;break}}}else{if(C.isString(G.value)){for(var H=0;H<I;H++){if(this.options.selectValues[H]===G.value){F=H;break}}}}}if(!C.isNumber(F)){throw new Error("SelectField : invalid or missing position, option or value in removeOption")}this.options.selectValues.splice(F,1);var D=this.options.selectOptions.splice(F,1);this.el.removeChild(this.optionEls[D]);delete this.optionEls[D];if(E==F){this.clear()}}});B.registerType("select",B.SelectField)})();(function(){var B=YAHOO.inputEx,A=YAHOO.util.Event;B.Textarea=function(C){B.Textarea.superclass.constructor.call(this,C)};YAHOO.lang.extend(B.Textarea,B.StringField,{setOptions:function(C){B.Textarea.superclass.setOptions.call(this,C);this.options.rows=C.rows||6;this.options.cols=C.cols||23},renderComponent:function(){this.wrapEl=B.cn("div",{className:"inputEx-StringField-wrapper"});var 
C={};C.id=this.divEl.id?this.divEl.id+"-field":YAHOO.util.Dom.generateId();C.rows=this.options.rows;C.cols=this.options.cols;if(this.options.name){C.name=this.options.name}this.el=B.cn("textarea",C,null,this.options.value);this.wrapEl.appendChild(this.el);this.fieldContainer.appendChild(this.wrapEl)},validate:function(){var C=B.Textarea.superclass.validate.call(this);if(this.options.maxLength){C=C&&this.getValue().length<=this.options.maxLength}return C},getStateString:function(C){if(C==B.stateInvalid&&this.options.minLength&&this.el.value.length<this.options.minLength){return B.messages.stringTooShort[0]+this.options.minLength+B.messages.stringTooShort[1]}else{if(C==B.stateInvalid&&this.options.maxLength&&this.el.value.length>this.options.maxLength){return B.messages.stringTooLong[0]+this.options.maxLength+B.messages.stringTooLong[1]}}return B.Textarea.superclass.getStateString.call(this,C)}});B.messages.stringTooLong=["This field should contain at most "," numbers or characters"];B.registerType("text",B.Textarea)})();(function(){var B=YAHOO.inputEx,A=YAHOO.util.Event,C=YAHOO.lang;B.TimeField=function(E){var I=[];for(var G=0;G<24;G++){var H="";if(G<10){H="0"}H+=G;I.push(H)}var D=[];var F=[];for(var G=0;G<60;G++){var H="";if(G<10){H="0"}H+=G;D.push(H);F.push(H)}E.fields=[{type:"select",inputParams:{selectOptions:I,selectValues:I}},{type:"select",inputParams:{selectOptions:D,selectValues:D}},{type:"select",inputParams:{selectOptions:F,selectValues:F}}];E.separators=E.separators||[false,":",":",false];B.TimeField.superclass.constructor.call(this,E)};C.extend(B.TimeField,B.CombineField,{getValue:function(){var D=B.TimeField.superclass.getValue.call(this);return D.join(":")},setValue:function(E,D){B.TimeField.superclass.setValue.call(this,E.split(":"),D)}});B.registerType("time",B.TimeField)})();(function(){var B=YAHOO.inputEx,A=YAHOO.util.Event,C=YAHOO.lang;B.DateTimeField=function(D){D.fields=[{type:"datepicker",inputParams:{}},{type:"time",inputParams:{}}];if(D.dateFormat){D.fields[0].inputParams.dateFormat=D.dateFormat}D.separators=D.separators||[false," ",false];B.DateTimeField.superclass.constructor.call(this,D)};C.extend(B.DateTimeField,B.CombineField,{getValue:function(){var E=this.inputs[0].getValue();if(E==""){return null}var D=this.inputs[1].getValue().split(":");E.setHours(D[0]);E.setMinutes(D[1]);E.setSeconds(D[2]);return E},setValue:function(I,E){if(!C.isObject(I)){return }var G=I.getHours();var D=I.getMinutes();var F=I.getSeconds();var H=([(G<10?"0":"")+G,(D<10?"0":"")+D,(F<10?"0":"")+F]).join(":");B.DateTimeField.superclass.setValue.call(this,[I,H],E)}});B.registerType("datetime",B.DateTimeField)})();(function(){var A=YAHOO.inputEx;A.UneditableField=function(B){A.UneditableField.superclass.constructor.call(this,B)};YAHOO.lang.extend(A.UneditableField,A.Field,{setOptions:function(B){A.UneditableField.superclass.setOptions.call(this,B);this.options.visu=B.visu},setValue:function(C,B){this.value=C;A.renderVisu(this.options.visu,C,this.fieldContainer);A.UneditableField.superclass.setValue.call(this,C,B)},getValue:function(){return this.value}});A.registerType("uneditable",A.UneditableField)})();(function(){var A=YAHOO.inputEx,B=YAHOO.lang;A.UrlField=function(C){A.UrlField.superclass.constructor.call(this,C)};B.extend(A.UrlField,A.StringField,{setOptions:function(C){A.UrlField.superclass.setOptions.call(this,C);this.options.className=C.className?C.className:"inputEx-Field 
inputEx-UrlField";this.options.messages.invalid=A.messages.invalidUrl;this.options.favicon=B.isUndefined(C.favicon)?(("https:"==document.location.protocol)?false:true):C.favicon;this.options.size=C.size||50;this.options.regexp=A.regexps.url},render:function(){A.UrlField.superclass.render.call(this);this.el.size=this.options.size;if(!this.options.favicon){YAHOO.util.Dom.addClass(this.el,"nofavicon")}if(this.options.favicon){this.favicon=A.cn("img",{src:A.spacerUrl});this.fieldContainer.insertBefore(this.favicon,this.fieldContainer.childNodes[0]);YAHOO.util.Event.addListener(this.favicon,"click",function(){this.focus()},this,true)}},setClassFromState:function(){A.UrlField.superclass.setClassFromState.call(this);if(this.options.favicon){this.updateFavicon((this.previousState==A.stateValid)?this.getValue():null)}},updateFavicon:function(D){var C=D?D.match(/https?:\/\/[^\/]*/)+"/favicon.ico":A.spacerUrl;if(C!=this.favicon.src){A.sn(this.favicon,null,{visibility:"hidden"});this.favicon.src=C;if(this.timer){clearTimeout(this.timer)}var E=this;this.timer=setTimeout(function(){E.displayFavicon()},1000)}},displayFavicon:function(){A.sn(this.favicon,null,{visibility:(this.favicon.naturalWidth!=0)?"visible":"hidden"})}});A.messages.invalidUrl="Invalid URL, ex: http://www.test.com";A.registerType("url",A.UrlField)})();(function(){var D=YAHOO.inputEx,B=YAHOO.util.DragDropMgr,C=YAHOO.util.Dom,A=YAHOO.util.Event;D.widget.DDListItem=function(E){D.widget.DDListItem.superclass.constructor.call(this,E);this.setXConstraint(0,0);this.goingUp=false;this.lastY=0};YAHOO.extend(D.widget.DDListItem,YAHOO.util.DDProxy,{startDrag:function(F,H){var E=this.getDragEl();var G=this.getEl();C.setStyle(G,"visibility","hidden");this._originalIndex=D.indexOf(G,G.parentNode.childNodes);E.className=G.className;E.innerHTML=G.innerHTML},endDrag:function(F){C.setStyle(this.id,"visibility","");var G=this.getEl();var E=D.indexOf(G,G.parentNode.childNodes);if(this._originalIndex!=E){this._list.listReorderedEvt.fire()}},onDragDrop:function(I,J){if(B.interactionInfo.drop.length===1){var H=B.interactionInfo.point;var G=B.interactionInfo.sourceRegion;if(!G.intersect(H)){var E=C.get(J);if(E.nodeName.toLowerCase()!="li"){var F=B.getDDById(J);E.appendChild(this.getEl());F.isEmpty=false;B.refreshCache()}}}},onDrag:function(E){var F=A.getPageY(E);if(F<this.lastY){this.goingUp=true}else{if(F>this.lastY){this.goingUp=false}}this.lastY=F},onDragOver:function(I,J){var G=this.getEl();var F=C.get(J);if(F.nodeName.toLowerCase()=="li"){var E=G.parentNode;var H=F.parentNode;if(this.goingUp){H.insertBefore(G,F)}else{H.insertBefore(G,F.nextSibling)}B.refreshCache()}}});D.widget.DDList=function(E){this.ul=D.cn("ul");if(E.id){this.ul.id=E.id}if(E.value){this.setValue(E.value)}this.itemRemovedEvt=new YAHOO.util.CustomEvent("itemRemoved",this);this.listReorderedEvt=new YAHOO.util.CustomEvent("listReordered",this);if(E.parentEl){if(YAHOO.lang.isString(E.parentEl)){C.get(E.parentEl).appendChild(this.ul)}else{E.parentEl.appendChild(this.ul)}}};D.widget.DDList.prototype={addItem:function(G){var E=D.cn("li",{className:"inputEx-DDList-item"});E.appendChild(D.cn("span",null,null,G));var H=D.cn("a",null,null,"remove");E.appendChild(H);A.addListener(H,"click",function(K){var J=A.getTarget(K);var I=J.parentNode;this.removeItem(D.indexOf(I,this.ul.childNodes))},this,true);var F=new D.widget.DDListItem(E);F._list=this;this.ul.appendChild(E)},_removeItem:function(E){var F=this.ul.childNodes[E].childNodes[0].innerHTML;this.ul.removeChild(this.ul.childNodes[E]);return 
F},removeItem:function(E){var F=this._removeItem(E);this.itemRemovedEvt.fire(F)},getValue:function(){var F=[];for(var E=0;E<this.ul.childNodes.length;E++){F.push(this.ul.childNodes[E].childNodes[0].innerHTML)}return F},updateItem:function(E,F){this.ul.childNodes[E].childNodes[0].innerHTML=F},setValue:function(H){if(!YAHOO.lang.isArray(H)){H=[]}var I=this.ul.childNodes.length;var G=H.length;for(var F=0;F<G;F++){if(F<I){this.updateItem(F,H[F])}else{this.addItem(H[F])}}for(var E=G;E<I;E++){this._removeItem(G)}}}})();(function(){var A=YAHOO.inputEx;A.MultiSelectField=function(B){A.MultiSelectField.superclass.constructor.call(this,B)};YAHOO.lang.extend(A.MultiSelectField,A.SelectField,{renderComponent:function(){A.MultiSelectField.superclass.renderComponent.call(this);this.ddlist=new A.widget.DDList({parentEl:this.fieldContainer})},initEvents:function(){YAHOO.util.Event.addListener(this.el,"change",this.onAddNewItem,this,true);this.ddlist.itemRemovedEvt.subscribe(this.onItemRemoved,this,true);this.ddlist.listReorderedEvt.subscribe(this.fireUpdatedEvt,this,true)},onItemRemoved:function(C,E){var D=E[0];var B=A.indexOf(D,this.options.selectValues);this.el.childNodes[B].disabled=false;this.fireUpdatedEvt()},onAddNewItem:function(){if(this.el.selectedIndex!=0){this.ddlist.addItem(this.options.selectValues[this.el.selectedIndex]);this.el.childNodes[this.el.selectedIndex].disabled=true;this.el.selectedIndex=0;this.fireUpdatedEvt()}},setValue:function(E,C){this.ddlist.setValue(E);for(var D=0;D<this.el.childNodes.length;D++){this.el.childNodes[D].disabled=false}for(D=0;D<E.length;D++){var B=A.indexOf(E[D],this.options.selectValues);this.el.childNodes[B].disabled=true}if(C!==false){this.fireUpdatedEvt()}},getValue:function(){return this.ddlist.getValue()}});A.registerType("multiselect",A.MultiSelectField)})();(function(){var C=YAHOO.inputEx,D=YAHOO.lang,A=YAHOO.util.Event,B=YAHOO.util.Dom;C.AutoComplete=function(E){C.AutoComplete.superclass.constructor.call(this,E)};D.extend(C.AutoComplete,C.StringField,{setOptions:function(E){C.AutoComplete.superclass.setOptions.call(this,E);this.options.className=E.className?E.className:"inputEx-Field inputEx-AutoComplete";this.options.datasource=E.datasource;this.options.autoComp=E.autoComp;this.options.returnValue=E.returnValue},initEvents:function(){C.AutoComplete.superclass.initEvents.call(this);A.removeBlurListener(this.el,this.onBlur)},renderComponent:function(){this.wrapEl=C.cn("div",{className:"inputEx-StringField-wrapper"});var E={type:"text",id:YAHOO.util.Dom.generateId()};if(this.options.size){E.size=this.options.size}if(this.options.readonly){E.readonly="readonly"}if(this.options.maxLength){E.maxLength=this.options.maxLength}this.el=C.cn("input",E);var F={type:"hidden",value:""};if(this.options.name){F.name=this.options.name}this.hiddenEl=C.cn("input",F);this.wrapEl.appendChild(this.el);this.wrapEl.appendChild(this.hiddenEl);this.fieldContainer.appendChild(this.wrapEl);this.listEl=C.cn("div",{id:B.generateId()});this.fieldContainer.appendChild(this.listEl);A.onAvailable([this.el,this.listEl],this.buildAutocomplete,this,true)},buildAutocomplete:function(){if(!this._nElementsReady){this._nElementsReady=0}this._nElementsReady++;if(this._nElementsReady!=2){return }this.oAutoComp=new YAHOO.widget.AutoComplete(this.el.id,this.listEl.id,this.options.datasource,this.options.autoComp);this.oAutoComp.itemSelectEvent.subscribe(this.itemSelectHandler,this,true);this.oAutoComp.textboxBlurEvent.subscribe(this.onBlur,this,true)},itemSelectHandler:function(G,F){var 
E=F[2];this.setValue(this.options.returnValue?this.options.returnValue(E):E[0])},onChange:function(E){this.setClassFromState();YAHOO.lang.later(50,this,function(){if(this.el.value==""){this.setValue("")}});this.fireUpdatedEvt()},setValue:function(F,E){this.hiddenEl.value=F;this.setClassFromState();if(E!==false){this.fireUpdatedEvt()}},getValue:function(){return this.hiddenEl.value}});C.registerType("autocomplete",C.AutoComplete)})();(function(){var A=YAHOO.inputEx;A.MultiAutoComplete=function(B){A.MultiAutoComplete.superclass.constructor.call(this,B)};YAHOO.lang.extend(A.MultiAutoComplete,A.AutoComplete,{renderComponent:function(){A.MultiAutoComplete.superclass.renderComponent.call(this);this.ddlist=new A.widget.DDList({parentEl:this.fieldContainer});this.ddlist.itemRemovedEvt.subscribe(function(){this.setClassFromState();this.fireUpdatedEvt()},this,true);this.ddlist.listReorderedEvt.subscribe(this.fireUpdatedEvt,this,true)},itemSelectHandler:function(D,C){var B=C[2];this.ddlist.addItem(this.options.returnValue?this.options.returnValue(B):B[0]);this.el.value="";this.fireUpdatedEvt()},setValue:function(C,B){this.ddlist.setValue(C);this.setClassFromState();if(B!==false){this.fireUpdatedEvt()}},getValue:function(){return this.ddlist.getValue()},getState:function(){var B=this.getValue();if(B.length===0){return this.options.required?A.stateRequired:A.stateEmpty}return this.validate()?A.stateValid:A.stateInvalid},validate:function(){return true},onChange:function(B){}});A.registerType("multiautocomplete",A.MultiAutoComplete)})();(function(){var A=YAHOO.inputEx;A.UneditableField=function(B){A.UneditableField.superclass.constructor.call(this,B)};YAHOO.lang.extend(A.UneditableField,A.Field,{setOptions:function(B){A.UneditableField.superclass.setOptions.call(this,B);this.options.visu=B.visu},setValue:function(C,B){this.value=C;A.renderVisu(this.options.visu,C,this.fieldContainer);A.UneditableField.superclass.setValue.call(this,C,B)},getValue:function(){return this.value}});A.registerType("uneditable",A.UneditableField)})();(function(){var A=YAHOO.inputEx,B=YAHOO.lang;A.SliderField=function(C){A.SliderField.superclass.constructor.call(this,C)};YAHOO.lang.extend(A.SliderField,A.Field,{setOptions:function(C){A.SliderField.superclass.setOptions.call(this,C);this.options.className=C.className?C.className:"inputEx-SliderField";this.options.minValue=B.isUndefined(C.minValue)?0:C.minValue;this.options.maxValue=B.isUndefined(C.maxValue)?100:C.maxValue;this.options.displayValue=B.isUndefined(C.displayValue)?true:C.displayValue},renderComponent:function(){this.sliderbg=A.cn("div",{id:YAHOO.util.Dom.generateId(),className:"inputEx-SliderField-bg"});this.sliderthumb=A.cn("div",{className:"inputEx-SliderField-thumb"});this.sliderbg.appendChild(this.sliderthumb);this.fieldContainer.appendChild(this.sliderbg);if(this.options.displayValue){this.valueDisplay=A.cn("div",{className:"inputEx-SliderField-value"},null,String(this.options.minValue));this.fieldContainer.appendChild(this.valueDisplay)}this.fieldContainer.appendChild(A.cn("div",null,{clear:"both"}));this.slider=YAHOO.widget.Slider.getHorizSlider(this.sliderbg,this.sliderthumb,0,100)},initEvents:function(){this.slider.on("slideEnd",this.fireUpdatedEvt,this,true);if(this.options.displayValue){this.updatedEvt.subscribe(function(C,E){var D=E[0];this.valueDisplay.innerHTML=D},this,true)}},setValue:function(F,D){var C=F;if(C<this.options.minValue){C=this.options.minValue}if(C>this.options.maxValue){C=this.options.maxValue}var 
E=Math.floor(C-this.options.minValue)*100/this.options.maxValue;this.slider.setValue(E);A.SliderField.superclass.setValue.call(this,F,D)},getValue:function(){var C=Math.floor(this.options.minValue+(this.options.maxValue-this.options.minValue)*this.slider.getValue()/100);return C}});A.registerType("slider",A.SliderField)})(); | PypiClean |
/selkie-0.21.5.tar.gz/selkie-0.21.5/seal/app/handler.py |
import sys, socket, threading, selectors, ssl, urllib, time
from socketserver import _ServerSelector
from http.server import BaseHTTPRequestHandler
from threading import Thread
from traceback import print_exc
from wsgiref.handlers import read_environ, format_date_time
from wsgiref.headers import Headers
from wsgiref.util import is_hop_by_hop
from seal.app.resources import Resources
from seal.app.request import Request
from seal.app.dualserver import DualServer, debug
#-- HTTPRequestHandler -------------------------------------------------------
#
# This is a BaseHTTPRequestHandler that creates an ApplicationCaller to do
# its work.
#
class HTTPRequestHandler (BaseHTTPRequestHandler):
def do_GET (self):
ApplicationCaller(self).run()
def do_POST (self):
ApplicationCaller(self).run()
def log_message(self, format, *args):
log = self.server.config['log']
log('server', "%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
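# Illustrative sketch only (not part of selkie's public API): the handler above
# could in principle be served with the stdlib HTTPServer, provided the server
# instance carries the attributes ApplicationCaller reads below ('config' with
# 'log' and 'app' entries, and 'sslcontext').  Those attribute names are taken
# from this file; selkie normally wires them up through DualServer instead.
def _demo_serve(app, port=8000):
    from http.server import HTTPServer
    server = HTTPServer(('localhost', port), HTTPRequestHandler)
    server.config = {'log': lambda source, msg: None,  # swallow access-log lines
                     'app': app}                       # app(request) -> response object
    server.sslcontext = None   # consulted by get_environ() when setting the HTTPS flag
    server.serve_forever()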
class ApplicationCaller (object):
def __init__ (self, handler):
self.handler = handler
self.stdin = handler.rfile
self.stdout = handler.wfile
self.server = handler.server
self.config = handler.server.config
self.headers = None
self.headers_sent = False
self.bytes_sent = 0
        self.environ = self.get_environ()    # my get_environ(), below
## from BaseHandler.run
# called by HTTPRequestHandler.do_X
def run(self):
"""Invoke the application"""
# Note to self: don't move the close()! Asynchronous servers shouldn't
# call close() from finish_response(), so if you close() anywhere but
# the double-error branch here, you'll break asynchronous servers by
# prematurely closing. Async servers must return from 'run()' without
# closing if there might still be output to iterate over.
try:
#debug('Start running app')
self.result = self._invoke_application(self.environ)
self.finish_response()
#debug('Done running app')
except:
#debug('Got an exception while running app', sys.exc_info())
self.log_exception(sys.exc_info())
if not self.headers_sent:
self.result = self.error_output(self.environ, self.start_response)
self.finish_response()
# XXX else: attempt advanced recovery techniques for HTML or text?
## from BaseHandler
def error_output(self, environ, start_response):
start_response("500 Internal Server Error",
[('Content-Type','text/plain')],
sys.exc_info())
return [
b"A server error occurred. Please contact the administrator."
]
## seal.app.WsgiApp.__call__
def _invoke_application (self, environ):
app = self.config.get('app')
if app is not None:
request = Request(environ, self.config)
response = app(request)
status = response.http_status()
headers = response.http_headers()
body = response.body()
self.start_response(status, headers)
return body
## BaseHTTPRequestHandler
def log_message(self, format, *args):
"""Log an arbitrary message.
This is used by all other logging functions. Override
it if you have specific logging wishes.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
The client ip and current date/time are prefixed to
every message.
"""
sys.stderr.write("%s - - [%s] %s\n" %
(self.request.address_string(),
self.log_date_time_string(),
format%args))
## Adapted from BaseHandler
def finish_response(self):
try:
for data in self.result:
self.write(data)
self.finish_content()
finally:
self.close()
## Adapted from BaseHandler
def finish_content(self):
"""Ensure headers and content have both been sent"""
if not self.headers_sent:
# Only zero Content-Length if not set by the application (so
# that HEAD requests can be satisfied properly, see #3839)
self.headers.setdefault('Content-Length', "0")
self.send_headers()
else:
pass # XXX check if content-length was too short?
## Adapted from BaseHandler
def write(self, data):
assert type(data) is bytes, \
"write() argument must be a bytes instance"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
self._write(data)
self._flush()
## Adapted from SimpleHandler
def _write(self,data):
result = self.stdout.write(data)
if result is None or result == len(data):
return
from warnings import warn
warn("SimpleHandler.stdout.write() should not do partial writes",
DeprecationWarning)
while True:
data = data[result:]
if not data:
break
result = self.stdout.write(data)
## Adapted from SimpleHandler
def _flush(self):
self.stdout.flush()
self._flush = self.stdout.flush
## from ServerHandler and SimpleHandler
def close(self):
try:
if self.status is None:
status = 'Error'
else:
status = self.status.split(' ',1)[0]
self.handler.log_request(status, self.bytes_sent)
finally:
try:
if hasattr(self.result,'close'):
self.result.close()
finally:
self.result = self.headers = self.status = None
self.bytes_sent = 0; self.headers_sent = False
## Environment
#
# BaseHandler.run() originally called self.setup_environ() to set
# the member environ.
#
# The class variable os_environ was set using read_environ().
# A copy was made and stored in environ.
# Then add_cgi_vars() was called.
#
# Add_cgi_vars() updated the environ from the member base_env, which was
# set when SimpleHandler was instantiated, which was done in
# WSGIRequestHandler.handle(). It called its get_environ() method
# to create the dict that was stored in base_env.
#
# The WSGIRequestHandler.get_environ() method started with a copy of the
# server's base_environ. The server in question was of class WSGIServer,
# and its base_environ was set by WSGIServer.setup_environ().
## from WSGIRequestHandler.get_environ and WSGIServer.setup_environ
def get_environ(self):
server = self.server
handler = self.handler
headers = handler.headers
env = {}
## from BaseHandler.setup_environ()
env['wsgi.input'] = self.stdin
## from WSGIServer.setup_environ()
env['SERVER_NAME'] = server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(server.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
## from WSGIRequestHandler.get_environ()
env['SERVER_PROTOCOL'] = 'Seal/1'
env['SERVER_SOFTWARE'] = 'Seal/1'
env['REQUEST_METHOD'] = handler.command
if '?' in handler.path:
(path, query) = handler.path.split('?', 1)
else:
(path, query) = (handler.path, '')
env['PATH_INFO'] = urllib.parse.unquote(path, 'iso-8859-1')
env['QUERY_STRING'] = query
host = self.handler.address_string()
if host != handler.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = handler.client_address[0]
if headers.get('content-type') is None:
env['CONTENT_TYPE'] = headers.get_content_type()
else:
env['CONTENT_TYPE'] = headers['content-type']
length = headers.get('content-length')
if length:
env['CONTENT_LENGTH'] = length
## Secure connection?
env['HTTPS'] = 'on' if self.server.sslcontext else 'off'
## Any key beginning with 'HTTP_' is copied from the HTTP request!
# HTTP_COOKIE is the most important one.
for k, v in headers.items():
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
continue # skip content length, type,etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
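    # Example (illustrative, not captured from a live request): for
    # "GET /doc/page?x=1" from 127.0.0.1 over plain HTTP, the dict returned
    # above would contain roughly REQUEST_METHOD='GET', PATH_INFO='/doc/page',
    # QUERY_STRING='x=1', REMOTE_ADDR='127.0.0.1', HTTPS='off', plus one
    # HTTP_* entry per request header (e.g. HTTP_COOKIE, HTTP_USER_AGENT).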
## Adapted from BaseHandler
def log_exception(self,exc_info):
try:
from traceback import print_exception
print_exception(
exc_info[0], exc_info[1], exc_info[2],
None, # traceback_limit
sys.stderr
)
sys.stderr.flush()
finally:
exc_info = None
## from BaseHandler
def start_response(self, status, headers,exc_info=None):
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
finally:
exc_info = None # avoid dangling circular ref
elif self.headers is not None:
raise AssertionError("Headers already set!")
self.status = status
self.headers = Headers(headers)
status = self._convert_string_type(status, "Status")
assert len(status)>=4,"Status must be at least 4 characters"
assert status[:3].isdigit(), "Status message must begin w/3-digit code"
assert status[3]==" ", "Status message must have a space after code"
if __debug__:
for name, val in headers:
name = self._convert_string_type(name, "Header name")
val = self._convert_string_type(val, "Header value")
assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
return self.write
## from BaseHandler
def _convert_string_type(self, value, title):
"""Convert/check value type."""
if type(value) is str:
return value
raise AssertionError(
"{0} must be of type str (got {1})".format(title, repr(value))
)
origin_server = True # We are transmitting direct to client
## from BaseHandler
def send_headers(self):
"""Transmit headers to the client, via self._write()"""
self.cleanup_headers()
self.headers_sent = True
if not self.origin_server or self.client_is_modern():
self.send_preamble()
self._write(bytes(self.headers))
## from BaseHandler
def cleanup_headers(self):
"""Make any necessary header changes or defaults
Subclasses can extend this to add other defaults.
"""
if 'Content-Length' not in self.headers:
self.set_content_length()
## from BaseHandler
def client_is_modern(self):
"""True if client can accept status and headers"""
return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9'
## from BaseHandler
def set_content_length(self):
"""Compute Content-Length or switch to chunked encoding if possible"""
try:
blocks = len(self.result)
except (TypeError,AttributeError,NotImplementedError):
pass
else:
if blocks==1:
self.headers['Content-Length'] = str(self.bytes_sent)
return
# XXX Try for chunked encoding if origin server and client is 1.1
http_version = "1.0" # Version that should be used for response
## from BaseHandler
def send_preamble(self):
"""Transmit version/status/date/server, via self._write()"""
if self.origin_server:
if self.client_is_modern():
self._write(('HTTP/%s %s\r\n' % (self.http_version,self.status)).encode('iso-8859-1'))
if 'Date' not in self.headers:
self._write(
('Date: %s\r\n' % format_date_time(time.time())).encode('iso-8859-1')
)
# if self.server_software and 'Server' not in self.headers:
# self._write(('Server: %s\r\n' % self.server_software).encode('iso-8859-1'))
else:
self._write(('Status: %s\r\n' % self.status).encode('iso-8859-1')) | PypiClean |
/PyQt6-Fluent-Widgets-1.1.9.tar.gz/PyQt6-Fluent-Widgets-1.1.9/qfluentwidgets/common/config.py | import json
from enum import Enum
from pathlib import Path
import darkdetect
from PyQt6.QtCore import QObject, pyqtSignal
from PyQt6.QtGui import QColor
from .exception_handler import exceptionHandler
class Theme(Enum):
""" Theme enumeration """
LIGHT = "Light"
DARK = "Dark"
AUTO = "Auto"
class ConfigValidator:
""" Config validator """
def validate(self, value):
""" Verify whether the value is legal """
return True
def correct(self, value):
""" correct illegal value """
return value
class RangeValidator(ConfigValidator):
""" Range validator """
def __init__(self, min, max):
self.min = min
self.max = max
self.range = (min, max)
def validate(self, value):
return self.min <= value <= self.max
def correct(self, value):
return min(max(self.min, value), self.max)
class OptionsValidator(ConfigValidator):
""" Options validator """
def __init__(self, options):
if not options:
raise ValueError("The `options` can't be empty.")
if isinstance(options, Enum):
options = options._member_map_.values()
self.options = list(options)
def validate(self, value):
return value in self.options
def correct(self, value):
return value if self.validate(value) else self.options[0]
class BoolValidator(OptionsValidator):
""" Boolean validator """
def __init__(self):
super().__init__([True, False])
class FolderValidator(ConfigValidator):
""" Folder validator """
def validate(self, value):
return Path(value).exists()
def correct(self, value):
path = Path(value)
path.mkdir(exist_ok=True, parents=True)
return str(path.absolute()).replace("\\", "/")
class FolderListValidator(ConfigValidator):
""" Folder list validator """
def validate(self, value):
return all(Path(i).exists() for i in value)
def correct(self, value):
folders = []
for folder in value:
path = Path(folder)
if path.exists():
folders.append(str(path.absolute()).replace("\\", "/"))
return folders
class ColorValidator(ConfigValidator):
""" RGB color validator """
def __init__(self, default):
self.default = QColor(default)
def validate(self, color):
try:
return QColor(color).isValid()
except:
return False
def correct(self, value):
return QColor(value) if self.validate(value) else self.default
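# A minimal illustrative sketch (not part of the original module) showing how the
# validators above normalise bad input; the helper name and sample values below are
# invented purely for demonstration.
def _validator_usage_sketch():
    dpi = RangeValidator(100, 200)
    assert dpi.validate(150) and not dpi.validate(250)
    assert dpi.correct(250) == 200                 # clamped to the upper bound
    mode = OptionsValidator(["Fluent", "Mica"])
    assert mode.correct("Acrylic") == "Fluent"     # falls back to the first option
    return dpi, mode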
class ConfigSerializer:
""" Config serializer """
def serialize(self, value):
""" serialize config value """
return value
def deserialize(self, value):
""" deserialize config from config file's value """
return value
class EnumSerializer(ConfigSerializer):
""" enumeration class serializer """
def __init__(self, enumClass):
self.enumClass = enumClass
def serialize(self, value):
return value.value
def deserialize(self, value):
return self.enumClass(value)
class ColorSerializer(ConfigSerializer):
""" QColor serializer """
def serialize(self, value: QColor):
return value.name(QColor.NameFormat.HexArgb)
def deserialize(self, value):
if isinstance(value, list):
return QColor(*value)
return QColor(value)
class ConfigItem(QObject):
""" Config item """
valueChanged = pyqtSignal(object)
def __init__(self, group, name, default, validator=None, serializer=None, restart=False):
"""
Parameters
----------
group: str
config group name
name: str
config item name, can be empty
default:
default value
        validator: ConfigValidator
            config validator used to verify and correct the value
serializer: ConfigSerializer
config serializer
restart: bool
whether to restart the application after updating value
"""
super().__init__()
self.group = group
self.name = name
self.validator = validator or ConfigValidator()
self.serializer = serializer or ConfigSerializer()
self.__value = default
self.value = default
self.restart = restart
self.defaultValue = self.validator.correct(default)
@property
def value(self):
""" get the value of config item """
return self.__value
@value.setter
def value(self, v):
v = self.validator.correct(v)
ov = self.__value
self.__value = v
if ov != v:
self.valueChanged.emit(v)
@property
def key(self):
""" get the config key separated by `.` """
return self.group+"."+self.name if self.name else self.group
def __str__(self):
return f'{self.__class__.__name__}[value={self.value}]'
def serialize(self):
return self.serializer.serialize(self.value)
def deserializeFrom(self, value):
self.value = self.serializer.deserialize(value)
class RangeConfigItem(ConfigItem):
""" Config item of range """
@property
def range(self):
""" get the available range of config """
return self.validator.range
def __str__(self):
return f'{self.__class__.__name__}[range={self.range}, value={self.value}]'
class OptionsConfigItem(ConfigItem):
""" Config item with options """
@property
def options(self):
return self.validator.options
def __str__(self):
return f'{self.__class__.__name__}[options={self.options}, value={self.value}]'
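# A minimal illustrative sketch (not part of the original module): an OptionsConfigItem
# built from the Theme enum corrects invalid assignments and serialises to the enum's
# string value. The helper name and the assigned string are invented for the example.
def _config_item_usage_sketch():
    item = OptionsConfigItem(
        "QFluentWidgets", "ThemeMode", Theme.AUTO,
        OptionsValidator(Theme), EnumSerializer(Theme))
    item.value = "not-a-theme"                     # corrected back to a valid option
    assert item.value in list(Theme)
    assert item.key == "QFluentWidgets.ThemeMode"
    return item.serialize()                        # -> "Light"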
class ColorConfigItem(ConfigItem):
""" Color config item """
def __init__(self, group, name, default, restart=False):
super().__init__(group, name, QColor(default), ColorValidator(default),
ColorSerializer(), restart)
def __str__(self):
return f'{self.__class__.__name__}[value={self.value.name()}]'
class QConfig(QObject):
""" Config of app """
appRestartSig = pyqtSignal()
themeChanged = pyqtSignal(Theme)
themeChangedFinished = pyqtSignal()
themeColorChanged = pyqtSignal(QColor)
themeMode = OptionsConfigItem(
"QFluentWidgets", "ThemeMode", Theme.AUTO, OptionsValidator(Theme), EnumSerializer(Theme))
themeColor = ColorConfigItem("QFluentWidgets", "ThemeColor", '#009faa')
def __init__(self):
super().__init__()
self.file = Path("config/config.json")
self._theme = Theme.LIGHT
self._cfg = self
def get(self, item):
""" get the value of config item """
return item.value
def set(self, item, value, save=True):
""" set the value of config item
Parameters
----------
item: ConfigItem
config item
value:
the new value of config item
save: bool
whether to save the change to config file
"""
if item.value == value:
return
item.value = value
if save:
self.save()
if item.restart:
self._cfg.appRestartSig.emit()
if item is self._cfg.themeMode:
self.theme = value
self._cfg.themeChanged.emit(value)
if item is self._cfg.themeColor:
self._cfg.themeColorChanged.emit(value)
def toDict(self, serialize=True):
""" convert config items to `dict` """
items = {}
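        # Items with an empty name are stored flat under their group; named items
        # are nested one level deeper as {group: {name: value}}.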
for name in dir(self._cfg.__class__):
item = getattr(self._cfg.__class__, name)
if not isinstance(item, ConfigItem):
continue
value = item.serialize() if serialize else item.value
if not items.get(item.group):
if not item.name:
items[item.group] = value
else:
items[item.group] = {}
if item.name:
items[item.group][item.name] = value
return items
def save(self):
""" save config """
self._cfg.file.parent.mkdir(parents=True, exist_ok=True)
with open(self._cfg.file, "w", encoding="utf-8") as f:
json.dump(self._cfg.toDict(), f, ensure_ascii=False, indent=4)
@exceptionHandler()
def load(self, file=None, config=None):
""" load config
Parameters
----------
file: str or Path
the path of json config file
config: Config
config object to be initialized
"""
if isinstance(config, QConfig):
self._cfg = config
self._cfg.themeChanged.connect(self.themeChanged)
if isinstance(file, (str, Path)):
self._cfg.file = Path(file)
try:
with open(self._cfg.file, encoding="utf-8") as f:
cfg = json.load(f)
except:
cfg = {}
# map config items'key to item
items = {}
for name in dir(self._cfg.__class__):
item = getattr(self._cfg.__class__, name)
if isinstance(item, ConfigItem):
items[item.key] = item
# update the value of config item
for k, v in cfg.items():
if not isinstance(v, dict) and items.get(k) is not None:
items[k].deserializeFrom(v)
elif isinstance(v, dict):
for key, value in v.items():
key = k + "." + key
if items.get(key) is not None:
items[key].deserializeFrom(value)
self.theme = self.get(self.themeMode)
@property
    def theme(self):
        """ get theme mode, can be `Theme.LIGHT` or `Theme.DARK` """
return self._cfg._theme
@theme.setter
    def theme(self, t):
        """ change the theme without modifying the config file """
if t == Theme.AUTO:
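            # darkdetect reports "Dark"/"Light" (or None); fall back to LIGHT when detection fails.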
t = darkdetect.theme()
t = Theme(t) if t else Theme.LIGHT
self._cfg._theme = t
qconfig = QConfig()
def isDarkTheme():
""" whether the theme is dark mode """
return qconfig.theme == Theme.DARK
def theme():
""" get theme mode """
return qconfig.theme | PypiClean |
/dist_probabilty-1.1.tar.gz/dist_probabilty-1.1/dist_probabilty/Binomialdistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) number of trials
"""
def __init__(self, prob=.5, size=20):
self.n = size
self.p = prob
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
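        # sqrt(n * p * (1 - p)) above is the standard deviation of a Binomial(n, p) variable.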
return self.stdev
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
    def plot_bar(self):
        """Function to output a bar chart of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
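        # The two bar heights above are the expected counts of failures and successes: n*(1-p) and n*p.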
plt.title('Bar Chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
    def pdf(self, k):
        """Probability density function calculator for the binomial distribution.
        Args:
            k (int): number of successes for which to calculate the probability
Returns:
float: probability density function output
"""
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
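        # a is the binomial coefficient C(n, k) and b is p**k * (1 - p)**(n - k),
        # so a * b is the probability of exactly k successes in n trials.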
return a * b
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability')
plt.xlabel('Outcome')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
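        # Independent Binomial(n1, p) + Binomial(n2, p) is Binomial(n1 + n2, p),
        # which is why both distributions must share the same p above.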
result = Binomial()
result.n = self.n + other.n
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
            string: characteristics of the Binomial
"""
return "mean {}, standard deviation {}, p {}, n {}".\
format(self.mean, self.stdev, self.p, self.n) | PypiClean |
/dsin100daysv35-6.0.1.tar.gz/dsin100daysv35-6.0.1/notebook/static/edit/js/main.js |
requirejs([
'jquery',
'contents',
'base/js/namespace',
'base/js/utils',
'base/js/page',
'base/js/events',
'services/config',
'edit/js/editor',
'edit/js/menubar',
'edit/js/savewidget',
'edit/js/notificationarea',
'bidi/bidi',
], function(
$,
contents_service,
IPython,
utils,
page,
events,
configmod,
editmod,
menubar,
savewidget,
notificationarea,
bidi
){
"use strict";
try {
requirejs(['custom/custom'], function() {});
bidi.loadLocale();
} catch(err) {
        console.log("Error loading custom.js from the edition service. Continuing and logging");
console.warn(err);
}
page = new page.Page('div#header', 'div#site');
var base_url = utils.get_body_data('baseUrl');
var file_path = utils.get_body_data('filePath');
var config = new configmod.ConfigSection('edit', {base_url: base_url});
config.load();
var common_config = new configmod.ConfigSection('common', {base_url: base_url});
common_config.load();
var contents = new contents_service.Contents({
base_url: base_url,
common_config: common_config
});
var editor = new editmod.Editor('#texteditor-container', {
base_url: base_url,
events: events,
contents: contents,
file_path: file_path,
config: config,
});
// Make it available for debugging
IPython.editor = editor;
var save_widget = new savewidget.SaveWidget('span#save_widget', {
editor: editor,
events: events,
});
var menus = new menubar.MenuBar('#menubar', {
base_url: base_url,
editor: editor,
events: events,
save_widget: save_widget,
});
var notification_area = new notificationarea.EditorNotificationArea(
'#notification_area', {
events: events,
});
editor.notification_area = notification_area;
notification_area.init_notification_widgets();
utils.load_extensions_from_config(config);
utils.load_extensions_from_config(common_config);
editor.load();
page.show();
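    // Prompt before leaving only when there are unsaved edits; isClean() compares the
    // buffer against the generation snapshot presumably taken by the editor on load/save.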
window.onbeforeunload = function () {
if (editor.save_enabled && !editor.codemirror.isClean(editor.generation)) {
return "Unsaved changes will be lost. Close anyway?";
}
};
// Make sure the codemirror editor is sized appropriately.
var _handle_resize = function() {
var backdrop = $("#texteditor-backdrop");
// account for padding on the backdrop wrapper
var padding = backdrop.outerHeight(true) - backdrop.height();
$('div.CodeMirror').height($("#site").height() - padding);
};
$(window).resize(_handle_resize);
// On document ready, resize codemirror.
$(document).ready(_handle_resize);
}); | PypiClean |
/cdktf-cdktf-provider-azurerm-10.0.1.tar.gz/cdktf-cdktf-provider-azurerm-10.0.1/src/cdktf_cdktf_provider_azurerm/storage_data_lake_gen2_path/__init__.py | import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
from .._jsii import *
import cdktf as _cdktf_9a9027ec
import constructs as _constructs_77d1e7e8
class StorageDataLakeGen2Path(
_cdktf_9a9027ec.TerraformResource,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-azurerm.storageDataLakeGen2Path.StorageDataLakeGen2Path",
):
'''Represents a {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path azurerm_storage_data_lake_gen2_path}.'''
def __init__(
self,
scope: _constructs_77d1e7e8.Construct,
id_: builtins.str,
*,
filesystem_name: builtins.str,
path: builtins.str,
resource: builtins.str,
storage_account_id: builtins.str,
ace: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union["StorageDataLakeGen2PathAce", typing.Dict[builtins.str, typing.Any]]]]] = None,
group: typing.Optional[builtins.str] = None,
id: typing.Optional[builtins.str] = None,
owner: typing.Optional[builtins.str] = None,
timeouts: typing.Optional[typing.Union["StorageDataLakeGen2PathTimeouts", typing.Dict[builtins.str, typing.Any]]] = None,
connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
) -> None:
'''Create a new {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path azurerm_storage_data_lake_gen2_path} Resource.
:param scope: The scope in which to define this construct.
:param id_: The scoped construct ID. Must be unique amongst siblings in the same scope
:param filesystem_name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#filesystem_name StorageDataLakeGen2Path#filesystem_name}.
:param path: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#path StorageDataLakeGen2Path#path}.
:param resource: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#resource StorageDataLakeGen2Path#resource}.
:param storage_account_id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#storage_account_id StorageDataLakeGen2Path#storage_account_id}.
:param ace: ace block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#ace StorageDataLakeGen2Path#ace}
:param group: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#group StorageDataLakeGen2Path#group}.
:param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#id StorageDataLakeGen2Path#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
:param owner: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#owner StorageDataLakeGen2Path#owner}.
:param timeouts: timeouts block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#timeouts StorageDataLakeGen2Path#timeouts}
:param connection:
:param count:
:param depends_on:
:param for_each:
:param lifecycle:
:param provider:
:param provisioners:
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__3c94aee74f63fb2fee19927c02442279e5809e94427f5284a5da5d77e4060106)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument id_", value=id_, expected_type=type_hints["id_"])
config = StorageDataLakeGen2PathConfig(
filesystem_name=filesystem_name,
path=path,
resource=resource,
storage_account_id=storage_account_id,
ace=ace,
group=group,
id=id,
owner=owner,
timeouts=timeouts,
connection=connection,
count=count,
depends_on=depends_on,
for_each=for_each,
lifecycle=lifecycle,
provider=provider,
provisioners=provisioners,
)
jsii.create(self.__class__, self, [scope, id_, config])
@jsii.member(jsii_name="putAce")
def put_ace(
self,
value: typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union["StorageDataLakeGen2PathAce", typing.Dict[builtins.str, typing.Any]]]],
) -> None:
'''
:param value: -
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__47e2bf66496bb652d4b9955378c257044b9bab58c3cdbe215efa7f3821edd0ba)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
return typing.cast(None, jsii.invoke(self, "putAce", [value]))
@jsii.member(jsii_name="putTimeouts")
def put_timeouts(
self,
*,
create: typing.Optional[builtins.str] = None,
delete: typing.Optional[builtins.str] = None,
read: typing.Optional[builtins.str] = None,
update: typing.Optional[builtins.str] = None,
) -> None:
'''
:param create: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#create StorageDataLakeGen2Path#create}.
:param delete: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#delete StorageDataLakeGen2Path#delete}.
:param read: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#read StorageDataLakeGen2Path#read}.
:param update: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#update StorageDataLakeGen2Path#update}.
'''
value = StorageDataLakeGen2PathTimeouts(
create=create, delete=delete, read=read, update=update
)
return typing.cast(None, jsii.invoke(self, "putTimeouts", [value]))
@jsii.member(jsii_name="resetAce")
def reset_ace(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetAce", []))
@jsii.member(jsii_name="resetGroup")
def reset_group(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetGroup", []))
@jsii.member(jsii_name="resetId")
def reset_id(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetId", []))
@jsii.member(jsii_name="resetOwner")
def reset_owner(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetOwner", []))
@jsii.member(jsii_name="resetTimeouts")
def reset_timeouts(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetTimeouts", []))
@jsii.member(jsii_name="synthesizeAttributes")
def _synthesize_attributes(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "synthesizeAttributes", []))
@jsii.python.classproperty
@jsii.member(jsii_name="tfResourceType")
def TF_RESOURCE_TYPE(cls) -> builtins.str:
return typing.cast(builtins.str, jsii.sget(cls, "tfResourceType"))
@builtins.property
@jsii.member(jsii_name="ace")
def ace(self) -> "StorageDataLakeGen2PathAceList":
return typing.cast("StorageDataLakeGen2PathAceList", jsii.get(self, "ace"))
@builtins.property
@jsii.member(jsii_name="timeouts")
def timeouts(self) -> "StorageDataLakeGen2PathTimeoutsOutputReference":
return typing.cast("StorageDataLakeGen2PathTimeoutsOutputReference", jsii.get(self, "timeouts"))
@builtins.property
@jsii.member(jsii_name="aceInput")
def ace_input(
self,
) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List["StorageDataLakeGen2PathAce"]]]:
return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List["StorageDataLakeGen2PathAce"]]], jsii.get(self, "aceInput"))
@builtins.property
@jsii.member(jsii_name="filesystemNameInput")
def filesystem_name_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "filesystemNameInput"))
@builtins.property
@jsii.member(jsii_name="groupInput")
def group_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "groupInput"))
@builtins.property
@jsii.member(jsii_name="idInput")
def id_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "idInput"))
@builtins.property
@jsii.member(jsii_name="ownerInput")
def owner_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "ownerInput"))
@builtins.property
@jsii.member(jsii_name="pathInput")
def path_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "pathInput"))
@builtins.property
@jsii.member(jsii_name="resourceInput")
def resource_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "resourceInput"))
@builtins.property
@jsii.member(jsii_name="storageAccountIdInput")
def storage_account_id_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "storageAccountIdInput"))
@builtins.property
@jsii.member(jsii_name="timeoutsInput")
def timeouts_input(
self,
) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, "StorageDataLakeGen2PathTimeouts"]]:
return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, "StorageDataLakeGen2PathTimeouts"]], jsii.get(self, "timeoutsInput"))
@builtins.property
@jsii.member(jsii_name="filesystemName")
def filesystem_name(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "filesystemName"))
@filesystem_name.setter
def filesystem_name(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__277abcf418ce63b728f0a8953f0b312f3afb4cbe7a2ca1b520bdcf072325b964)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "filesystemName", value)
@builtins.property
@jsii.member(jsii_name="group")
def group(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "group"))
@group.setter
def group(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__0e22355a977bacf74ba517d7810d0022e855241a28b0fa2fd274a936a69ee997)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "group", value)
@builtins.property
@jsii.member(jsii_name="id")
def id(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "id"))
@id.setter
def id(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__2f4e220a3acfa949a03844677835634ad052b713e149675c955e312c9025e192)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "id", value)
@builtins.property
@jsii.member(jsii_name="owner")
def owner(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "owner"))
@owner.setter
def owner(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__01c5a1b57453b5097f0d36a6a4866124a82ddd3260e6b95e31a8eab5bf529222)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "owner", value)
@builtins.property
@jsii.member(jsii_name="path")
def path(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "path"))
@path.setter
def path(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__66de0178e00716aa15a1ed3b6de9c21c0b8a376286579c2ce4e11322b99595c3)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "path", value)
@builtins.property
@jsii.member(jsii_name="resource")
def resource(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "resource"))
@resource.setter
def resource(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__be73f0ef99a0d151a518b1f3886120d85b04c367c157f991cd32ed8d49287c28)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "resource", value)
@builtins.property
@jsii.member(jsii_name="storageAccountId")
def storage_account_id(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "storageAccountId"))
@storage_account_id.setter
def storage_account_id(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__27a1b24658f6bfbdbac18933e93e467ed02996949f0957033fc0b49a905484d4)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "storageAccountId", value)
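# Illustrative sketch only, not generated binding code: roughly how this resource might be
# declared inside a cdktf stack. The construct id, filesystem name, path, ACE entry and
# storage account id below are placeholders invented for the example.
def _example_storage_data_lake_gen2_path(scope: _constructs_77d1e7e8.Construct) -> StorageDataLakeGen2Path:
    return StorageDataLakeGen2Path(
        scope,
        "example_data_lake_path",
        filesystem_name="example-filesystem",
        path="raw/events",
        resource="directory",
        storage_account_id="<storage-account-resource-id>",
        ace=[StorageDataLakeGen2PathAce(permissions="rwx", type="user")],
    )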
@jsii.data_type(
jsii_type="@cdktf/provider-azurerm.storageDataLakeGen2Path.StorageDataLakeGen2PathAce",
jsii_struct_bases=[],
name_mapping={
"permissions": "permissions",
"type": "type",
"id": "id",
"scope": "scope",
},
)
class StorageDataLakeGen2PathAce:
def __init__(
self,
*,
permissions: builtins.str,
type: builtins.str,
id: typing.Optional[builtins.str] = None,
scope: typing.Optional[builtins.str] = None,
) -> None:
'''
:param permissions: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#permissions StorageDataLakeGen2Path#permissions}.
:param type: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#type StorageDataLakeGen2Path#type}.
:param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#id StorageDataLakeGen2Path#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
:param scope: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#scope StorageDataLakeGen2Path#scope}.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__6c88e648028b542fdc958640b55105e5b90152fda7565b640ee655d050cccad5)
check_type(argname="argument permissions", value=permissions, expected_type=type_hints["permissions"])
check_type(argname="argument type", value=type, expected_type=type_hints["type"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
self._values: typing.Dict[builtins.str, typing.Any] = {
"permissions": permissions,
"type": type,
}
if id is not None:
self._values["id"] = id
if scope is not None:
self._values["scope"] = scope
@builtins.property
def permissions(self) -> builtins.str:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#permissions StorageDataLakeGen2Path#permissions}.'''
result = self._values.get("permissions")
assert result is not None, "Required property 'permissions' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def type(self) -> builtins.str:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#type StorageDataLakeGen2Path#type}.'''
result = self._values.get("type")
assert result is not None, "Required property 'type' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def id(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#id StorageDataLakeGen2Path#id}.
Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2.
If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
'''
result = self._values.get("id")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def scope(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#scope StorageDataLakeGen2Path#scope}.'''
result = self._values.get("scope")
return typing.cast(typing.Optional[builtins.str], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "StorageDataLakeGen2PathAce(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class StorageDataLakeGen2PathAceList(
_cdktf_9a9027ec.ComplexList,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-azurerm.storageDataLakeGen2Path.StorageDataLakeGen2PathAceList",
):
def __init__(
self,
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
wraps_set: builtins.bool,
) -> None:
'''
:param terraform_resource: The parent resource.
:param terraform_attribute: The attribute on the parent resource this class is referencing.
:param wraps_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index).
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__84005d70e02b4c583f615c1000ad0a60ad1ff8034fd52f289517b12457599602)
check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
check_type(argname="argument wraps_set", value=wraps_set, expected_type=type_hints["wraps_set"])
jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, wraps_set])
@jsii.member(jsii_name="get")
def get(self, index: jsii.Number) -> "StorageDataLakeGen2PathAceOutputReference":
'''
:param index: the index of the item to return.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__c7df5c8169db638b2032ce01c1f7e170093aed94f9978d60223070463ae2b66f)
check_type(argname="argument index", value=index, expected_type=type_hints["index"])
return typing.cast("StorageDataLakeGen2PathAceOutputReference", jsii.invoke(self, "get", [index]))
@builtins.property
@jsii.member(jsii_name="terraformAttribute")
def _terraform_attribute(self) -> builtins.str:
'''The attribute on the parent resource this class is referencing.'''
return typing.cast(builtins.str, jsii.get(self, "terraformAttribute"))
@_terraform_attribute.setter
def _terraform_attribute(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__b3ebf934531ab38229c111a26e1bfb081acfa51facbc178cfa85c41419cfec53)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "terraformAttribute", value)
@builtins.property
@jsii.member(jsii_name="terraformResource")
def _terraform_resource(self) -> _cdktf_9a9027ec.IInterpolatingParent:
'''The parent resource.'''
return typing.cast(_cdktf_9a9027ec.IInterpolatingParent, jsii.get(self, "terraformResource"))
@_terraform_resource.setter
def _terraform_resource(self, value: _cdktf_9a9027ec.IInterpolatingParent) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__16c66d840606eac3861ef75b82d26b4c695056304cd9b2dcd0cadb35aec83cf0)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "terraformResource", value)
@builtins.property
@jsii.member(jsii_name="wrapsSet")
def _wraps_set(self) -> builtins.bool:
'''whether the list is wrapping a set (will add tolist() to be able to access an item via an index).'''
return typing.cast(builtins.bool, jsii.get(self, "wrapsSet"))
@_wraps_set.setter
def _wraps_set(self, value: builtins.bool) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__d2ddf13f45b7d3c3f065a6becfff1969e9e2385dd56c85ab4a1cfcdcd8b3b4e8)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "wrapsSet", value)
@builtins.property
@jsii.member(jsii_name="internalValue")
def internal_value(
self,
) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List[StorageDataLakeGen2PathAce]]]:
return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List[StorageDataLakeGen2PathAce]]], jsii.get(self, "internalValue"))
@internal_value.setter
def internal_value(
self,
value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List[StorageDataLakeGen2PathAce]]],
) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__fa1ce729dfac024ecc287ad08cc5756ad033e07e13ba0083c9c3b51b7a944066)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "internalValue", value)
class StorageDataLakeGen2PathAceOutputReference(
_cdktf_9a9027ec.ComplexObject,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-azurerm.storageDataLakeGen2Path.StorageDataLakeGen2PathAceOutputReference",
):
def __init__(
self,
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
complex_object_index: jsii.Number,
complex_object_is_from_set: builtins.bool,
) -> None:
'''
:param terraform_resource: The parent resource.
:param terraform_attribute: The attribute on the parent resource this class is referencing.
:param complex_object_index: the index of this item in the list.
:param complex_object_is_from_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index).
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__cd29bd52ca47877c23b1631cb27453f2bb4e476b2302eece4b6829289105d6b9)
check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
check_type(argname="argument complex_object_index", value=complex_object_index, expected_type=type_hints["complex_object_index"])
check_type(argname="argument complex_object_is_from_set", value=complex_object_is_from_set, expected_type=type_hints["complex_object_is_from_set"])
jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, complex_object_index, complex_object_is_from_set])
@jsii.member(jsii_name="resetId")
def reset_id(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetId", []))
@jsii.member(jsii_name="resetScope")
def reset_scope(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetScope", []))
@builtins.property
@jsii.member(jsii_name="idInput")
def id_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "idInput"))
@builtins.property
@jsii.member(jsii_name="permissionsInput")
def permissions_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "permissionsInput"))
@builtins.property
@jsii.member(jsii_name="scopeInput")
def scope_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "scopeInput"))
@builtins.property
@jsii.member(jsii_name="typeInput")
def type_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "typeInput"))
@builtins.property
@jsii.member(jsii_name="id")
def id(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "id"))
@id.setter
def id(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__5bbd60b377f8a434d56104270bb384e6dea315fd2899ce750d78a9820f93b78b)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "id", value)
@builtins.property
@jsii.member(jsii_name="permissions")
def permissions(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "permissions"))
@permissions.setter
def permissions(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__a74e13b8d2535ac06dadca8141f14ff2df3bcfb3e22a009d02cc1bd383df63cd)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "permissions", value)
@builtins.property
@jsii.member(jsii_name="scope")
def scope(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "scope"))
@scope.setter
def scope(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__39995899955f52de9af0ba1be1d55dffa8a983bf23604b01e43dfa0adc5bae41)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "scope", value)
@builtins.property
@jsii.member(jsii_name="type")
def type(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "type"))
@type.setter
def type(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__8b18a9b306d80b6bfac995935c191b1d99f40efa47cbd586e664131742be7ac6)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "type", value)
@builtins.property
@jsii.member(jsii_name="internalValue")
def internal_value(
self,
) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, StorageDataLakeGen2PathAce]]:
return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, StorageDataLakeGen2PathAce]], jsii.get(self, "internalValue"))
@internal_value.setter
def internal_value(
self,
value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, StorageDataLakeGen2PathAce]],
) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__ccaa6f3aef85dbe2f0fd8bf22cd50044895d1b910552c67cf02f70e98434c86a)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "internalValue", value)
@jsii.data_type(
jsii_type="@cdktf/provider-azurerm.storageDataLakeGen2Path.StorageDataLakeGen2PathConfig",
jsii_struct_bases=[_cdktf_9a9027ec.TerraformMetaArguments],
name_mapping={
"connection": "connection",
"count": "count",
"depends_on": "dependsOn",
"for_each": "forEach",
"lifecycle": "lifecycle",
"provider": "provider",
"provisioners": "provisioners",
"filesystem_name": "filesystemName",
"path": "path",
"resource": "resource",
"storage_account_id": "storageAccountId",
"ace": "ace",
"group": "group",
"id": "id",
"owner": "owner",
"timeouts": "timeouts",
},
)
class StorageDataLakeGen2PathConfig(_cdktf_9a9027ec.TerraformMetaArguments):
def __init__(
self,
*,
connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
filesystem_name: builtins.str,
path: builtins.str,
resource: builtins.str,
storage_account_id: builtins.str,
ace: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union[StorageDataLakeGen2PathAce, typing.Dict[builtins.str, typing.Any]]]]] = None,
group: typing.Optional[builtins.str] = None,
id: typing.Optional[builtins.str] = None,
owner: typing.Optional[builtins.str] = None,
timeouts: typing.Optional[typing.Union["StorageDataLakeGen2PathTimeouts", typing.Dict[builtins.str, typing.Any]]] = None,
) -> None:
'''
:param connection:
:param count:
:param depends_on:
:param for_each:
:param lifecycle:
:param provider:
:param provisioners:
:param filesystem_name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#filesystem_name StorageDataLakeGen2Path#filesystem_name}.
:param path: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#path StorageDataLakeGen2Path#path}.
:param resource: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#resource StorageDataLakeGen2Path#resource}.
:param storage_account_id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#storage_account_id StorageDataLakeGen2Path#storage_account_id}.
:param ace: ace block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#ace StorageDataLakeGen2Path#ace}
:param group: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#group StorageDataLakeGen2Path#group}.
:param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#id StorageDataLakeGen2Path#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
:param owner: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#owner StorageDataLakeGen2Path#owner}.
:param timeouts: timeouts block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#timeouts StorageDataLakeGen2Path#timeouts}
'''
if isinstance(lifecycle, dict):
lifecycle = _cdktf_9a9027ec.TerraformResourceLifecycle(**lifecycle)
if isinstance(timeouts, dict):
timeouts = StorageDataLakeGen2PathTimeouts(**timeouts)
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__596b8bf26921a622f567fa08d4238e07559a078b5547f36092a4d26fa0f09853)
check_type(argname="argument connection", value=connection, expected_type=type_hints["connection"])
check_type(argname="argument count", value=count, expected_type=type_hints["count"])
check_type(argname="argument depends_on", value=depends_on, expected_type=type_hints["depends_on"])
check_type(argname="argument for_each", value=for_each, expected_type=type_hints["for_each"])
check_type(argname="argument lifecycle", value=lifecycle, expected_type=type_hints["lifecycle"])
check_type(argname="argument provider", value=provider, expected_type=type_hints["provider"])
check_type(argname="argument provisioners", value=provisioners, expected_type=type_hints["provisioners"])
check_type(argname="argument filesystem_name", value=filesystem_name, expected_type=type_hints["filesystem_name"])
check_type(argname="argument path", value=path, expected_type=type_hints["path"])
check_type(argname="argument resource", value=resource, expected_type=type_hints["resource"])
check_type(argname="argument storage_account_id", value=storage_account_id, expected_type=type_hints["storage_account_id"])
check_type(argname="argument ace", value=ace, expected_type=type_hints["ace"])
check_type(argname="argument group", value=group, expected_type=type_hints["group"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
check_type(argname="argument owner", value=owner, expected_type=type_hints["owner"])
check_type(argname="argument timeouts", value=timeouts, expected_type=type_hints["timeouts"])
self._values: typing.Dict[builtins.str, typing.Any] = {
"filesystem_name": filesystem_name,
"path": path,
"resource": resource,
"storage_account_id": storage_account_id,
}
if connection is not None:
self._values["connection"] = connection
if count is not None:
self._values["count"] = count
if depends_on is not None:
self._values["depends_on"] = depends_on
if for_each is not None:
self._values["for_each"] = for_each
if lifecycle is not None:
self._values["lifecycle"] = lifecycle
if provider is not None:
self._values["provider"] = provider
if provisioners is not None:
self._values["provisioners"] = provisioners
if ace is not None:
self._values["ace"] = ace
if group is not None:
self._values["group"] = group
if id is not None:
self._values["id"] = id
if owner is not None:
self._values["owner"] = owner
if timeouts is not None:
self._values["timeouts"] = timeouts
@builtins.property
def connection(
self,
) -> typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]]:
'''
:stability: experimental
'''
result = self._values.get("connection")
return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]], result)
@builtins.property
def count(
self,
) -> typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]]:
'''
:stability: experimental
'''
result = self._values.get("count")
return typing.cast(typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]], result)
@builtins.property
def depends_on(
self,
) -> typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]]:
'''
:stability: experimental
'''
result = self._values.get("depends_on")
return typing.cast(typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]], result)
@builtins.property
def for_each(self) -> typing.Optional[_cdktf_9a9027ec.ITerraformIterator]:
'''
:stability: experimental
'''
result = self._values.get("for_each")
return typing.cast(typing.Optional[_cdktf_9a9027ec.ITerraformIterator], result)
@builtins.property
def lifecycle(self) -> typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle]:
'''
:stability: experimental
'''
result = self._values.get("lifecycle")
return typing.cast(typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle], result)
@builtins.property
def provider(self) -> typing.Optional[_cdktf_9a9027ec.TerraformProvider]:
'''
:stability: experimental
'''
result = self._values.get("provider")
return typing.cast(typing.Optional[_cdktf_9a9027ec.TerraformProvider], result)
@builtins.property
def provisioners(
self,
) -> typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]]:
'''
:stability: experimental
'''
result = self._values.get("provisioners")
return typing.cast(typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]], result)
@builtins.property
def filesystem_name(self) -> builtins.str:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#filesystem_name StorageDataLakeGen2Path#filesystem_name}.'''
result = self._values.get("filesystem_name")
assert result is not None, "Required property 'filesystem_name' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def path(self) -> builtins.str:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#path StorageDataLakeGen2Path#path}.'''
result = self._values.get("path")
assert result is not None, "Required property 'path' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def resource(self) -> builtins.str:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#resource StorageDataLakeGen2Path#resource}.'''
result = self._values.get("resource")
assert result is not None, "Required property 'resource' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def storage_account_id(self) -> builtins.str:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#storage_account_id StorageDataLakeGen2Path#storage_account_id}.'''
result = self._values.get("storage_account_id")
assert result is not None, "Required property 'storage_account_id' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def ace(
self,
) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List[StorageDataLakeGen2PathAce]]]:
'''ace block.
Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#ace StorageDataLakeGen2Path#ace}
'''
result = self._values.get("ace")
return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List[StorageDataLakeGen2PathAce]]], result)
@builtins.property
def group(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#group StorageDataLakeGen2Path#group}.'''
result = self._values.get("group")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def id(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#id StorageDataLakeGen2Path#id}.
Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2.
If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
'''
result = self._values.get("id")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def owner(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#owner StorageDataLakeGen2Path#owner}.'''
result = self._values.get("owner")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def timeouts(self) -> typing.Optional["StorageDataLakeGen2PathTimeouts"]:
'''timeouts block.
Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#timeouts StorageDataLakeGen2Path#timeouts}
'''
result = self._values.get("timeouts")
return typing.cast(typing.Optional["StorageDataLakeGen2PathTimeouts"], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "StorageDataLakeGen2PathConfig(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="@cdktf/provider-azurerm.storageDataLakeGen2Path.StorageDataLakeGen2PathTimeouts",
jsii_struct_bases=[],
name_mapping={
"create": "create",
"delete": "delete",
"read": "read",
"update": "update",
},
)
class StorageDataLakeGen2PathTimeouts:
def __init__(
self,
*,
create: typing.Optional[builtins.str] = None,
delete: typing.Optional[builtins.str] = None,
read: typing.Optional[builtins.str] = None,
update: typing.Optional[builtins.str] = None,
) -> None:
'''
:param create: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#create StorageDataLakeGen2Path#create}.
:param delete: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#delete StorageDataLakeGen2Path#delete}.
:param read: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#read StorageDataLakeGen2Path#read}.
:param update: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#update StorageDataLakeGen2Path#update}.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__51f9778ff783544c0a26a518c0c8f7b2612443a58921a4cdebac5b877dbbd30d)
check_type(argname="argument create", value=create, expected_type=type_hints["create"])
check_type(argname="argument delete", value=delete, expected_type=type_hints["delete"])
check_type(argname="argument read", value=read, expected_type=type_hints["read"])
check_type(argname="argument update", value=update, expected_type=type_hints["update"])
self._values: typing.Dict[builtins.str, typing.Any] = {}
if create is not None:
self._values["create"] = create
if delete is not None:
self._values["delete"] = delete
if read is not None:
self._values["read"] = read
if update is not None:
self._values["update"] = update
@builtins.property
def create(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#create StorageDataLakeGen2Path#create}.'''
result = self._values.get("create")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def delete(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#delete StorageDataLakeGen2Path#delete}.'''
result = self._values.get("delete")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def read(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#read StorageDataLakeGen2Path#read}.'''
result = self._values.get("read")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def update(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/storage_data_lake_gen2_path#update StorageDataLakeGen2Path#update}.'''
result = self._values.get("update")
return typing.cast(typing.Optional[builtins.str], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "StorageDataLakeGen2PathTimeouts(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class StorageDataLakeGen2PathTimeoutsOutputReference(
_cdktf_9a9027ec.ComplexObject,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-azurerm.storageDataLakeGen2Path.StorageDataLakeGen2PathTimeoutsOutputReference",
):
def __init__(
self,
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
) -> None:
'''
:param terraform_resource: The parent resource.
:param terraform_attribute: The attribute on the parent resource this class is referencing.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__4b2ce593b80f28ba0133a842834e707bbc98d32db34872cbc2f351140068b53a)
check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
jsii.create(self.__class__, self, [terraform_resource, terraform_attribute])
@jsii.member(jsii_name="resetCreate")
def reset_create(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetCreate", []))
@jsii.member(jsii_name="resetDelete")
def reset_delete(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetDelete", []))
@jsii.member(jsii_name="resetRead")
def reset_read(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetRead", []))
@jsii.member(jsii_name="resetUpdate")
def reset_update(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetUpdate", []))
@builtins.property
@jsii.member(jsii_name="createInput")
def create_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "createInput"))
@builtins.property
@jsii.member(jsii_name="deleteInput")
def delete_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "deleteInput"))
@builtins.property
@jsii.member(jsii_name="readInput")
def read_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "readInput"))
@builtins.property
@jsii.member(jsii_name="updateInput")
def update_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "updateInput"))
@builtins.property
@jsii.member(jsii_name="create")
def create(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "create"))
@create.setter
def create(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__ff22bd9acd905e11f34cccfe3d0f0e6d9a4c0d9efa61c86b206be4e0f73570b1)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "create", value)
@builtins.property
@jsii.member(jsii_name="delete")
def delete(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "delete"))
@delete.setter
def delete(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__9c70c9e3d77a9e1e880c63a467dbed5581eff7b5936699d4bf86b9d491c957ac)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "delete", value)
@builtins.property
@jsii.member(jsii_name="read")
def read(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "read"))
@read.setter
def read(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__c8e0c58b6e93a383397c3b9febd8cc57aa07b5bee106cf690c098836a5037992)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "read", value)
@builtins.property
@jsii.member(jsii_name="update")
def update(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "update"))
@update.setter
def update(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__979a1c1ebe83601b8218c34bc7a8c4d45d174a8fa6696ba9fe3e18227051e9f1)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "update", value)
@builtins.property
@jsii.member(jsii_name="internalValue")
def internal_value(
self,
) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, StorageDataLakeGen2PathTimeouts]]:
return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, StorageDataLakeGen2PathTimeouts]], jsii.get(self, "internalValue"))
@internal_value.setter
def internal_value(
self,
value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, StorageDataLakeGen2PathTimeouts]],
) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__1037526ce908b9224c06c9b4e91e84fdb89aba0a2ac774738a55731f9f4333eb)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "internalValue", value)
__all__ = [
"StorageDataLakeGen2Path",
"StorageDataLakeGen2PathAce",
"StorageDataLakeGen2PathAceList",
"StorageDataLakeGen2PathAceOutputReference",
"StorageDataLakeGen2PathConfig",
"StorageDataLakeGen2PathTimeouts",
"StorageDataLakeGen2PathTimeoutsOutputReference",
]
publication.publish()
def _typecheckingstub__3c94aee74f63fb2fee19927c02442279e5809e94427f5284a5da5d77e4060106(
scope: _constructs_77d1e7e8.Construct,
id_: builtins.str,
*,
filesystem_name: builtins.str,
path: builtins.str,
resource: builtins.str,
storage_account_id: builtins.str,
ace: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union[StorageDataLakeGen2PathAce, typing.Dict[builtins.str, typing.Any]]]]] = None,
group: typing.Optional[builtins.str] = None,
id: typing.Optional[builtins.str] = None,
owner: typing.Optional[builtins.str] = None,
timeouts: typing.Optional[typing.Union[StorageDataLakeGen2PathTimeouts, typing.Dict[builtins.str, typing.Any]]] = None,
connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__47e2bf66496bb652d4b9955378c257044b9bab58c3cdbe215efa7f3821edd0ba(
value: typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union[StorageDataLakeGen2PathAce, typing.Dict[builtins.str, typing.Any]]]],
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__277abcf418ce63b728f0a8953f0b312f3afb4cbe7a2ca1b520bdcf072325b964(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__0e22355a977bacf74ba517d7810d0022e855241a28b0fa2fd274a936a69ee997(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__2f4e220a3acfa949a03844677835634ad052b713e149675c955e312c9025e192(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__01c5a1b57453b5097f0d36a6a4866124a82ddd3260e6b95e31a8eab5bf529222(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__66de0178e00716aa15a1ed3b6de9c21c0b8a376286579c2ce4e11322b99595c3(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__be73f0ef99a0d151a518b1f3886120d85b04c367c157f991cd32ed8d49287c28(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__27a1b24658f6bfbdbac18933e93e467ed02996949f0957033fc0b49a905484d4(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__6c88e648028b542fdc958640b55105e5b90152fda7565b640ee655d050cccad5(
*,
permissions: builtins.str,
type: builtins.str,
id: typing.Optional[builtins.str] = None,
scope: typing.Optional[builtins.str] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__84005d70e02b4c583f615c1000ad0a60ad1ff8034fd52f289517b12457599602(
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
wraps_set: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__c7df5c8169db638b2032ce01c1f7e170093aed94f9978d60223070463ae2b66f(
index: jsii.Number,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__b3ebf934531ab38229c111a26e1bfb081acfa51facbc178cfa85c41419cfec53(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__16c66d840606eac3861ef75b82d26b4c695056304cd9b2dcd0cadb35aec83cf0(
value: _cdktf_9a9027ec.IInterpolatingParent,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__d2ddf13f45b7d3c3f065a6becfff1969e9e2385dd56c85ab4a1cfcdcd8b3b4e8(
value: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__fa1ce729dfac024ecc287ad08cc5756ad033e07e13ba0083c9c3b51b7a944066(
value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List[StorageDataLakeGen2PathAce]]],
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__cd29bd52ca47877c23b1631cb27453f2bb4e476b2302eece4b6829289105d6b9(
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
complex_object_index: jsii.Number,
complex_object_is_from_set: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__5bbd60b377f8a434d56104270bb384e6dea315fd2899ce750d78a9820f93b78b(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__a74e13b8d2535ac06dadca8141f14ff2df3bcfb3e22a009d02cc1bd383df63cd(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__39995899955f52de9af0ba1be1d55dffa8a983bf23604b01e43dfa0adc5bae41(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__8b18a9b306d80b6bfac995935c191b1d99f40efa47cbd586e664131742be7ac6(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__ccaa6f3aef85dbe2f0fd8bf22cd50044895d1b910552c67cf02f70e98434c86a(
value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, StorageDataLakeGen2PathAce]],
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__596b8bf26921a622f567fa08d4238e07559a078b5547f36092a4d26fa0f09853(
*,
connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
filesystem_name: builtins.str,
path: builtins.str,
resource: builtins.str,
storage_account_id: builtins.str,
ace: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union[StorageDataLakeGen2PathAce, typing.Dict[builtins.str, typing.Any]]]]] = None,
group: typing.Optional[builtins.str] = None,
id: typing.Optional[builtins.str] = None,
owner: typing.Optional[builtins.str] = None,
timeouts: typing.Optional[typing.Union[StorageDataLakeGen2PathTimeouts, typing.Dict[builtins.str, typing.Any]]] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__51f9778ff783544c0a26a518c0c8f7b2612443a58921a4cdebac5b877dbbd30d(
*,
create: typing.Optional[builtins.str] = None,
delete: typing.Optional[builtins.str] = None,
read: typing.Optional[builtins.str] = None,
update: typing.Optional[builtins.str] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__4b2ce593b80f28ba0133a842834e707bbc98d32db34872cbc2f351140068b53a(
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__ff22bd9acd905e11f34cccfe3d0f0e6d9a4c0d9efa61c86b206be4e0f73570b1(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__9c70c9e3d77a9e1e880c63a467dbed5581eff7b5936699d4bf86b9d491c957ac(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__c8e0c58b6e93a383397c3b9febd8cc57aa07b5bee106cf690c098836a5037992(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__979a1c1ebe83601b8218c34bc7a8c4d45d174a8fa6696ba9fe3e18227051e9f1(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__1037526ce908b9224c06c9b4e91e84fdb89aba0a2ac774738a55731f9f4333eb(
value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, StorageDataLakeGen2PathTimeouts]],
) -> None:
"""Type checking stubs"""
pass | PypiClean |
/tensorflow_cpu_test_package-2.11.0rc0-cp38-cp38-win_amd64.whl/tensorflow/python/profiler/profiler_client.py | """Profiler client APIs."""
from tensorflow.python.framework import errors
from tensorflow.python.profiler.internal import _pywrap_profiler
from tensorflow.python.util.tf_export import tf_export
_GRPC_PREFIX = 'grpc://'
@tf_export('profiler.experimental.client.trace', v1=[])
def trace(service_addr,
logdir,
duration_ms,
worker_list='',
num_tracing_attempts=3,
options=None):
"""Sends gRPC requests to one or more profiler servers to perform on-demand profiling.
This method will block the calling thread until it receives responses from all
servers or until deadline expiration. Both single host and multiple host
profiling are supported on CPU, GPU, and TPU.
The profiled results will be saved by each server to the specified TensorBoard
log directory (i.e. the directory you save your model checkpoints). Use the
TensorBoard profile plugin to view the visualization and analysis results.
Args:
service_addr: A comma delimited string of gRPC addresses of the workers to
profile.
e.g. service_addr='grpc://localhost:6009'
service_addr='grpc://10.0.0.2:8466,grpc://10.0.0.3:8466'
service_addr='grpc://localhost:12345,grpc://localhost:23456'
logdir: Path to save profile data to, typically a TensorBoard log directory.
This path must be accessible to both the client and server.
e.g. logdir='gs://your_tb_dir'
duration_ms: Duration of tracing or monitoring in milliseconds. Must be
greater than zero.
worker_list: An optional TPU only configuration. The list of workers to
profile in the current session.
num_tracing_attempts: Optional. Automatically retry N times when no trace
event is collected (default 3).
options: profiler.experimental.ProfilerOptions namedtuple for miscellaneous
profiler options.
Raises:
InvalidArgumentError: For when arguments fail validation checks.
UnavailableError: If no trace event was collected.
Example usage (CPU/GPU):
```python
# Start a profiler server before your model runs.
tf.profiler.experimental.server.start(6009)
# (Model code goes here).
# Send gRPC request to the profiler server to collect a trace of your model.
tf.profiler.experimental.client.trace('grpc://localhost:6009',
'/nfs/tb_log', 2000)
```
Example usage (Multiple GPUs):
```python
# E.g. your worker IP addresses are 10.0.0.2, 10.0.0.3, 10.0.0.4, and you
# would like to schedule start of profiling 1 second from now, for a
# duration of 2 seconds.
options['delay_ms'] = 1000
tf.profiler.experimental.client.trace(
'grpc://10.0.0.2:8466,grpc://10.0.0.3:8466,grpc://10.0.0.4:8466',
'gs://your_tb_dir',
2000,
options=options)
```
Example usage (TPU):
```python
# Send gRPC request to a TPU worker to collect a trace of your model. A
# profiler service has been started in the TPU worker at port 8466.
  # E.g. your TPU IP address is 10.0.0.2 and you want to profile for 2 seconds.
tf.profiler.experimental.client.trace('grpc://10.0.0.2:8466',
'gs://your_tb_dir', 2000)
```
Example usage (Multiple TPUs):
```python
# Send gRPC request to a TPU pod to collect a trace of your model on
# multiple TPUs. A profiler service has been started in all the TPU workers
# at the port 8466.
# E.g. your TPU IP addresses are 10.0.0.2, 10.0.0.3, 10.0.0.4, and you want
# to profile for 2 seconds.
tf.profiler.experimental.client.trace(
'grpc://10.0.0.2:8466',
'gs://your_tb_dir',
2000,
'10.0.0.2:8466,10.0.0.3:8466,10.0.0.4:8466')
```
Launch TensorBoard and point it to the same logdir you provided to this API.
```shell
# logdir can be gs://your_tb_dir as in the above examples.
$ tensorboard --logdir=/tmp/tb_log
```
Open your browser and go to localhost:6006/#profile to view profiling results.
"""
if duration_ms <= 0:
raise errors.InvalidArgumentError(None, None,
'duration_ms must be greater than zero.')
opts = dict(options._asdict()) if options is not None else {}
_pywrap_profiler.trace(
_strip_addresses(service_addr, _GRPC_PREFIX), logdir, worker_list, True,
duration_ms, num_tracing_attempts, opts)
@tf_export('profiler.experimental.client.monitor', v1=[])
def monitor(service_addr, duration_ms, level=1):
"""Sends grpc requests to profiler server to perform on-demand monitoring.
The monitoring result is a light weight performance summary of your model
execution. This method will block the caller thread until it receives the
monitoring result. This method currently supports Cloud TPU only.
Args:
service_addr: gRPC address of profiler service e.g. grpc://10.0.0.2:8466.
duration_ms: Duration of monitoring in ms.
level: Choose a monitoring level between 1 and 2 to monitor your job. Level
2 is more verbose than level 1 and shows more metrics.
Returns:
A string of monitoring output.
Example usage:
```python
# Continuously send gRPC requests to the Cloud TPU to monitor the model
# execution.
for query in range(0, 100):
print(
tf.profiler.experimental.client.monitor('grpc://10.0.0.2:8466', 1000))
```
"""
return _pywrap_profiler.monitor(
_strip_prefix(service_addr, _GRPC_PREFIX), duration_ms, level, True)
def _strip_prefix(s, prefix):
return s[len(prefix):] if s.startswith(prefix) else s
def _strip_addresses(addresses, prefix):
return ','.join([_strip_prefix(s, prefix) for s in addresses.split(',')]) | PypiClean |
/tryp-nvim-7.4.1.tar.gz/tryp-nvim-7.4.1/trypnv/test/spec.py | from typing import Callable, Any
from contextlib import contextmanager
from flexmock import flexmock
from tryp import may, Maybe, List
from tryp.test import Spec
import trypnv
from trypnv.nvim import Buffer, Tab, Window
from trypnv import NvimFacade
class MockNvim(object):
@property
def window(self):
return MockWindow(self.vim, None, self.prefix)
@property
def buffer(self):
return MockBuffer(self.vim, None, self.prefix)
@property
def tab(self):
return MockTab(self.vim, None, self.prefix)
def set_option(self, *a, **kw):
pass
def cmd(self, *a, **kw):
pass
class MockTab(MockNvim, Tab):
pass
class MockWindow(MockNvim, Window):
pass
class MockBuffer(MockNvim, Buffer):
pass
class MockNvimFacade(MockNvim, NvimFacade):
def __init__(self, prefix):
self.vars = {}
super().__init__(None, prefix)
self.target = self
@may
def var(self, name: str) -> Maybe[str]: # type: ignore
v = self.vars.get(name)
if v is None:
ignore_names = ['_machine', '_message',
'{}__message'.format(self.prefix),
'{}__machine'.format(self.prefix),
]
if name not in ignore_names:
self.log.error('variable not found: {}'.format(name))
return v
@property
def windows(self):
return List()
def switch_root(self, root):
pass
def async(self, f: Callable[['NvimFacade'], Any]):
return f(self)
@contextmanager
def main_event_loop(self):
yield None
def reload_windows(self):
pass
class MockNvimSpec(Spec):
def __init__(self, prefix):
super().__init__()
self.prefix = prefix
def setup(self):
super().setup()
trypnv.in_vim = False
self.vim = MockNvimFacade(self.prefix)
self.vim_mock = flexmock(self.vim)
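# Illustrative usage sketch (the plugin name and variable below are hypothetical):
# a plugin's spec subclasses MockNvimSpec with its variable prefix and drives the
# mocked facade.
#
#     class MyPluginSpec(MockNvimSpec):
#         def __init__(self):
#             super().__init__('myplug')
#         def variable_roundtrip(self):
#             self.vim.vars['myplug_option'] = 1
#             # self.vim.var('myplug_option') now yields a Maybe containing 1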
__all__ = ('MockNvimSpec', 'MockNvimFacade') | PypiClean |
/hetdex_api-0.8.7.tar.gz/hetdex_api-0.8.7/hetdex_api/extinction.py | import numpy as np
from scipy import interpolate
import extinction
from hetdex_api.config import HDRconfig
LATEST_HDR_NAME = HDRconfig.LATEST_HDR_NAME
config = HDRconfig(LATEST_HDR_NAME)
from dustmaps.config import config as dustmaps_config
if dustmaps_config['data_dir'] is None:
print("Populating dustmaps config with {}".format(config.dustmaps))
dustmaps_config['data_dir'] = config.dustmaps
from dustmaps.sfd import SFDQuery
def dustmaps_setup():
    # this is how I initially set up dustmaps
# Need to put maps in config.dustmaps
from dustmaps.config import config as dustmaps_config
import dustmaps
config = HDRconfig()
dustmaps_config['data_dir'] = config.dustmaps
    dustmaps.sfd.fetch()  # not needed if the maps already exist in config.dustmaps
def get_2pt1_extinction_fix(pad=True):
"""
    Correction curve for the flat E(B-V)=0.02
    extinction applied to the 2.1 release.
    Curve from Karl:
    /work/00115/gebhardt/fluxcor/extinction
    Parameters
----------
    pad : bool
        If True, pad the curve so that it covers the full wave_rect
        wavelength range.
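    Example
    -------
    A minimal usage sketch (the wavelength grid below is illustrative)::
        import numpy as np
        fix = get_2pt1_extinction_fix()
        wave = np.arange(3470., 5542., 2.)
        correction = fix(wave)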
"""
config = HDRconfig()
karl_data = np.loadtxt( config.extinction_fix)
if pad:
karl_wave = np.concatenate(([3450.],
karl_data[:, 0],
[5550.]))
karl_curve = np.concatenate(([karl_data[0, 1]],
karl_data[:, 1],
[karl_data[-1, 1]]))
else:
karl_wave = karl_data[:, 0]
karl_curve = karl_data[:, 1]
correction = interpolate.interp1d( karl_wave, karl_curve)
return correction
def deredden_spectra(wave, coords):
"""
Apply S&F 2011 Extinction from SFD Map
https://iopscience.iop.org/article/10.1088/0004-637X/737/2/103#apj398709t6
    Parameters
    ----------
    wave : array
        wavelengths at which to apply the correction
    coords : SkyCoord
        sky coordinates
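    Example
    -------
    A minimal usage sketch (the coordinates and wavelength grid are illustrative)::
        import numpy as np
        from astropy.coordinates import SkyCoord
        wave = np.arange(3470., 5542., 2.)
        coords = SkyCoord(ra=150.0, dec=2.2, unit='deg')
        factor = deredden_spectra(wave, coords)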
"""
Rv = 3.1
corr_SF2011 = 2.742 # Landolt V
sfd = SFDQuery()
ebv = sfd(coords)
Av = corr_SF2011 * ebv
ext = extinction.fitzpatrick99(np.array(wave, dtype=np.double), Av, Rv)
deredden = 10**(0.4*np.array(ext))
return deredden | PypiClean |
/matplotlib-3.8.0rc1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/mpl_toolkits/axes_grid1/inset_locator.py | from matplotlib import _api, _docstring
from matplotlib.offsetbox import AnchoredOffsetbox
from matplotlib.patches import Patch, Rectangle
from matplotlib.path import Path
from matplotlib.transforms import Bbox, BboxTransformTo
from matplotlib.transforms import IdentityTransform, TransformedBbox
from . import axes_size as Size
from .parasite_axes import HostAxes
@_api.deprecated("3.8", alternative="Axes.inset_axes")
class InsetPosition:
@_docstring.dedent_interpd
def __init__(self, parent, lbwh):
"""
An object for positioning an inset axes.
This is created by specifying the normalized coordinates in the axes,
instead of the figure.
Parameters
----------
parent : `~matplotlib.axes.Axes`
Axes to use for normalizing coordinates.
lbwh : iterable of four floats
The left edge, bottom edge, width, and height of the inset axes, in
units of the normalized coordinate of the *parent* axes.
See Also
--------
:meth:`matplotlib.axes.Axes.set_axes_locator`
Examples
--------
The following bounds the inset axes to a box with 20%% of the parent
axes height and 40%% of the width. The size of the axes specified
([0, 0, 1, 1]) ensures that the axes completely fills the bounding box:
>>> parent_axes = plt.gca()
>>> ax_ins = plt.axes([0, 0, 1, 1])
>>> ip = InsetPosition(parent_axes, [0.5, 0.1, 0.4, 0.2])
>>> ax_ins.set_axes_locator(ip)
"""
self.parent = parent
self.lbwh = lbwh
def __call__(self, ax, renderer):
bbox_parent = self.parent.get_position(original=False)
trans = BboxTransformTo(bbox_parent)
bbox_inset = Bbox.from_bounds(*self.lbwh)
bb = TransformedBbox(bbox_inset, trans)
return bb
class AnchoredLocatorBase(AnchoredOffsetbox):
def __init__(self, bbox_to_anchor, offsetbox, loc,
borderpad=0.5, bbox_transform=None):
super().__init__(
loc, pad=0., child=None, borderpad=borderpad,
bbox_to_anchor=bbox_to_anchor, bbox_transform=bbox_transform
)
def draw(self, renderer):
raise RuntimeError("No draw method should be called")
def __call__(self, ax, renderer):
if renderer is None:
renderer = ax.figure._get_renderer()
self.axes = ax
bbox = self.get_window_extent(renderer)
px, py = self.get_offset(bbox.width, bbox.height, 0, 0, renderer)
bbox_canvas = Bbox.from_bounds(px, py, bbox.width, bbox.height)
tr = ax.figure.transSubfigure.inverted()
return TransformedBbox(bbox_canvas, tr)
class AnchoredSizeLocator(AnchoredLocatorBase):
def __init__(self, bbox_to_anchor, x_size, y_size, loc,
borderpad=0.5, bbox_transform=None):
super().__init__(
bbox_to_anchor, None, loc,
borderpad=borderpad, bbox_transform=bbox_transform
)
self.x_size = Size.from_any(x_size)
self.y_size = Size.from_any(y_size)
def get_bbox(self, renderer):
bbox = self.get_bbox_to_anchor()
dpi = renderer.points_to_pixels(72.)
r, a = self.x_size.get_size(renderer)
width = bbox.width * r + a * dpi
r, a = self.y_size.get_size(renderer)
height = bbox.height * r + a * dpi
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return Bbox.from_bounds(0, 0, width, height).padded(pad)
class AnchoredZoomLocator(AnchoredLocatorBase):
def __init__(self, parent_axes, zoom, loc,
borderpad=0.5,
bbox_to_anchor=None,
bbox_transform=None):
self.parent_axes = parent_axes
self.zoom = zoom
if bbox_to_anchor is None:
bbox_to_anchor = parent_axes.bbox
super().__init__(
bbox_to_anchor, None, loc, borderpad=borderpad,
bbox_transform=bbox_transform)
def get_bbox(self, renderer):
bb = self.parent_axes.transData.transform_bbox(self.axes.viewLim)
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return (
Bbox.from_bounds(
0, 0, abs(bb.width * self.zoom), abs(bb.height * self.zoom))
.padded(pad))
class BboxPatch(Patch):
@_docstring.dedent_interpd
def __init__(self, bbox, **kwargs):
"""
Patch showing the shape bounded by a Bbox.
Parameters
----------
bbox : `~matplotlib.transforms.Bbox`
Bbox to use for the extents of this patch.
**kwargs
Patch properties. Valid arguments include:
%(Patch:kwdoc)s
"""
if "transform" in kwargs:
raise ValueError("transform should not be set")
kwargs["transform"] = IdentityTransform()
super().__init__(**kwargs)
self.bbox = bbox
def get_path(self):
# docstring inherited
x0, y0, x1, y1 = self.bbox.extents
return Path._create_closed([(x0, y0), (x1, y0), (x1, y1), (x0, y1)])
class BboxConnector(Patch):
@staticmethod
def get_bbox_edge_pos(bbox, loc):
"""
Return the ``(x, y)`` coordinates of corner *loc* of *bbox*; parameters
behave as documented for the `.BboxConnector` constructor.
"""
x0, y0, x1, y1 = bbox.extents
if loc == 1:
return x1, y1
elif loc == 2:
return x0, y1
elif loc == 3:
return x0, y0
elif loc == 4:
return x1, y0
@staticmethod
def connect_bbox(bbox1, bbox2, loc1, loc2=None):
"""
Construct a `.Path` connecting corner *loc1* of *bbox1* to corner
*loc2* of *bbox2*, where parameters behave as documented as for the
`.BboxConnector` constructor.
"""
if isinstance(bbox1, Rectangle):
bbox1 = TransformedBbox(Bbox.unit(), bbox1.get_transform())
if isinstance(bbox2, Rectangle):
bbox2 = TransformedBbox(Bbox.unit(), bbox2.get_transform())
if loc2 is None:
loc2 = loc1
x1, y1 = BboxConnector.get_bbox_edge_pos(bbox1, loc1)
x2, y2 = BboxConnector.get_bbox_edge_pos(bbox2, loc2)
return Path([[x1, y1], [x2, y2]])
@_docstring.dedent_interpd
def __init__(self, bbox1, bbox2, loc1, loc2=None, **kwargs):
"""
Connect two bboxes with a straight line.
Parameters
----------
bbox1, bbox2 : `~matplotlib.transforms.Bbox`
Bounding boxes to connect.
loc1, loc2 : {1, 2, 3, 4}
Corner of *bbox1* and *bbox2* to draw the line. Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
*loc2* is optional and defaults to *loc1*.
**kwargs
Patch properties for the line drawn. Valid arguments include:
%(Patch:kwdoc)s
"""
if "transform" in kwargs:
raise ValueError("transform should not be set")
kwargs["transform"] = IdentityTransform()
kwargs.setdefault(
"fill", bool({'fc', 'facecolor', 'color'}.intersection(kwargs)))
super().__init__(**kwargs)
self.bbox1 = bbox1
self.bbox2 = bbox2
self.loc1 = loc1
self.loc2 = loc2
def get_path(self):
# docstring inherited
return self.connect_bbox(self.bbox1, self.bbox2,
self.loc1, self.loc2)
class BboxConnectorPatch(BboxConnector):
@_docstring.dedent_interpd
def __init__(self, bbox1, bbox2, loc1a, loc2a, loc1b, loc2b, **kwargs):
"""
Connect two bboxes with a quadrilateral.
The quadrilateral is specified by two lines that start and end at
corners of the bboxes. The four sides of the quadrilateral are defined
by the two lines given, the line between the two corners specified in
*bbox1* and the line between the two corners specified in *bbox2*.
Parameters
----------
bbox1, bbox2 : `~matplotlib.transforms.Bbox`
Bounding boxes to connect.
loc1a, loc2a, loc1b, loc2b : {1, 2, 3, 4}
The first line connects corners *loc1a* of *bbox1* and *loc2a* of
*bbox2*; the second line connects corners *loc1b* of *bbox1* and
*loc2b* of *bbox2*. Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
**kwargs
Patch properties for the line drawn:
%(Patch:kwdoc)s
"""
if "transform" in kwargs:
raise ValueError("transform should not be set")
super().__init__(bbox1, bbox2, loc1a, loc2a, **kwargs)
self.loc1b = loc1b
self.loc2b = loc2b
def get_path(self):
# docstring inherited
path1 = self.connect_bbox(self.bbox1, self.bbox2, self.loc1, self.loc2)
path2 = self.connect_bbox(self.bbox2, self.bbox1,
self.loc2b, self.loc1b)
path_merged = [*path1.vertices, *path2.vertices, path1.vertices[0]]
return Path(path_merged)
def _add_inset_axes(parent_axes, axes_class, axes_kwargs, axes_locator):
"""Helper function to add an inset axes and disable navigation in it."""
if axes_class is None:
axes_class = HostAxes
if axes_kwargs is None:
axes_kwargs = {}
inset_axes = axes_class(
parent_axes.figure, parent_axes.get_position(),
**{"navigate": False, **axes_kwargs, "axes_locator": axes_locator})
return parent_axes.figure.add_axes(inset_axes)
@_docstring.dedent_interpd
def inset_axes(parent_axes, width, height, loc='upper right',
bbox_to_anchor=None, bbox_transform=None,
axes_class=None, axes_kwargs=None,
borderpad=0.5):
"""
Create an inset axes with a given width and height.
Both sizes used can be specified either in inches or percentage.
For example,::
inset_axes(parent_axes, width='40%%', height='30%%', loc='lower left')
    creates an inset axes in the lower left corner of *parent_axes* which spans
over 30%% in height and 40%% in width of the *parent_axes*. Since the usage
of `.inset_axes` may become slightly tricky when exceeding such standard
cases, it is recommended to read :doc:`the examples
</gallery/axes_grid1/inset_locator_demo>`.
Notes
-----
    The meaning of *bbox_to_anchor* and *bbox_transform* is interpreted
differently from that of legend. The value of bbox_to_anchor
(or the return value of its get_points method; the default is
*parent_axes.bbox*) is transformed by the bbox_transform (the default
is Identity transform) and then interpreted as points in the pixel
coordinate (which is dpi dependent).
    Thus, the following three calls are identical and create an inset axes
with respect to the *parent_axes*::
axins = inset_axes(parent_axes, "30%%", "40%%")
axins = inset_axes(parent_axes, "30%%", "40%%",
bbox_to_anchor=parent_axes.bbox)
axins = inset_axes(parent_axes, "30%%", "40%%",
bbox_to_anchor=(0, 0, 1, 1),
bbox_transform=parent_axes.transAxes)
Parameters
----------
parent_axes : `matplotlib.axes.Axes`
Axes to place the inset axes.
width, height : float or str
Size of the inset axes to create. If a float is provided, it is
the size in inches, e.g. *width=1.3*. If a string is provided, it is
the size in relative units, e.g. *width='40%%'*. By default, i.e. if
neither *bbox_to_anchor* nor *bbox_transform* are specified, those
are relative to the parent_axes. Otherwise, they are to be understood
relative to the bounding box provided via *bbox_to_anchor*.
loc : str, default: 'upper right'
Location to place the inset axes. Valid locations are
'upper left', 'upper center', 'upper right',
'center left', 'center', 'center right',
'lower left', 'lower center', 'lower right'.
For backward compatibility, numeric values are accepted as well.
See the parameter *loc* of `.Legend` for details.
bbox_to_anchor : tuple or `~matplotlib.transforms.BboxBase`, optional
Bbox that the inset axes will be anchored to. If None,
a tuple of (0, 0, 1, 1) is used if *bbox_transform* is set
to *parent_axes.transAxes* or *parent_axes.figure.transFigure*.
Otherwise, *parent_axes.bbox* is used. If a tuple, can be either
[left, bottom, width, height], or [left, bottom].
If the kwargs *width* and/or *height* are specified in relative units,
the 2-tuple [left, bottom] cannot be used. Note that,
unless *bbox_transform* is set, the units of the bounding box
are interpreted in the pixel coordinate. When using *bbox_to_anchor*
with tuple, it almost always makes sense to also specify
a *bbox_transform*. This might often be the axes transform
*parent_axes.transAxes*.
bbox_transform : `~matplotlib.transforms.Transform`, optional
Transformation for the bbox that contains the inset axes.
If None, a `.transforms.IdentityTransform` is used. The value
of *bbox_to_anchor* (or the return value of its get_points method)
is transformed by the *bbox_transform* and then interpreted
as points in the pixel coordinate (which is dpi dependent).
You may provide *bbox_to_anchor* in some normalized coordinate,
and give an appropriate transform (e.g., *parent_axes.transAxes*).
axes_class : `~matplotlib.axes.Axes` type, default: `.HostAxes`
The type of the newly created inset axes.
axes_kwargs : dict, optional
Keyword arguments to pass to the constructor of the inset axes.
Valid arguments include:
%(Axes:kwdoc)s
borderpad : float, default: 0.5
Padding between inset axes and the bbox_to_anchor.
The units are axes font size, i.e. for a default font size of 10 points
*borderpad = 0.5* is equivalent to a padding of 5 points.
Returns
-------
inset_axes : *axes_class*
Inset axes object created.
"""
if (bbox_transform in [parent_axes.transAxes, parent_axes.figure.transFigure]
and bbox_to_anchor is None):
_api.warn_external("Using the axes or figure transform requires a "
"bounding box in the respective coordinates. "
"Using bbox_to_anchor=(0, 0, 1, 1) now.")
bbox_to_anchor = (0, 0, 1, 1)
if bbox_to_anchor is None:
bbox_to_anchor = parent_axes.bbox
if (isinstance(bbox_to_anchor, tuple) and
(isinstance(width, str) or isinstance(height, str))):
if len(bbox_to_anchor) != 4:
raise ValueError("Using relative units for width or height "
"requires to provide a 4-tuple or a "
"`Bbox` instance to `bbox_to_anchor.")
return _add_inset_axes(
parent_axes, axes_class, axes_kwargs,
AnchoredSizeLocator(
bbox_to_anchor, width, height, loc=loc,
bbox_transform=bbox_transform, borderpad=borderpad))
@_docstring.dedent_interpd
def zoomed_inset_axes(parent_axes, zoom, loc='upper right',
bbox_to_anchor=None, bbox_transform=None,
axes_class=None, axes_kwargs=None,
borderpad=0.5):
"""
Create an anchored inset axes by scaling a parent axes. For usage, also see
:doc:`the examples </gallery/axes_grid1/inset_locator_demo2>`.
Parameters
----------
parent_axes : `~matplotlib.axes.Axes`
Axes to place the inset axes.
zoom : float
Scaling factor of the data axes. *zoom* > 1 will enlarge the
coordinates (i.e., "zoomed in"), while *zoom* < 1 will shrink the
coordinates (i.e., "zoomed out").
loc : str, default: 'upper right'
Location to place the inset axes. Valid locations are
'upper left', 'upper center', 'upper right',
'center left', 'center', 'center right',
'lower left', 'lower center', 'lower right'.
For backward compatibility, numeric values are accepted as well.
See the parameter *loc* of `.Legend` for details.
bbox_to_anchor : tuple or `~matplotlib.transforms.BboxBase`, optional
Bbox that the inset axes will be anchored to. If None,
*parent_axes.bbox* is used. If a tuple, can be either
[left, bottom, width, height], or [left, bottom].
If the kwargs *width* and/or *height* are specified in relative units,
the 2-tuple [left, bottom] cannot be used. Note that
the units of the bounding box are determined through the transform
in use. When using *bbox_to_anchor* it almost always makes sense to
also specify a *bbox_transform*. This might often be the axes transform
*parent_axes.transAxes*.
bbox_transform : `~matplotlib.transforms.Transform`, optional
Transformation for the bbox that contains the inset axes.
If None, a `.transforms.IdentityTransform` is used (i.e. pixel
coordinates). This is useful when not providing any argument to
*bbox_to_anchor*. When using *bbox_to_anchor* it almost always makes
sense to also specify a *bbox_transform*. This might often be the
axes transform *parent_axes.transAxes*. Inversely, when specifying
the axes- or figure-transform here, be aware that not specifying
*bbox_to_anchor* will use *parent_axes.bbox*, the units of which are
in display (pixel) coordinates.
axes_class : `~matplotlib.axes.Axes` type, default: `.HostAxes`
The type of the newly created inset axes.
axes_kwargs : dict, optional
Keyword arguments to pass to the constructor of the inset axes.
Valid arguments include:
%(Axes:kwdoc)s
borderpad : float, default: 0.5
Padding between inset axes and the bbox_to_anchor.
The units are axes font size, i.e. for a default font size of 10 points
*borderpad = 0.5* is equivalent to a padding of 5 points.
Returns
-------
inset_axes : *axes_class*
Inset axes object created.
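    Examples
    --------
    A minimal usage sketch (the data, zoom factor, and limits are illustrative)::
        fig, ax = plt.subplots()
        ax.plot(range(10))
        axins = zoomed_inset_axes(ax, zoom=2, loc='upper left')
        axins.plot(range(10))
        axins.set_xlim(1, 3)
        axins.set_ylim(1, 3)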
"""
return _add_inset_axes(
parent_axes, axes_class, axes_kwargs,
AnchoredZoomLocator(
parent_axes, zoom=zoom, loc=loc,
bbox_to_anchor=bbox_to_anchor, bbox_transform=bbox_transform,
borderpad=borderpad))
class _TransformedBboxWithCallback(TransformedBbox):
"""
Variant of `.TransformBbox` which calls *callback* before returning points.
Used by `.mark_inset` to unstale the parent axes' viewlim as needed.
"""
def __init__(self, *args, callback, **kwargs):
super().__init__(*args, **kwargs)
self._callback = callback
def get_points(self):
self._callback()
return super().get_points()
@_docstring.dedent_interpd
def mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs):
"""
Draw a box to mark the location of an area represented by an inset axes.
This function draws a box in *parent_axes* at the bounding box of
*inset_axes*, and shows a connection with the inset axes by drawing lines
at the corners, giving a "zoomed in" effect.
Parameters
----------
parent_axes : `~matplotlib.axes.Axes`
Axes which contains the area of the inset axes.
inset_axes : `~matplotlib.axes.Axes`
The inset axes.
loc1, loc2 : {1, 2, 3, 4}
Corners to use for connecting the inset axes and the area in the
parent axes.
**kwargs
Patch properties for the lines and box drawn:
%(Patch:kwdoc)s
Returns
-------
pp : `~matplotlib.patches.Patch`
The patch drawn to represent the area of the inset axes.
p1, p2 : `~matplotlib.patches.Patch`
The patches connecting two corners of the inset axes and its area.
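    Examples
    --------
    A minimal usage sketch (corner and style choices are illustrative)::
        axins = zoomed_inset_axes(parent_axes, zoom=3, loc='upper right')
        mark_inset(parent_axes, axins, loc1=2, loc2=4, fc="none", ec="0.5")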
"""
rect = _TransformedBboxWithCallback(
inset_axes.viewLim, parent_axes.transData,
callback=parent_axes._unstale_viewLim)
kwargs.setdefault("fill", bool({'fc', 'facecolor', 'color'}.intersection(kwargs)))
pp = BboxPatch(rect, **kwargs)
parent_axes.add_patch(pp)
p1 = BboxConnector(inset_axes.bbox, rect, loc1=loc1, **kwargs)
inset_axes.add_patch(p1)
p1.set_clip_on(False)
p2 = BboxConnector(inset_axes.bbox, rect, loc1=loc2, **kwargs)
inset_axes.add_patch(p2)
p2.set_clip_on(False)
return pp, p1, p2 | PypiClean |
/torch_hd-5.3.0-py3-none-any.whl/torchhd/datasets/conn_bench_sonar_mines_rocks.py | from typing import List
from torchhd.datasets import DatasetFourFold
class ConnBenchSonarMinesRocks(DatasetFourFold):
"""`Connectionist Bench (Sonar, Mines vs. Rocks) <https://archive.ics.uci.edu/ml/datasets/connectionist+bench+(sonar,+mines+vs.+rocks)>`_ dataset.
.. list-table::
:widths: 10 10 10 10
:align: center
:header-rows: 1
* - Instances
- Attributes
- Task
- Area
* - 208
- 60
- Classification
- Physical
Args:
root (string): Root directory containing the files of the dataset.
train (bool, optional): If True, returns training (sub)set from the file storing training data as further determined by fold and hyper_search variables.
Otherwise returns a subset of train dataset if hypersearch is performed (``hyper_search = True``) if not (``hyper_search = False``) returns a subset of training dataset
as specified in ``conxuntos_kfold.dat`` if fold number is correct. Otherwise issues an error.
fold (int, optional): Specifies which fold number to use. The default value of -1 returns all the training data from the corresponding file.
Values between 0 and 3 specify, which fold in ``conxuntos_kfold.dat`` to use. Relevant only if hyper_search is set to False and ``0 <= fold <= 3``.
Indices in even rows (zero indexing) of ``conxuntos_kfold.dat`` correspond to train subsets while indices in odd rows correspond to test subsets.
        hyper_search (bool, optional): If True, creates dataset using indices in ``conxuntos.dat``. This split is used for hyperparameter search. The first row corresponds to train indices (used if ``train = True``)
while the second row corresponds to test indices (used if ``train = False``).
transform (callable, optional): A function/transform that takes in an torch.FloatTensor
and returns a transformed version.
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
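    Example
    -------
    A minimal usage sketch (the root path below is illustrative)::
        from torchhd.datasets import ConnBenchSonarMinesRocks
        train_ds = ConnBenchSonarMinesRocks("data", train=True, download=True)
        sample, target = train_ds[0]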
"""
name = "conn-bench-sonar-mines-rocks"
classes: List[str] = [
"Mine",
"Rock",
] | PypiClean |
/jupyter_rtc_test-0.0.4.tar.gz/jupyter_rtc_test-0.0.4/jupyter_rtc_test/rtc1/loaders.py |
from __future__ import annotations
import asyncio
from logging import Logger, getLogger
from typing import Any, Callable, Coroutine
from jupyter_server.services.contents.manager import (
AsyncContentsManager,
ContentsManager,
)
from jupyter_server.utils import ensure_async
from jupyter_server_fileid.manager import BaseFileIdManager
from .utils import OutOfBandChanges
class FileLoader:
"""
    A class to centralize all the operations on a file.
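    A typical flow, with illustrative names (``on_file_change`` stands in for an
    async callback)::
        loader = FileLoader(file_id, file_id_manager, contents_manager)
        loader.observe("room-id", on_file_change)
        model = await loader.load_content("text", "file", content=True)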
"""
def __init__(
self,
file_id: str,
file_id_manager: BaseFileIdManager,
contents_manager: AsyncContentsManager | ContentsManager,
log: Logger | None = None,
poll_interval: float | None = None,
) -> None:
self._file_id: str = file_id
self._lock = asyncio.Lock()
self._poll_interval = poll_interval
self._file_id_manager = file_id_manager
self._contents_manager = contents_manager
self._log = log or getLogger(__name__)
self._subscriptions: dict[
str, Callable[[str, dict[str, Any]], Coroutine[Any, Any, None]]
] = {}
self._watcher = asyncio.create_task(self._watch_file()) if self._poll_interval else None
@property
def file_id(self) -> str:
"""File ID"""
return self._file_id
@property
def path(self) -> str:
"""
The file path.
"""
path = self._file_id_manager.get_path(self.file_id)
if path is None:
raise RuntimeError(f"No path found for file ID '{self.file_id}'")
return path
@property
def number_of_subscriptions(self) -> int:
"""
The number of rooms subscribed to this file.
"""
return len(self._subscriptions)
async def clean(self) -> None:
"""
Clean up the file.
Stops the watch task.
"""
if self._watcher is not None:
if not self._watcher.cancelled():
self._watcher.cancel()
await self._watcher
def observe(
self, id: str, callback: Callable[[str, dict[str, Any]], Coroutine[Any, Any, None]]
) -> None:
"""
Subscribe to the file to get notified on file changes.
Parameters:
id (str): Room ID
callback (Callable): Callback for notifying the room.
"""
self._subscriptions[id] = callback
def unobserve(self, id: str) -> None:
"""
        Unsubscribe from the file.
Parameters:
id (str): Room ID
"""
del self._subscriptions[id]
async def load_content(self, format: str, file_type: str, content: bool) -> dict[str, Any]:
"""
Load the content of the file.
Parameters:
format (str): File format.
file_type (str): Content type.
content (bool): Whether to load the content or not.
Returns:
model (dict): A dictionary with the metadata and content of the file.
"""
async with self._lock:
return await ensure_async(
self._contents_manager.get(
self.path, format=format, type=file_type, content=content
)
)
async def save_content(self, model: dict[str, Any]) -> dict[str, Any]:
"""
Save the content of the file.
Parameters:
model (dict): A dictionary with format, type, last_modified, and content of the file.
Returns:
model (dict): A dictionary with the metadata and content of the file.
Raises:
            OutOfBandChanges: if the file was modified at a later time than the model
        ### Note:
        If there are changes on disk, this method will raise an OutOfBandChanges exception.
"""
async with self._lock:
path = self.path
if model["type"] not in {"directory", "file", "notebook"}:
# fall back to file if unknown type, the content manager only knows
# how to handle these types
model["type"] = "file"
m = await ensure_async(
self._contents_manager.get(
path, format=model["format"], type=model["type"], content=False
)
)
if model["last_modified"] == m["last_modified"]:
self._log.info("Saving file: %s", path)
return await ensure_async(self._contents_manager.save(model, path))
else:
# file changed on disk, raise an error
raise OutOfBandChanges
async def _watch_file(self) -> None:
"""
Async task for watching a file.
"""
self._log.info("Watching file: %s", self.path)
if self._poll_interval is None:
return
while True:
try:
await asyncio.sleep(self._poll_interval)
try:
await self.notify()
except Exception as e:
self._log.error(f"Error watching file: {self.path}\n{e!r}", exc_info=e)
except asyncio.CancelledError:
break
async def notify(self) -> None:
"""
        Notifies subscribed rooms about changes to the content of the file.
"""
async with self._lock:
path = self.path
            # Get model metadata; format and type are not needed
model = await ensure_async(self._contents_manager.get(path, content=False))
# Notify that the content changed on disk
for callback in self._subscriptions.values():
await callback("metadata", model)
class FileLoaderMapping:
"""Map rooms to file loaders."""
def __init__(
self,
settings: dict,
log: Logger | None = None,
file_poll_interval: float | None = None,
) -> None:
"""
Args:
            settings: Server settings
            log: [optional] Server log; defaults to the local logger
            file_poll_interval: [optional] Interval between room notifications; by default the loader does not poll
"""
self._settings = settings
self.__dict: dict[str, FileLoader] = {}
self.log = log or getLogger(__name__)
self.file_poll_interval = file_poll_interval
@property
def contents_manager(self) -> AsyncContentsManager | ContentsManager:
return self._settings["contents_manager"]
@property
def file_id_manager(self) -> BaseFileIdManager:
return self._settings["file_id_manager"]
def __contains__(self, file_id: str) -> bool:
"""Test if a file has a loader."""
return file_id in self.__dict
def __getitem__(self, file_id: str) -> FileLoader:
"""Get the loader for a given file.
If there is none, create one.
"""
path = self.file_id_manager.get_path(file_id)
# Instantiate the FileLoader if it doesn't exist yet
file = self.__dict.get(file_id)
if file is None:
self.log.info("Creating FileLoader for: %s", path)
file = FileLoader(
file_id,
self.file_id_manager,
self.contents_manager,
self.log,
self.file_poll_interval,
)
self.__dict[file_id] = file
return file
async def __delitem__(self, file_id: str) -> None:
"""Delete a loader for a given file."""
await self.remove(file_id)
async def clear(self) -> None:
"""Clear all loaders."""
tasks = []
for id in list(self.__dict):
loader = self.__dict.pop(id)
tasks.append(loader.clean())
await asyncio.gather(*tasks)
async def remove(self, file_id: str) -> None:
"""Remove the loader for a given file."""
loader = self.__dict.pop(file_id)
await loader.clean() | PypiClean |
/ScopeFoundry-1.2.2.tar.gz/ScopeFoundry-1.2.2/plugin_manager/templates/_readout.py | import pyqtgraph as pg
from pyqtgraph.dockarea import DockArea
from qtpy.QtWidgets import QVBoxLayout, QHBoxLayout, QWidget
from ScopeFoundry import Measurement, h5_io
class $READOUT_CLASS_NAME(Measurement):
name = "$READOUT_NAME"
def setup(self):
S = self.settings
S.New("save_h5", bool, initial=True)
# just making a guess of data structure here to test if display works
self.data = {
"y": [2, 5, 2]
}
def setup_figure(self):
self.ui = DockArea()
widget = QWidget()
layout = QVBoxLayout(widget)
self.plot_dock = self.ui.addDock(
name=self.name, widget=widget, position='right')
        # setup measurement controls
cb_layout = QHBoxLayout()
layout.addLayout(cb_layout)
cb_layout.addWidget(self.settings.New_UI(exclude=('activation', 'run_state', 'profile', )))
cb_layout.addWidget(self.settings.activation.new_pushButton())
# setup a plot
graph_layout = pg.GraphicsLayoutWidget(border=(100, 100, 100))
layout.addWidget(graph_layout)
self.plot = graph_layout.addPlot(title=self.name)
self.plot_lines = {}
self.plot_lines["y"] = self.plot.plot(pen="g")
# setup hw controls widget
hw = self.app.hardware["$HW_NAME"]
self.ui.addDock(name="$HW_NAME",
widget=hw.new_control_widgets(),
#position='below'
)
self.update_display()
def update_display(self):
self.plot_lines["y"].setData(self.data['y'])
def run(self):
print(self.__class__, 'run method not implemented')
# hw = self.app.hardware["$HW_NAME"]
# self.data = {"y": []}
# self.data['y'] = hw.dev.read_data()
if self.settings['save_h5']:
self.save_h5_data()
def save_h5_data(self):
h5_file = h5_io.h5_base_file(app=self.app, measurement=self)
h5_meas_group = h5_io.h5_create_measurement_group(self, h5_file)
for k, v in self.data.items():
h5_meas_group[k] = v
h5_file.close() | PypiClean |
/pulumi_google_native-0.31.2a1689827148.tar.gz/pulumi_google_native-0.31.2a1689827148/pulumi_google_native/iam/v1/get_workforce_pool_provider.py |
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetWorkforcePoolProviderResult',
'AwaitableGetWorkforcePoolProviderResult',
'get_workforce_pool_provider',
'get_workforce_pool_provider_output',
]
@pulumi.output_type
class GetWorkforcePoolProviderResult:
def __init__(__self__, attribute_condition=None, attribute_mapping=None, description=None, disabled=None, display_name=None, name=None, oidc=None, saml=None, state=None):
if attribute_condition and not isinstance(attribute_condition, str):
raise TypeError("Expected argument 'attribute_condition' to be a str")
pulumi.set(__self__, "attribute_condition", attribute_condition)
if attribute_mapping and not isinstance(attribute_mapping, dict):
raise TypeError("Expected argument 'attribute_mapping' to be a dict")
pulumi.set(__self__, "attribute_mapping", attribute_mapping)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if disabled and not isinstance(disabled, bool):
raise TypeError("Expected argument 'disabled' to be a bool")
pulumi.set(__self__, "disabled", disabled)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if oidc and not isinstance(oidc, dict):
raise TypeError("Expected argument 'oidc' to be a dict")
pulumi.set(__self__, "oidc", oidc)
if saml and not isinstance(saml, dict):
raise TypeError("Expected argument 'saml' to be a dict")
pulumi.set(__self__, "saml", saml)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
@property
@pulumi.getter(name="attributeCondition")
def attribute_condition(self) -> str:
"""
A [Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. `google.profile_photo` and `google.display_name` are not supported. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credentials will be accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ``` "'admins' in google.groups" ```
"""
return pulumi.get(self, "attribute_condition")
@property
@pulumi.getter(name="attributeMapping")
def attribute_mapping(self) -> Mapping[str, str]:
"""
Maps attributes from the authentication credentials issued by an external identity provider to Google Cloud attributes, such as `subject` and `segment`. Each key must be a string specifying the Google Cloud IAM attribute to map to. The following keys are supported: * `google.subject`: The principal IAM is authenticating. You can reference this value in IAM bindings. This is also the subject that appears in Cloud Logging logs. This is a required field and the mapped subject cannot exceed 127 bytes. * `google.groups`: Groups the authenticating user belongs to. You can grant groups access to resources using an IAM `principalSet` binding; access applies to all members of the group. * `google.display_name`: The name of the authenticated user. This is an optional field and the mapped display name cannot exceed 100 bytes. If not set, `google.subject` will be displayed instead. This attribute cannot be referenced in IAM bindings. * `google.profile_photo`: The URL that specifies the authenticated user's thumbnail photo. This is an optional field. When set, the image will be visible as the user's profile picture. If not set, a generic user icon will be displayed instead. This attribute cannot be referenced in IAM bindings. You can also provide custom attributes by specifying `attribute.{custom_attribute}`, where {custom_attribute} is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. You can reference these attributes in IAM policies to define fine-grained access for a workforce pool to Google Cloud resources. For example: * `google.subject`: `principal://iam.googleapis.com/locations/global/workforcePools/{pool}/subject/{value}` * `google.groups`: `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool}/group/{value}` * `attribute.{custom_attribute}`: `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool}/attribute.{custom_attribute}/{value}` Each value must be a [Common Expression Language] (https://opensource.google/projects/cel) function that maps an identity provider credential to the normalized attribute specified by the corresponding map key. You can use the `assertion` keyword in the expression to access a JSON representation of the authentication credential issued by the provider. The maximum length of an attribute mapping expression is 2048 characters. When evaluated, the total size of all mapped attributes must not exceed 4KB. For OIDC providers, you must supply a custom mapping that includes the `google.subject` attribute. For example, the following maps the `sub` claim of the incoming credential to the `subject` attribute on a Google token: ``` {"google.subject": "assertion.sub"} ```
"""
return pulumi.get(self, "attribute_mapping")
@property
@pulumi.getter
def description(self) -> str:
"""
A user-specified description of the provider. Cannot exceed 256 characters.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def disabled(self) -> bool:
"""
Disables the workforce pool provider. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access.
"""
return pulumi.get(self, "disabled")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
A user-specified display name for the provider. Cannot exceed 32 characters.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name of the provider. Format: `locations/{location}/workforcePools/{workforce_pool_id}/providers/{provider_id}`
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def oidc(self) -> 'outputs.GoogleIamAdminV1WorkforcePoolProviderOidcResponse':
"""
An OpenId Connect 1.0 identity provider configuration.
"""
return pulumi.get(self, "oidc")
@property
@pulumi.getter
def saml(self) -> 'outputs.GoogleIamAdminV1WorkforcePoolProviderSamlResponse':
"""
A SAML identity provider configuration.
"""
return pulumi.get(self, "saml")
@property
@pulumi.getter
def state(self) -> str:
"""
The state of the provider.
"""
return pulumi.get(self, "state")
class AwaitableGetWorkforcePoolProviderResult(GetWorkforcePoolProviderResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWorkforcePoolProviderResult(
attribute_condition=self.attribute_condition,
attribute_mapping=self.attribute_mapping,
description=self.description,
disabled=self.disabled,
display_name=self.display_name,
name=self.name,
oidc=self.oidc,
saml=self.saml,
state=self.state)
def get_workforce_pool_provider(location: Optional[str] = None,
provider_id: Optional[str] = None,
workforce_pool_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkforcePoolProviderResult:
"""
Gets an individual WorkforcePoolProvider.
"""
__args__ = dict()
__args__['location'] = location
__args__['providerId'] = provider_id
__args__['workforcePoolId'] = workforce_pool_id
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('google-native:iam/v1:getWorkforcePoolProvider', __args__, opts=opts, typ=GetWorkforcePoolProviderResult).value
return AwaitableGetWorkforcePoolProviderResult(
attribute_condition=pulumi.get(__ret__, 'attribute_condition'),
attribute_mapping=pulumi.get(__ret__, 'attribute_mapping'),
description=pulumi.get(__ret__, 'description'),
disabled=pulumi.get(__ret__, 'disabled'),
display_name=pulumi.get(__ret__, 'display_name'),
name=pulumi.get(__ret__, 'name'),
oidc=pulumi.get(__ret__, 'oidc'),
saml=pulumi.get(__ret__, 'saml'),
state=pulumi.get(__ret__, 'state'))
@_utilities.lift_output_func(get_workforce_pool_provider)
def get_workforce_pool_provider_output(location: Optional[pulumi.Input[str]] = None,
provider_id: Optional[pulumi.Input[str]] = None,
workforce_pool_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkforcePoolProviderResult]:
"""
Gets an individual WorkforcePoolProvider.
"""
... | PypiClean |
/smart_home_tng-2023.1.3.tar.gz/smart_home_tng-2023.1.3/smart_home_tng/components/media_source/upload_media_view.py | import logging
import pathlib
import shutil
import typing
import voluptuous as vol
from aiohttp import web, web_request
from ... import core
from .const import Const
from .local_source import LocalSource
from .media_source_item import MediaSourceItem
if not typing.TYPE_CHECKING:
class MediaSourceComponent:
...
if typing.TYPE_CHECKING:
from .media_source_component import MediaSourceComponent
_MAX_UPLOAD_SIZE: typing.Final = 1024 * 1024 * 10
_LOGGER: typing.Final = logging.getLogger(__name__)
_SCHEMA: typing.Final = vol.Schema(
{
"media_content_id": str,
"file": web_request.FileField,
}
)
# pylint: disable=unused-variable
class UploadMediaView(core.SmartHomeControllerView):
"""View to upload images."""
def __init__(self, owner: MediaSourceComponent, source: LocalSource) -> None:
"""Initialize the media view."""
url = "/api/media_source/local_source/upload"
name = "api:media_source:local_source:upload"
super().__init__(url, name)
self._owner = owner
self._source = source
async def post(self, request: web.Request) -> web.Response:
"""Handle upload."""
if not request[core.Const.KEY_SHC_USER].is_admin:
raise core.Unauthorized()
# Increase max payload
request._client_max_size = _MAX_UPLOAD_SIZE # pylint: disable=protected-access
try:
data = _SCHEMA(dict(await request.post()))
except vol.Invalid as err:
_LOGGER.error(f"Received invalid upload data: {err}")
raise web.HTTPBadRequest() from err
try:
item = MediaSourceItem.from_uri(self._owner, data["media_content_id"], None)
except ValueError as err:
_LOGGER.error(f"Received invalid upload data: {err}")
raise web.HTTPBadRequest() from err
try:
source_dir_id, location = self._source.async_parse_identifier(item)
except core.Unresolvable as err:
_LOGGER.error("Invalid local source ID")
raise web.HTTPBadRequest() from err
uploaded_file: web_request.FileField = data["file"]
if not uploaded_file.content_type.startswith(Const.VALID_CONTENT_TYPES):
_LOGGER.error("Content type not allowed")
raise vol.Invalid("Only images and video are allowed")
try:
core.helpers.raise_if_invalid_filename(uploaded_file.filename)
except ValueError as err:
_LOGGER.error("Invalid filename")
raise web.HTTPBadRequest() from err
try:
await self._owner.controller.async_add_executor_job(
self._move_file,
self._source.async_full_path(source_dir_id, location),
uploaded_file,
)
except ValueError as err:
_LOGGER.error(f"Moving upload failed: {err}")
raise web.HTTPBadRequest() from err
return self.json(
{"media_content_id": f"{data['media_content_id']}/{uploaded_file.filename}"}
)
def _move_file(
self, target_dir: pathlib.Path, uploaded_file: web_request.FileField
) -> None:
"""Move file to target."""
if not target_dir.is_dir():
raise ValueError("Target is not an existing directory")
target_path = target_dir / uploaded_file.filename
target_path.relative_to(target_dir)
core.helpers.raise_if_invalid_path(str(target_path))
with target_path.open("wb") as target_fp:
shutil.copyfileobj(uploaded_file.file, target_fp) | PypiClean |
/tw2.ace-0.2.1.tar.gz/tw2.ace-0.2.1/tw2/ace/static/ace/theme-mono_industrial.js | ace.define('ace/theme/mono_industrial', ['require', 'exports', 'module' , 'ace/lib/dom'], function(require, exports, module) {
exports.isDark = true;
exports.cssClass = "ace-mono-industrial";
exports.cssText = ".ace-mono-industrial .ace_gutter {\
background: #1d2521;\
color: #C5C9C9\
}\
.ace-mono-industrial .ace_print-margin {\
width: 1px;\
background: #555651\
}\
.ace-mono-industrial .ace_scroller {\
background-color: #222C28\
}\
.ace-mono-industrial .ace_text-layer {\
color: #FFFFFF\
}\
.ace-mono-industrial .ace_cursor {\
border-left: 2px solid #FFFFFF\
}\
.ace-mono-industrial .ace_overwrite-cursors .ace_cursor {\
border-left: 0px;\
border-bottom: 1px solid #FFFFFF\
}\
.ace-mono-industrial .ace_marker-layer .ace_selection {\
background: rgba(145, 153, 148, 0.40)\
}\
.ace-mono-industrial.ace_multiselect .ace_selection.ace_start {\
box-shadow: 0 0 3px 0px #222C28;\
border-radius: 2px\
}\
.ace-mono-industrial .ace_marker-layer .ace_step {\
background: rgb(102, 82, 0)\
}\
.ace-mono-industrial .ace_marker-layer .ace_bracket {\
margin: -1px 0 0 -1px;\
border: 1px solid rgba(102, 108, 104, 0.50)\
}\
.ace-mono-industrial .ace_marker-layer .ace_active-line {\
background: rgba(12, 13, 12, 0.25)\
}\
.ace-mono-industrial .ace_gutter-active-line {\
background-color: rgba(12, 13, 12, 0.25)\
}\
.ace-mono-industrial .ace_marker-layer .ace_selected-word {\
border: 1px solid rgba(145, 153, 148, 0.40)\
}\
.ace-mono-industrial .ace_invisible {\
color: rgba(102, 108, 104, 0.50)\
}\
.ace-mono-industrial .ace_string {\
background-color: #151C19;\
color: #FFFFFF\
}\
.ace-mono-industrial .ace_keyword,\
.ace-mono-industrial .ace_meta {\
color: #A39E64\
}\
.ace-mono-industrial .ace_constant,\
.ace-mono-industrial .ace_constant.ace_character,\
.ace-mono-industrial .ace_constant.ace_character.ace_escape,\
.ace-mono-industrial .ace_constant.ace_numeric,\
.ace-mono-industrial .ace_constant.ace_other {\
color: #E98800\
}\
.ace-mono-industrial .ace_entity.ace_name.ace_function,\
.ace-mono-industrial .ace_keyword.ace_operator,\
.ace-mono-industrial .ace_variable {\
color: #A8B3AB\
}\
.ace-mono-industrial .ace_invalid {\
color: #FFFFFF;\
background-color: rgba(153, 0, 0, 0.68)\
}\
.ace-mono-industrial .ace_support.ace_constant {\
color: #C87500\
}\
.ace-mono-industrial .ace_fold {\
background-color: #A8B3AB;\
border-color: #FFFFFF\
}\
.ace-mono-industrial .ace_support.ace_function {\
color: #588E60\
}\
.ace-mono-industrial .ace_entity.ace_name,\
.ace-mono-industrial .ace_support.ace_class,\
.ace-mono-industrial .ace_support.ace_type {\
color: #5778B6\
}\
.ace-mono-industrial .ace_storage {\
color: #C23B00\
}\
.ace-mono-industrial .ace_variable.ace_language,\
.ace-mono-industrial .ace_variable.ace_parameter {\
color: #648BD2\
}\
.ace-mono-industrial .ace_comment {\
color: #666C68;\
background-color: #151C19\
}\
.ace-mono-industrial .ace_entity.ace_other.ace_attribute-name {\
color: #909993\
}\
.ace-mono-industrial .ace_markup.ace_underline {\
text-decoration: underline\
}\
.ace-mono-industrial .ace_entity.ace_name.ace_tag {\
color: #A65EFF\
}\
.ace-mono-industrial .ace_indent-guide {\
background: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAACCAYAAACZgbYnAAAAEklEQVQImWNQ0tH4zzBz5sz/ABAOBECKH+evAAAAAElFTkSuQmCC) right repeat-y\
}\
";
var dom = require("../lib/dom");
dom.importCssString(exports.cssText, exports.cssClass);
}); | PypiClean |
/model-porter-4.1.1.tar.gz/model-porter-4.1.1/src/model_porter/model_porter.py | import importlib
import sys
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from .repository import assign_key_value_to_instance
from .support_mixin import ModelPorterSupportMixin
from .config import ModelPorterConfig
mod = sys.modules[__name__]
class WagtailFunctions:
@staticmethod
def lookup_wagtail_locale(*, specifier, context):
# from wagtail.models.i18n import Locale
# from wagtail.coreutils import get_supported_content_language_variant
lang = get_supported_content_language_variant(specifier) # noqa
result = Locale.objects.get(language_code=lang) # noqa
return result
@staticmethod
def locate_page_for_path(path, site=None):
if site is None:
site = Site.objects.get(is_default_site=True) # noqa
if not path:
return site.root_page
page = site.root_page
path_components = path.split("/")
while path_components:
child_slug = path_components[0]
path_components = path_components[1:]
if not child_slug:
continue
# find a matching child or 404
try:
page = page.get_children().get(slug=child_slug)
except Page.DoesNotExist: # noqa
return None
return page
@staticmethod
def create_page(*, model, context, parent):
content_type = ContentType.objects.get_for_model(model)
# depth = parent.depth + 1
# index = parent.numchild + 1
# path = parent.path + "{:04d}".format(index)
page = model(
content_type=content_type,
locale=parent.locale
)
return page
@staticmethod
def finalise_page(*, instance, context, parent, extra_content=None):
if extra_content:
block_streams = extra_content.get("@unpack_block_streams", [])
# for identifier in block_streams:
# stream_block_def = getattr(instance.__class__, identifier).field.stream_block
# raw_stream = extra_content[identifier]
# extra_content[identifier] = unpack_block_stream(context=context, stream_block_def=stream_block_def, raw_stream=raw_stream)
if isinstance(instance, ModelPorterSupportMixin):
extra_content = instance.from_repository(extra_content, context)
for key, value in extra_content.items():
if key.startswith("@"):
continue
assign_key_value_to_instance(instance, key, value, context, True)
parent.add_child(instance=instance)
return instance
def object_from_identifier(*, django_label, identifier, identifier_field="identifier"):
    """Return the `django_label` model instance whose `identifier_field` matches `identifier`."""
Model = apps.get_model(django_label)
kwargs = {
identifier_field: identifier
}
result = Model.objects.get(**kwargs)
return result
def unpack_block_stream(*, context, stream_block_def, raw_stream):
    """Unpack a raw block stream, letting blocks that implement
    ModelPorterSupportMixin rewrite their values from the repository format."""
result = []
for stream_item in raw_stream:
block_type = stream_item["type"]
value = stream_item["value"]
item_block_def = stream_block_def.child_blocks.get(block_type, None)
if item_block_def is None:
continue
if isinstance(item_block_def, ModelPorterSupportMixin):
value = item_block_def.from_repository(value, context)
result.append({"type": block_type, "value": value})
return result
try:
wagtail = importlib.import_module("wagtail")
from wagtail.models.i18n import Locale
from wagtail.coreutils import get_supported_content_language_variant
from wagtail.models import Site, Page
setattr(mod, 'Locale', Locale)
setattr(mod, 'get_supported_content_language_variant', get_supported_content_language_variant)
setattr(mod, 'Site', Site)
setattr(mod, 'Page', Page)
except ModuleNotFoundError:
wagtail = None
class BuiltinModelPorterConfig(ModelPorterConfig):
def __init__(self, app_label, module):
super(BuiltinModelPorterConfig, self).__init__(app_label, module)
if wagtail:
takes_no_context = ['locate_page_for_path']
for name, fn in WagtailFunctions.__dict__.items():
if name.startswith('__'):
continue
takes_context = name not in takes_no_context
self.register_function_action(fn.__func__, context_argument='context' if takes_context else None)
self.register_function_action(object_from_identifier)
self.register_function_action(unpack_block_stream, context_argument='context') | PypiClean |
/xotl.ql-0.7.0.tar.gz/xotl.ql-0.7.0/docs/source/api/core.rst | .. _query-lang:
=====================================
`xotl.ql.core`:mod: -- The core API
=====================================
.. module:: xotl.ql.core
The module `xotl.ql.core`:mod: provide the high level API for obtaining a
`query object`:term: from a `query expression`:term:.
.. data:: this
This is an object whose meaning is the *entire universe of objects* when
used as a generator inside a query expression. Its precise semantics
varies with the `object model`:term:. The documentation of `query
translators <query translator>`:term: must give the precise meaning of this
object.
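   For example, the simplest query expression just ranges over this object (a
   minimal sketch; which objects are actually produced is defined by the
   translator)::
   >>> from xotl.ql.core import this, get_query_object
   >>> query = get_query_object(who for who in this)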
.. autofunction:: get_query_object
This function expects a `query expression`:term: in the form of a generator
object and returns an object that complies with the interface
`xotl.ql.interfaces.QueryObject`:class:.
:param query: The query expression.
:param query_type: An object which complies with the interface
`xotl.ql.interfaces.QueryObjectType`:class: or the fully
qualified name of such an object.
   :param frame_type: An object which complies with the interface
`xotl.ql.interfaces.FrameType`:class: or the fully
qualified name of such an object.
This function works by inspecting the byte-code of the generator object to
obtain the `Query Syntax Tree`:term:. This function uses the attribute
`gi_frame` of the generator to build the frame object needed by query
objects.
Nested sub-queries are not expanded automatically::
>>> from xotl.ql.core import this, get_query_object
>>> query = get_query_object(y for y in (x for x in this))
>>> print(query.qst)
<ast: Expression>
body: <ast: GeneratorExp>
elt: <ast: Name>
id: 'y'
ctx: <ast: Load>
generators[0]: <ast: comprehension>
target: <ast: Name>
id: 'y'
ctx: <ast: Store>
iter: <ast: Name>
id: '.0'
ctx: <ast: Load>
The sub-query ``(x for x in this)`` is simply encoded as a variable '.0'.
   If no `frame_type` is provided, the
   `~xotl.ql.interfaces.QueryObjectType.frame_type`:attr: attribute of the
   query object type is used.
Additional keyword arguments are passed unchanged when instantiating the
query object.
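   For instance, the frame type may be passed by its fully qualified name (a
   sketch; `xotl.ql.core.Frame` is the default frame type documented below)::
   >>> from xotl.ql.core import this, get_query_object
   >>> query = get_query_object(
   ...     (x for x in this),
   ...     frame_type='xotl.ql.core.Frame',
   ... )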
.. autofunction:: normalize_query
.. class:: QueryObject(qst, frame, **kwargs)
A query object implementation.
Instances of this class implement the interface
`xotl.ql.interfaces.QueryObject`:class: and this class itself complies with
`xotl.ql.interfaces.QueryObjectType`:class:.
.. class:: Frame(locals, globals)
Instances of this class implement the interface
`xotl.ql.interfaces.Frame`:class: and the class itself complies with
   `xotl.ql.interfaces.FrameType`:class:.
   The `f_locals` and `f_globals` attributes are immutable mapping views that
   support the full `collections.Mapping` interface.
   To make the view concept work, we keep references to the original `locals`
   and `globals`.
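   For example (a sketch that uses plain dictionaries as the mappings)::
   >>> from xotl.ql.core import Frame
   >>> frame = Frame({'x': 1}, {})
   >>> frame.f_locals['x']
   1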
.. rubric:: Additional attributes and methods:
.. attribute:: auto_expand_subqueries
When trying to get the name '.0' from either view, if the current value
is a generator object obtained via a generator expression, we actually
return the result of calling `get_query_object`:func: on the current
value.
You may suppress this behavior by setting this attribute to False. The
default is True.
.. warning:: Notice this will use the default query object type and
frame type.
Example::
>>> from xotl.ql.core import this, get_query_object
>>> query = get_query_object(y for y in (x for x in this))
>>> query.locals['.0'] # doctest: +ELLIPSIS
<xotl.ql.core.QueryObject...>
.. autofunction:: thesefy(target, make_subquery=True)
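   A minimal usage sketch (`Entity` is a hypothetical class; what iterating it
   means is ultimately defined by the query translator)::
   >>> from xotl.ql.core import thesefy, get_query_object
   >>> @thesefy
   ... class Entity(object):
   ...     pass
   >>> query = get_query_object(which for which in Entity)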
| PypiClean |
/insights-engine-1.0.14.tar.gz/insights-engine-1.0.14/insights_engine/api/groups_api.py | import re # noqa: F401
import sys # noqa: F401
from insights_engine.api_client import ApiClient, Endpoint as _Endpoint
from insights_engine.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from insights_engine.model.add_members_input import AddMembersInput
from insights_engine.model.add_members_output import AddMembersOutput
from insights_engine.model.create_group_input import CreateGroupInput
from insights_engine.model.create_group_output import CreateGroupOutput
from insights_engine.model.delete_group_members_input import DeleteGroupMembersInput
from insights_engine.model.get_all_admin_groups_output import GetAllAdminGroupsOutput
from insights_engine.model.get_group_admins_output import GetGroupAdminsOutput
from insights_engine.model.get_group_members_ouput import GetGroupMembersOuput
class GroupsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.add_members_endpoint = _Endpoint(
settings={
'response_type': (AddMembersOutput,),
'auth': [
'Authorization'
],
'endpoint_path': '/group/{group_name}/member',
'operation_id': 'add_members',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'admin_user_name',
'app_name',
'group_name',
'add_members_input',
],
'required': [
'admin_user_name',
'app_name',
'group_name',
'add_members_input',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'admin_user_name':
(str,),
'app_name':
(str,),
'group_name':
(str,),
'add_members_input':
(AddMembersInput,),
},
'attribute_map': {
'admin_user_name': 'admin_user_name',
'app_name': 'app_name',
'group_name': 'group_name',
},
'location_map': {
'admin_user_name': 'query',
'app_name': 'query',
'group_name': 'path',
'add_members_input': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/plain'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.create_group_endpoint = _Endpoint(
settings={
'response_type': (CreateGroupOutput,),
'auth': [
'Authorization'
],
'endpoint_path': '/group',
'operation_id': 'create_group',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'admin_user_name',
'app_name',
'create_group_input',
],
'required': [
'admin_user_name',
'app_name',
'create_group_input',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'admin_user_name':
(str,),
'app_name':
(str,),
'create_group_input':
(CreateGroupInput,),
},
'attribute_map': {
'admin_user_name': 'admin_user_name',
'app_name': 'app_name',
},
'location_map': {
'admin_user_name': 'query',
'app_name': 'query',
'create_group_input': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/plain'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.delete_all_admin_groups_endpoint = _Endpoint(
settings={
'response_type': (str,),
'auth': [
'Authorization'
],
'endpoint_path': '/group',
'operation_id': 'delete_all_admin_groups',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'admin_user_name',
'app_name',
],
'required': [
'admin_user_name',
'app_name',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'admin_user_name':
(str,),
'app_name':
(str,),
},
'attribute_map': {
'admin_user_name': 'admin_user_name',
'app_name': 'app_name',
},
'location_map': {
'admin_user_name': 'query',
'app_name': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'text/plain'
],
'content_type': [],
},
api_client=api_client
)
self.delete_group_endpoint = _Endpoint(
settings={
'response_type': (str,),
'auth': [
'Authorization'
],
'endpoint_path': '/group/{group_name}',
'operation_id': 'delete_group',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'admin_user_name',
'app_name',
'group_name',
],
'required': [
'admin_user_name',
'app_name',
'group_name',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'admin_user_name':
(str,),
'app_name':
(str,),
'group_name':
(str,),
},
'attribute_map': {
'admin_user_name': 'admin_user_name',
'app_name': 'app_name',
'group_name': 'group_name',
},
'location_map': {
'admin_user_name': 'query',
'app_name': 'query',
'group_name': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'text/plain'
],
'content_type': [],
},
api_client=api_client
)
self.delete_group_members_endpoint = _Endpoint(
settings={
'response_type': (str,),
'auth': [
'Authorization'
],
'endpoint_path': '/group/{group_name}/member',
'operation_id': 'delete_group_members',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'admin_user_name',
'app_name',
'group_name',
'delete_group_members_input',
],
'required': [
'admin_user_name',
'app_name',
'group_name',
'delete_group_members_input',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'admin_user_name':
(str,),
'app_name':
(str,),
'group_name':
(str,),
'delete_group_members_input':
(DeleteGroupMembersInput,),
},
'attribute_map': {
'admin_user_name': 'admin_user_name',
'app_name': 'app_name',
'group_name': 'group_name',
},
'location_map': {
'admin_user_name': 'query',
'app_name': 'query',
'group_name': 'path',
'delete_group_members_input': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'text/plain'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.get_all_admin_groups_endpoint = _Endpoint(
settings={
'response_type': (GetAllAdminGroupsOutput,),
'auth': [
'Authorization'
],
'endpoint_path': '/group',
'operation_id': 'get_all_admin_groups',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'admin_user_name',
'app_name',
],
'required': [
'admin_user_name',
'app_name',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'admin_user_name':
(str,),
'app_name':
(str,),
},
'attribute_map': {
'admin_user_name': 'admin_user_name',
'app_name': 'app_name',
},
'location_map': {
'admin_user_name': 'query',
'app_name': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/plain'
],
'content_type': [],
},
api_client=api_client
)
self.get_group_admins_endpoint = _Endpoint(
settings={
'response_type': (GetGroupAdminsOutput,),
'auth': [
'Authorization'
],
'endpoint_path': '/group/{group_name}',
'operation_id': 'get_group_admins',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'admin_user_name',
'app_name',
'group_name',
],
'required': [
'admin_user_name',
'app_name',
'group_name',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'admin_user_name':
(str,),
'app_name':
(str,),
'group_name':
(str,),
},
'attribute_map': {
'admin_user_name': 'admin_user_name',
'app_name': 'app_name',
'group_name': 'group_name',
},
'location_map': {
'admin_user_name': 'query',
'app_name': 'query',
'group_name': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/plain'
],
'content_type': [],
},
api_client=api_client
)
self.get_group_members_endpoint = _Endpoint(
settings={
'response_type': (GetGroupMembersOuput,),
'auth': [
'Authorization'
],
'endpoint_path': '/group/{group_name}/member',
'operation_id': 'get_group_members',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'admin_user_name',
'app_name',
'group_name',
],
'required': [
'admin_user_name',
'app_name',
'group_name',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'admin_user_name':
(str,),
'app_name':
(str,),
'group_name':
(str,),
},
'attribute_map': {
'admin_user_name': 'admin_user_name',
'app_name': 'app_name',
'group_name': 'group_name',
},
'location_map': {
'admin_user_name': 'query',
'app_name': 'query',
'group_name': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/plain'
],
'content_type': [],
},
api_client=api_client
)
def add_members(
self,
admin_user_name,
app_name,
group_name,
add_members_input,
**kwargs
):
"""Add members to group # noqa: E501
Adds a set of registered users/members to a group that already exists # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_members(admin_user_name, app_name, group_name, add_members_input, async_req=True)
>>> result = thread.get()
Args:
admin_user_name (str): This user can create groups and can view group insights
app_name (str): This is the name of a downstream application. Each unique value of this parameter signifies a partition of the Insight Engine, including the utilized Generators and Users.
group_name (str): This is the name of the group that contains a collection of users. It is used for getting insights for all the users in the group.
add_members_input (AddMembersInput):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
AddMembersOutput
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['admin_user_name'] = \
admin_user_name
kwargs['app_name'] = \
app_name
kwargs['group_name'] = \
group_name
kwargs['add_members_input'] = \
add_members_input
return self.add_members_endpoint.call_with_http_info(**kwargs)
def create_group(
self,
admin_user_name,
app_name,
create_group_input,
**kwargs
):
"""Create Admin Group # noqa: E501
Create a new group for this admin only. \\\"members\\\" can be optionally supplied in the body to create an empty or filled group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_group(admin_user_name, app_name, create_group_input, async_req=True)
>>> result = thread.get()
Args:
admin_user_name (str): This user can create groups and can view group insights
app_name (str): This is the name of a downstream application. Each unique value of this parameter signifies a partition of the Insight Engine, including the utilized Generators and Users.
create_group_input (CreateGroupInput):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
CreateGroupOutput
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['admin_user_name'] = \
admin_user_name
kwargs['app_name'] = \
app_name
kwargs['create_group_input'] = \
create_group_input
return self.create_group_endpoint.call_with_http_info(**kwargs)
def delete_all_admin_groups(
self,
admin_user_name,
app_name,
**kwargs
):
"""Delete Admin's Groups # noqa: E501
Delete all the groups for this admin user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_all_admin_groups(admin_user_name, app_name, async_req=True)
>>> result = thread.get()
Args:
admin_user_name (str): This user can create groups and can view group insights
app_name (str): This is the name of a downstream application. Each unique value of this parameter signifies a partition of the Insight Engine, including the utilized Generators and Users.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
str
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['admin_user_name'] = \
admin_user_name
kwargs['app_name'] = \
app_name
return self.delete_all_admin_groups_endpoint.call_with_http_info(**kwargs)
def delete_group(
self,
admin_user_name,
app_name,
group_name,
**kwargs
):
"""Delete a group # noqa: E501
Completely delete the group, group_name, belonging to this admin_user_name # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_group(admin_user_name, app_name, group_name, async_req=True)
>>> result = thread.get()
Args:
admin_user_name (str): This user can create groups and can view group insights
app_name (str): This is the name of a downstream application. Each unique value of this parameter signifies a partition of the Insight Engine, including the utilized Generators and Users.
group_name (str): This is the name of the group that contains a collection of users. It is used for getting insights for all the users in the group.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
str
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['admin_user_name'] = \
admin_user_name
kwargs['app_name'] = \
app_name
kwargs['group_name'] = \
group_name
return self.delete_group_endpoint.call_with_http_info(**kwargs)
def delete_group_members(
self,
admin_user_name,
app_name,
group_name,
delete_group_members_input,
**kwargs
):
"""Delete Group members # noqa: E501
Removes the provided set of members from the group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_group_members(admin_user_name, app_name, group_name, delete_group_members_input, async_req=True)
>>> result = thread.get()
Args:
admin_user_name (str): This user can create groups and can view group insights
app_name (str): This is the name of a downstream application. Each unique value of this parameter signifies a partition of the Insight Engine, including the utilized Generators and Users.
group_name (str): This is the name of the group that contains a collection of users. It is used for getting insights for all the users in the group.
delete_group_members_input (DeleteGroupMembersInput):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
str
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['admin_user_name'] = \
admin_user_name
kwargs['app_name'] = \
app_name
kwargs['group_name'] = \
group_name
kwargs['delete_group_members_input'] = \
delete_group_members_input
return self.delete_group_members_endpoint.call_with_http_info(**kwargs)
def get_all_admin_groups(
self,
admin_user_name,
app_name,
**kwargs
):
"""Fetch Admins Groups # noqa: E501
Gets all the groups belonging to the specified admin user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_admin_groups(admin_user_name, app_name, async_req=True)
>>> result = thread.get()
Args:
admin_user_name (str): This user can create groups and can view group insights
app_name (str): This is the name of a downstream application. Each unique value of this parameter signifies a partition of the Insight Engine, including the utilized Generators and Users.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GetAllAdminGroupsOutput
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['admin_user_name'] = \
admin_user_name
kwargs['app_name'] = \
app_name
return self.get_all_admin_groups_endpoint.call_with_http_info(**kwargs)
def get_group_admins(
self,
admin_user_name,
app_name,
group_name,
**kwargs
):
"""Fetch admins for group name # noqa: E501
Gets all the admins that have a group with this group name # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_group_admins(admin_user_name, app_name, group_name, async_req=True)
>>> result = thread.get()
Args:
admin_user_name (str): This user can create groups and can view group insights
app_name (str): This is the name of a downstream application. Each unique value of this parameter signifies a partition of the Insight Engine, including the utilized Generators and Users.
group_name (str): This is the name of the group that contains a collection of users. It is used for getting insights for all the users in the group.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GetGroupAdminsOutput
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['admin_user_name'] = \
admin_user_name
kwargs['app_name'] = \
app_name
kwargs['group_name'] = \
group_name
return self.get_group_admins_endpoint.call_with_http_info(**kwargs)
def get_group_members(
self,
admin_user_name,
app_name,
group_name,
**kwargs
):
"""Fetch group members # noqa: E501
Fetch all the members of that group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_group_members(admin_user_name, app_name, group_name, async_req=True)
>>> result = thread.get()
Args:
admin_user_name (str): This user can create groups and can view group insights
app_name (str): This is the name of a downstream application. Each unique value of this parameter signifies a partition of the Insight Engine, including the utilized Generators and Users.
group_name (str): This is the name of the group that contains a collection of users. It is used for getting insights for all the users in the group.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GetGroupMembersOuput
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['admin_user_name'] = \
admin_user_name
kwargs['app_name'] = \
app_name
kwargs['group_name'] = \
group_name
return self.get_group_members_endpoint.call_with_http_info(**kwargs) | PypiClean |
/teenstudy-0.2.1-py3-none-any.whl/TeenStudy/web/pages/addArea.py | from amis import InputText, Form, DisplayModeEnum, Alert, LevelEnum, Select, PageSchema
from amis import Page, ActionType, Dialog, Card, Tpl, Switch, CardsCRUD
detail_form = Form(
title='',
api='put:/TeenStudy/api/change?user_id=${user_id}',
submitText='保存修改',
mode=DisplayModeEnum.horizontal,
labelAlign='right',
body=[
Select(
label="通知群聊",
name="group_id",
description="大学习提醒群号",
checkAll=False,
source="get:/TeenStudy/api/get_group_list",
value='${group_id}',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
),
InputText(label='性别', name='gender', value='${gender}', required=True,
trimContents=True, clearable=True,
showCounter=True, maxLength=3, visibleOn="${gender==null?false:true}",
description='性别'),
InputText(label='uid|nid', name='dxx_id', value='${dxx_id}', required=True,
trimContents=True, clearable=True,
showCounter=True, maxLength=24, visibleOn="${dxx_id==null?false:true}",
description='大学习认证ID,不清楚请勿改动'),
InputText(label='手机号|学号', name='mobile', value='${mobile}', required=False,
trimContents=True, clearable=True,
showCounter=True, maxLength=24, visibleOn="${mobile==null?false:true}",
description='手机号'),
InputText(label='团支书ID', name='leader', value='${leader}',
showCounter=True, maxLength=10, hiddenOn=True, trimContents=True,
clearable=True,
description='团支书ID,填写后团支书可操作提交功能,不清楚请勿改动'),
InputText(label='openid', name='openid', value='${openid}', required=True,
trimContents=True, clearable=True,
showCounter=True, maxLength=64, visibleOn="${openid==null?false:true}",
description='微信认证ID,不清楚请勿改动'),
InputText(label='登录密码', name='Password', value='', type="input-password",
trimContents=True, clearable=True,
showCounter=True, maxLength=16, visibleOn="${password==null?false:true}",
description='登录Web UI的密码'),
InputText(label='学校类型', name='university_type', value='${university_type}',
showCounter=True, maxLength=16, required=True, trimContents=True,
clearable=True,
visibleOn="${university_type==null?false:true}",
description='学校类型,不清楚请勿改动'),
InputText(label='学校ID', name='university_id', value='${university_id}',
required=True, trimContents=True, clearable=True,
showCounter=True, maxLength=24,
visibleOn="${university_id==null?false:true}",
description='学校ID,不清楚请勿改动'),
InputText(label='学校名称', name='university', value='${university}', required=True,
trimContents=True, clearable=True,
showCounter=True, maxLength=20, visibleOn="${university==null?false:true}",
description='学校名称'),
InputText(label='学院ID', name='college_id', value='${college_id}', required=False,
trimContents=True, clearable=True,
showCounter=True, maxLength=24, visibleOn="${college_id==null?false:true}",
description='学院ID'),
InputText(label='学院名称', name='college', value='${college}',
trimContents=True, clearable=True,
showCounter=True, maxLength=24, visibleOn="${college==null?false:true}",
description='学院名称'),
InputText(label='团支部ID', name='organization_id', value='${organization_id}',
required=True, trimContents=True, clearable=True,
showCounter=True, maxLength=24,
visibleOn="${organization_id==null?false:true}",
description='团支部ID'),
InputText(label='团支部名称', name='organization', value='${organization}',
required=False, trimContents=True, clearable=True,
showCounter=True, maxLength=36,
visibleOn="${organization==null?false:true}",
description='团支部名称'),
InputText(label='token', name='token', value='${token}', required=True,
trimContents=True, clearable=True,
showCounter=True, visibleOn="${token==null?false:true}",
description='提交大学习需要的token'),
InputText(label='cookie', name='cookie', value='${cookie}', required=True,
trimContents=True, clearable=True,
showCounter=True, visibleOn="${cookie==null?false:true}",
description='提交大学习需要的cookie')
])
detail_button = ActionType.Dialog(label='信息',
tooltip='查看|修改信息',
size='lg',
icon='fa fa-user-tag text-primary',
dialog=Dialog(title='${name}的详细信息', size='lg', body=[detail_form]))
card = Card(
header=Card.Header(title='$name',
subTitle='$user_id',
description='$catalogue',
avatarText='${area}',
avatarTextClassName='overflow-hidden'),
actions=[detail_button, ActionType.Ajax(
label="提交",
tooltip='提交大学习',
icon='fa fa-check text-success',
confirmText='是否提交最新一期青年大学习?',
api='get:/TeenStudy/api/commit?user_id=${user_id}&area=${area}'
), ActionType.Ajax(
tooltip='删除',
label="删除",
icon='fa fa-trash-can text-danger',
confirmText='删除该用户',
api='delete:/TeenStudy/api/delete_member?user_id=${user_id}'
), ],
toolbar=[
Tpl(tpl='$area', className='label label-warning', hiddenOn=True),
Switch(name='auto_submit',
value='${auto_submit}',
tooltip='自动提交大学习开关',
onText='开启',
offText='关闭',
onEvent={
'change': {
'actions': {
'actionType': 'ajax',
'args': {
'api': {
'url': '/TeenStudy/api/set_auto_submit',
'method': 'put'
},
'messages': {
'success': '自动提交已设置为${event.data.value==true?"开启":"关闭"}',
'failed': '修改失败!'
},
'status': '${event.data.value}',
'id': '${id}'
}
}
}
})
])
"""成员卡片面板"""
cards_curd = CardsCRUD(mode='cards',
title='',
syncLocation=False,
name="member",
fetchFailed="数据初始化!",
api='get:/TeenStudy/api/get_members',
loadDataOnce=True,
interval=180000,
source='${rows | filter:user_id:keywords_user_id | filter:name:keywords_name|filter:area:keywords_area|filter:university:keywords_university|filter:college:keywords_college|filter:organization:keywords_organization}',
filter={
'body': [
InputText(name='keywords_user_id', label='用户ID',
trimContents=True, clearable=True,
submitOnChange=True),
InputText(name='keywords_name', label='用户姓名',
trimContents=True, clearable=True,
submitOnChange=True),
InputText(name='keywords_area', label='地区',
trimContents=True, clearable=True,
submitOnChange=True),
InputText(name='keywords_university', label='学校',
trimContents=True, clearable=True,
submitOnChange=True),
InputText(name='keywords_college', label='学院',
trimContents=True, clearable=True,
submitOnChange=True),
InputText(name='keywords_organization', label='团支部',
trimContents=True, clearable=True,
submitOnChange=True),
]
},
perPage=16,
autoJumpToTopOnPagerChange=True,
placeholder='暂无大学习用户',
footerToolbar=['switch-per-page', 'pagination'],
columnsCount=4,
card=card)
list_page = PageSchema(url='/TeenStudy/list', icon='fa fa-list-ul', label='成员列表',
schema=Page(title='成员列表', body=[cards_curd]))
"""湖北添加成员面板"""
hubei_table = Form(
title="添加青春湖北用户",
submitText="添加",
mode=DisplayModeEnum.horizontal,
api="post:/TeenStudy/api/add",
resetAfterSubmit=True,
body=[
Select(
label="群聊",
name="group_id",
description="需要添加的群组",
checkAll=False,
source="get:/TeenStudy/api/get_group_list",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
),
Select(
label="用户ID",
name="user_id",
description="需要添加的用户ID",
checkAll=False,
source="get:/TeenStudy/api/get_member_list?group_id=${group_id}",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
hiddenOn="${group_id==''?true:false}"
),
InputText(
label="地区",
description="所处省份",
name="area",
value="湖北",
disabled=True
),
InputText(
label="登录密码",
type='input-password',
description="可不填,默认为用户ID",
name="password",
inline=False,
required=False,
value="",
clearable=True,
maxLength=16
),
InputText(
label="姓名",
description="对应青春湖北个人信息页 您的姓名",
name="name",
inline=False,
required=True,
value="",
clearable=True,
maxLength=8
),
InputText(
label="用户编号",
description="对应青春湖北个人信息页 用户编号",
name="dxx_id",
inline=False,
required=True,
value="",
clearable=True,
maxLength=9
),
InputText(
label="学校",
description="对应青春湖北填写信息页 高校",
name="university",
inline=False,
required=True,
value="",
clearable=True,
maxLength=24
),
InputText(
label="学院",
description="对应青春湖北填写信息页 院系",
name="college",
inline=False,
required=True,
value="",
clearable=True,
maxLength=32
),
InputText(
label="团支部",
description="对应青春湖北填写信息页 选择组织",
name="organization",
inline=False,
required=False,
value="",
clearable=True,
maxLength=32
)
]
)
"""江西添加成员面板"""
jiangxi_table = Form(
title="添加江西共青团用户",
submitText="添加",
mode=DisplayModeEnum.horizontal,
api="post:/TeenStudy/api/add",
resetAfterSubmit=True,
body=[
Select(
label="群聊",
name="group_id",
description="需要添加的群组",
checkAll=False,
source="get:/TeenStudy/api/get_group_list",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
),
Select(
label="用户ID",
name="user_id",
description="需要添加的用户ID",
checkAll=False,
source="get:/TeenStudy/api/get_member_list?group_id=${group_id}",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
hiddenOn="this.group_id==''?true:false"
),
InputText(
label="地区",
description="所处省份",
name="area",
value="江西",
disabled=True
),
InputText(
label="用户编号",
description="团组织ID,无需填写",
name="dxx_id",
inline=False,
required=True,
value="${IF(ISEMPTY(organization),SPLIT(college,'-')[1],SPLIT(organization,'-')[1])}",
disabled=True
),
Select(
type="select",
label="学校类型",
name="university_type",
searchable=True,
required=True,
clearable=True,
options=[
{'label': "团省委机关", "value": "团省委机关-N0017"},
{'label': "省直属单位团委", "value": "省直属单位团委-N0016"},
{'label': "省属本科院校团委", "value": "省属本科院校团委-N0013"},
{'label': "非省属本科院校团委", "value": "非省属本科院校团委-N0014"},
{'label': "高职专科院校团委", "value": "高职专科院校团委-N0015"},
{'label': "南昌市", "value": "南昌市-N0002"},
{'label': "九江市", "value": "九江市-N0003"},
{'label': "景德镇市", "value": "景德镇市-N0004"},
{'label': "萍乡市", "value": "萍乡市-N0005"},
{'label': "新余市", "value": "新余市-N0006"},
{'label': "鹰潭市", "value": "鹰潭市-N0007"},
{'label': "赣州市", "value": "赣州市-N0008"},
{'label': "宜春市", "value": "宜春市-N0009"},
{'label': "上饶市", "value": "上饶市-N0010"},
{'label': "吉安市", "value": "吉安市-N0011"},
{'label': "抚州市", "value": "抚州市-N0012"}
]
),
Select(
type="select",
label="学校名称",
name="university",
value="${IF(ISEMPTY(university_type),university,'')}",
searchable=True,
required=True,
clearable=True,
source={
"method": "get",
"url": "/TeenStudy/api/organization?pid=${SPLIT(university_type,'-')[1]}",
"sendOn": "this.university_type!==''"
}, hiddenOn="this.university_type===''|| this.university_type===undefined"
),
Select(
type="select",
label="学院名称",
name="college",
value="${IF(ISEMPTY(university),college,'')}",
searchable=True,
required=True,
clearable=True,
source={
"method": "get",
"url": "/TeenStudy/api/organization?pid=${SPLIT(university,'-')[1]}",
"sendOn": "this.university!==''"
},
hiddenOn="this.university_type==='' || this.university===''||this.university_type===undefined || this.university===undefined"
),
Select(
type="select",
label="团支部",
description="团支部名称,对应江西共青团个人修改信息页 班级/团支部",
name="organization",
value="${IF(ISEMPTY(college),organization,'')}",
searchable=True,
required=False,
clearable=True,
source={
"method": "get",
"url": "/TeenStudy/api/organization?pid=${SPLIT(college,'-')[1]}",
"sendOn": "this.college!==''"
},
hiddenOn="this.university_type===''||this.university===''||this.college===''||this.university_type===undefined||this.university===undefined||this.college===undefined"
),
InputText(
label="登录密码",
type='input-password',
description="可不填,默认为用户ID",
name="password",
inline=False,
required=False,
value="",
clearable=True,
maxLength=16
),
InputText(
label="手机号/学号",
description="对应江西共青团个人修改信息页 手机号/学号,空着不用填",
name="mobile",
inline=False,
required=False,
value="",
clearable=True,
maxLength=11
),
InputText(
label="姓名",
description="对应江西共青团个人修改信息页 真实姓名",
name="name",
inline=False,
required=True,
value="",
clearable=True,
maxLength=8
)
]
)
"""安徽添加成员面板"""
anhui_table = Form(
title="添加安徽共青团用户",
submitText="添加",
mode=DisplayModeEnum.horizontal,
api="post:/TeenStudy/api/anhui/add",
resetAfterSubmit=True,
body=[
Select(
label="群聊",
name="group_id",
description="需要添加的群组",
checkAll=False,
source="get:/TeenStudy/api/get_group_list",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
),
Select(
label="用户ID",
name="user_id",
description="需要添加的用户ID",
checkAll=False,
source="get:/TeenStudy/api/get_member_list?group_id=${group_id}",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
hiddenOn="${group_id==''?true:false}"
),
InputText(
label="地区",
description="所处省份",
name="area",
value="安徽",
disabled=True
),
InputText(
label="登录密码",
type='input-password',
description="可不填,默认为用户ID",
name="password",
inline=False,
required=False,
value="",
clearable=True,
maxLength=16
),
InputText(
label="url",
description="个人信息修改页,点右上角分享,复制链接填入即可 链接格式:http://dxx.ahyouth.org.cn/modify/?tk=您的token值",
name="url",
inline=False,
required=True,
value="",
clearable=True,
maxLength=128
)
]
)
"""四川添加成员面板"""
sichuan_table = Form(
title="添加天府新青年用户",
submitText="添加",
mode=DisplayModeEnum.horizontal,
api="post:/TeenStudy/api/sichuan/add",
resetAfterSubmit=True,
body=[
Alert(level=LevelEnum.info,
className='white-space-pre-wrap',
body=(
"该地区需要自行抓包填入\ntoken值在https://dxx.scyol.com/api/wechat/login 响应里\n其余信息在 https://dxx.scyol.com/api/student/showStudyStageOrg?id=xxxxxx&stageId=xx 响应里")),
Select(
label="群聊",
name="group_id",
description="需要添加的群组",
checkAll=False,
source="get:/TeenStudy/api/get_group_list",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
),
Select(
label="用户ID",
name="user_id",
description="需要添加的用户ID",
checkAll=False,
source="get:/TeenStudy/api/get_member_list?group_id=${group_id}",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
hiddenOn="${group_id==''?true:false}"
),
InputText(
label="地区",
description="所处省份",
name="area",
value="四川",
disabled=True
),
InputText(
label="登录密码",
type='input-password',
description="可不填,默认为用户ID",
name="password",
inline=False,
required=False,
value="",
clearable=True,
maxLength=16
),
InputText(
label="姓名",
description="对应抓包内容 name",
name="name",
inline=False,
required=True,
value="",
clearable=True,
maxLength=8
),
InputText(
label="token",
description="自行抓包获取,在:https://dxx.scyol.com/api/wechat/login 链接的响应内容里",
name="token",
inline=False,
required=True,
value="",
clearable=True,
),
InputText(
label="手机号",
description="自行抓包获取,对应tel",
name="mobile",
inline=False,
required=True,
value="",
clearable=True,
),
InputText(
label="整体组织ID",
description="自行抓包获取,对应org",
name="org",
inline=False,
required=True,
value="",
clearable=True,
),
InputText(
label="组织ID",
description="自行抓包获取,对应lastOrg",
name="lastOrg",
inline=False,
required=True,
value="",
clearable=True,
),
InputText(
label="团支部名称",
description="自行抓包获取,对应 orgName",
name="orgName",
inline=False,
required=True,
value="",
clearable=True,
), InputText(
label="组织全称",
description="自行抓包获取,对应allOrgName",
name="allOrgName",
inline=False,
required=True,
value="",
clearable=True,
)
]
)
"""山东添加成员面板"""
shandong_table = Form(
title="添加青春山东用户",
submitText="添加",
mode=DisplayModeEnum.horizontal,
api="post:/TeenStudy/api/shandong/add",
resetAfterSubmit=True,
body=[
Select(
label="群聊",
name="group_id",
description="需要添加的群组",
checkAll=False,
source="get:/TeenStudy/api/get_group_list",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
),
Select(
label="用户ID",
name="user_id",
description="需要添加的用户ID",
checkAll=False,
source="get:/TeenStudy/api/get_member_list?group_id=${group_id}",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
hiddenOn="${group_id==''?true:false}"
),
InputText(
label="地区",
description="所处省份",
name="area",
value="山东",
disabled=True
),
InputText(
label="登录密码",
type='input-password',
description="可不填,默认为用户ID",
name="password",
inline=False,
required=False,
value="",
clearable=True,
maxLength=16
),
InputText(
label="姓名",
description="对应青春山东个人信息页 您的姓名",
name="name",
inline=False,
required=True,
value="",
clearable=True,
maxLength=8
),
InputText(
label="cookie",
description="自行抓包获取,结构为:JSESSIONID=1873FXXXXXXXX5DFCBF1CC13703",
name="cookie",
inline=False,
required=True,
value="",
clearable=True,
),
InputText(
label="openid",
description="自行抓包获取,结构为:ohz9xxxxxxxxxxxxlF0Io0uCnM",
name="openid",
inline=False,
required=True,
value="",
clearable=True,
)
]
)
"""重庆添加成员面板"""
chongqing_table = Form(
title="添加重庆共青团用户",
submitText="添加",
mode=DisplayModeEnum.horizontal,
api="post:/TeenStudy/api/chongqing/add",
resetAfterSubmit=True,
body=[
Select(
label="群聊",
name="group_id",
description="需要添加的群组",
checkAll=False,
source="get:/TeenStudy/api/get_group_list",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
),
Select(
label="用户ID",
name="user_id",
description="需要添加的用户ID",
checkAll=False,
source="get:/TeenStudy/api/get_member_list?group_id=${group_id}",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
hiddenOn="${group_id==''?true:false}"
),
InputText(
label="地区",
description="所处省份",
name="area",
value="重庆",
disabled=True
),
InputText(
label="登录密码",
type='input-password',
description="可不填,默认为用户ID",
name="password",
inline=False,
required=False,
value="",
clearable=True,
maxLength=16
),
InputText(
label="姓名",
description="对应重庆共青团个人信息页 您的姓名",
name="name",
inline=False,
required=True,
value="",
clearable=True,
maxLength=8
),
InputText(
label="openid",
description="自行抓包获取,结构为: ohz9xxxxxxxxxxxxlF0Io0uCnM",
name="openid",
inline=False,
required=True,
value="",
clearable=True,
)
]
)
"""吉林添加成员面板"""
jilin_table = Form(
title="添加吉青飞扬用户",
mode=DisplayModeEnum.horizontal,
api="post:/TeenStudy/api/jilin/add",
redirect="/TeenStudy/login",
body=[
Select(
label="群聊",
name="group_id",
description="需要添加的群组",
checkAll=False,
source="get:/TeenStudy/api/get_group_list",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
),
Select(
label="用户ID",
name="user_id",
description="需要添加的用户ID",
checkAll=False,
source="get:/TeenStudy/api/get_member_list?group_id=${group_id}",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
hiddenOn="${group_id==''?true:false}"
),
InputText(
label="地区",
description="所处省份",
name="area",
value="吉林",
disabled=True
),
InputText(
label="登录密码",
type='input-password',
description="可不填,默认为用户ID",
name="password",
inline=False,
required=False,
value="",
clearable=True,
maxLength=16
),
InputText(
label="姓名",
description="您的姓名",
name="name",
inline=False,
required=True,
value="",
clearable=True,
maxLength=8
),
InputText(
label="openid",
description="自行抓包获取,结构为: ohz9xxxxxxxxxxxxlF0Io0uCnM",
name="openid",
inline=False,
required=True,
value="",
clearable=True,
),
InputText(
label="学校",
description="你就读的高校",
name="university",
inline=False,
required=True,
value="",
clearable=True,
maxLength=24
),
InputText(
label="学院",
description="学院名称",
name="college",
inline=False,
required=True,
value="",
clearable=True,
maxLength=32
),
InputText(
label="团支部",
description="团支部|班级,没有可不填",
name="organization",
inline=False,
required=False,
value="",
clearable=True,
maxLength=32
)
]
)
"""广东地区添加用户"""
guangdong_table = Form(
title="添加广东共青团用户",
mode=DisplayModeEnum.horizontal,
api="post:/TeenStudy/api/guangdong/add",
redirect="/TeenStudy/login",
body=[
Alert(level=LevelEnum.info,
className='white-space-pre-wrap',
body=(
"链接获取方式:\n12355青春之声公众号\n智慧团建-认证资料-生成电子团员证,点击最下方生成按钮。\n在团员证页面复制链接 应为:https://tuan.12355.net/wechat/view/information/member_certification_generated.html?memberId=xxxxxx&showMemberAdditionNames=&showMemberRewardIds=&isShowAllFee=true \n其中xxxxxx即为mid")),
Select(
label="群聊",
name="group_id",
description="需要添加的群组",
checkAll=False,
source="get:/TeenStudy/api/get_group_list",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
),
Select(
label="用户ID",
name="user_id",
description="需要添加的用户ID",
checkAll=False,
source="get:/TeenStudy/api/get_member_list?group_id=${group_id}",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
hiddenOn="${group_id==''?true:false}"
),
InputText(
label="地区",
description="所处省份",
name="area",
value="广东",
disabled=True
),
InputText(
label="登录密码",
type='input-password',
description="可不填,默认为用户ID",
name="password",
inline=False,
required=False,
value="",
clearable=True,
maxLength=16
),
InputText(
label="姓名",
description="您的姓名",
name="name",
inline=False,
required=True,
value="",
clearable=True,
maxLength=8
),
InputText(
label="url",
description="链接格式:https://tuan.12355.net/wechat/view/information/member_certification_generated.html?memberId=xxxxxx&showMemberAdditionNames=&showMemberRewardIds=&isShowAllFee=true",
name="url",
inline=False,
required=True,
value="",
clearable=True,
maxLength=512
),
InputText(
label="学校",
description="你就读的高校",
name="university",
inline=False,
required=True,
value="",
clearable=True,
maxLength=24
),
InputText(
label="学院",
description="学院名称",
name="college",
inline=False,
required=True,
value="",
clearable=True,
maxLength=32
),
InputText(
label="团支部",
description="团支部|班级,没有可不填",
name="organization",
inline=False,
required=False,
value="",
clearable=True,
maxLength=32
)]
)
"""北京地区添加用户面板"""
beijing_table = Form(
title="添加北京共青团用户",
mode=DisplayModeEnum.horizontal,
api="post:/TeenStudy/api/beijing/add",
redirect="/TeenStudy/login",
body=[
Select(
label="群聊",
name="group_id",
description="需要添加的群组",
checkAll=False,
source="get:/TeenStudy/api/get_group_list",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
),
Select(
label="用户ID",
name="user_id",
description="需要添加的用户ID",
checkAll=False,
source="get:/TeenStudy/api/get_member_list?group_id=${group_id}",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
hiddenOn="${group_id==''?true:false}"
),
InputText(
label="地区",
description="所处省份",
name="area",
value="北京",
disabled=True
),
InputText(
label="登录密码",
type='input-password',
description="可不填,默认为用户ID",
name="password",
inline=False,
required=False,
value="",
clearable=True,
maxLength=16
),
InputText(
label="姓名",
description="您的姓名",
name="name",
inline=False,
required=True,
value="",
clearable=True,
maxLength=8
), InputText(
label="大学习ID",
description="名字后面括号内的数字",
name="dxx_id",
inline=False,
required=True,
value="",
clearable=True,
maxLength=8
),
InputText(
label="学校",
description="你就读的高校",
name="university",
inline=False,
required=True,
value="",
clearable=True,
maxLength=24
),
InputText(
label="学院",
description="学院名称",
name="college",
inline=False,
required=True,
value="",
clearable=True,
maxLength=32
),
InputText(
label="团支部",
description="团支部|班级,没有可不填",
name="organization",
inline=False,
required=False,
value="",
clearable=True,
maxLength=32
),
InputText(
label="团支部ID",
description="支部后面括号内的数字",
name="organization_id",
inline=False,
required=True,
value="",
clearable=True,
maxLength=32
),
InputText(
label="账号",
description="登录北京共青团的账号",
name="cookie",
inline=False,
required=True,
value="",
clearable=True,
maxLength=32
), InputText(
label="密码",
description="登陆北京共青团的密码",
name="token",
inline=False,
required=True,
value="",
clearable=True,
maxLength=32
)
]
)
"""天津地区添加用户"""
tianjin_table = Form(
title="添加津彩青春用户",
mode=DisplayModeEnum.horizontal,
api="post:/TeenStudy/api/tianjin/add",
redirect="/TeenStudy/login",
body=[
Select(
label="群聊",
name="group_id",
description="需要添加的群组",
checkAll=False,
source="get:/TeenStudy/api/get_group_list",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
),
Select(
label="用户ID",
name="user_id",
description="需要添加的用户ID",
checkAll=False,
source="get:/TeenStudy/api/get_member_list?group_id=${group_id}",
value='',
multiple=False,
required=True,
searchable=True,
joinValues=False,
extractValue=True,
statistics=True,
hiddenOn="${group_id==''?true:false}"
),
InputText(
label="地区",
description="所处省份",
name="area",
value="天津",
disabled=True
),
InputText(
label="登录密码",
type='input-password',
description="可不填,默认为用户ID",
name="password",
inline=False,
required=False,
value="",
clearable=True,
maxLength=16
),
InputText(
label="姓名",
description="您的姓名",
name="name",
inline=False,
required=True,
value="",
clearable=True,
maxLength=8
),
InputText(
label="学校",
description="你就读的高校",
name="university",
inline=False,
required=True,
value="",
clearable=True,
maxLength=24
),
InputText(
label="学院",
description="学院名称",
name="college",
inline=False,
required=True,
value="",
clearable=True,
maxLength=32
),
InputText(
label="团支部",
description="团支部|班级,没有可不填",
name="organization",
inline=False,
required=False,
value="",
clearable=True,
maxLength=32
),
InputText(
label="cookie",
description="自行抓包获取,结构为:JSESSIONID=1873FXXXXXXXX5DFCBF1CC13703",
name="cookie",
inline=False,
required=True,
value="",
clearable=True,
),
]
)
hubei_page = PageSchema(url='/TeenStudy/add/hubei', icon='fa fa-pen-to-square', label='青春湖北',
schema=Page(title='青春湖北', body=[hubei_table]))
jiangxi_page = PageSchema(url='/TeenStudy/add/jiangxi', icon='fa fa-pen-to-square', label='江西共青团',
schema=Page(title='江西共青团', body=[jiangxi_table]))
anhui_page = PageSchema(url='/TeenStudy/add/anhui', icon='fa fa-pen-to-square', label='安徽共青团',
schema=Page(title='安徽共青团', body=[anhui_table]))
sichuan_page = PageSchema(url='/TeenStudy/add/sichuan', icon='fa fa-pen-to-square', label='天府新青年',
schema=Page(title='天府新青年', body=[sichuan_table]))
shandong_page = PageSchema(url='/TeenStudy/add/shandong', icon='fa fa-pen-to-square', label='青春山东',
schema=Page(title='青春山东', body=[shandong_table]))
chongqing_page = PageSchema(url='/TeenStudy/add/chongqing', icon='fa fa-pen-to-square', label='重庆共青团',
schema=Page(title='重庆共青团', body=[chongqing_table]))
jilin_page = PageSchema(url='/TeenStudy/add/jilin', icon='fa fa-pen-to-square', label='吉青飞扬',
schema=Page(title='吉青飞扬', body=[jilin_table]))
guangdong_page = PageSchema(url='/TeenStudy/add/guangdong', icon='fa fa-pen-to-square', label='广东共青团',
schema=Page(title='广东共青团', body=[guangdong_table]))
beijing_page = PageSchema(url='/TeenStudy/add/beijing', icon='fa fa-pen-to-square', label='北京共青团',
schema=Page(title='北京共青团', body=[beijing_table]))
tianjin_page = PageSchema(url='/TeenStudy/add/tianjin', icon='fa fa-pen-to-square', label='津彩青春',
schema=Page(title='津彩青春', body=[tianjin_table]))
areaPage = [list_page,
hubei_page, jiangxi_page, anhui_page,
sichuan_page, shandong_page, chongqing_page, jilin_page, guangdong_page, beijing_page,
tianjin_page
] | PypiClean |
/resoto-plugin-cleanup-aws-alarms-3.6.5.tar.gz/resoto-plugin-cleanup-aws-alarms-3.6.5/resoto_plugin_cleanup_aws_alarms/__init__.py | from resotolib.baseplugin import BaseActionPlugin
from resotolib.core.search import CoreGraph
from resotolib.graph import Graph
from resoto_plugin_aws.resource.cloudwatch import AwsCloudwatchAlarm
from resoto_plugin_aws.resource.ec2 import AwsEc2Instance
from resotolib.logger import log
from resotolib.config import Config
from .config import CleanupAWSAlarmsConfig
from typing import Dict
class CleanupAWSAlarmsPlugin(BaseActionPlugin):
action = "cleanup_plan"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config = {}
if Config.plugin_cleanup_aws_alarms.enabled:
self.config = Config.plugin_cleanup_aws_alarms.config
def bootstrap(self) -> bool:
return Config.plugin_cleanup_aws_alarms.enabled
def do_action(self, data: Dict) -> None:
cg = CoreGraph(tls_data=self.tls_data)
query = "is(aws_cloudwatch_alarm) <-default,delete[0:]delete->"
graph = cg.graph(query)
self.alarm_cleanup(graph)
cg.patch_nodes(graph)
def alarm_cleanup(self, graph: Graph):
log.info("AWS Cloudwatch Alarms cleanup called")
for node in graph.nodes:
if node.protected or not isinstance(node, AwsCloudwatchAlarm):
continue
cloud = node.cloud(graph)
account = node.account(graph)
region = node.region(graph)
log_prefix = f"Found {node.rtdname} in cloud {cloud.name} account {account.dname} " f"region {region.name}."
if len(self.config) > 0:
if cloud.id not in self.config or account.id not in self.config[cloud.id]:
log.debug((f"{log_prefix} Account not found in config - ignoring."))
continue
should_clean = False
i = None
log_msg = log_prefix
for dimension in node.dimensions:
if dimension.get("Name") == "InstanceId":
instance_id = dimension.get("Value")
i = graph.search_first_all({"kind": "aws_ec2_instance", "id": instance_id})
if isinstance(i, AwsEc2Instance) and i.instance_status not in ("terminated",):  # one-element tuple, not a bare string
should_clean = False
break
else:
should_clean = True
log_msg += f" Referenced EC2 instance {instance_id} not found."
if not should_clean:
continue
log.debug(f"{log_msg} - cleaning alarm")
node.clean = True
@staticmethod
def add_config(config: Config) -> None:
config.add_config(CleanupAWSAlarmsConfig) | PypiClean |
/qgis-deployment-toolbelt-0.26.0.tar.gz/qgis-deployment-toolbelt-0.26.0/qgis_deployment_toolbelt/cli.py | # #############################################################################
# ########## Libraries #############
# ##################################
# Standard library
import argparse
import logging
import sys
# submodules
from qgis_deployment_toolbelt.__about__ import (
__author__,
__summary__,
__title__,
__title_clean__,
__uri_homepage__,
__version__,
)
from qgis_deployment_toolbelt.commands import parser_main_deployment, parser_upgrade
from qgis_deployment_toolbelt.utils.journalizer import configure_logger
# #############################################################################
# ########## Globals ###############
# ##################################
# ############################################################################
# ########## FUNCTIONS ###########
# ################################
def add_common_arguments(parser_to_update: argparse.ArgumentParser):
"""Apply common argument to an existing parser.
Args:
parser_to_update (argparse.ArgumentParser): parser to which arguments need to be added
Returns:
argparse.ArgumentParser: parser with added options
"""
parser_to_update.add_argument(
"-v",
"--verbose",
action="count",
default=1,
dest="verbosity",
help="Niveau de verbosité : None = WARNING, -v = INFO, -vv = DEBUG",
)
return parser_to_update
def set_default_subparser(
parser_to_update: argparse.ArgumentParser,
default_subparser_name: str,
args: list = None,
):
"""Set a default subparser to a parent parser. Call after setup and just before
parse_args().
See: <https://stackoverflow.com/questions/5176691/argparse-how-to-specify-a-default-subcommand>
Args:
parser_to_update (argparse.ArgumentParser): parent parser to add
default_subparser_name (str): name of the subparser to call by default
args (list, optional): if set is the argument list handed to parse_args().
Defaults to None.
"""
subparser_found = False
for arg in sys.argv[1:]:
if arg in [
"-h",
"--help",
"--version",
"--no-logfile",
]: # ignore main parser args
break
else:
for x in parser_to_update._subparsers._actions:
if not isinstance(x, argparse._SubParsersAction):
continue
for sp_name in x._name_parser_map.keys():
if sp_name in sys.argv[1:]:
subparser_found = True
if not subparser_found:
# insert the default sub-command in first position; this implies that no
# global options can be passed without a sub-command being specified
if args is None:
sys.argv.insert(1, default_subparser_name)
else:
args.insert(0, default_subparser_name)
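# Illustrative use of set_default_subparser() (a minimal sketch; the parser and the
# sub-command names here are assumptions, not taken from this module):
#
#     parser = argparse.ArgumentParser()
#     subparsers = parser.add_subparsers(dest="command")
#     subparsers.add_parser("deploy")
#     subparsers.add_parser("upgrade")
#     set_default_subparser(parser_to_update=parser, default_subparser_name="deploy")
#     args = parser.parse_args()  # a plain "prog -v" now behaves like "prog deploy -v"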
# ############################################################################
# ########## MAIN ################
# ################################
def main(in_args: list[str] = None):
"""Main CLI entrypoint.
Args:
in_args (List[str], optional): list of command-line arguments. Defaults to None.
"""
# create the top-level parser
main_parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=f"Developed by: {__author__}\nDocumentation: {__uri_homepage__}",
description=f"{__title__} {__version__} - {__summary__}",
argument_default=argparse.SUPPRESS,
)
# -- ROOT ARGUMENTS --
# Optional verbosity counter (eg. -v, -vv, -vvv, etc.)
main_parser.add_argument(
"-v",
"--verbose",
action="count",
default=1,
dest="verbosity",
help="Verbosity level. None = WARNING, -v = INFO, -vv = DEBUG. Can be set with "
"QDT_LOGS_LEVEL environment variable and logs location with QDT_LOGS_DIR.",
)
main_parser.add_argument(
"--no-logfile",
default=True,
action="store_false",
dest="opt_logfile_disabled",
help="Disable log file. Log files are usually created, rotated and stored in the"
"folder set by QDT_LOGS_DIR.",
)
main_parser.add_argument(
"--version",
action="version",
version=__version__,
help="Display CLI version",
)
# -- SUB-COMMANDS --
subparsers = main_parser.add_subparsers(title="Sub-commands", dest="command")
# Main logic
subcmd_deployment = subparsers.add_parser(
"deploy",
help="QDT's main logic: run the deployment's scenario.",
formatter_class=main_parser.formatter_class,
prog="deployment",
)
add_common_arguments(subcmd_deployment)
parser_main_deployment(subcmd_deployment)
# Upgrader
subcmd_upgrade = subparsers.add_parser(
"upgrade",
aliases=["auto-update", "update"],
help="Check if a new version of QDT is available and download it locally.",
formatter_class=main_parser.formatter_class,
prog="upgrade",
)
add_common_arguments(subcmd_upgrade)
parser_upgrade(subcmd_upgrade)
# -- PARSE ARGS --
set_default_subparser(parser_to_update=main_parser, default_subparser_name="deploy")
# just get passed args
args = main_parser.parse_args(in_args)
# log configuration
if args.opt_logfile_disabled:
configure_logger(
verbosity=args.verbosity, logfile=f"{__title_clean__}_{__version__}.log"
)
else:
configure_logger(verbosity=args.verbosity)
# add the handler to the root logger
logger = logging.getLogger(__title_clean__)
logger.debug(f"Log level set: {logging.getLevelName(args.verbosity)}")
# -- RUN LOGIC --
if hasattr(args, "func"):
args.func(args)
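# Illustrative command-line calls (the console-script name and the deploy options are
# assumptions; they are defined by the project packaging and parser_main_deployment):
#   qdt deploy -vv   -> run the deployment scenario with DEBUG logging
#   qdt upgrade      -> check for and download a newer QDT release
#   qdt -vv          -> "deploy" is injected by set_default_subparser() above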
# #############################################################################
# ##### Stand alone program ########
# ##################################
if __name__ == "__main__":
"""Standalone execution."""
# launch cli
main() # required by unittest | PypiClean |
/nifstd-tools-0.0.7.tar.gz/nifstd-tools-0.0.7/nifstd_tools/hbp_parc_output.py | import subprocess
from pathlib import Path
from collections import defaultdict
import rdflib
from ttlser import natsort
from pyontutils.core import qname, makeGraph
from pyontutils.utils import TermColors as tc
from pyontutils.namespaces import NIFRID, ilxtr
from pyontutils.combinators import restriction, annotation
from pyontutils.closed_namespaces import owl, rdf, rdfs, skos
current_file = Path(__file__).absolute()
gitf = current_file.parent.parent.parent
def labelkey(line):
label, *rest = line.split('|', 1)
return natsort(label)
def edkey(line):
ed, label, *rest = line.split('|', 2)
return natsort(ed + ' ' + label)
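# Both key functions build on ttlser's natural-sort key so that numbered labels order
# the way a human expects. A rough illustration (exact natsort output is assumed):
#   sorted(["CA10|a|x|y", "CA2|b|x|y"], key=labelkey)  ->  ["CA2|b|x|y", "CA10|a|x|y"]
# edkey sorts the same way but primarily on the edition column.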
def main():
for filename in ('mbaslim', 'hbaslim', 'paxinos-rat-labels', 'waxholm-rat-labels'):
filepath = gitf / 'NIF-Ontology/ttl/generated/parcellation' / (filename + '.ttl')
dir_ = filepath.parent.as_posix()
print(dir_)
file_commit = subprocess.check_output(['git', 'log', '-n', '1',
'--pretty=format:%H', '--',
filepath.name],
cwd=dir_,
stderr=subprocess.DEVNULL).decode().rstrip()
graph = rdflib.Graph().parse(filepath.as_posix(), format='ttl')
g = makeGraph('', graph=graph)
annos = defaultdict(set)
anno_trips = defaultdict(set)
for triple, predicate_objects in annotation.parse(graph=graph):
for a_p, a_o in predicate_objects:
annos[a_p, a_o].add(triple)
anno_trips[triple].add((a_p, a_o))
anno_trips = {k:v for k, v in anno_trips.items()}
for lifted_triple in restriction.parse(graph=graph):
graph.add(lifted_triple)
out_header = 'label|abbrev|curie|superPart curie\n'
out = []
editions_header = 'edition|label|abbrev|curie\n'
editions = []
for s in graph.subjects(rdf.type, owl.Class):
rdfsLabel = next(graph.objects(s, rdfs.label))
try:
prefLabel = next(graph.objects(s, skos.prefLabel))
except StopIteration:
print(tc.red('WARNING:'), f'skipping {s} {rdfsLabel} since it has no prefLabel')
continue
syns = sorted(graph.objects(s, NIFRID.synonym)) # TODO are there cases where we need to recapitulate what we are doing for abbrevs?
abbrevs = sorted(graph.objects(s, NIFRID.abbrev)) # FIXME paxinos has more than one
try:
if annos:
if len(abbrevs) > 1:
print(tc.blue('INFO:'), g.qname(s), repr(prefLabel.value), 'has multiple abbrevs', [a.value for a in abbrevs])
# prefer latest
current_edition = ''
for a in abbrevs:
for a_p, edition in anno_trips[s, NIFRID.abbrev, a]:
if a_p == ilxtr.literalUsedBy:
if current_edition < edition:
current_edition = edition
abbrev = a
else:
abbrev = abbrevs[0]
except IndexError:
abbrev = ''
try:
superPart = next(graph.objects(s, ilxtr.labelPartOf))
except StopIteration:
superPart = ''
out.append(f'{prefLabel}|{abbrev}|{g.qname(s)}|{g.qname(superPart)}')
if annos:
#asdf = {'ed':{'label':,'abbrev':,'curie':}}
asdf = defaultdict(dict)
triple = s, skos.prefLabel, prefLabel
eds = anno_trips[triple]
for a_p, a_o in eds:
asdf[a_o]['curie'] = g.qname(s)
asdf[a_o]['label'] = prefLabel
for syn in graph.objects(s, NIFRID.synonym):
triple = s, NIFRID.synonym, syn
eds = anno_trips[triple]
for a_p, a_o in eds:
asdf[a_o]['curie'] = g.qname(s)
if 'label' in asdf[a_o]:
print(tc.red('WARNING:'), f'{a_o} already has a label "{asdf[a_o]["label"]}" for "{syn}"')
asdf[a_o]['label'] = syn
for abbrev in graph.objects(s, NIFRID.abbrev):
triple = s, NIFRID.abbrev, abbrev
eds = anno_trips[triple]
#print('aaaaaaaaaaa', g.qname(s), )
for a_p, a_o in eds:
asdf[a_o]['curie'] = g.qname(s)
if 'abbrev' in asdf[a_o]:
print(tc.red('WARNING:'), f'{a_o} already has a abbrev "{asdf[a_o]["abbrev"]}" for "{abbrev}"')
asdf[a_o]['abbrev'] = abbrev
#print(asdf)
for ed, kwargs in sorted(asdf.items()):
if 'abbrev' not in kwargs:
print('Skipping', ed, 'for\n', kwargs)
continue
editions.append('{ed}|{label}|{abbrev}|{curie}'.format(ed=g.qname(ed), **kwargs))
with open('/tmp/' + filename + f'-{file_commit[:8]}.psv', 'wt') as f:
f.write(out_header + '\n'.join(sorted(out, key=labelkey)))
if editions:
with open('/tmp/' + filename + f'-editions-{file_commit[:8]}.psv', 'wt') as f:
f.write(editions_header + '\n'.join(sorted(editions, key=edkey)))
if __name__ == '__main__':
main() | PypiClean |
/sendbird_platform_sdk-0.0.16-py3-none-any.whl/sendbird_platform_sdk/model/add_hms_push_configuration_response_push_configurations_inner.py | import re # noqa: F401
import sys # noqa: F401
from sendbird_platform_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from sendbird_platform_sdk.exceptions import ApiAttributeError
class AddHmsPushConfigurationResponsePushConfigurationsInner(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'id': (str,), # noqa: E501
'push_type': (str,), # noqa: E501
'huawei_app_id': (str,), # noqa: E501
'huawei_app_secret': (str,), # noqa: E501
'push_sound': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'push_type': 'push_type', # noqa: E501
'huawei_app_id': 'huawei_app_id', # noqa: E501
'huawei_app_secret': 'huawei_app_secret', # noqa: E501
'push_sound': 'push_sound', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""AddHmsPushConfigurationResponsePushConfigurationsInner - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
push_type (str): [optional] # noqa: E501
huawei_app_id (str): [optional] # noqa: E501
huawei_app_secret (str): [optional] # noqa: E501
push_sound (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""AddHmsPushConfigurationResponsePushConfigurationsInner - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
push_type (str): [optional] # noqa: E501
huawei_app_id (str): [optional] # noqa: E501
huawei_app_secret (str): [optional] # noqa: E501
push_sound (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | PypiClean |
/django-tastypie-swagger-ng-0.1.3.tar.gz/django-tastypie-swagger-ng-0.1.3/tastypie_swagger/static/tastypie_swagger/js/lib/backbone-min.js |
// (c) 2010-2012 Jeremy Ashkenas, DocumentCloud Inc.
// Backbone may be freely distributed under the MIT license.
// For all details and documentation:
// http://backbonejs.org
(function(){var l=this,y=l.Backbone,z=Array.prototype.slice,A=Array.prototype.splice,g;g="undefined"!==typeof exports?exports:l.Backbone={};g.VERSION="0.9.2";var f=l._;!f&&"undefined"!==typeof require&&(f=require("underscore"));var i=l.jQuery||l.Zepto||l.ender;g.setDomLibrary=function(a){i=a};g.noConflict=function(){l.Backbone=y;return this};g.emulateHTTP=!1;g.emulateJSON=!1;var p=/\s+/,k=g.Events={on:function(a,b,c){var d,e,f,g,j;if(!b)return this;a=a.split(p);for(d=this._callbacks||(this._callbacks=
{});e=a.shift();)f=(j=d[e])?j.tail:{},f.next=g={},f.context=c,f.callback=b,d[e]={tail:g,next:j?j.next:f};return this},off:function(a,b,c){var d,e,h,g,j,q;if(e=this._callbacks){if(!a&&!b&&!c)return delete this._callbacks,this;for(a=a?a.split(p):f.keys(e);d=a.shift();)if(h=e[d],delete e[d],h&&(b||c))for(g=h.tail;(h=h.next)!==g;)if(j=h.callback,q=h.context,b&&j!==b||c&&q!==c)this.on(d,j,q);return this}},trigger:function(a){var b,c,d,e,f,g;if(!(d=this._callbacks))return this;f=d.all;a=a.split(p);for(g=
z.call(arguments,1);b=a.shift();){if(c=d[b])for(e=c.tail;(c=c.next)!==e;)c.callback.apply(c.context||this,g);if(c=f){e=c.tail;for(b=[b].concat(g);(c=c.next)!==e;)c.callback.apply(c.context||this,b)}}return this}};k.bind=k.on;k.unbind=k.off;var o=g.Model=function(a,b){var c;a||(a={});b&&b.parse&&(a=this.parse(a));if(c=n(this,"defaults"))a=f.extend({},c,a);b&&b.collection&&(this.collection=b.collection);this.attributes={};this._escapedAttributes={};this.cid=f.uniqueId("c");this.changed={};this._silent=
{};this._pending={};this.set(a,{silent:!0});this.changed={};this._silent={};this._pending={};this._previousAttributes=f.clone(this.attributes);this.initialize.apply(this,arguments)};f.extend(o.prototype,k,{changed:null,_silent:null,_pending:null,idAttribute:"id",initialize:function(){},toJSON:function(){return f.clone(this.attributes)},get:function(a){return this.attributes[a]},escape:function(a){var b;if(b=this._escapedAttributes[a])return b;b=this.get(a);return this._escapedAttributes[a]=f.escape(null==
b?"":""+b)},has:function(a){return null!=this.get(a)},set:function(a,b,c){var d,e;f.isObject(a)||null==a?(d=a,c=b):(d={},d[a]=b);c||(c={});if(!d)return this;d instanceof o&&(d=d.attributes);if(c.unset)for(e in d)d[e]=void 0;if(!this._validate(d,c))return!1;this.idAttribute in d&&(this.id=d[this.idAttribute]);var b=c.changes={},h=this.attributes,g=this._escapedAttributes,j=this._previousAttributes||{};for(e in d){a=d[e];if(!f.isEqual(h[e],a)||c.unset&&f.has(h,e))delete g[e],(c.silent?this._silent:
b)[e]=!0;c.unset?delete h[e]:h[e]=a;!f.isEqual(j[e],a)||f.has(h,e)!=f.has(j,e)?(this.changed[e]=a,c.silent||(this._pending[e]=!0)):(delete this.changed[e],delete this._pending[e])}c.silent||this.change(c);return this},unset:function(a,b){(b||(b={})).unset=!0;return this.set(a,null,b)},clear:function(a){(a||(a={})).unset=!0;return this.set(f.clone(this.attributes),a)},fetch:function(a){var a=a?f.clone(a):{},b=this,c=a.success;a.success=function(d,e,f){if(!b.set(b.parse(d,f),a))return!1;c&&c(b,d)};
a.error=g.wrapError(a.error,b,a);return(this.sync||g.sync).call(this,"read",this,a)},save:function(a,b,c){var d,e;f.isObject(a)||null==a?(d=a,c=b):(d={},d[a]=b);c=c?f.clone(c):{};if(c.wait){if(!this._validate(d,c))return!1;e=f.clone(this.attributes)}a=f.extend({},c,{silent:!0});if(d&&!this.set(d,c.wait?a:c))return!1;var h=this,i=c.success;c.success=function(a,b,e){b=h.parse(a,e);if(c.wait){delete c.wait;b=f.extend(d||{},b)}if(!h.set(b,c))return false;i?i(h,a):h.trigger("sync",h,a,c)};c.error=g.wrapError(c.error,
h,c);b=this.isNew()?"create":"update";b=(this.sync||g.sync).call(this,b,this,c);c.wait&&this.set(e,a);return b},destroy:function(a){var a=a?f.clone(a):{},b=this,c=a.success,d=function(){b.trigger("destroy",b,b.collection,a)};if(this.isNew())return d(),!1;a.success=function(e){a.wait&&d();c?c(b,e):b.trigger("sync",b,e,a)};a.error=g.wrapError(a.error,b,a);var e=(this.sync||g.sync).call(this,"delete",this,a);a.wait||d();return e},url:function(){var a=n(this,"urlRoot")||n(this.collection,"url")||t();
return this.isNew()?a:a+("/"==a.charAt(a.length-1)?"":"/")+encodeURIComponent(this.id)},parse:function(a){return a},clone:function(){return new this.constructor(this.attributes)},isNew:function(){return null==this.id},change:function(a){a||(a={});var b=this._changing;this._changing=!0;for(var c in this._silent)this._pending[c]=!0;var d=f.extend({},a.changes,this._silent);this._silent={};for(c in d)this.trigger("change:"+c,this,this.get(c),a);if(b)return this;for(;!f.isEmpty(this._pending);){this._pending=
{};this.trigger("change",this,a);for(c in this.changed)!this._pending[c]&&!this._silent[c]&&delete this.changed[c];this._previousAttributes=f.clone(this.attributes)}this._changing=!1;return this},hasChanged:function(a){return!arguments.length?!f.isEmpty(this.changed):f.has(this.changed,a)},changedAttributes:function(a){if(!a)return this.hasChanged()?f.clone(this.changed):!1;var b,c=!1,d=this._previousAttributes,e;for(e in a)if(!f.isEqual(d[e],b=a[e]))(c||(c={}))[e]=b;return c},previous:function(a){return!arguments.length||
!this._previousAttributes?null:this._previousAttributes[a]},previousAttributes:function(){return f.clone(this._previousAttributes)},isValid:function(){return!this.validate(this.attributes)},_validate:function(a,b){if(b.silent||!this.validate)return!0;var a=f.extend({},this.attributes,a),c=this.validate(a,b);if(!c)return!0;b&&b.error?b.error(this,c,b):this.trigger("error",this,c,b);return!1}});var r=g.Collection=function(a,b){b||(b={});b.model&&(this.model=b.model);b.comparator&&(this.comparator=b.comparator);
this._reset();this.initialize.apply(this,arguments);a&&this.reset(a,{silent:!0,parse:b.parse})};f.extend(r.prototype,k,{model:o,initialize:function(){},toJSON:function(a){return this.map(function(b){return b.toJSON(a)})},add:function(a,b){var c,d,e,g,i,j={},k={},l=[];b||(b={});a=f.isArray(a)?a.slice():[a];c=0;for(d=a.length;c<d;c++){if(!(e=a[c]=this._prepareModel(a[c],b)))throw Error("Can't add an invalid model to a collection");g=e.cid;i=e.id;j[g]||this._byCid[g]||null!=i&&(k[i]||this._byId[i])?
l.push(c):j[g]=k[i]=e}for(c=l.length;c--;)a.splice(l[c],1);c=0;for(d=a.length;c<d;c++)(e=a[c]).on("all",this._onModelEvent,this),this._byCid[e.cid]=e,null!=e.id&&(this._byId[e.id]=e);this.length+=d;A.apply(this.models,[null!=b.at?b.at:this.models.length,0].concat(a));this.comparator&&this.sort({silent:!0});if(b.silent)return this;c=0;for(d=this.models.length;c<d;c++)if(j[(e=this.models[c]).cid])b.index=c,e.trigger("add",e,this,b);return this},remove:function(a,b){var c,d,e,g;b||(b={});a=f.isArray(a)?
a.slice():[a];c=0;for(d=a.length;c<d;c++)if(g=this.getByCid(a[c])||this.get(a[c]))delete this._byId[g.id],delete this._byCid[g.cid],e=this.indexOf(g),this.models.splice(e,1),this.length--,b.silent||(b.index=e,g.trigger("remove",g,this,b)),this._removeReference(g);return this},push:function(a,b){a=this._prepareModel(a,b);this.add(a,b);return a},pop:function(a){var b=this.at(this.length-1);this.remove(b,a);return b},unshift:function(a,b){a=this._prepareModel(a,b);this.add(a,f.extend({at:0},b));return a},
shift:function(a){var b=this.at(0);this.remove(b,a);return b},get:function(a){return null==a?void 0:this._byId[null!=a.id?a.id:a]},getByCid:function(a){return a&&this._byCid[a.cid||a]},at:function(a){return this.models[a]},where:function(a){return f.isEmpty(a)?[]:this.filter(function(b){for(var c in a)if(a[c]!==b.get(c))return!1;return!0})},sort:function(a){a||(a={});if(!this.comparator)throw Error("Cannot sort a set without a comparator");var b=f.bind(this.comparator,this);1==this.comparator.length?
this.models=this.sortBy(b):this.models.sort(b);a.silent||this.trigger("reset",this,a);return this},pluck:function(a){return f.map(this.models,function(b){return b.get(a)})},reset:function(a,b){a||(a=[]);b||(b={});for(var c=0,d=this.models.length;c<d;c++)this._removeReference(this.models[c]);this._reset();this.add(a,f.extend({silent:!0},b));b.silent||this.trigger("reset",this,b);return this},fetch:function(a){a=a?f.clone(a):{};void 0===a.parse&&(a.parse=!0);var b=this,c=a.success;a.success=function(d,
e,f){b[a.add?"add":"reset"](b.parse(d,f),a);c&&c(b,d)};a.error=g.wrapError(a.error,b,a);return(this.sync||g.sync).call(this,"read",this,a)},create:function(a,b){var c=this,b=b?f.clone(b):{},a=this._prepareModel(a,b);if(!a)return!1;b.wait||c.add(a,b);var d=b.success;b.success=function(e,f){b.wait&&c.add(e,b);d?d(e,f):e.trigger("sync",a,f,b)};a.save(null,b);return a},parse:function(a){return a},chain:function(){return f(this.models).chain()},_reset:function(){this.length=0;this.models=[];this._byId=
{};this._byCid={}},_prepareModel:function(a,b){b||(b={});a instanceof o?a.collection||(a.collection=this):(b.collection=this,a=new this.model(a,b),a._validate(a.attributes,b)||(a=!1));return a},_removeReference:function(a){this==a.collection&&delete a.collection;a.off("all",this._onModelEvent,this)},_onModelEvent:function(a,b,c,d){("add"==a||"remove"==a)&&c!=this||("destroy"==a&&this.remove(b,d),b&&a==="change:"+b.idAttribute&&(delete this._byId[b.previous(b.idAttribute)],this._byId[b.id]=b),this.trigger.apply(this,
arguments))}});f.each("forEach,each,map,reduce,reduceRight,find,detect,filter,select,reject,every,all,some,any,include,contains,invoke,max,min,sortBy,sortedIndex,toArray,size,first,initial,rest,last,without,indexOf,shuffle,lastIndexOf,isEmpty,groupBy".split(","),function(a){r.prototype[a]=function(){return f[a].apply(f,[this.models].concat(f.toArray(arguments)))}});var u=g.Router=function(a){a||(a={});a.routes&&(this.routes=a.routes);this._bindRoutes();this.initialize.apply(this,arguments)},B=/:\w+/g,
C=/\*\w+/g,D=/[-[\]{}()+?.,\\^$|#\s]/g;f.extend(u.prototype,k,{initialize:function(){},route:function(a,b,c){g.history||(g.history=new m);f.isRegExp(a)||(a=this._routeToRegExp(a));c||(c=this[b]);g.history.route(a,f.bind(function(d){d=this._extractParameters(a,d);c&&c.apply(this,d);this.trigger.apply(this,["route:"+b].concat(d));g.history.trigger("route",this,b,d)},this));return this},navigate:function(a,b){g.history.navigate(a,b)},_bindRoutes:function(){if(this.routes){var a=[],b;for(b in this.routes)a.unshift([b,
this.routes[b]]);b=0;for(var c=a.length;b<c;b++)this.route(a[b][0],a[b][1],this[a[b][1]])}},_routeToRegExp:function(a){a=a.replace(D,"\\$&").replace(B,"([^/]+)").replace(C,"(.*?)");return RegExp("^"+a+"$")},_extractParameters:function(a,b){return a.exec(b).slice(1)}});var m=g.History=function(){this.handlers=[];f.bindAll(this,"checkUrl")},s=/^[#\/]/,E=/msie [\w.]+/;m.started=!1;f.extend(m.prototype,k,{interval:50,getHash:function(a){return(a=(a?a.location:window.location).href.match(/#(.*)$/))?a[1]:
""},getFragment:function(a,b){if(null==a)if(this._hasPushState||b){var a=window.location.pathname,c=window.location.search;c&&(a+=c)}else a=this.getHash();a.indexOf(this.options.root)||(a=a.substr(this.options.root.length));return a.replace(s,"")},start:function(a){if(m.started)throw Error("Backbone.history has already been started");m.started=!0;this.options=f.extend({},{root:"/"},this.options,a);this._wantsHashChange=!1!==this.options.hashChange;this._wantsPushState=!!this.options.pushState;this._hasPushState=
!(!this.options.pushState||!window.history||!window.history.pushState);var a=this.getFragment(),b=document.documentMode;if(b=E.exec(navigator.userAgent.toLowerCase())&&(!b||7>=b))this.iframe=i('<iframe src="javascript:0" tabindex="-1" />').hide().appendTo("body")[0].contentWindow,this.navigate(a);this._hasPushState?i(window).bind("popstate",this.checkUrl):this._wantsHashChange&&"onhashchange"in window&&!b?i(window).bind("hashchange",this.checkUrl):this._wantsHashChange&&(this._checkUrlInterval=setInterval(this.checkUrl,
this.interval));this.fragment=a;a=window.location;b=a.pathname==this.options.root;if(this._wantsHashChange&&this._wantsPushState&&!this._hasPushState&&!b)return this.fragment=this.getFragment(null,!0),window.location.replace(this.options.root+"#"+this.fragment),!0;this._wantsPushState&&this._hasPushState&&b&&a.hash&&(this.fragment=this.getHash().replace(s,""),window.history.replaceState({},document.title,a.protocol+"//"+a.host+this.options.root+this.fragment));if(!this.options.silent)return this.loadUrl()},
stop:function(){i(window).unbind("popstate",this.checkUrl).unbind("hashchange",this.checkUrl);clearInterval(this._checkUrlInterval);m.started=!1},route:function(a,b){this.handlers.unshift({route:a,callback:b})},checkUrl:function(){var a=this.getFragment();a==this.fragment&&this.iframe&&(a=this.getFragment(this.getHash(this.iframe)));if(a==this.fragment)return!1;this.iframe&&this.navigate(a);this.loadUrl()||this.loadUrl(this.getHash())},loadUrl:function(a){var b=this.fragment=this.getFragment(a);return f.any(this.handlers,
function(a){if(a.route.test(b))return a.callback(b),!0})},navigate:function(a,b){if(!m.started)return!1;if(!b||!0===b)b={trigger:b};var c=(a||"").replace(s,"");this.fragment!=c&&(this._hasPushState?(0!=c.indexOf(this.options.root)&&(c=this.options.root+c),this.fragment=c,window.history[b.replace?"replaceState":"pushState"]({},document.title,c)):this._wantsHashChange?(this.fragment=c,this._updateHash(window.location,c,b.replace),this.iframe&&c!=this.getFragment(this.getHash(this.iframe))&&(b.replace||
this.iframe.document.open().close(),this._updateHash(this.iframe.location,c,b.replace))):window.location.assign(this.options.root+a),b.trigger&&this.loadUrl(a))},_updateHash:function(a,b,c){c?a.replace(a.toString().replace(/(javascript:|#).*$/,"")+"#"+b):a.hash=b}});var v=g.View=function(a){this.cid=f.uniqueId("view");this._configure(a||{});this._ensureElement();this.initialize.apply(this,arguments);this.delegateEvents()},F=/^(\S+)\s*(.*)$/,w="model,collection,el,id,attributes,className,tagName".split(",");
f.extend(v.prototype,k,{tagName:"div",$:function(a){return this.$el.find(a)},initialize:function(){},render:function(){return this},remove:function(){this.$el.remove();return this},make:function(a,b,c){a=document.createElement(a);b&&i(a).attr(b);c&&i(a).html(c);return a},setElement:function(a,b){this.$el&&this.undelegateEvents();this.$el=a instanceof i?a:i(a);this.el=this.$el[0];!1!==b&&this.delegateEvents();return this},delegateEvents:function(a){if(a||(a=n(this,"events"))){this.undelegateEvents();
for(var b in a){var c=a[b];f.isFunction(c)||(c=this[a[b]]);if(!c)throw Error('Method "'+a[b]+'" does not exist');var d=b.match(F),e=d[1],d=d[2],c=f.bind(c,this),e=e+(".delegateEvents"+this.cid);""===d?this.$el.bind(e,c):this.$el.delegate(d,e,c)}}},undelegateEvents:function(){this.$el.unbind(".delegateEvents"+this.cid)},_configure:function(a){this.options&&(a=f.extend({},this.options,a));for(var b=0,c=w.length;b<c;b++){var d=w[b];a[d]&&(this[d]=a[d])}this.options=a},_ensureElement:function(){if(this.el)this.setElement(this.el,
!1);else{var a=n(this,"attributes")||{};this.id&&(a.id=this.id);this.className&&(a["class"]=this.className);this.setElement(this.make(this.tagName,a),!1)}}});o.extend=r.extend=u.extend=v.extend=function(a,b){var c=G(this,a,b);c.extend=this.extend;return c};var H={create:"POST",update:"PUT","delete":"DELETE",read:"GET"};g.sync=function(a,b,c){var d=H[a];c||(c={});var e={type:d,dataType:"json"};c.url||(e.url=n(b,"url")||t());if(!c.data&&b&&("create"==a||"update"==a))e.contentType="application/json",
e.data=JSON.stringify(b.toJSON());g.emulateJSON&&(e.contentType="application/x-www-form-urlencoded",e.data=e.data?{model:e.data}:{});if(g.emulateHTTP&&("PUT"===d||"DELETE"===d))g.emulateJSON&&(e.data._method=d),e.type="POST",e.beforeSend=function(a){a.setRequestHeader("X-HTTP-Method-Override",d)};"GET"!==e.type&&!g.emulateJSON&&(e.processData=!1);return i.ajax(f.extend(e,c))};g.wrapError=function(a,b,c){return function(d,e){e=d===b?e:d;a?a(b,e,c):b.trigger("error",b,e,c)}};var x=function(){},G=function(a,
b,c){var d;d=b&&b.hasOwnProperty("constructor")?b.constructor:function(){a.apply(this,arguments)};f.extend(d,a);x.prototype=a.prototype;d.prototype=new x;b&&f.extend(d.prototype,b);c&&f.extend(d,c);d.prototype.constructor=d;d.__super__=a.prototype;return d},n=function(a,b){return!a||!a[b]?null:f.isFunction(a[b])?a[b]():a[b]},t=function(){throw Error('A "url" property or function must be specified');}}).call(this); | PypiClean |
/django-canjs-0.1.2.tar.gz/django-canjs-0.1.2/canjs/static/canjs/1.1.5/amd/can/observe/list.js | define(['can/util/library', 'can/observe', 'can/observe/compute'], function (can) {
can.extend(can.Observe.List.prototype, {
filter: function (callback) {
// The filtered list
var filtered = new this.constructor();
var self = this;
// Creates the binder for a single element at a given index
var generator = function (element, index) {
// The event handler that updates the filtered list
var binder = function (ev, val) {
var index = filtered.indexOf(element);
// Remove it from the list if it exists but the new value is false
if (!val && index !== -1) {
filtered.splice(index, 1);
}
// Add it to the list if it isn't in there and the new value is true
if (val && index === -1) {
filtered.push(element);
}
};
// a can.compute that executes the callback
var compute = can.compute(function () {
return callback(element, self.indexOf(element), self);
});
// Update the filtered list on any compute change
compute.bind('change', binder);
// Call binder explicitly for the initial list
binder(null, compute());
};
// We also want to know when something gets added to our original list
this.bind('add', function (ev, data, index) {
can.each(data, function (element, i) {
// Call the generator for each newly added element
// The index is the start index + the loop index
generator(element, index + i);
});
});
// Removed items should be removed from both lists
this.bind('remove', function (ev, data, index) {
can.each(data, function (element, i) {
var index = filtered.indexOf(element);
if (index !== -1) {
filtered.splice(index, 1);
}
});
});
// Run the generator for each list element
this.forEach(generator);
return filtered;
},
map: function (callback) {
var mapped = new can.Observe.List();
var self = this;
// Again, lets run a generator function
var generator = function (element, index) {
// The can.compute for the mapping
var compute = can.compute(function () {
return callback(element, index, self);
});
compute.bind('change', function (ev, val) {
// On change, replace the current value with the new one
mapped.splice(index, 1, val);
});
mapped.splice(index, 0, compute());
}
this.forEach(generator);
// We also want to know when something gets added to our original list
this.bind('add', function (ev, data, index) {
can.each(data, function (element, i) {
// Call the generator for each newly added element
// The index is the start index + the loop index
generator(element, index + i);
});
});
this.bind('remove', function (ev, data, index) {
// The indices in the mapped list are the same so lets just splice it out
mapped.splice(index, data.length);
})
return mapped;
}
});
return can.Observe.List;
}); | PypiClean |
/cdk8s_aws_alb_ingress_controller-1.0.0-py3-none-any.whl/cdk8s_aws_alb_ingress_controller/__init__.py | import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import constructs
class AlbIngressController(
constructs.Construct,
metaclass=jsii.JSIIMeta,
jsii_type="cdk8s-aws-alb-ingress-controller.AlbIngressController",
):
"""Generate alb-ingress-controller config yaml.
see https://github.com/kubernetes-sigs/aws-alb-ingress-controller/blob/master/docs/examples
"""
def __init__(
self,
scope: constructs.Construct,
id: builtins.str,
*,
cluster_name: builtins.str,
args: typing.Optional[typing.List[builtins.str]] = None,
env: typing.Optional[typing.List["EnvVar"]] = None,
image: typing.Optional[builtins.str] = None,
labels: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
namespace: typing.Optional[builtins.str] = None,
replicas: typing.Optional[jsii.Number] = None,
service_account_name: typing.Optional[builtins.str] = None,
) -> None:
"""
:param scope: -
:param id: -
:param cluster_name: Kubernetes Cluster Name for alb-ingress-controller. Default: - None
:param args: Another Args for alb-ingress-controller. Default: - None
:param env: Another Args for alb-ingress-controller. Default: - None
:param image: Default image for alb-ingress-controller. Default: - docker.io/amazon/aws-alb-ingress-controller:v1.1.9
:param labels: Extra labels to associate with resources. Default: - none
:param namespace: Default Namespace for alb-ingress-controller. Default: - kube-system
:param replicas: Replicas for alb-ingress-controller. Default: - 1
:param service_account_name: Default Service Account Name for alb-ingress-controller. Default: - alb-ingress-controller
"""
options = AlbIngressControllerOptions(
cluster_name=cluster_name,
args=args,
env=env,
image=image,
labels=labels,
namespace=namespace,
replicas=replicas,
service_account_name=service_account_name,
)
jsii.create(AlbIngressController, self, [scope, id, options])
@builtins.property # type: ignore
@jsii.member(jsii_name="clusterName")
def cluster_name(self) -> builtins.str:
"""Kubernetes Cluster Name for alb-ingress-controller."""
return jsii.get(self, "clusterName")
@builtins.property # type: ignore
@jsii.member(jsii_name="deploymentName")
def deployment_name(self) -> builtins.str:
"""Kubernetes Deployment Name for alb-ingress-controller."""
return jsii.get(self, "deploymentName")
@builtins.property # type: ignore
@jsii.member(jsii_name="namespace")
def namespace(self) -> builtins.str:
"""Namespace for alb-ingress-controller.
:default: - kube-system
"""
return jsii.get(self, "namespace")
@builtins.property # type: ignore
@jsii.member(jsii_name="serviceAccountName")
def service_account_name(self) -> builtins.str:
"""Service Account Name for alb-ingress-controller."""
return jsii.get(self, "serviceAccountName")
@jsii.data_type(
jsii_type="cdk8s-aws-alb-ingress-controller.AlbIngressControllerOptions",
jsii_struct_bases=[],
name_mapping={
"cluster_name": "clusterName",
"args": "args",
"env": "env",
"image": "image",
"labels": "labels",
"namespace": "namespace",
"replicas": "replicas",
"service_account_name": "serviceAccountName",
},
)
class AlbIngressControllerOptions:
def __init__(
self,
*,
cluster_name: builtins.str,
args: typing.Optional[typing.List[builtins.str]] = None,
env: typing.Optional[typing.List["EnvVar"]] = None,
image: typing.Optional[builtins.str] = None,
labels: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
namespace: typing.Optional[builtins.str] = None,
replicas: typing.Optional[jsii.Number] = None,
service_account_name: typing.Optional[builtins.str] = None,
) -> None:
"""
:param cluster_name: Kubernetes Cluster Name for alb-ingress-controller. Default: - None
:param args: Another Args for alb-ingress-controller. Default: - None
:param env: Another Args for alb-ingress-controller. Default: - None
:param image: Default image for alb-ingress-controller. Default: - docker.io/amazon/aws-alb-ingress-controller:v1.1.9
:param labels: Extra labels to associate with resources. Default: - none
:param namespace: Default Namespace for alb-ingress-controller. Default: - kube-system
:param replicas: Replicas for alb-ingress-controller. Default: - 1
:param service_account_name: Default Service Account Name for alb-ingress-controller. Default: - alb-ingress-controller
"""
self._values: typing.Dict[str, typing.Any] = {
"cluster_name": cluster_name,
}
if args is not None:
self._values["args"] = args
if env is not None:
self._values["env"] = env
if image is not None:
self._values["image"] = image
if labels is not None:
self._values["labels"] = labels
if namespace is not None:
self._values["namespace"] = namespace
if replicas is not None:
self._values["replicas"] = replicas
if service_account_name is not None:
self._values["service_account_name"] = service_account_name
@builtins.property
def cluster_name(self) -> builtins.str:
"""Kubernetes Cluster Name for alb-ingress-controller.
:default: - None
"""
result = self._values.get("cluster_name")
assert result is not None, "Required property 'cluster_name' is missing"
return result
@builtins.property
def args(self) -> typing.Optional[typing.List[builtins.str]]:
"""Another Args for alb-ingress-controller.
:default: - None
"""
result = self._values.get("args")
return result
@builtins.property
def env(self) -> typing.Optional[typing.List["EnvVar"]]:
"""Another Args for alb-ingress-controller.
:default: - None
"""
result = self._values.get("env")
return result
@builtins.property
def image(self) -> typing.Optional[builtins.str]:
"""Default image for alb-ingress-controller.
:default: - docker.io/amazon/aws-alb-ingress-controller:v1.1.9
"""
result = self._values.get("image")
return result
@builtins.property
def labels(self) -> typing.Optional[typing.Mapping[builtins.str, builtins.str]]:
"""Extra labels to associate with resources.
:default: - none
"""
result = self._values.get("labels")
return result
@builtins.property
def namespace(self) -> typing.Optional[builtins.str]:
"""Default Namespace for alb-ingress-controller.
:default: - kube-system
"""
result = self._values.get("namespace")
return result
@builtins.property
def replicas(self) -> typing.Optional[jsii.Number]:
"""Replicas for alb-ingress-controller.
:default: - 1
"""
result = self._values.get("replicas")
return result
@builtins.property
def service_account_name(self) -> typing.Optional[builtins.str]:
"""Default Service Account Name for alb-ingress-controller.
:default: - alb-ingress-controller
"""
result = self._values.get("service_account_name")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "AlbIngressControllerOptions(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class AwsLoadBalancePolicy(
metaclass=jsii.JSIIMeta,
jsii_type="cdk8s-aws-alb-ingress-controller.AwsLoadBalancePolicy",
):
"""awsLoadBalancePolicy class ,help you add policy to your Iam Role for service account."""
def __init__(self) -> None:
jsii.create(AwsLoadBalancePolicy, self, [])
@jsii.member(jsii_name="addPolicy")
@builtins.classmethod
def add_policy(cls, version: builtins.str, role: typing.Any) -> typing.Any:
"""
:param version: -
:param role: -
"""
return jsii.sinvoke(cls, "addPolicy", [version, role])
class AwsLoadBalancerController(
constructs.Construct,
metaclass=jsii.JSIIMeta,
jsii_type="cdk8s-aws-alb-ingress-controller.AwsLoadBalancerController",
):
"""Generate aws-load-balancer-controller config yaml.
see https://github.com/kubernetes-sigs/aws-aws-load-balancer-controller/blob/master/docs/install/v2_0_0_full.yaml
"""
def __init__(
self,
scope: constructs.Construct,
id: builtins.str,
*,
cluster_name: builtins.str,
create_service_account: typing.Optional[builtins.bool] = None,
) -> None:
"""
:param scope: -
:param id: -
:param cluster_name: Kubernetes Cluster Name for aws-load-balancer-controller. Default: - None
:param create_service_account: service account for aws-load-balancer-controller. Default: - true
"""
options = AwsLoadBalancerControllerOptions(
cluster_name=cluster_name, create_service_account=create_service_account
)
jsii.create(AwsLoadBalancerController, self, [scope, id, options])
@builtins.property # type: ignore
@jsii.member(jsii_name="clusterName")
def cluster_name(self) -> builtins.str:
"""Kubernetes Cluster Name for aws-load-balancer-controller."""
return jsii.get(self, "clusterName")
@builtins.property # type: ignore
@jsii.member(jsii_name="deploymentName")
def deployment_name(self) -> builtins.str:
"""Kubernetes Deployment Name for aws-load-balancer-controller."""
return jsii.get(self, "deploymentName")
@builtins.property # type: ignore
@jsii.member(jsii_name="namespace")
def namespace(self) -> builtins.str:
"""Namespace for aws-load-balancer-controller.
:default: - default
"""
return jsii.get(self, "namespace")
@builtins.property # type: ignore
@jsii.member(jsii_name="serviceAccountName")
def service_account_name(self) -> builtins.str:
"""Service Account Name for aws-load-balancer-controller."""
return jsii.get(self, "serviceAccountName")
@jsii.data_type(
jsii_type="cdk8s-aws-alb-ingress-controller.AwsLoadBalancerControllerOptions",
jsii_struct_bases=[],
name_mapping={
"cluster_name": "clusterName",
"create_service_account": "createServiceAccount",
},
)
class AwsLoadBalancerControllerOptions:
def __init__(
self,
*,
cluster_name: builtins.str,
create_service_account: typing.Optional[builtins.bool] = None,
) -> None:
"""
:param cluster_name: Kubernetes Cluster Name for aws-load-balancer-controller. Default: - None
:param create_service_account: service account for aws-load-balancer-controller. Default: - true
"""
self._values: typing.Dict[str, typing.Any] = {
"cluster_name": cluster_name,
}
if create_service_account is not None:
self._values["create_service_account"] = create_service_account
@builtins.property
def cluster_name(self) -> builtins.str:
"""Kubernetes Cluster Name for aws-load-balancer-controller.
:default: - None
"""
result = self._values.get("cluster_name")
assert result is not None, "Required property 'cluster_name' is missing"
return result
@builtins.property
def create_service_account(self) -> typing.Optional[builtins.bool]:
"""service account for aws-load-balancer-controller.
:default: - true
"""
result = self._values.get("create_service_account")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "AwsLoadBalancerControllerOptions(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class CertManager(
metaclass=jsii.JSIIMeta,
jsii_type="cdk8s-aws-alb-ingress-controller.CertManager",
):
def __init__(self) -> None:
jsii.create(CertManager, self, [])
@jsii.member(jsii_name="certManagerConfig")
@builtins.classmethod
def cert_manager_config(cls) -> typing.Any:
return jsii.sinvoke(cls, "certManagerConfig", [])
@jsii.data_type(
jsii_type="cdk8s-aws-alb-ingress-controller.EnvVar",
jsii_struct_bases=[],
name_mapping={"name": "name", "value": "value"},
)
class EnvVar:
def __init__(
self,
*,
name: builtins.str,
value: typing.Optional[builtins.str] = None,
) -> None:
"""
:param name: Name of the environment variable. Must be a C_IDENTIFIER.
:param value: Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". Default: .
"""
self._values: typing.Dict[str, typing.Any] = {
"name": name,
}
if value is not None:
self._values["value"] = value
@builtins.property
def name(self) -> builtins.str:
"""Name of the environment variable.
Must be a C_IDENTIFIER.
:schema: io.k8s.api.core.v1.EnvVar#name
"""
result = self._values.get("name")
assert result is not None, "Required property 'name' is missing"
return result
@builtins.property
def value(self) -> typing.Optional[builtins.str]:
"""Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables.
If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
:default: .
:schema: io.k8s.api.core.v1.EnvVar#value
"""
result = self._values.get("value")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "EnvVar(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.enum(jsii_type="cdk8s-aws-alb-ingress-controller.VersionsLists")
class VersionsLists(enum.Enum):
AWS_LOAD_BALANCER_CONTROLLER_POLICY_V1 = "AWS_LOAD_BALANCER_CONTROLLER_POLICY_V1"
AWS_LOAD_BALANCER_CONTROLLER_POLICY_V2 = "AWS_LOAD_BALANCER_CONTROLLER_POLICY_V2"
__all__ = [
"AlbIngressController",
"AlbIngressControllerOptions",
"AwsLoadBalancePolicy",
"AwsLoadBalancerController",
"AwsLoadBalancerControllerOptions",
"CertManager",
"EnvVar",
"VersionsLists",
]
publication.publish() | PypiClean |
/enrich-1.2.6.tar.gz/enrich-1.2.6/README.md | # enrich
Enrich extends the [rich](https://pypi.org/project/rich/) library
with a set of changes that were not accepted into rich itself.
## Console with redirect support
Our Console class adds one additional option to rich.Console in order to
redirect `sys.stdout` and `sys.stderr` streams using a FileProxy.
```python
from enrich.console import Console
import sys
console = Console(
    redirect=True, # <-- not supported by rich.console.Console
record=True)
sys.stdout.write("foo")
# this assert would have failed without redirect=True
assert console.export_text() == "foo"
```
## Console with implicit soft wrapping
If you want to produce fluid terminal output, one where the client terminal
decides where to wrap the text instead of the application, you can now
tell the Console constructor the soft_wrap preference.
```python
from enrich.console import Console
import sys
console = Console(soft_wrap=True)
console.print(...) # no longer need to pass soft_wrap to each print
```
## Console.print can also deal with ANSI escapes
Extends Rich Console to detect if original text already had ANSI escapes and
decodes it before processing it. This solves the case where printing
output captured from other processes that contained ANSI escapes would break.
[upstream-404](https://github.com/willmcgugan/rich/discussions/404)
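A minimal sketch of the idea; the escaped string below is just an invented example of captured subprocess output, not something produced by enrich itself:
```python
from enrich.console import Console
console = Console(record=True)
# pretend this line was captured from another process that emitted color codes
captured = "\x1b[32mok\x1b[0m 3 passed"
console.print(captured)  # escapes are decoded instead of printed as literal text
```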
## Soft-wrapping logger
Rich logger assumes that you always have a fixed width console and it does
wrap logged output according to it. Our alternative logger does exactly the
opposite: it ignores the columns of the current console and prints output
using a Console with soft wrapping enabled.
The result is logged lines that can be displayed on any terminal or web
page, as they allow the client to decide when to perform the wrapping.
```python
import logging
from enrich.logging import RichHandler
FORMAT = "%(message)s"
logging.basicConfig(
level="NOTSET", format=FORMAT, datefmt="[%X]", handlers=[RichHandler()]
)
log = logging.getLogger("rich")
log.info("Text that we do not want pre-wrapped by logger: %s", 100 * "x")
```
| PypiClean |
/love_course_2016_2019-2023.3.1.0-py3-none-any.whl/LoveCourse20162019/docs/ai-shang-qing-gan/爱上情感《魅力男神全套》:12超级吸引力(老实人也能拥有让女孩倒追的魅力!):如何真正的学会恋爱学?带你了解一手经验与二手经验!.md | # 爱上情感《魅力男神全套》:12 超级吸引力(老实人也能拥有让女孩倒追的魅力!):如何真正的学会恋爱学?带你了解一手经验与二手经验!
今天给大家带了利机我们这个内议进品课我们去看一下现在要讲的这个图题,现在主题就是说如何真正的去真正的学会内议学,如果真正的学会设交或者说如何真正的能够找到一个你喜欢的女孩,他也是并且能够喜欢你。
打如何去更加高兆的去学起,那么你如果说想去真正的学会内议学你必须要去了解一个概念,对个概念这对一手经验和二手经验是非常重要的一个东西,如果说你不知道什么叫做一手经验和二手经验的话。
那么你一定要好好地来听我们今天要讲的这个东西,我在之前的课程中为您您想想的去提到过一些有关今天要讲的这个知识点,一就是一手经验和二手经验。
所以说这个东西对于你帮助你去学习好这个设交帮助你去学习好这个念学,他是有非常重要的一个因素在的,那么很多人可能会比较困惑说什么一手经验什么是二手经验,对吧。
这连者的吧一手经验和二手经验跟我学习设交跟我学习念学有什么关系呢,跟我学习聊门有什么关系呢,那他们确实非常的重要并且他们的观念非常的深,我来举一个非常简单的例子,你现在可能二手多岁了。
会有说你这个三十岁,可能你二十四三十岁对不对,那么你长了这么大,在你这个成长了这二十多年这三十多年女的,你接收到了二十多年的信息,你接收到了三十多年的信息,那么这些信息是从那里来的。
这些信息是从社会上来的对不对,这些信息是从你周围的这些人告诉你的对不对,那么你的所有的这些观念,包括说你对一些事务的一些看法,你的价值观呀你的三观呀,你所有的东西是你长这么大。
然后所接收到了方法外面的一些信息,而形成的,而形成你的价值观呀,你的道德观呀,你的感情观等等的一些东西,那么这些东西它往往影响着你的社交,它非常影响着你的社交影响着你去聊门,这也是为什么很多兄弟可能说。
为什么没有女孩喜欢,就是因为你在过去的这个成长的二十来年,这三十来年女的这三十多年里边,你接收到了那些有关于女人的那些信息,全部是错误的你形成了非常多的一些限制性的信念,你形成了非常多的一些事务价的观。
那我们之前的很多课程就是帮助你去清楚掉那些,事务价的观帮助你去去除掉那些限制性的信念,那如果说你不知道什么事,这个事务价的观可以去看看我们之前的一些课程,你如果不知道什么事性的信念一些信念。
也去看一看我之前讲过的一些课程,你就会了解到,那么刚才讲那个概念就是说,在你人生成长到了到这二十多年三十年里边,你接收到了这个社会上很多的一些信息,从而形成了你的三观,那么当然这些三观有对的有错的。
那么这个东西跟一手经验好二十经验,可以有什么关系呢,什么是这个一手经验,什么是二十经验呢,那我们简单来讲一讲,就说,我们把这个,你从社会上所接收到的信息,你被别人灌出到的一些经验,别人告诉你的一些话。
或者说社会媒体上他告诉你的一些东西,我们把这个东西称之为叫做二十经验,在那里解吗,那里解可以敲个离,就是说什么叫做二十经验呢,就是说你在这个社会上你长这么大,你长这么大,然后呢。
你被灌出到了别人告诉你的很多的一些经验,别人告诉你的一些话,或者说这个社会上媒体告诉你的一些东西,别人告诉你说这个是对的,这个是错的,这个家的关系的,这个世界关这些信念,就是别人告诉你的一些东西。
那么这些东西我们把它通通叫做二十经验,所以说你把这些东西,你把这些东西,你把这些东西,你把这些东西,你把这些东西,你把这些东西,所以说你们现在能理解什么叫二十经验,那么这个非常的关键。
包括说我们在交给你的这些东西,当你去理解到了之后,也属于二十经验,包括我们交给你们的这些东西,还有这个社会上,还有你长这么大,你所有接受到社会上的一些信息,全部都通知为叫做二十经验,所以说你一定要去。
冠册地去理解这个概念,什么是二十经验,非常重要,那么为什么呢,为什么呢,就是说,是因为你自己没有牵身的去体验这些事情,那么这些东西都通知为叫做二十经验,那你自己牵身去做了,那么这就叫做一手经验。
你自己没有牵身去做这件事情,只是别人告诉你,告诉你说这个经验那个经验,别人告诉你的,别人告诉你说这个前任节的吧,你去给你喜欢的女孩送花,你去表白,然后你们就能在一起,是不是,这个社会上的一些。
社会上的一些社会上的一些社会上的关键所传辟给你的一些经验,对吧,然后呢,你照着想法你去做,你现在来到这种,这么别人的一些这种经验告诉你,你去做了,你去给女孩牵着你的时候送花,你去表白。
然后你发现这种女孩,并没有和你在一起,并且那也不喜欢你,可能还不你给拉黑了,可能还给你发了个狂产卡,对对,那么就说明一个什么事情呢,也就是说,这个社会有一些二十经验告诉你,他是错的,他是错的。
当你真的去按照这些二十经验去做的时候,你发现他的结果是完全不一样的,能理解了吗,你现在能理解了吗,啊,那么当然,还有一些二十经验是有用的,还有一些二十经验是有用的,我来举一个例子,比如说。
比如说你是一个十岁的一个小孩,你是一个十岁的一个孩子,然后呢,你家里边有一个水壶,那个这个水壶呢,刚上来水这个水壶很套,然后呢,你家长就跟你说,哎呦,千万别动那个水壶,那个水壶很套,然后会带你手给套烧。
那么这也是别人告诉你的一个经验对不对,啊,那么我们把这里这个,家长告诉你的这些话,我们把它称准为什么,我们在称准的一叫做二十经验,别人告诉你了,这个水壶上来水很套,你要用手去碰就很套,大家能不能碰。
碰到,那么这些经验就属于二十经验,你明白了吗,现在明白了吗,明白了这个什么叫做二十经验了吗,啊,啊,那么有一些二十经验,它是正确的,啊,有一些二十经验是正确的,但是呢,有一些二十经验。
现在我刚才觉得那个例子的,别人告诉你说,这个,呃你要去给女孩表白去送给女孩,一些东西或者是关心女孩,那么这些二十经验,它可能往往全部是错的,全部是错的,啊,那么我们要总结就换这个什么来说,你听到了。
从你长这么大,你现在二十多岁你这些三十岁,你从这个社会长接受到了,可能百分之九十的,你从这个社会长接受到,百分之九十的有关于女人这方面的一些经验,我们把它称准为叫做二十经验,全部几乎全是错的。
那么这个概念能理解了,可以调为右看一下,呃,那么这个社会上,几乎有关情感的,有关怎么去追女孩的这些东西,去告诉你的这些东西几乎全是错的,几乎全是错的,那么当然除了我们给你们讲的这些东西。
除了我们给你交给的这些经验,那我们交给这些经验,也属于二十多经验,二十多经验,但是我们交给这些东西真的,是真实的 是能够帮助到你们的,我们讲的这些所的东西都是我亲自经历过的,然后我去把它分享给你。
但是你接触到了这些新西兔,它也属于二十经验,那什么属于一手的经验啊,那什么属于一手的经验啊,我来举一个例子,那么,那么我们再拿刚才那个水壶来举例子,再拿刚才那个水壶来举例子,比如说那个水壶的拉钢上来水。
那个水壶很烫,对不对,如果说你是一个乖的孩子,你家长告诉你说,不要去碰到水壶,你可能长了二十多年,长了三十岁了,对吧,你到目前为止 你一直都是个乖孩子了,你根本就没有尝试去摸一下那个水壶。
那去尝试的真的去做一下跟那个水壶有关的这个事情,真的去碰一下这个水壶,你应该都没有做过,对对这事情为什么呢,这是因为听信了那些二十经验,然后呢,你没有说到伤害,那么当然这是一件好的事情啊。
这是一件好的事情,那么,就算你有一些二十经验,你天线了它,它对你可能是有帮助的,对不对,那么这个社会长有很多的这样的一些二十经验,然后这些二十经验干嘛,它在保护着我们。
所以我们就很容易去相信这些二十经验,能理解吗,就说,比如说这个社会长有些经验告诉你说,对啊,不要去马路上乱跑,不要去闯这个红灯,那你闯的红灯,你跟车这个你就被车装了,对不对,你不要去这个。
跟马路上这个车车装,对的,那么这些二十经验干嘛,它对你有用的呀,它是对你有很大帮助的呀,对不对,那么你这个社会长很多别的方面的一些二十经验,它是有用的,那么所以我们看上了,我们才形成了人类。
所以我们才形成了这个社会,那么这个社会上有很多很多的二十经验,那么这些二十经验可能对你来说,确实有帮助的,对于你生存上来说是有帮助的,但是我们人对不对,我们所有的人都是难做的,我们的人的本性是难做的。
那我们就听信了这些二十经验,就说我们就会有吗,去选择相信所有人告诉我们的二十经验,对不对,我们会听信这个社会上,所有人告诉我们的这些二十经,因为这个社会长大部分的二十经验,可能对你都会有用的,对不对。
除了说听爱的方面的,那么这时候你就会相信,所有人告诉你的二十经验,但是这个时候就悲劲就来了,悲劲就来了,悲劲也就是在这个地方,就说在情感上,在女人这方面,在男人和女人这方面,男人和女人交往这方面。
那所有的二十经验,这个社会长所有告诉你的二十经验,几乎都是错的,几乎全是错的,那当然除了我们告诉你的,除了我们给你讲的这些课程,这些东西,那我们讲的这东西非常正习的,那我们是经过自己牵身经历。
我们自己经历了一手的经验,然后去把它分享给你,所以你们记住这句话,就别人告诉你的有关情在方面的,这些是在直水有关,有关和女人交往这方面的,那些支持90%的内容,全部是错的,可能你按照他的说法去做。
你会试着的起反,本来你想让这个女孩喜欢你,按照你按照一下二十经验去做,反而让这个女孩讨厌你了,比如说你去关心女孩,你去给女孩送东西,可能你常常被拆,可能你去这个去给你好表白,被拒绝了。
所以说反这个社会上,有一些二十经验,你去做了,反而你会试着的起反,反而你会得不到一个结果,能理解了吗,这就是二十经验的一个危害,他对于情感方面的一些危害,在回到我刚才的那个例子上。
那你还是一个实际的小孩,然后有一个,就是这个烫的水壶,这个水壶很烫,对不对,上来水了很烫,然后你听话,就是加拿大告诉你,听话你不去碰它,你问你干嘛,你遵循了这个二十经验,对不对,但是呢。
但是如果说你是一个逃棄的孩子,但是如果说你是一个,非常非常逃棄的一个孩子,你就就是加拿大告诉你,说这个水壶很烫,不要去碰,但是你偏偏不信,你偏偏拿手去碰了一下这个水壶,然后你会发现一手。
烫了一个大包起来,那么这个行为叫做什么,那么这个行为叫什么,这个行为就是你亲自去做了一下,这个衣属的经验,就是别人告诉了每一个二十经验,然后你亲自去尝射去做了,能理解吗,主意亲自去做。
别人告诉你二十经验,你亲自去做了,那么这个经验就变成了你的衣属经验,所以记住,什么是衣属经验,就是你亲自去尝射了某件事情,你亲自去做了某件事情,你就会收获衣属经验,那么当你亲自去做这些事情的时候。
那你说过了一你的这些衣属的经验,它会让你对这个事物,它会让你对这个这件事情,它的理解它的深度和藏色,是远远超过二十四的二水的这个经验的,所以理解了,大家懂吗,当然我这里也不知道你们一定要去摸一下。
那个烫的水壶,那这个行为很傻,我也不知道你们一定要去闯红灯,那这个行为非常的傻,但是呢,我们讲的是对于情感上的一些知识,你一定要亲自去做,你一定要把我们讲的所有的知识点,我们讲的所有的经验,二十个经验。
全部变成你的一种的经验,能理解吗,能理解什么是一种经验的,来敲个一,我看一下,能听懂,是吧,我看还能说你再敲一,那也就是说什么呢,我们这个社会人啊,大部分的二十经验,他可能确实能过去帮助你。
然后我们就听信了很多二十经验,你就没有去尝试没有去做,但是呢,这恰恰来说你的一手经验,就很非常的匱乏,非常的匱乏,那特别是对于女人的方便的一些二十经验,可能全部是错的,所以说我们希望的是。
你要掌握自己的一手经验,你要亲自去做一做,亲自去做一些事情,比如说我们需要给你们的所有的东西,你们要亲自去尝试,把我们讲的这些二十经验,变成你的一手经验,有个兄弟说看别人的水壶,被烫失于这些经验吗。
那么也不是,我给他举个例子,比如说你看别人谈恋,别人是一手经验,但是对于你来说,你不是一手经验,那么当然一定不是让你去摸水壶,这件事情,我们讲的是对于恋爱上来说,你一定要有自己的一手经验。
那我刚才也举例,我也讲话就不要,就是你们不要死板了去理解这个事情,不是说一定要你去摸那个水壶,我是举个例子,就是你去亲自做了这件事情之后,你对于这件事情的理解,对于这件事情的一个深度。
他的层次你会远远超过二十四的,二手的经验了,所以说你要灵活的去理解,我要讲到这些东西能理解吗,就是说你对于情感上的这些东西,你一定要亲自去嘲设,亲自去做,我们交给你的一些知识点,你一定要亲自去做。
你把它变成你的一手经验是非常非常重要的,那么当然不许让你去闯轰,当上去跟车去做,那么这些是不需要去做的,那很傻,你要真的去做很傻,对对,那么对于情感上的一手经验,你要真的亲自去尘,亲自去尘,亲自去做。
亲自掌握属于自己的一手经验,那如果说别人去做了,那只是属于别人的情感上的一手经验,你记得看得别人怎么谈恋,就是说其他方面的,除了前两方面的,其他方面的一些二手经验,对于我们来说。
可能都是能够帮助到我们的,都是能够帮助到我们的,但是我还是有一个建议,我还是建议你们,要亲自去做一些事情,亲自去做一些事情,各个方面,比如说你老板告诉你一些东西,你也可以亲自去尘,尘一下。
别人告诉你一些二手经验,你也可以亲自去尘,亲自去做一做,你会对这件事情,说过自己的一个一手的经验,那理解吧,那么,但是在情感上,在恋爱上,在男人和女人交往这件事情上,那么你要记住一句话。
就是所有的二手经验,你要全部抛弃掉,全部抛弃掉,当然你要把我们讲给你的这些支持,留住,把我们讲的这些支持留住,你要把这个社会上,别人告诉你的一些二手经验,全部的抛弃掉,一些试图家的关,一些现在新的信念。
一些错误的东西,你们要全部抛弃掉,那么你唯一可以做的事情,就是这个吗,你唯一可以做的事情,就是说你需要去建立,属于自己的大量的一手经验,那怎么去建立,属于自己的一手经验呢,你就必须要出门去实践。
这非常的关键,就是我们每天来拿去,很多的一些知识点,给大家讲一些技巧,讲一些招,讲一些理论,你一定要亲自去实验,亲自去实践,然后呢,把它变成你的一手经验,这非常非常的重要,那如果说你不去实践。
我去个例子,比如说一个导师告诉你说,男人和女人交往的准则是1 2 3 4 5 6,这六条,你也明白了这六条,然后呢,你把它对我条都记得,自己的本子上了,并且把这六条放在自己的手机里边。
手机的这个背包里边,天天就看天天就看天天一看,你可能都能够背过,你都能够背过,但是呢,这并没有太大的作用,对你来说并没有太大的作用,因为一旦到了,一旦你到了实践的这么一个现场,一旦你去实战。
一旦你去真的和女孩互动交往,你会发现你脑子,可能还是一片空白,那么这些东西的,告诉你的这些准则,要想走掉,可能就没有用了,而且你会发现即使说你按照这些规则去做,那么这些所谓的二十件他也可能会让你失败。
他也未必一定是准确的,所以说你必须要去建立自己属于,你自己的大人的一手经验,这也是为什么很多兄弟的,不明白,就是说,不明白怎么去两面,或者说为什么我老了两面,为什么我两个人,然后过来学习的原因。
就是因为他们,对吧,学在不往,学到很多有关于女人教那些经验,或者方法全部是错误的,那么有关于你以前知道的有关女人方面的一些要求经验,几乎都没有用,那么真的有效的是一手经验,对于女人的方面。
真的对于你来说有效的是一手经验,非常漂亮的重要,我自己要学了例子,就以前我讲讲课嘛,就是我以前带着的现家课的时候,我会发现就有些学员,这个学员很聪明,这些学员有很好的背景,比如说他是高校毕业的。
清华的北大的山大的,或者负担的或者甚至有些是国外留学的,你会发现他们的学界背景非常好,但是呢,这些学员里边是有那么一类的学员,他们在上这个现家课的时候,他们都记很多很多的笔记,记得非常的详细。
几乎把我说的每一句话都给记下来了,一次不差的记得他的笔记本上,但是你会发现这一部分的学员,他并不是学得非常好的,当然他也不是学得最差的,他并是这女的学得非常好的,那么这是为什么呢。
就是因为你即使你的模特色面里理解了,我讲了一些道理,你知道了这个道理,你知道了这个原理,但是呢,你虽然知道的这些原理并不代表,你真的就能够实践得出来,还有一个就是,你知道的这个道理并不代表。
你明白了这个道理,那怎么才能明白这个道理呢,那么对于男人和女人这方面来想的话,你就干嘛,要去收获属于自己的一手经验,那么在你收获你的一手经验的时候,你会干嘛,你会建立一个。
他的一个概念叫做参考经验经验库,做说在你去实践,你的一手经验的时候,你会建立一个属于你自己的一个参考经验库,什么叫参考经验库呢,我去给他一个例子,比如说你以前你长这么大,你跟三个女孩交往过。
你跟三个女孩约会过,你跟三个女孩谈过恋爱,你跟三个女孩发生过最后一刻的关系,然后成为了男女朋友,也就是你谈过三个女朋友,对啊,谈过三个女朋友,呃,那么你干嘛,那么你就拥有了这。
三次全程和女孩交往的这个经验,对不对,那么我们把这个经验就交到他们了,把这个经验就交到你的参考经验库,或者叫做参考经历,那么明白了吗,这叫参考经验库,这个经验就交到参考经验库,那么当年的这个参考经验库。
他大到一定程度的时候,不如说你跟三个女孩交往过,他跟你和十个女孩交往过,或者说你跟二十个女孩交往过,你这样的参考经验库,是完全不一样的呀,对不对,你跟很越来越多的女孩接触过,你跟很多女孩接触过。
你跟很多女孩真的去轰动过实践过,那么这样的参考经验库,对于你来说,他的作用更大,当你遇到下一个女孩的时候,你在跟下一个女孩交往的时候,你往到这个参考经验库,可以很好的帮助你。
但是如果你以前没有这个参考经验库,非常的缺乏,几乎没有参考经验库,那么可能就就白搭,就没办法,就说知道下一个女孩怎么去做,能理解吗,你想想,如果说你之前只和三个女孩交往过,然后你想凭的这三个女孩交往的。
你想凭这三个女孩交往的这个经验,然后去跟一个新认识的一个女孩去交往,那么可能会比较吃力,你们想一下你们自己,你们可能跟三个女孩交往都没有交往过,你们可能你的参考经验库是零或者是一。
你们的孙虎我们希望你们是建立属于,你自己的大量的参考经验库,然后呢,对吧,当你的这个参考经验库越大的熟,你在遇到女孩的熟,你会变得更加的轻松,更加容易,啊,啊,所以你们要记住啊,就是我们。
我们告诉你的有关女人的一些方面,我们告诉你有关女人的一些方面的一些经验,他是有用的,我们在干嘛,我们在把我们的参考经验库,就分享给你们,我们在把我们和女孩相处熟的一些经历,去告诉你们,那你得到什么。
你得到了大量的二手经验,那么这些二手经验对你来说是有用的,你就是我们讲的这东西啊,我们讲的这东西,我们真实和女孩交往过的一些经历,我们真实和女孩约会过的一些经历,我们真实和女孩互动过的一些经历。
然后我们把它总结出来,我把我的参考经验库去分享给你们,然后你把我的这些参考经验库给吸收,吸入到你的属于你自己的那个二手经验里面去,然后再把你的二手经验变成你的一手经验,那么这时候对吧,他是有用的。
他是有效的,你需要按照我给你们讲的这东西,我给你们的切直到,全衣服一步去实践,就真的去去这个去做,你会发现你会变得越来越成功,那么当可能说,当你去实践的过程当中,你突然之间,想到了。
比如说我哪一阶可能会长到一些知识点,你会想,原来当年这个6天老师长的内句,话是对的,原来当时的天老师长的那个客,原来是这个样子的,我真的感受到了,那当你真的去实践之后,你才会明白。
我讲的那些知识哪些是有用的,是不是有用的,那明白了,那么你在实践的过程当中,你会看到女孩真实的反馈,那你在这种真实的反馈之下,你真对女孩做出了一些回应,那么这个时候,你才会真正地去理解。
我讲的很多的一些知识,那这就是医手经验,和二手经验的一个区别,以及去建立属于你自己的参考经验库,那这就是非常短暂的一个内容,并且非常关键,非常的关键,这个东西就是说为什么你们很多人,一直学习这个东西。
才能学了很长时间,但都没有学会,就是因为你们没有建立属于你们自己的医手经验,以及没有建立属于你们自己的参考经验库,所以说从你们听完这些话,等之后,希望你们每个人真实的去实践,真实的去做。
真实的去收获属于你自己大量的医手经验,然后去建立属于你自己的参考经验库,那我们所讲的这些东西全部是,告诉你们一些我自己的参考经验库,我自己的医手经验去分享给你们,它是有效的,它是有用的。
但是你在社会上去接触到了很多的一些,二手经验,它未必属于用了,它几乎都是错误的,那么我今天讲的这三个知识点,能理解的可以敲6,这三个知识点,一个二手经验一个医手经验一个参考经验库。
这三个知识点明白了去敲6,如果说你也想去建立属于你自己的医手经验,那么可以去报名参加我们的魅力男生的课程,然后去通过学习我们的这些知识,按照我们讲的这些理论去操作,去收获属于你自己的参考经验库。
去收获属于你自己的医手经验,我们会把我们的所有的经验,我们自己的医手经验,我们自己的参考经验库,去分享在我们的课程里边,我们的魅力男生里边,我们的魅力形象,我们的魅力内合我们的聊职长我们的课程。
我们的其他的课程全部会分享在里边,然后你爱着我们讲的这些知识,然后亲自去实践,亲自去操作,你会收获属于你自己的参考经验库,你会发现你和女人交往这方面会变得越来越简单,越来越轻松,那么下面给大家看一下。
这就是我们报名的,这就是报名我们课程的一些学员,然后大家看到这个报名完我们课程的一些学员,它就收获了属于自己的参考经验库,魅子主动到他家里去要去找他,主动到他家里去的,魅子说的。
那你明天中午下班能去你家吗,然后魅子说把他们约好时间,然后去他家里边,那么这就是当你学过课程之后,按照我们讲的这些知识点之后,你就会收获属于你自己的参考经验库,你会亲自去操作,你会成功。
那么这就是我们实践的学员,当然我们的群女人,我们的内部群女人有很多很多这样的学员,如果说你也想跟着这些诗歌诗地们一起去学习,可以去报名参加我们官方的课程,你想成了一个有没有男人,你想真的去了解独懂女人。
可以去学习我们的魅子男人的课程,学会真正的去习近女孩,和女孩聊天,又会得技巧让女孩喜欢让你,OK那么这就是我们今天的内容对吧,没有关注我们公众号,去关注小们的公众号,爱商链学然后回复课程两个字。
关注完公众号,最后回复课程两个字,之后会进入一个我们的课程面铺,进入我们的课程店铺之后,你可以去选择要购买的课程,比如说你要去购买美丽男生课程,比如说你要去购买我们的聊日两个。
或者你要去购买我们其他的课程,都是可以去自由的选择,自由的去购买的,关分的课程,然后购买完我们的课程之后,我们的课服会给你办理入学,会给你进入捞入我们的官方的一个群,然后跟你的试歌师弟们一起去学习。
好那么最后就送给来一首试,谁也关不上,你心里的在身窗,因为残免的网室还远在远方,一起让自己守护一个人的时光,不如让我们灰手在纯时间飘荡,我这两天那么这就是今天的全部内容,我这两天都在这边。
我这两天都在这边 | PypiClean |
/cn_hyperarr-0.0.1.tar.gz/cn_hyperarr-0.0.1/docs/build/html/_static/localization/he/MathMenu.js | MathJax.Localization.addTranslation("he","MathMenu",{version:"2.7.4",isLoaded:true,strings:{Show:"\u05DC\u05D4\u05E6\u05D9\u05D2 \u05E0\u05D5\u05E1\u05D7\u05D0\u05D5\u05EA \u05D1\u05EA\u05D5\u05E8",MathMLcode:"\u05E7\u05D5\u05D3 MathML",OriginalMathML:"MathML \u05DE\u05E7\u05D5\u05E8\u05D9",TeXCommands:"\u05E4\u05E7\u05D5\u05D3\u05D5\u05EA TeX",AsciiMathInput:"\u05E7\u05DC\u05D8 AsciiMathML",Original:"\u05D4\u05E6\u05D5\u05E8\u05D4 \u05D4\u05DE\u05E7\u05D5\u05E8\u05D9\u05EA",ErrorMessage:"\u05D4\u05D5\u05D3\u05E2\u05EA \u05E9\u05D2\u05D9\u05D0\u05D4",Annotation:"\u05E4\u05D9\u05E8\u05D5\u05E9",TeX:"TeX",StarMath:"StarMath",Maple:"Maple",ContentMathML:"Content MathML",OpenMath:"OpenMath",texHints:"\u05DC\u05D4\u05E6\u05D9\u05D2 \u05E8\u05DE\u05D6\u05D9\u05DD \u05E9\u05DC TeX \u05D1\u05BEMathML",Settings:"\u05D4\u05D2\u05D3\u05E8\u05D5\u05EA \u05DE\u05EA\u05DE\u05D8\u05D9\u05E7\u05D4",ZoomTrigger:"\u05DE\u05D4 \u05D2\u05D5\u05E8\u05DD \u05DC\u05E7\u05D9\u05E8\u05D5\u05D1",Hover:"\u05DE\u05E2\u05D1\u05E8 \u05E2\u05DB\u05D1\u05E8",Click:"\u05DC\u05D7\u05D9\u05E6\u05D4",DoubleClick:"\u05DC\u05D7\u05D9\u05E6\u05D4 \u05DB\u05E4\u05D5\u05DC\u05D4",NoZoom:"\u05DC\u05DC\u05D0 \u05E7\u05D9\u05E8\u05D5\u05D1",TriggerRequires:"\u05D4\u05E4\u05E2\u05DC\u05EA \u05D4\u05E7\u05D9\u05E8\u05D5\u05D1 \u05D3\u05D5\u05E8\u05E9\u05EA:",Option:"Option",Alt:"Alt",Command:"Command",Control:"Ctrl",Shift:"Shift",ZoomFactor:"\u05E8\u05DE\u05EA \u05E7\u05D9\u05E8\u05D5\u05D1",Renderer:"\u05DE\u05E6\u05D9\u05D2 \u05E0\u05D5\u05E1\u05D7\u05D0\u05D5\u05EA",MPHandles:"\u05DC\u05D0\u05E4\u05E9\u05E8 \u05DC\u05BEMathPlayer \u05DC\u05D8\u05E4\u05DC \u05D1\u05BE:",MenuEvents:"\u05D0\u05D9\u05E8\u05D5\u05E2\u05D9 \u05EA\u05E4\u05E8\u05D9\u05D8",MouseEvents:"\u05D0\u05D9\u05E8\u05D5\u05E2\u05D9 \u05E2\u05DB\u05D1\u05E8",MenuAndMouse:"\u05D0\u05D9\u05E8\u05D5\u05E2\u05D9 \u05E2\u05DB\u05D1\u05E8 \u05D5\u05D0\u05D9\u05E8\u05D5\u05E2\u05D9 \u05EA\u05E4\u05E8\u05D9\u05D8",FontPrefs:"\u05D4\u05E2\u05D3\u05E4\u05D5\u05EA \u05D2\u05D5\u05E4\u05E0\u05D9\u05DD",ForHTMLCSS:"\u05E2\u05D1\u05D5\u05E8 HTML-CSS:",Auto:"\u05D0\u05D5\u05D8\u05D5\u05DE\u05D8\u05D9",TeXLocal:"TeX (\u05DE\u05E7\u05D5\u05DE\u05D9)",TeXWeb:"TeX (\u05D1\u05E8\u05E9\u05EA)",TeXImage:"TeX (\u05EA\u05DE\u05D5\u05E0\u05D4)",STIXLocal:"STIX (\u05DE\u05E7\u05D5\u05DE\u05D9)",STIXWeb:"STIX (\u05D1\u05E8\u05E9\u05EA)",AsanaMathWeb:"Asana Math (\u05D1\u05E8\u05E9\u05EA)",GyrePagellaWeb:"Gyre Pagella (\u05D1\u05E8\u05E9\u05EA)",GyreTermesWeb:"Gyre Termes (\u05D1\u05E8\u05E9\u05EA)",LatinModernWeb:"Latin Modern (\u05D1\u05E8\u05E9\u05EA)",NeoEulerWeb:"Neo Euler (\u05D1\u05E8\u05E9\u05EA)",ContextMenu:"\u05EA\u05E4\u05E8\u05D9\u05D8 \u05D4\u05B6\u05E7\u05E9\u05E8",Browser:"\u05D3\u05E4\u05D3\u05E4\u05DF",Scale:"\u05DC\u05D4\u05EA\u05D0\u05D9\u05DD \u05D0\u05EA \u05D4\u05D2\u05D5\u05D3\u05DC \u05E9\u05DC \u05DB\u05DC \u05D4\u05E0\u05D5\u05E1\u05D7\u05D0\u05D5\u05EA...",Discoverable:"\u05DC\u05D4\u05D0\u05D9\u05E8 \u05D1\u05DE\u05E2\u05D1\u05E8 \u05E2\u05DB\u05D1\u05E8",Locale:"\u05E9\u05E4\u05D4",LoadLocale:"\u05DC\u05D8\u05E2\u05D5\u05DF \u05DE\u05DB\u05EA\u05D5\u05D1\u05EA...",About:"\u05D0\u05D5\u05D3\u05D5\u05EA MathJax",Help:"\u05E2\u05D6\u05E8\u05D4 \u05E9\u05DC MathJax",localTeXfonts:"\u05DE\u05E9\u05DE\u05E9\u05D9\u05DD \u05D2\u05D5\u05E4\u05E0\u05D9 TeX \u05DE\u05E7\u05D5\u05DE\u05D9\u05D9\u05DD",webTeXfonts:"\u05DE\u05E9\u05DE\u05E9\u05D9\u05DD 
\u05D2\u05D5\u05E4\u05E0\u05D9 TeX \u05DE\u05D4\u05E8\u05E9\u05EA",imagefonts:"\u05DE\u05E9\u05DE\u05E9\u05D9\u05DD \u05D2\u05D5\u05E4\u05E0\u05D9 \u05EA\u05DE\u05D5\u05E0\u05D4",localSTIXfonts:"\u05DE\u05E9\u05DE\u05E9\u05D9\u05DD \u05D2\u05D5\u05E4\u05E0\u05D9 STIX \u05DE\u05E7\u05D5\u05DE\u05D9\u05D9\u05DD",webSVGfonts:"\u05DE\u05E9\u05DE\u05E9\u05D9\u05DD \u05D2\u05D5\u05E4\u05E0\u05D9 SVG \u05DE\u05D4\u05E8\u05E9\u05EA",genericfonts:"\u05DE\u05E9\u05DE\u05E9\u05D9\u05DD \u05D2\u05D5\u05E4\u05E0\u05D9 \u05D9\u05D5\u05E0\u05D9\u05E7\u05D5\u05D3 \u05DB\u05DC\u05DC\u05D9\u05D9\u05DD",wofforotffonts:"\u05D2\u05D5\u05E4\u05E0\u05D9 woff \u05D0\u05D5 otf",eotffonts:"\u05D2\u05D5\u05E4\u05E0\u05D9 eot",svgfonts:"\u05D2\u05D5\u05E4\u05E0\u05D9 svg",WebkitNativeMMLWarning:"\u05D1\u05D3\u05E4\u05D3\u05E4\u05DF \u05E9\u05DC\u05DA \u05D0\u05D9\u05DF \u05EA\u05DE\u05D9\u05DB\u05D4 \u05DE\u05D5\u05D1\u05E0\u05D9\u05EA \u05D1\u05BEMathML, \u05D0\u05D6 \u05DE\u05E2\u05D1\u05E8 \u05DC\u05E4\u05DC\u05D8 MathML \u05E2\u05DC\u05D5\u05DC \u05DC\u05D4\u05E4\u05D5\u05DA \u05D0\u05EA \u05D4\u05E0\u05D5\u05E1\u05D7\u05D0\u05D5\u05EA \u05DC\u05D1\u05DC\u05EA\u05D9\u05BE\u05E7\u05E8\u05D9\u05D0\u05D5\u05EA",MSIENativeMMLWarning:"\u05D0\u05D9\u05E0\u05D8\u05E8\u05E0\u05D8 \u05D0\u05E7\u05E1\u05E4\u05DC\u05D5\u05E8\u05E8 \u05D3\u05D5\u05E8\u05E9 \u05EA\u05D5\u05E1\u05E3 MathPlayer \u05DB\u05D3\u05D9 \u05DC\u05E2\u05D1\u05D3 \u05E4\u05DC\u05D8 MathML.",OperaNativeMMLWarning:"\u05D4\u05EA\u05DE\u05D9\u05DB\u05D4 \u05E9\u05DC \u05D0\u05D5\u05E4\u05E8\u05D4 \u05D1\u05BEMathML \u05DE\u05D5\u05D2\u05D1\u05DC\u05EA, \u05D0\u05D6 \u05DE\u05E2\u05D1\u05E8 \u05DC\u05BEMathML \u05E2\u05DC\u05D5\u05DC \u05DC\u05D2\u05E8\u05D5\u05DD \u05DC\u05D7\u05DC\u05E7 \u05DE\u05D4\u05D1\u05D9\u05D8\u05D5\u05D9\u05D9\u05DD \u05DC\u05D4\u05D9\u05D5\u05EA \u05DE\u05D5\u05E6\u05D2\u05D9\u05DD \u05D1\u05D0\u05D5\u05E4\u05DF \u05D2\u05E8\u05D5\u05E2.",SafariNativeMMLWarning:"\u05D4\u05EA\u05DE\u05D9\u05DB\u05D4 \u05D4\u05DE\u05D5\u05D1\u05E0\u05D9\u05EA \u05E9\u05DC \u05D4\u05D3\u05E4\u05D3\u05E4\u05DF \u05E9\u05DC\u05DA \u05D1\u05BEMathML \u05D0\u05D9\u05E0\u05D4 \u05DE\u05DE\u05DE\u05E9\u05EA \u05D0\u05EA \u05DB\u05DC \u05D4\u05D9\u05DB\u05D5\u05DC\u05D5\u05EA \u05E9\u05BEMathJax \u05DE\u05E9\u05EA\u05DE\u05E9 \u05D1\u05D4\u05DF, \u05D0\u05D6 \u05D9\u05D9\u05EA\u05DB\u05DF \u05E9\u05D1\u05D9\u05D8\u05D5\u05D9\u05D9\u05DD \u05D0\u05D7\u05D3\u05D9\u05DD \u05DC\u05D0 \u05D9\u05D5\u05E6\u05D2\u05D5 \u05DB\u05E8\u05D0\u05D5\u05D9.",FirefoxNativeMMLWarning:"\u05D4\u05EA\u05DE\u05D9\u05DB\u05D4 \u05D4\u05DE\u05D5\u05D1\u05E0\u05D9\u05EA \u05E9\u05DC \u05D4\u05D3\u05E4\u05D3\u05E4\u05DF \u05E9\u05DC\u05DA \u05D1\u05BEMathML \u05D0\u05D9\u05E0\u05D4 \u05DE\u05DE\u05DE\u05E9\u05EA \u05D0\u05EA \u05DB\u05DC \u05D4\u05D9\u05DB\u05D5\u05DC\u05D5\u05EA \u05E9\u05BEMathJax \u05DE\u05E9\u05EA\u05DE\u05E9 \u05D1\u05D4\u05DF, \u05D0\u05D6 \u05D9\u05D9\u05EA\u05DB\u05DF \u05E9\u05D1\u05D9\u05D8\u05D5\u05D9\u05D9\u05DD \u05D0\u05D7\u05D3\u05D9\u05DD \u05DC\u05D0 \u05D9\u05D5\u05E6\u05D2\u05D5 \u05DB\u05E8\u05D0\u05D5\u05D9.",MSIESVGWarning:"\u05EA\u05DE\u05D9\u05DB\u05D4 \u05D1\u05BESVG \u05D0\u05D9\u05E0\u05D4 \u05DE\u05DE\u05D5\u05DE\u05E9\u05EA \u05D1\u05D0\u05D9\u05E0\u05D8\u05E8\u05E0\u05D8 \u05D0\u05E7\u05E1\u05E4\u05DC\u05D5\u05E8\u05E8 \u05DC\u05E4\u05E0\u05D9 \u05D2\u05E8\u05E1\u05D4 9 \u05D0\u05D5 \u05DB\u05D0\u05E9\u05E8 \u05D4\u05D3\u05E4\u05D3\u05E4\u05DF \u05E4\u05D5\u05E2\u05DC \u05D1\u05DE\u05E6\u05D1 \u05EA\u05D0\u05D9\u05DE\u05D5\u05EA 
\u05DC\u05D2\u05E8\u05E1\u05D4 8 \u05D5\u05DE\u05D8\u05D4. \u05DE\u05E2\u05D1\u05E8 \u05DC\u05E4\u05DC\u05D8 \u05D1\u05BESVG \u05D9\u05D2\u05E8\u05D5\u05DD \u05DC\u05E0\u05D5\u05E1\u05D7\u05D0\u05D5\u05EA \u05DE\u05EA\u05DE\u05D8\u05D9\u05D5\u05EA \u05DC\u05D0 \u05D4\u05D9\u05D5\u05EA \u05DE\u05D5\u05E6\u05D2\u05D5\u05EA \u05DB\u05E8\u05D0\u05D5\u05D9.",LoadURL:"\u05D8\u05E2\u05D9\u05E0\u05EA \u05E0\u05EA\u05D5\u05E0\u05D9 \u05EA\u05E8\u05D2\u05D5\u05DD \u05DE\u05D4\u05DB\u05EA\u05D5\u05D1\u05EA \u05D4\u05D1\u05D0\u05D4:",BadURL:"\u05D4\u05DB\u05EA\u05D5\u05D1\u05EA \u05E6\u05E8\u05D9\u05DB\u05D4 \u05DC\u05DB\u05E4\u05E0\u05D5\u05EA \u05DC\u05E7\u05D5\u05D1\u05E5 \u05D2'\u05D0\u05D5\u05D5\u05D4 \u05E1\u05E7\u05E8\u05D9\u05E4\u05D8 \u05E9\u05DE\u05D2\u05D3\u05D9\u05E8 \u05E0\u05EA\u05D5\u05E0\u05D9 \u05EA\u05E8\u05D2\u05D5\u05DD \u05E9\u05DC MathJax. \u05E7\u05D5\u05D1\u05E5 \u05D4\u05D2'\u05D0\u05D5\u05D5\u05E1 \u05E1\u05E7\u05E8\u05D9\u05E4\u05D8 \u05D0\u05DE\u05D5\u05E8 \u05DC\u05D4\u05E1\u05EA\u05D9\u05D9\u05DD \u05D1\u05BEjs.",BadData:"\u05E0\u05DB\u05E9\u05DC\u05D4 \u05D8\u05E2\u05D9\u05E0\u05EA \u05E0\u05EA\u05D5\u05E0\u05D9 \u05EA\u05E8\u05D2\u05D5\u05DD \u05DE\u05BE%1",SwitchAnyway:"\u05DC\u05E9\u05E0\u05D5\u05EA \u05D0\u05EA \u05D4\u05DE\u05E6\u05D9\u05D2 \u05D1\u05DB\u05DC \u05D6\u05D0\u05EA?\n\n(\u05D9\u05E9 \u05DC\u05DC\u05D7\u05D5\u05E5 \u05D0\u05D9\u05E9\u05D5\u05E8 \u05DC\u05DE\u05E2\u05D1\u05E8 \u05D0\u05D5 \u05D1\u05D9\u05D8\u05D5\u05DC \u05DC\u05D4\u05DE\u05E9\u05DA \u05E2\u05DD \u05D4\u05E6\u05D9\u05D2 \u05D4\u05E0\u05D5\u05DB\u05D7\u05D9)",ScaleMath:"\u05DC\u05D4\u05EA\u05D0\u05D9\u05DD \u05D0\u05EA \u05D4\u05D2\u05D5\u05D3\u05DC \u05E9\u05DC \u05DB\u05DC \u05D4\u05E0\u05D5\u05E1\u05D7\u05D0\u05D5\u05EA (\u05D9\u05D7\u05E1\u05D9\u05EA \u05DC\u05D8\u05E7\u05E1\u05D8 \u05D4\u05E1\u05DE\u05D5\u05DA) \u05D1\u05BE:",NonZeroScale:"\u05D4\u05D2\u05D5\u05D3\u05DC \u05DC\u05D0 \u05D9\u05DB\u05D5\u05DC \u05DC\u05D4\u05D9\u05D5 \u05D0\u05E4\u05E1",PercentScale:"\u05D4\u05D2\u05D5\u05D3\u05DC \u05D0\u05DE\u05D5\u05E8 \u05DC\u05D4\u05D9\u05D5\u05EA \u05D1\u05D0\u05D7\u05D5\u05D6\u05D9\u05DD (\u05DC\u05DE\u05E9\u05DC 120%%)",IE8warning:"\u05D6\u05D4 \u05D9\u05DB\u05D1\u05D4 \u05D0\u05EA \u05EA\u05E4\u05E8\u05D9\u05D8 MathJax \u05D5\u05D0\u05EA \u05D0\u05E4\u05E9\u05E8\u05D5\u05D9\u05D5\u05EA \u05D4\u05E7\u05D9\u05E8\u05D5\u05D1, \u05D0\u05D1\u05DC \u05D0\u05E4\u05E9\u05E8 \u05DC\u05E2\u05E9\u05D5\u05EA \u05DC\u05D7\u05D9\u05E6\u05D4 \u05E2\u05DD Alt \u05E2\u05DC \u05D1\u05D9\u05D8\u05D5\u05D9 \u05DB\u05D3\u05D9 \u05DC\u05E7\u05D1\u05DC \u05D8\u05EA \u05EA\u05E4\u05E8\u05D9\u05D8 M\uFFFDathJax \u05D1\u05DE\u05E7\u05D5\u05DD \u05D6\u05D4.\n\n\u05D4\u05D0\u05DD \u05D1\u05D0\u05DE\u05EA \u05DC\u05E9\u05E0\u05D5\u05EA \u05D0\u05EA \u05D4\u05D4\u05D2\u05D3\u05E8\u05D5\u05EA \u05E9\u05DC MathJax?",IE9warning:"\u05EA\u05E4\u05E8\u05D9\u05D8 \u05D4\u05D4\u05E7\u05E9\u05E8 \u05E9\u05DC MathJax \u05D9\u05DB\u05D5\u05D1\u05D4, \u05D0\u05D1\u05DC \u05D0\u05E4\u05E9\u05E8 \u05DC\u05E2\u05E9\u05D5\u05EA \u05DC\u05D7\u05D9\u05E6\u05D4 \u05E2\u05DD Alt \u05E2\u05DC \u05D1\u05D9\u05D8\u05D5\u05D7 \u05DB\u05D3\u05D9 \u05DC\u05E7\u05D1\u05DC \u05D0\u05EA \u05EA\u05E4\u05E8\u05D9\u05D8 MathJax.",NoOriginalForm:"\u05D4\u05E6\u05D5\u05E8\u05D4 \u05D4\u05DE\u05E7\u05D5\u05E8\u05D9\u05EA \u05D0\u05D9\u05E0\u05D4 \u05D6\u05DE\u05D9\u05E0\u05D4",Close:"\u05E1\u05D2\u05D9\u05E8\u05D4",EqSource:"\u05DE\u05E7\u05D5\u05E8 \u05D4\u05DE\u05E9\u05D5\u05D5\u05D0\u05D4 \u05E9\u05DC 
MathJax",CloseAboutDialog:"\u05E1\u05D2\u05D9\u05E8\u05EA \u05D7\u05DC\u05D5\u05DF \u05D3\u05D5\u05BE\u05E9\u05D9\u05D7 \u05D0\u05D5\u05D3\u05D5\u05EA MathJax",FastPreview:"\u05EA\u05E6\u05D5\u05D2\u05D4 \u05DE\u05E7\u05D3\u05D9\u05DE\u05D4 \u05DE\u05D4\u05D9\u05E8\u05D4",AssistiveMML:"MathML \u05E0\u05D2\u05D9\u05E9",InTabOrder:"\u05DC\u05DB\u05DC\u05D5\u05DC \u05D1\u05E1\u05D3\u05E8 \u05D4\u05D8\u05D0\u05D1\u05D9\u05DD"}});MathJax.Ajax.loadComplete("[MathJax]/localization/he/MathMenu.js"); | PypiClean |
/django-uni-form-0.9.0.tar.gz/django-uni-form-0.9.0/uni_form/helper.py | from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.safestring import mark_safe
from utils import render_field
class FormHelpersException(Exception):
"""
This is raised when building a form via helpers throws an error.
We want to catch form helper errors as soon as possible because
debugging templatetags is never fun.
"""
pass
class FormHelper(object):
"""
This class controls the form rendering behavior of the form passed to
the `{% uni_form %}` tag. For doing so you will need to set its attributes
and pass the corresponding helper object to the tag::
{% uni_form form form.helper %}
Let's see what attributes you can set and what form behaviors they apply to:
**form_method**: Specifies form method attribute.
        You can set it to 'POST' or 'GET'. Defaults to 'POST'.
**form_action**: Applied to the form action attribute:
- Can be a named url in your URLconf that can be executed via the `{% url %}` template tag. \
Example: 'show_my_profile'. In your URLconf you could have something like::
url(r'^show/profile/$', 'show_my_profile_view', name = 'show_my_profile')
- It can simply point to a URL '/whatever/blabla/'.
**form_id**: Generates a form id for dom identification.
If no id provided then no id attribute is created on the form.
    **form_class**: String containing space-separated CSS classes to be applied
        to the form class attribute. The form will always have by default
'uniForm' class.
**form_tag**: It specifies if <form></form> tags should be rendered when using a Layout.
If set to False it renders the form without the <form></form> tags. Defaults to True.
**form_error_title**: If a form has `non_field_errors` to display, they
        are rendered in a div. You can set the div's title with this attribute.
Example: "Oooops!" or "Form Errors"
**formset_error_title**: If a formset has `non_form_errors` to display, they
        are rendered in a div. You can set the div's title with this attribute.
    **form_style**: Uni-form has two different built-in form styles. You can choose
your favorite. This can be set to "default" or "inline". Defaults to "default".
Public Methods:
**add_input(input)**: You can add input buttons using this method. Inputs
added using this method will be rendered at the end of the form/formset.
**add_layout(layout)**: You can add a `Layout` object to `FormHelper`. The Layout
specifies in a simple, clean and DRY way how the form fields should be rendered.
You can wrap fields, order them, customize pretty much anything in the form.
    The best way to add a helper to a form is to add a property named helper to the form
    that returns a customized `FormHelper` object::
from uni_form import helpers
class MyForm(forms.Form):
title = forms.CharField(_("Title"))
@property
def helper(self):
helper = helpers.FormHelper()
helper.form_id = 'this-form-rocks'
helper.form_class = 'search'
submit = helpers.Submit('submit','Submit')
helper.add_input(submit)
[...]
return helper
You can use it in a template doing::
{% load uni_form_tags %}
<html>
<body>
<div id="where-I-want-the-generated-form">
{% uni_form form form.helper %}
</div>
</body>
</html>
"""
_form_method = 'post'
_form_action = ''
_form_style = 'default'
form_id = ''
form_class = ''
inputs = []
layout = None
form_tag = True
form_error_title = None
formset_error_title = None
def __init__(self):
self.inputs = self.inputs[:]
def get_form_method(self):
return self._form_method
def set_form_method(self, method):
if method.lower() not in ('get', 'post'):
raise FormHelpersException('Only GET and POST are valid in the \
form_method helper attribute')
self._form_method = method.lower()
# we set properties the old way because we want to support pre-2.6 python
form_method = property(get_form_method, set_form_method)
def get_form_action(self):
try:
return reverse(self._form_action)
except NoReverseMatch:
return self._form_action
def set_form_action(self, action):
self._form_action = action
# we set properties the old way because we want to support pre-2.6 python
form_action = property(get_form_action, set_form_action)
def get_form_style(self):
if self._form_style == "default":
return ''
if self._form_style == "inline":
return 'inlineLabels'
def set_form_style(self, style):
if style.lower() not in ('default', 'inline'):
raise FormHelpersException('Only default and inline are valid in the \
form_style helper attribute')
self._form_style = style.lower()
form_style = property(get_form_style, set_form_style)
def add_input(self, input_object):
self.inputs.append(input_object)
def add_layout(self, layout):
self.layout = layout
def render_layout(self, form, context):
"""
Returns safe html of the rendering of the layout
"""
form.rendered_fields = []
html = self.layout.render(form, self.form_style, context)
for field in form.fields.keys():
if not field in form.rendered_fields:
html += render_field(field, form, self.form_style, context)
return mark_safe(html)
def get_attributes(self):
"""
Used by the uni_form_tags to get helper attributes
"""
items = {}
items['form_method'] = self.form_method.strip()
items['form_tag'] = self.form_tag
items['form_style'] = self.form_style.strip()
if self.form_action:
items['form_action'] = self.form_action.strip()
if self.form_id:
items['id'] = self.form_id.strip()
if self.form_class:
items['class'] = self.form_class.strip()
if self.inputs:
items['inputs'] = self.inputs
if self.form_error_title:
items['form_error_title'] = self.form_error_title.strip()
if self.formset_error_title:
items['formset_error_title'] = self.formset_error_title.strip()
return items | PypiClean |
/keysync-0.2.2.tar.gz/keysync-0.2.2/otrapps/chatsecure.py | '''a module for reading and writing ChatSecure's OTR key data'''
from __future__ import print_function
import hashlib
import os
import sys
import pyjavaproperties
import subprocess
import tempfile
if __name__ == '__main__':
sys.path.insert(0, "../") # so the main() test suite can find otrapps module
import otrapps.util
class ChatSecureProperties():
path = '/data/data/info.guardianproject.otr.app.im/files/otr_keystore'
keyfile = 'otr_keystore'
encryptedkeyfile = keyfile + '.ofcaes'
files = (keyfile, encryptedkeyfile)
password = None
@staticmethod
def parse(filename):
'''parse the given file into the standard keydict'''
# the parsing and generation is done in separate passes so that
# multiple properties are combined into a single keydict per account,
# containing all of the fields
p = pyjavaproperties.Properties()
p.load(open(filename))
parsed = []
for item in p.items():
propkey = item[0]
if propkey.endswith('.publicKey'):
id = '.'.join(propkey.split('.')[0:-1])
parsed.append(('public-key', id, item[1]))
elif propkey.endswith('.publicKey.verified'):
keylist = propkey.split('.')
fingerprint = keylist[-3]
id = '.'.join(keylist[0:-3])
parsed.append(('verified', id, fingerprint))
elif propkey.endswith('.privateKey'):
id = '.'.join(propkey.split('.')[0:-1])
parsed.append(('private-key', id, item[1]))
# create blank keys for all IDs
keydict = dict()
for keydata in parsed:
name = keydata[1]
if not name in keydict:
keydict[name] = dict()
keydict[name]['name'] = name
keydict[name]['protocol'] = 'prpl-jabber'
if keydata[0] == 'private-key':
cleaned = keydata[2].replace('\\n', '')
numdict = otrapps.util.ParsePkcs8(cleaned)
for num in ('g', 'p', 'q', 'x'):
keydict[name][num] = numdict[num]
elif keydata[0] == 'verified':
keydict[name]['verification'] = 'verified'
fingerprint = keydata[2].lower()
otrapps.util.check_and_set(keydict[name], 'fingerprint', fingerprint)
elif keydata[0] == 'public-key':
cleaned = keydata[2].replace('\\n', '')
numdict = otrapps.util.ParseX509(cleaned)
for num in ('y', 'g', 'p', 'q'):
keydict[name][num] = numdict[num]
fingerprint = otrapps.util.fingerprint((numdict['y'], numdict['g'], numdict['p'], numdict['q']))
otrapps.util.check_and_set(keydict[name], 'fingerprint', fingerprint)
return keydict
@staticmethod
def write(keydict, savedir, password=None):
'''given a keydict, generate a chatsecure file in the savedir'''
p = pyjavaproperties.Properties()
for name, key in keydict.items():
# only include XMPP keys, since ChatSecure only supports XMPP
# accounts, so we avoid spreading private keys around
if key['protocol'] == 'prpl-jabber' or key['protocol'] == 'prpl-bonjour':
if 'y' in key:
p.setProperty(key['name'] + '.publicKey', otrapps.util.ExportDsaX509(key))
if 'x' in key:
if not password:
h = hashlib.sha256()
h.update(os.urandom(16)) # salt
h.update(bytes(key['x']))
password = h.digest().encode('base64')
p.setProperty(key['name'] + '.privateKey', otrapps.util.ExportDsaPkcs8(key))
if 'fingerprint' in key:
p.setProperty(key['name'] + '.fingerprint', key['fingerprint'])
if 'verification' in key and key['verification'] != None:
p.setProperty(key['name'] + '.' + key['fingerprint'].lower()
+ '.publicKey.verified', 'true')
fd, filename = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
p.store(f)
# if there is no password, then one has not been set, or there
        # are no private keys included in the file, so it's a lower
        # risk file. Encryption only needs to protect the meta data,
        # not the private keys. Therefore, it's not as bad to generate
# a "random" password here
if not password:
password = os.urandom(32).encode('base64')
        # encrypt the keystore file with the password using openssl AES-256-CBC
cmd = ['openssl', 'aes-256-cbc', '-pass', 'stdin', '-in', filename,
'-out', os.path.join(savedir, 'otr_keystore.ofcaes')]
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
ChatSecureProperties.password = password
print((p.communicate(password)))
@staticmethod
def _decrypt_ofcaes(ofcaes_filename, password):
''' Decrypt an encrypted key file (with user-supplied password).'''
# It might be a bad idea to write out this unencrypted file.
# get a tmp place to put the decrypted file
fd, filename = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
f.close()
# same as above, but with the -d flag to decrypt
cmd = ['openssl', 'aes-256-cbc', '-d', '-pass', 'stdin', '-in', ofcaes_filename,
'-out', filename]
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p.communicate(password)
return filename
#------------------------------------------------------------------------------#
# for testing from the command line:
def main(argv):
import pprint
print('ChatSecure stores its files in ' + ChatSecureProperties.path)
if len(sys.argv) == 2:
settingsfile = sys.argv[1]
else:
settingsfile = '../tests/chatsecure/otr_keystore'
p = ChatSecureProperties.parse(settingsfile)
print('----------------------------------------')
pprint.pprint(p)
print('----------------------------------------')
if __name__ == "__main__":
main(sys.argv[1:]) | PypiClean |
/specter_warden-0.6.0.6-py3-none-any.whl/warden/warden_decorators.py | import collections
import functools
import hashlib
import inspect
import os
import time
from functools import wraps
from glob import glob
import pandas as pd
def clean_all():
# pandas memoization clean
del_cached()
def pd_cache(func):
# Caches a Pandas DF into file for later use
# Memoization version for pandas DF
try:
os.mkdir('.pd_cache')
except Exception:
pass
@wraps(func)
def cache(*args, **kw):
# Get raw code of function as str and hash it
func_code = ''.join(inspect.getsourcelines(func)[0]).encode('utf-8')
hsh = hashlib.md5(func_code).hexdigest()[:6]
f = '.pd_cache/' + func.__name__ + '_' + hsh + '.pkl'
if os.path.exists(f):
df = pd.read_pickle(f)
return df
        # Delete any stale cache file for this function (`[func_name]_[hash].pkl` with a different hash)
for cached in glob('./.pd_cache/'+func.__name__+'_*.pkl'):
if (len(cached) - len(func.__name__)) == 20:
os.remove(cached)
# Write new
df = func(*args, **kw)
df.to_pickle(f)
return df
return cache
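# Hypothetical usage sketch (illustrative only, not part of the original module):
#
#   @pd_cache
#   def load_prices():
#       return pd.read_csv('prices.csv')   # 'prices.csv' is an assumed example file
#
# The first call runs the function and pickles the DataFrame under .pd_cache/,
# keyed on the function name plus a hash of its source code; later calls return
# the pickle until the source changes or del_cached()/clean_all() clears the cache.
# Note the key ignores call arguments, so this suits argument-free loaders best.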
def del_cached():
cached = os.listdir('./.pd_cache/')
if len(cached) > 0:
        # os.listdir returns bare file names, so join them back onto the cache dir
        [os.remove(os.path.join('./.pd_cache/', x)) for x in cached]
class memoized(object):
# Decorator. Caches a function's return value each time it is called.
# If called later with the same arguments, the cached value is returned
# (not reevaluated).
def __init__(self, func):
        # Initialize memoization for this function
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
return self.func.__doc__
def __get__(self, obj, objtype):
return functools.partial(self.__call__, obj)
# Clears the cache - called when there are changes that may affect the result
# of the function
def clear(self):
self.cache = {}
class MWT(object):
# Decorator that caches the result of a function until a given timeout (in seconds)
# Helpful when running complicated calculations that are used more than once
# Source: http://code.activestate.com/recipes/325905-memoize-decorator-with-timeout/
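    # Hypothetical usage sketch (illustrative only, not part of the original class):
    #
    #   @MWT(timeout=20)
    #   def fx_rate(base, quote):
    #       ...   # slow lookup or calculation
    #
    # Calls with the same arguments within 20 seconds return the cached value;
    # once the timeout passes (or collect() purges stale entries) the wrapped
    # function runs again.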
_caches = {}
_timeouts = {}
def __init__(self, timeout=2):
self.timeout = timeout
def collect(self):
# Clear cache of results which have timed out
for func in self._caches:
cache = {}
for key in self._caches[func]:
if (time.time() -
self._caches[func][key][1]) < self._timeouts[func]:
cache[key] = self._caches[func][key]
self._caches[func] = cache
def __call__(self, f):
self.cache = self._caches[f] = {}
self._timeouts[f] = self.timeout
def func(*args, **kwargs):
kw = sorted(kwargs.items())
key = (args, tuple(kw))
try:
# Using memoized function only if still on time
v = self.cache[key]
if (time.time() - v[1]) > self.timeout:
raise KeyError
except (KeyError, TypeError):
# Need to recalculate
try:
v = self.cache[key] = f(*args, **kwargs), time.time()
except TypeError: # Some args passed as list return TypeError, skip
return (f(*args, **kwargs))
return v[0]
func.func_name = f.__name__
return func
def timing(method):
def timed(*args, **kw):
try:
print('\033[1;37;40m[TIMING STARTED] \033[92mFunction ',
method.__name__)
print(' Args: ', *args)
ts = time.time()
result = method(*args, **kw)
te = time.time()
print('\033[1;37;40m[TIME RESULT] \033[92mFunction ',
method.__name__, '\033[95mtime:', round((te - ts) * 1000,
1), 'ms')
print("\033[1;37;40m")
return result
except Exception:
pass
return timed | PypiClean |
/torch_raspi-0.4.0-cp35-cp35m-linux_armv7l.whl/torch/nn/modules/instancenorm.py | from .batchnorm import _BatchNorm
from .. import functional as F
class _InstanceNorm(_BatchNorm):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=False,
track_running_stats=False):
super(_InstanceNorm, self).__init__(
num_features, eps, momentum, affine, track_running_stats)
def _check_input_dim(self, input):
return NotImplemented
def _load_from_state_dict(self, state_dict, prefix, strict, missing_keys, unexpected_keys, error_msgs):
try:
version = state_dict._metadata[prefix[:-1]]["version"]
except (AttributeError, KeyError):
version = None
# at version 1: removed running_mean and running_var when
# track_running_stats=False (default)
if version is None and not self.track_running_stats:
running_stats_keys = []
for name in ('running_mean', 'running_var'):
key = prefix + name
if key in state_dict:
running_stats_keys.append(key)
if len(running_stats_keys) > 0:
error_msgs.append(
'Unexpected running stats buffer(s) {names} for {klass} '
'with track_running_stats=False. If state_dict is a '
'checkpoint saved before 0.4.0, this may be expected '
'because {klass} does not track running stats by default '
'since 0.4.0. Please remove these keys from state_dict. If '
'the running stats are actually needed, instead set '
'track_running_stats=True in {klass} to enable them. See '
'the documentation of {klass} for details.'
.format(names=" and ".join('"{}"'.format(k) for k in running_stats_keys),
klass=self.__class__.__name__))
for key in running_stats_keys:
state_dict.pop(key)
super(_InstanceNorm, self)._load_from_state_dict(
state_dict, prefix, strict, missing_keys, unexpected_keys, error_msgs)
def forward(self, input):
self._check_input_dim(input)
return F.instance_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps)
class InstanceNorm1d(_InstanceNorm):
r"""Applies Instance Normalization over a 2D or 3D input (a mini-batch of 1D
inputs with optional additional channel dimension) as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization`_ .
.. math::
        y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension separately
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
By default, this layer uses instance statistics computed from input data in
both training and evaluation modes.
If :attr:`track_running_stats` is set to ``True``, during training this
layer keeps running estimates of its computed mean and variance, which are
then used for normalization during evaluation. The running estimates are
kept with a default :attr:`momentum` of 0.1.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`
eps: a value added to the denominator for numerical stability. Default: 1e-5
momentum: the value used for the running_mean and running_var computation. Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``False``
Shape:
- Input: :math:`(N, C, L)`
- Output: :math:`(N, C, L)` (same shape as input)
Examples::
>>> # Without Learnable Parameters
>>> m = nn.InstanceNorm1d(100)
>>> # With Learnable Parameters
>>> m = nn.InstanceNorm1d(100, affine=True)
>>> input = torch.randn(20, 100, 40)
>>> output = m(input)
.. _`Instance Normalization: The Missing Ingredient for Fast Stylization`:
https://arxiv.org/abs/1607.08022
"""
def _check_input_dim(self, input):
if input.dim() != 3:
raise ValueError('expected 3D input (got {}D input)'
.format(input.dim()))
class InstanceNorm2d(_InstanceNorm):
r"""Applies Instance Normalization over a 4D input (a mini-batch of 2D inputs
with additional channel dimension) as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization`_ .
.. math::
        y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension separately
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
By default, this layer uses instance statistics computed from input data in
both training and evaluation modes.
If :attr:`track_running_stats` is set to ``True``, during training this
layer keeps running estimates of its computed mean and variance, which are
then used for normalization during evaluation. The running estimates are
kept with a default :attr:`momentum` of 0.1.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, H, W)`
eps: a value added to the denominator for numerical stability. Default: 1e-5
momentum: the value used for the running_mean and running_var computation. Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``False``
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples::
>>> # Without Learnable Parameters
>>> m = nn.InstanceNorm2d(100)
>>> # With Learnable Parameters
>>> m = nn.InstanceNorm2d(100, affine=True)
>>> input = torch.randn(20, 100, 35, 45)
>>> output = m(input)
.. _`Instance Normalization: The Missing Ingredient for Fast Stylization`:
https://arxiv.org/abs/1607.08022
"""
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
class InstanceNorm3d(_InstanceNorm):
r"""Applies Instance Normalization over a 5D input (a mini-batch of 3D inputs
with additional channel dimension) as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension separately
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
By default, this layer uses instance statistics computed from input data in
both training and evaluation modes.
If :attr:`track_running_stats` is set to ``True``, during training this
layer keeps running estimates of its computed mean and variance, which are
then used for normalization during evaluation. The running estimates are
kept with a default :attr:`momentum` of 0.1.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, D, H, W)`
eps: a value added to the denominator for numerical stability. Default: 1e-5
momentum: the value used for the running_mean and running_var computation. Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``False``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``False``
Shape:
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples::
>>> # Without Learnable Parameters
>>> m = nn.InstanceNorm3d(100)
>>> # With Learnable Parameters
>>> m = nn.InstanceNorm3d(100, affine=True)
>>> input = torch.randn(20, 100, 35, 45, 10)
>>> output = m(input)
.. _`Instance Normalization: The Missing Ingredient for Fast Stylization`:
https://arxiv.org/abs/1607.08022
"""
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError('expected 5D input (got {}D input)'
.format(input.dim()))
/networklab-1.6.2-py3-none-any.whl/netsim/modules/isis.py
from box import Box
from . import _Module,_routing
from . import bfd
from ..utils import log
from ..augment import devices
from ..data import validate
def isis_unnumbered(node: Box, features: Box) -> bool:
for af in ('ipv4','ipv6'):
is_unnumbered = False
for l in node.get('interfaces',[]):
is_unnumbered = is_unnumbered or \
'unnumbered' in l or \
(af in l and isinstance(l[af],bool) and l[af])
if is_unnumbered and not features.isis.unnumbered[af]:
log.error(
f'Device {node.device} used on node {node.name} cannot run IS-IS over {"unnumbered" if af == "ipv4" else "LLA"} {af} interfaces',
log.IncorrectValue,
'interfaces')
return False
OK = True
for l in node.get('interfaces',[]):
unnum_v4 = 'unnumbered' in l or ('ipv4' in l and l.ipv4 is True)
if unnum_v4 and \
len(l.neighbors) > 1 and \
not features.isis.unnumbered.network:
log.error(
f'Device {node.device} used on node {node.name} cannot run IS-IS over\n'+
f'.. unnumbered multi-access interfaces (link {l.name})',
log.IncorrectValue,
'interfaces')
OK = False
return OK
class ISIS(_Module):
def node_post_transform(self, node: Box, topology: Box) -> None:
isis_type = [ 'level-1', 'level-2', 'level-1-2' ]
features = devices.get_device_features(node,topology.defaults)
validate.must_be_string(
node,'isis.type',f'nodes.{node.name}',module='isis',valid_values=isis_type)
if not isis_unnumbered(node,features):
return
bfd.multiprotocol_bfd_link_state(node,'isis')
for l in node.get('interfaces',[]):
if _routing.external(l,'isis') or not (l.get('ipv4',False) or l.get('ipv6',False)):
l.pop('isis',None) # Don't run IS-IS on external interfaces, or l2-only
else:
_routing.passive(l,'isis')
err = _routing.network_type(l,'isis',['point-to-point'])
if err:
log.error(f'{err}\n... node {node.name} link {l}')
validate.must_be_string(
l,'isis.type',f'nodes.{node.name}.interfaces.{l.ifname}',module='isis',valid_values=isis_type)
#
# Final steps:
#
# * remove IS-IS from VRF interfaces
# * Calculate address families
# * Enable BFD
# * Remove ISIS module if there are no IS-IS enabled global interfaces
#
_routing.remove_unaddressed_intf(node,'isis')
_routing.remove_vrf_interfaces(node,'isis')
_routing.routing_af(node,'isis')
_routing.remove_unused_igp(node,'isis')
/py-evm-0.7.0a3.tar.gz/py-evm-0.7.0a3/eth/db/header.py
import functools
from typing import (
Iterable,
Sequence,
Tuple,
cast,
)
from eth_typing import (
BlockNumber,
Hash32,
)
from eth_utils import (
ValidationError,
encode_hex,
to_tuple,
)
from eth_utils.toolz import (
concat,
first,
sliding_window,
)
import rlp
from eth.abc import (
AtomicDatabaseAPI,
BlockHeaderAPI,
DatabaseAPI,
HeaderDatabaseAPI,
)
from eth.constants import (
GENESIS_PARENT_HASH,
)
from eth.db.chain_gaps import (
GAP_WRITES,
GENESIS_CHAIN_GAPS,
GapChange,
GapInfo,
fill_gap,
reopen_gap,
)
from eth.db.schema import (
SchemaV1,
)
from eth.exceptions import (
CanonicalHeadNotFound,
CheckpointsMustBeCanonical,
HeaderNotFound,
ParentNotFound,
)
from eth.rlp.sedes import (
chain_gaps,
)
from eth.typing import (
ChainGaps,
)
from eth.validation import (
validate_block_number,
validate_word,
)
from eth.vm.header import (
HeaderSedes,
)
class HeaderDB(HeaderDatabaseAPI):
def __init__(self, db: AtomicDatabaseAPI) -> None:
self.db = db
def get_header_chain_gaps(self) -> ChainGaps:
return self._get_header_chain_gaps(self.db)
@classmethod
def _get_header_chain_gaps(cls, db: DatabaseAPI) -> ChainGaps:
try:
encoded_gaps = db[SchemaV1.make_header_chain_gaps_lookup_key()]
except KeyError:
return GENESIS_CHAIN_GAPS
else:
return rlp.decode(encoded_gaps, sedes=chain_gaps)
@classmethod
def _update_header_chain_gaps(
cls,
db: DatabaseAPI,
persisted_header: BlockHeaderAPI,
base_gaps: ChainGaps = None,
) -> GapInfo:
# If we make many updates in a row, we can avoid reloading the integrity info by
# continuously caching it and providing it as a parameter to this API
if base_gaps is None:
base_gaps = cls._get_header_chain_gaps(db)
gap_change, gaps = fill_gap(persisted_header.block_number, base_gaps)
if gap_change is not GapChange.NoChange:
db.set(
SchemaV1.make_header_chain_gaps_lookup_key(),
rlp.encode(gaps, sedes=chain_gaps),
)
return gap_change, gaps
#
# Canonical Chain API
#
def get_canonical_block_hash(self, block_number: BlockNumber) -> Hash32:
return self._get_canonical_block_hash(self.db, block_number)
@staticmethod
def _get_canonical_block_hash(db: DatabaseAPI, block_number: BlockNumber) -> Hash32:
validate_block_number(block_number)
number_to_hash_key = SchemaV1.make_block_number_to_hash_lookup_key(block_number)
try:
encoded_key = db[number_to_hash_key]
except KeyError:
raise HeaderNotFound(
f"No canonical header for block number #{block_number}"
)
else:
return rlp.decode(encoded_key, sedes=rlp.sedes.binary)
def get_canonical_block_header_by_number(
self, block_number: BlockNumber
) -> BlockHeaderAPI:
return self._get_canonical_block_header_by_number(self.db, block_number)
@classmethod
def _get_canonical_block_header_by_number(
cls, db: DatabaseAPI, block_number: BlockNumber
) -> BlockHeaderAPI:
validate_block_number(block_number)
canonical_block_hash = cls._get_canonical_block_hash(db, block_number)
return cls._get_block_header_by_hash(db, canonical_block_hash)
def get_canonical_head(self) -> BlockHeaderAPI:
return self._get_canonical_head(self.db)
@classmethod
def _get_canonical_head(cls, db: DatabaseAPI) -> BlockHeaderAPI:
canonical_head_hash = cls._get_canonical_head_hash(db)
return cls._get_block_header_by_hash(db, canonical_head_hash)
@classmethod
def _get_canonical_head_hash(cls, db: DatabaseAPI) -> Hash32:
try:
return Hash32(db[SchemaV1.make_canonical_head_hash_lookup_key()])
except KeyError:
raise CanonicalHeadNotFound("No canonical head set for this chain")
#
# Header API
#
def get_block_header_by_hash(self, block_hash: Hash32) -> BlockHeaderAPI:
return self._get_block_header_by_hash(self.db, block_hash)
@staticmethod
def _get_block_header_by_hash(
db: DatabaseAPI, block_hash: Hash32
) -> BlockHeaderAPI:
"""
Returns the requested block header as specified by block hash.
Raises BlockNotFound if it is not present in the db.
"""
validate_word(block_hash, title="Block Hash")
try:
header_rlp = db[block_hash]
except KeyError:
raise HeaderNotFound(f"No header with hash {encode_hex(block_hash)} found")
return _decode_block_header(header_rlp)
def get_score(self, block_hash: Hash32) -> int:
return self._get_score(self.db, block_hash)
@staticmethod
def _get_score(db: DatabaseAPI, block_hash: Hash32) -> int:
try:
encoded_score = db[SchemaV1.make_block_hash_to_score_lookup_key(block_hash)]
except KeyError:
raise HeaderNotFound(f"No header with hash {encode_hex(block_hash)} found")
return rlp.decode(encoded_score, sedes=rlp.sedes.big_endian_int)
def header_exists(self, block_hash: Hash32) -> bool:
return self._header_exists(self.db, block_hash)
@staticmethod
def _header_exists(db: DatabaseAPI, block_hash: Hash32) -> bool:
validate_word(block_hash, title="Block Hash")
return block_hash in db
def persist_header(
self, header: BlockHeaderAPI
) -> Tuple[Tuple[BlockHeaderAPI, ...], Tuple[BlockHeaderAPI, ...]]:
return self.persist_header_chain((header,))
def persist_header_chain(
self,
headers: Iterable[BlockHeaderAPI],
genesis_parent_hash: Hash32 = GENESIS_PARENT_HASH,
) -> Tuple[Tuple[BlockHeaderAPI, ...], Tuple[BlockHeaderAPI, ...]]:
with self.db.atomic_batch() as db:
return self._persist_header_chain(db, headers, genesis_parent_hash)
def persist_checkpoint_header(self, header: BlockHeaderAPI, score: int) -> None:
with self.db.atomic_batch() as db:
return self._persist_checkpoint_header(db, header, score)
@classmethod
def _set_hash_scores_to_db(
cls, db: DatabaseAPI, header: BlockHeaderAPI, score: int
) -> int:
difficulty = header.difficulty
new_score = (
# In PoS, difficulty = 0 and score values do not need to work the same way
score + difficulty
if difficulty != 0
else score + header.block_number
)
db.set(
SchemaV1.make_block_hash_to_score_lookup_key(header.hash),
rlp.encode(new_score, sedes=rlp.sedes.big_endian_int),
)
return new_score
@classmethod
def _persist_checkpoint_header(
cls, db: DatabaseAPI, header: BlockHeaderAPI, score: int
) -> None:
db.set(
header.hash,
rlp.encode(header),
)
# Add new checkpoint header
previous_checkpoints = cls._get_checkpoints(db)
new_checkpoints = previous_checkpoints + (header.hash,)
db.set(
SchemaV1.make_checkpoint_headers_key(),
b"".join(new_checkpoints),
)
difficulty = header.difficulty
previous_score = (
# In PoS, difficulty = 0 and score values do not need to work the same way
score - difficulty
if difficulty != 0
else score - header.block_number
)
cls._set_hash_scores_to_db(db, header, previous_score)
cls._set_as_canonical_chain_head(db, header, GENESIS_PARENT_HASH)
_, gaps = cls._update_header_chain_gaps(db, header)
# check if the parent block number exists, and is not a match
# for checkpoint.parent_hash
parent_block_num = BlockNumber(header.block_number - 1)
try:
parent_hash = cls._get_canonical_block_hash(db, parent_block_num)
except HeaderNotFound:
# no parent to check
pass
else:
# User is asserting that the checkpoint must be canonical, so if the parent
# doesn't match, then the parent must not be canonical,
# and should be de-canonicalized.
if parent_hash != header.parent_hash:
# does the correct header exist in the database?
try:
true_parent = cls._get_block_header_by_hash(db, header.parent_hash)
except HeaderNotFound:
# True parent unavailable, just delete the now non-canonical one
cls._decanonicalize_single(db, parent_block_num, gaps)
else:
# True parent should have already been canonicalized during
# _set_as_canonical_chain_head()
raise ValidationError(
f"Why was a non-matching parent header {parent_hash!r} left as "
"canonical after _set_as_canonical_chain_head() and "
f"{true_parent} is available?"
)
cls._decanonicalize_descendant_orphans(db, header, new_checkpoints)
@classmethod
def _decanonicalize_descendant_orphans(
cls, db: DatabaseAPI, header: BlockHeaderAPI, checkpoints: Tuple[Hash32, ...]
) -> None:
# Determine if any children need to be de-canonicalized because they are not
# children of the new chain head
new_gaps = starting_gaps = cls._get_header_chain_gaps(db)
child_number = BlockNumber(header.block_number + 1)
try:
child = cls._get_canonical_block_header_by_number(db, child_number)
except HeaderNotFound:
# There is no canonical block here
next_invalid_child = None
else:
if child.parent_hash != header.hash:
if child.hash in checkpoints:
raise CheckpointsMustBeCanonical(
f"Trying to decanonicalize {child} while making "
f"{header} the chain tip"
)
else:
next_invalid_child = child
else:
next_invalid_child = None
while next_invalid_child:
# decanonicalize, and add gap for tracking
db.delete(SchemaV1.make_block_number_to_hash_lookup_key(child_number))
new_gaps = reopen_gap(child_number, new_gaps)
# find next child
child_number = BlockNumber(child_number + 1)
try:
# All contiguous children must now be made invalid
next_invalid_child = cls._get_canonical_block_header_by_number(
db, child_number
)
except HeaderNotFound:
# Found the end of this streak of canonical blocks
break
else:
if next_invalid_child.hash in checkpoints:
raise CheckpointsMustBeCanonical(
f"Trying to decanonicalize {next_invalid_child} while making "
f"{header} the chain tip"
)
if new_gaps != starting_gaps:
db.set(
SchemaV1.make_header_chain_gaps_lookup_key(),
rlp.encode(new_gaps, sedes=chain_gaps),
)
@classmethod
def _decanonicalize_single(
cls, db: DatabaseAPI, block_num: BlockNumber, base_gaps: ChainGaps
) -> ChainGaps:
"""
A single block number was found to no longer be canonical. At doc-time,
this only happens because it does not link up with a checkpoint header.
So de-canonicalize this block number and insert a gap in the tracked
chain gaps.
"""
db.delete(SchemaV1.make_block_number_to_hash_lookup_key(block_num))
new_gaps = reopen_gap(block_num, base_gaps)
if new_gaps != base_gaps:
db.set(
SchemaV1.make_header_chain_gaps_lookup_key(),
rlp.encode(new_gaps, sedes=chain_gaps),
)
return new_gaps
@classmethod
def _persist_header_chain(
cls,
db: DatabaseAPI,
headers: Iterable[BlockHeaderAPI],
genesis_parent_hash: Hash32,
) -> Tuple[Tuple[BlockHeaderAPI, ...], Tuple[BlockHeaderAPI, ...]]:
headers_iterator = iter(headers)
try:
first_header = first(headers_iterator)
except StopIteration:
return (), ()
is_genesis = first_header.parent_hash == genesis_parent_hash
if not is_genesis and not cls._header_exists(db, first_header.parent_hash):
raise ParentNotFound(
f"Cannot persist block header ({encode_hex(first_header.hash)}) "
f"with unknown parent ({encode_hex(first_header.parent_hash)})"
)
if is_genesis:
score = 0
else:
score = cls._get_score(db, first_header.parent_hash)
curr_chain_head = first_header
db.set(
curr_chain_head.hash,
rlp.encode(curr_chain_head),
)
score = cls._set_hash_scores_to_db(db, curr_chain_head, score)
base_gaps = cls._get_header_chain_gaps(db)
gap_info = cls._update_header_chain_gaps(db, curr_chain_head, base_gaps)
gaps = cls._handle_gap_change(
db, gap_info, curr_chain_head, genesis_parent_hash
)
orig_headers_seq = concat([(first_header,), headers_iterator])
for parent, child in sliding_window(2, orig_headers_seq):
if parent.hash != child.parent_hash:
raise ValidationError(
f"Non-contiguous chain. Expected {encode_hex(child.hash)} "
f"to have {encode_hex(parent.hash)} as parent "
f"but was {encode_hex(child.parent_hash)}"
)
curr_chain_head = child
db.set(
curr_chain_head.hash,
rlp.encode(curr_chain_head),
)
score = cls._set_hash_scores_to_db(db, curr_chain_head, score)
gap_info = cls._update_header_chain_gaps(db, curr_chain_head, gaps)
gaps = cls._handle_gap_change(
db, gap_info, curr_chain_head, genesis_parent_hash
)
try:
previous_canonical_head = cls._get_canonical_head_hash(db)
head_score = cls._get_score(db, previous_canonical_head)
except CanonicalHeadNotFound:
return cls._set_as_canonical_chain_head(
db, curr_chain_head, genesis_parent_hash
)
if score > head_score:
return cls._set_as_canonical_chain_head(
db, curr_chain_head, genesis_parent_hash
)
return (), ()
@classmethod
def _handle_gap_change(
cls,
db: DatabaseAPI,
gap_info: GapInfo,
header: BlockHeaderAPI,
genesis_parent_hash: Hash32,
) -> ChainGaps:
gap_change, gaps = gap_info
if gap_change not in GAP_WRITES:
return gaps
# Check if this change will link up the chain to the right
if gap_change in (GapChange.GapFill, GapChange.GapRightShrink):
next_child_number = BlockNumber(header.block_number + 1)
expected_child = cls._get_canonical_block_header_by_number(
db, next_child_number
)
if header.hash != expected_child.parent_hash:
# Must not join a canonical chain that is not linked from parent to
# child. If the child is a checkpoint, reject this fill as an uncle.
checkpoints = cls._get_checkpoints(db)
if expected_child.hash in checkpoints:
raise CheckpointsMustBeCanonical(
f"Cannot make {header} canonical, because it is not the parent "
f"of declared checkpoint: {expected_child}"
)
else:
# If the child is *not* a checkpoint,
# then re-open a gap in the chain
gaps = cls._decanonicalize_single(
db, expected_child.block_number, gaps
)
# We implicitly assert that persisted headers are canonical here.
# This assertion is made when persisting headers that are known to be part of a
# gap in the canonical chain.
# What if this assertion is later found to be false? At gap fill time, we can
# detect if the chains don't link (and raise a ValidationError). Also, when a
# true canonical header is added eventually, we need to canonicalize all the
# true headers.
cls._canonicalize_header(db, header, genesis_parent_hash)
return gaps
@classmethod
def _canonicalize_header(
cls,
db: DatabaseAPI,
header: BlockHeaderAPI,
genesis_parent_hash: Hash32,
) -> Tuple[Tuple[BlockHeaderAPI, ...], Tuple[BlockHeaderAPI, ...]]:
"""
Force this header to be canonical,
and adjust its ancestors/descendants as necessary
:raises CheckpointsMustBeCanonical: if trying to set a head that would
de-canonicalize a checkpoint
"""
new_canonical_headers = cast(
Tuple[BlockHeaderAPI, ...],
tuple(reversed(cls._find_new_ancestors(db, header, genesis_parent_hash))),
)
old_canonical_headers = cls._find_headers_to_decanonicalize(
db,
[h.block_number for h in new_canonical_headers],
)
# Reject if this would make a checkpoint non-canonical
checkpoints = cls._get_checkpoints(db)
attempted_checkpoint_overrides = {
old for old in old_canonical_headers if old.hash in checkpoints
}
if len(attempted_checkpoint_overrides):
raise CheckpointsMustBeCanonical(
"Tried to switch chain away from checkpoint(s) "
f"{attempted_checkpoint_overrides!r} by inserting new canonical "
f"headers {new_canonical_headers}"
)
for ancestor in new_canonical_headers:
cls._add_block_number_to_hash_lookup(db, ancestor)
if len(new_canonical_headers):
cls._decanonicalize_descendant_orphans(
db, new_canonical_headers[-1], checkpoints
)
return new_canonical_headers, old_canonical_headers
@classmethod
def _set_as_canonical_chain_head(
cls,
db: DatabaseAPI,
header: BlockHeaderAPI,
genesis_parent_hash: Hash32,
) -> Tuple[Tuple[BlockHeaderAPI, ...], Tuple[BlockHeaderAPI, ...]]:
"""
Sets the canonical chain HEAD to the block header as specified by the
given block hash.
:return: a tuple of the headers that are newly in the canonical chain, and the
headers that are no longer in the canonical chain
:raises CheckpointsMustBeCanonical: if trying to set a head that would
de-canonicalize a checkpoint
"""
try:
current_canonical_head = cls._get_canonical_head_hash(db)
except CanonicalHeadNotFound:
current_canonical_head = None
new_canonical_headers: Tuple[BlockHeaderAPI, ...]
old_canonical_headers: Tuple[BlockHeaderAPI, ...]
if current_canonical_head and header.parent_hash == current_canonical_head:
# the calls to _find_new_ancestors and _find_headers_to_decanonicalize are
# relatively expensive, it's better to skip them in this case, where we're
# extending the canonical chain by a header
new_canonical_headers = (header,)
old_canonical_headers = ()
cls._add_block_number_to_hash_lookup(db, header)
else:
(
new_canonical_headers,
old_canonical_headers,
) = cls._canonicalize_header(db, header, genesis_parent_hash)
db.set(SchemaV1.make_canonical_head_hash_lookup_key(), header.hash)
return new_canonical_headers, old_canonical_headers
@classmethod
def _get_checkpoints(cls, db: DatabaseAPI) -> Tuple[Hash32, ...]:
concatenated_checkpoints = db.get(SchemaV1.make_checkpoint_headers_key())
if concatenated_checkpoints is None:
return ()
else:
return tuple(
Hash32(concatenated_checkpoints[index : index + 32])
for index in range(0, len(concatenated_checkpoints), 32)
)
@classmethod
@to_tuple
def _find_headers_to_decanonicalize(
cls, db: DatabaseAPI, numbers_to_decanonicalize: Sequence[BlockNumber]
) -> Iterable[BlockHeaderAPI]:
for block_number in numbers_to_decanonicalize:
try:
old_canonical_hash = cls._get_canonical_block_hash(db, block_number)
except HeaderNotFound:
# no old_canonical block, but due to checkpointing, more may be possible
continue
else:
yield cls._get_block_header_by_hash(db, old_canonical_hash)
@classmethod
@to_tuple
def _find_new_ancestors(
cls, db: DatabaseAPI, header: BlockHeaderAPI, genesis_parent_hash: Hash32
) -> Iterable[BlockHeaderAPI]:
"""
Returns the chain leading up from the given header until (but not including)
the first ancestor it has in common with our canonical chain.
If D is the canonical head in the following chain, and F is the new header,
then this function returns (F, E).
A - B - C - D
\
E - F
"""
h = header
while True:
try:
orig = cls._get_canonical_block_header_by_number(db, h.block_number)
except HeaderNotFound:
# This just means the block is not on the canonical chain.
pass
else:
if orig.hash == h.hash:
# Found the common ancestor, stop.
break
# Found a new ancestor
yield h
if h.parent_hash == genesis_parent_hash:
break
else:
try:
h = cls._get_block_header_by_hash(db, h.parent_hash)
except HeaderNotFound:
# We must have hit a checkpoint parent, return early
break
@staticmethod
def _add_block_number_to_hash_lookup(
db: DatabaseAPI, header: BlockHeaderAPI
) -> None:
"""
Sets a record in the database to allow looking up this header by its
block number.
"""
block_number_to_hash_key = SchemaV1.make_block_number_to_hash_lookup_key(
header.block_number
)
db.set(
block_number_to_hash_key,
rlp.encode(header.hash, sedes=rlp.sedes.binary),
)
# When performing a chain sync (either fast or regular modes), we'll very often need to
# look up recent block headers to validate the chain, and decoding their RLP
# representation is relatively expensive so we cache that here, but use a small cache
# because we *should* only be looking up recent blocks.
@functools.lru_cache(128)
def _decode_block_header(header_rlp: bytes) -> BlockHeaderAPI:
# Use a deserialization class that can handle any type of header.
# This feels a little hack-y, but we don't know the shape of the header
# at this point. It could be a pre-London header, or a post-London
# header, which includes the base fee. So we use a class that knows how to
# decode both.
return rlp.decode(header_rlp, sedes=HeaderSedes)
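# Usage sketch (illustrative, not part of the original module): HeaderDB wraps an
# atomic key-value store such as eth.db.atomic.AtomicDB; `genesis_header` below is
# an assumed, previously-built BlockHeader whose parent_hash is GENESIS_PARENT_HASH.
#
#   from eth.db.atomic import AtomicDB
#   headerdb = HeaderDB(AtomicDB())
#   headerdb.persist_header(genesis_header)
#   head = headerdb.get_canonical_head()   # returns the header just persisted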
/hera_workflows-5.6.0-py3-none-any.whl/hera/workflows/models/io/k8s/apimachinery/pkg/apis/meta/v1.py
from __future__ import annotations
from datetime import datetime
from typing import Dict, List, Optional
from hera.shared._base_model import BaseModel
from pydantic import Field
class CreateOptions(BaseModel):
dry_run: Optional[List[str]] = Field(
default=None,
alias="dryRun",
title=(
"When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun"
" directive will\nresult in an error response and no further processing of the\nrequest. Valid values"
" are:\n- All: all dry run stages will be processed\n+optional"
),
)
field_manager: Optional[str] = Field(
default=None,
alias="fieldManager",
title=(
"fieldManager is a name associated with the actor or entity\nthat is making these changes. The value must"
" be less than or\n128 characters long, and only contain printable characters,\nas defined by"
" https://golang.org/pkg/unicode/#IsPrint.\n+optional"
),
)
field_validation: Optional[str] = Field(
default=None,
alias="fieldValidation",
title=(
"fieldValidation instructs the server on how to handle\nobjects in the request (POST/PUT/PATCH) containing"
" unknown\nor duplicate fields, provided that the `ServerSideFieldValidation`\nfeature gate is also"
" enabled. Valid values are:\n- Ignore: This will ignore any unknown fields that are silently\ndropped"
" from the object, and will ignore all but the last duplicate\nfield that the decoder encounters. This is"
" the default behavior\nprior to v1.23 and is the default behavior when the\n`ServerSideFieldValidation`"
" feature gate is disabled.\n- Warn: This will send a warning via the standard warning response\nheader"
" for each unknown field that is dropped from the object, and\nfor each duplicate field that is"
" encountered. The request will\nstill succeed if there are no other errors, and will only persist\nthe"
" last of any duplicate fields. This is the default when the\n`ServerSideFieldValidation` feature gate is"
" enabled.\n- Strict: This will fail the request with a BadRequest error if\nany unknown fields would be"
" dropped from the object, or if any\nduplicate fields are present. The error returned from the"
" server\nwill contain all unknown and duplicate fields encountered.\n+optional"
),
)
class FieldsV1(BaseModel):
pass
class GroupVersionResource(BaseModel):
group: Optional[str] = None
resource: Optional[str] = None
version: Optional[str] = None
class LabelSelectorRequirement(BaseModel):
key: str = Field(..., description="key is the label key that the selector applies to.")
operator: str = Field(
...,
description=(
"operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and"
" DoesNotExist."
),
)
values: Optional[List[str]] = Field(
default=None,
description=(
"values is an array of string values. If the operator is In or NotIn, the values array must be non-empty."
" If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during"
" a strategic merge patch."
),
)
class ListMeta(BaseModel):
continue_: Optional[str] = Field(
default=None,
alias="continue",
description=(
"continue may be set if the user set a limit on the number of items returned, and indicates that the"
" server has more data available. The value is opaque and may be used to issue another request to the"
" endpoint that served this list to retrieve the next set of available objects. Continuing a consistent"
" list may not be possible if the server configuration has changed or more than a few minutes have passed."
" The resourceVersion field returned when using this continue value will be identical to the value in the"
" first response, unless you have received this token from an error message."
),
)
remaining_item_count: Optional[int] = Field(
default=None,
alias="remainingItemCount",
description=(
"remainingItemCount is the number of subsequent items in the list which are not included in this list"
" response. If the list request contained label or field selectors, then the number of remaining items is"
" unknown and the field will be left unset and omitted during serialization. If the list is complete"
" (either because it is not chunking or because this is the last chunk), then there are no more remaining"
" items and this field will be left unset and omitted during serialization. Servers older than v1.15 do"
" not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection."
" Clients should not rely on the remainingItemCount to be set or to be exact."
),
)
resource_version: Optional[str] = Field(
default=None,
alias="resourceVersion",
description=(
"String that identifies the server's internal version of this object that can be used by clients to"
" determine when objects have changed. Value must be treated as opaque by clients and passed unmodified"
" back to the server. Populated by the system. Read-only. More info:"
" https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"
),
)
self_link: Optional[str] = Field(
default=None,
alias="selfLink",
description=(
"selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes"
" will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."
),
)
class MicroTime(BaseModel):
__root__: datetime = Field(..., description="MicroTime is version of Time with microsecond level precision.")
class OwnerReference(BaseModel):
api_version: str = Field(..., alias="apiVersion", description="API version of the referent.")
block_owner_deletion: Optional[bool] = Field(
default=None,
alias="blockOwnerDeletion",
description=(
'If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from'
" the key-value store until this reference is removed. Defaults to false. To set this field, a user needs"
' "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.'
),
)
controller: Optional[bool] = Field(
default=None, description="If true, this reference points to the managing controller."
)
kind: str = Field(
...,
description=(
"Kind of the referent. More info:"
" https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
),
)
name: str = Field(
..., description="Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names"
)
uid: str = Field(
..., description="UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"
)
class StatusCause(BaseModel):
field: Optional[str] = Field(
default=None,
description=(
"The field of the resource that has caused this error, as named by its JSON serialization. May include dot"
" and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once"
' in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n "name" - the field'
' "name" on the current resource\n "items[0].name" - the field "name" on the first array entry in "items"'
),
)
message: Optional[str] = Field(
default=None,
description=(
"A human-readable description of the cause of the error. This field may be presented as-is to a reader."
),
)
reason: Optional[str] = Field(
default=None,
description=(
"A machine-readable description of the cause of the error. If this value is empty there is no information"
" available."
),
)
class Time(BaseModel):
__root__: datetime = Field(
...,
description=(
"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are"
" provided for many of the factory methods that the time package offers."
),
)
class LabelSelector(BaseModel):
match_expressions: Optional[List[LabelSelectorRequirement]] = Field(
default=None,
alias="matchExpressions",
description="matchExpressions is a list of label selector requirements. The requirements are ANDed.",
)
match_labels: Optional[Dict[str, str]] = Field(
default=None,
alias="matchLabels",
description=(
"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to"
' an element of matchExpressions, whose key field is "key", the operator is "In", and the values array'
' contains only "value". The requirements are ANDed.'
),
)
class ManagedFieldsEntry(BaseModel):
api_version: Optional[str] = Field(
default=None,
alias="apiVersion",
description=(
"APIVersion defines the version of this resource that this field set applies to. The format is"
' "group/version" just like the top-level APIVersion field. It is necessary to track the version of a'
" field set because it cannot be automatically converted."
),
)
fields_type: Optional[str] = Field(
default=None,
alias="fieldsType",
description=(
"FieldsType is the discriminator for the different fields format and version. There is currently only one"
' possible value: "FieldsV1"'
),
)
fields_v1: Optional[FieldsV1] = Field(
default=None,
alias="fieldsV1",
description='FieldsV1 holds the first JSON version format as described in the "FieldsV1" type.',
)
manager: Optional[str] = Field(
default=None, description="Manager is an identifier of the workflow managing these fields."
)
operation: Optional[str] = Field(
default=None,
description=(
"Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid"
" values for this field are 'Apply' and 'Update'."
),
)
subresource: Optional[str] = Field(
default=None,
description=(
"Subresource is the name of the subresource used to update that object, or empty string if the object was"
" updated through the main resource. The value of this field is used to distinguish between managers, even"
" if they share the same name. For example, a status update will be distinct from a regular update using"
" the same manager name. Note that the APIVersion field is not related to the Subresource field and it"
" always corresponds to the version of the main resource."
),
)
time: Optional[Time] = Field(
default=None,
description=(
"Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'"
),
)
class ObjectMeta(BaseModel):
annotations: Optional[Dict[str, str]] = Field(
default=None,
description=(
"Annotations is an unstructured key value map stored with a resource that may be set by external tools to"
" store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying"
" objects. More info: http://kubernetes.io/docs/user-guide/annotations"
),
)
cluster_name: Optional[str] = Field(
default=None,
alias="clusterName",
description=(
"The name of the cluster which the object belongs to. This is used to distinguish resources with same name"
" and namespace in different clusters. This field is not set anywhere right now and apiserver is going to"
" ignore it if set in create or update request."
),
)
creation_timestamp: Optional[Time] = Field(
default=None,
alias="creationTimestamp",
description=(
"CreationTimestamp is a timestamp representing the server time when this object was created. It is not"
" guaranteed to be set in happens-before order across separate operations. Clients may not set this value."
" It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists."
" More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata"
),
)
deletion_grace_period_seconds: Optional[int] = Field(
default=None,
alias="deletionGracePeriodSeconds",
description=(
"Number of seconds allowed for this object to gracefully terminate before it will be removed from the"
" system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."
),
)
deletion_timestamp: Optional[Time] = Field(
default=None,
alias="deletionTimestamp",
description=(
"DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by"
" the server when a graceful deletion is requested by the user, and is not directly settable by a client."
" The resource is expected to be deleted (no longer visible from resource lists, and not reachable by"
" name) after the time in this field, once the finalizers list is empty. As long as the finalizers list"
" contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or"
" be set further into the future, although it may be shortened or the resource may be deleted prior to"
" this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react"
" by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the"
" Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod"
" from the API. In the presence of network partitions, this object may still exist after this timestamp,"
" until an administrator or automated process can determine the resource is fully terminated. If not set,"
" graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful"
" deletion is requested. Read-only. More info:"
" https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata"
),
)
finalizers: Optional[List[str]] = Field(
default=None,
description=(
"Must be empty before the object is deleted from the registry. Each entry is an identifier for the"
" responsible component that will remove the entry from the list. If the deletionTimestamp of the object"
" is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any"
" order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is"
" a shared field, any actor with permission can reorder it. If the finalizer list is processed in order,"
" then this can lead to a situation in which the component responsible for the first finalizer in the list"
" is waiting for a signal (field value, external system, or other) produced by a component responsible for"
" a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to"
" order amongst themselves and are not vulnerable to ordering changes in the list."
),
)
generate_name: Optional[str] = Field(
default=None,
alias="generateName",
description=(
"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field"
" has not been provided. If this field is used, the name returned to the client will be different than the"
" name passed. This value will also be combined with a unique suffix. The provided value has the same"
" validation rules as the Name field, and may be truncated by the length of the suffix required to make"
" the value unique on the server.\n\nIf this field is specified and the generated name exists, the server"
" will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout"
" indicating a unique name could not be found in the time allotted, and the client should retry"
" (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not"
" specified. More info:"
" https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"
),
)
generation: Optional[int] = Field(
default=None,
description=(
"A sequence number representing a specific generation of the desired state. Populated by the system."
" Read-only."
),
)
labels: Optional[Dict[str, str]] = Field(
default=None,
description=(
"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May"
" match selectors of replication controllers and services. More info:"
" http://kubernetes.io/docs/user-guide/labels"
),
)
managed_fields: Optional[List[ManagedFieldsEntry]] = Field(
default=None,
alias="managedFields",
description=(
"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This"
" is mostly for internal housekeeping, and users typically shouldn't need to set or understand this"
" field. A workflow can be the user's name, a controller's name, or the name of a specific apply path"
' like "ci-cd". The set of fields is always in the version that the workflow used when modifying the'
" object."
),
)
name: Optional[str] = Field(
default=None,
description=(
"Name must be unique within a namespace. Is required when creating resources, although some resources may"
" allow a client to request the generation of an appropriate name automatically. Name is primarily"
" intended for creation idempotence and configuration definition. Cannot be updated. More info:"
" http://kubernetes.io/docs/user-guide/identifiers#names"
),
)
namespace: Optional[str] = Field(
default=None,
description=(
"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to"
' the "default" namespace, but "default" is the canonical representation. Not all objects are required to'
" be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a"
" DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"
),
)
owner_references: Optional[List[OwnerReference]] = Field(
default=None,
alias="ownerReferences",
description=(
"List of objects depended by this object. If ALL objects in the list have been deleted, this object will"
" be garbage collected. If this object is managed by a controller, then an entry in this list will point"
" to this controller, with the controller field set to true. There cannot be more than one managing"
" controller."
),
)
resource_version: Optional[str] = Field(
default=None,
alias="resourceVersion",
description=(
"An opaque value that represents the internal version of this object that can be used by clients to"
" determine when objects have changed. May be used for optimistic concurrency, change detection, and the"
" watch operation on a resource or set of resources. Clients must treat these values as opaque and passed"
" unmodified back to the server. They may only be valid for a particular resource or set of"
" resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More"
" info:"
" https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"
),
)
self_link: Optional[str] = Field(
default=None,
alias="selfLink",
description=(
"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes"
" will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."
),
)
uid: Optional[str] = Field(
default=None,
description=(
"UID is the unique in time and space value for this object. It is typically generated by the server on"
" successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the"
" system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"
),
)
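# Usage sketch (illustrative, not part of the generated module): these are pydantic
# models with Kubernetes-style camelCase aliases, so they can be built by field name
# and serialized with aliases, assuming the usual pydantic v1 API on the base class:
#
#   meta = ObjectMeta(name="my-workflow", labels={"app": "demo"})
#   meta.dict(by_alias=True, exclude_none=True)
#   # -> {'name': 'my-workflow', 'labels': {'app': 'demo'}}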
/pm4ngs-0.0.21.tar.gz/pm4ngs-0.0.21/docs/source/pipelines/ubuntu.rst
.. _ubuntu:
################
PM4NGS on Ubuntu
################
Run these commands in a terminal to prepare the instance to run PM4NGS
.. code-block:: bash
veraalva@perseo:~$ sudo apt-get update
veraalva@perseo:~$ sudo apt-get install docker.io python3 python3-pip python3-venv python3-dev poppler-utils gcc nodejs tree
veraalva@perseo:~$ sudo usermod -aG docker $USER
Close and reopen the terminal to set the docker group in the user.
Installing PM4NGS
-----------------
Creates a Python virtual environment named: **pm4ngs_venv** for installing PM4NGS
.. code-block:: bash
veraalva@perseo:~$ python3 -m venv pm4ngs_venv
veraalva@perseo:~$ source pm4ngs_venv/bin/activate
(pm4ngs_venv) veraalva@perseo:~$ pip install wheel
(pm4ngs_venv) veraalva@perseo:~$ pip install pm4ngs
Using PM4NGS
------------
Open a terminal and activate the **pm4ngs_venv** virtual environment
.. code-block:: bash
veraalva@perseo:~$ source pm4ngs_venv/bin/activate
(pm4ngs_venv) veraalva@perseo:~$ pm4ngs-chipexo --version
PM4NGS version: 0.0.4
(pm4ngs_venv) veraalva@perseo:~$
Running the ChIP-exo demo
-------------------------
Open a terminal and activate the **pm4ngs_venv** virtual environment
.. code-block:: bash
veraalva@perseo:~$ source pm4ngs_venv/bin/activate
(pm4ngs_venv) veraalva@perseo:~$ pm4ngs-chipexo-demo
Generating demo for ChIP-exo data analysis project
Downloading file: pm4ngs_chipexo_demo_config.yaml
Downloading file: pm4ngs_chipexo_demo_sample_data.csv
Using config file: pm4ngs_chipexo_demo_config.yaml
{
"author_name": "Roberto Vera Alvarez",
"user_email": "[email protected]",
"project_name": "pm4ngs-chipexo",
"dataset_name": "PRJNA338159",
"is_data_in_SRA": "y",
"sequencing_technology": "single-end",
"create_demo": "n",
"number_spots": "1000000",
"organism": "Escherichia coli",
"genome_name": "NC_000913.3",
"genome_dir": "{{ cookiecutter.genome_name}}",
"aligner_index_dir": "{{ cookiecutter.genome_dir}}/BWA/",
"genome_fasta": "{{ cookiecutter.genome_dir}}/NC_000913.3.fa",
"genome_gtf": "{{ cookiecutter.genome_dir}}/NC_000913.3.gtf",
"genome_chromsizes": "{{ cookiecutter.genome_dir}}/NC_000913.3.sizes",
"use_docker": "y",
"max_number_threads": "32"
}
Cloning Git repo: https://github.com/ncbi/cwl-ngs-workflows-cbb to /home/veraalva/pm4ngs-chipexo/bin/cwl
Updating CWLs dockerPull and SoftwareRequirement from: /home/veraalva/pm4ngs-chipexo/requirements/conda-env-dependencies.yaml
bamscale with version 0.0.3 update image to: quay.io/biocontainers/bamscale:0.0.3--ha85820d_0
/Users/veraalva/my_ngs_project/bin/cwl/tools/bamscale/bamscale-docker.yml with old image replaced: quay.io/biocontainers/bamscale:0.0.5--h18f8b1d_1
bedtools with version 2.29.2 update image to: quay.io/biocontainers/bedtools:2.29.2--hc088bd4_0
/Users/veraalva/my_ngs_project/bin/cwl/tools/bedtools/bedtools-docker.yml with old image replaced: quay.io/biocontainers/bedtools:2.28.0--hdf88d34_0
bioconductor-diffbind with version 2.16.0 update image to: quay.io/biocontainers/bioconductor-diffbind:2.16.0--r40h5f743cb_0
/Users/veraalva/my_ngs_project/bin/cwl/tools/R/deseq2-pca.cwl with old image replaced: quay.io/biocontainers/bioconductor-diffbind:2.16.0--r40h5f743cb_2
/Users/veraalva/my_ngs_project/bin/cwl/tools/R/macs-cutoff.cwl with old image replaced: quay.io/biocontainers/bioconductor-diffbind:2.16.0--r40h5f743cb_2
/Users/veraalva/my_ngs_project/bin/cwl/tools/R/dga_heatmaps.cwl with old image replaced: quay.io/biocontainers/bioconductor-diffbind:2.16.0--r40h5f743cb_2
/Users/veraalva/my_ngs_project/bin/cwl/tools/R/diffbind.cwl with old image replaced: quay.io/biocontainers/bioconductor-diffbind:2.16.0--r40h5f743cb_2
/Users/veraalva/my_ngs_project/bin/cwl/tools/R/edgeR-2conditions.cwl with old image replaced: quay.io/biocontainers/bioconductor-diffbind:2.16.0--r40h5f743cb_2
/Users/veraalva/my_ngs_project/bin/cwl/tools/R/volcano_plot.cwl with old image replaced: quay.io/biocontainers/bioconductor-diffbind:2.16.0--r40h5f743cb_2
/Users/veraalva/my_ngs_project/bin/cwl/tools/R/readQC.cwl with old image replaced: quay.io/biocontainers/bioconductor-diffbind:2.16.0--r40h5f743cb_2
/Users/veraalva/my_ngs_project/bin/cwl/tools/R/deseq2-2conditions.cwl with old image replaced: quay.io/biocontainers/bioconductor-diffbind:2.16.0--r40h5f743cb_2
bwa with version 0.7.17 update image to: quay.io/biocontainers/bwa:0.7.17--hed695b0_7
/Users/veraalva/my_ngs_project/bin/cwl/tools/bwa/bwa-docker.yml with old image replaced: quay.io/biocontainers/bwa:0.7.17--h84994c4_5
There is not biocontainer image for gffread version 0.12.1
homer with version 4.11 update image to: quay.io/biocontainers/homer:4.11--pl526h9a982cc_2
/Users/veraalva/my_ngs_project/bin/cwl/tools/homer/homer-docker.yml with old image replaced: quay.io/biocontainers/homer:4.11--pl526h2bce143_2
mace with version 1.2 update image to: quay.io/biocontainers/mace:1.2--py27h99da42f_0
/Users/veraalva/my_ngs_project/bin/cwl/tools/mace/mace-docker.yml with old image replaced: quay.io/biocontainers/mace:1.2--py27h99da42f_1
meme with version 5.1.1 update image to: quay.io/biocontainers/meme:5.1.1--py37pl526h072abfd_3
/Users/veraalva/my_ngs_project/bin/cwl/tools/meme/meme-docker.yml with old image replaced: quay.io/biocontainers/meme:5.1.1--py27pl526h53063a7_3
Copying file /Users/veraalva/Work/Developer/Python/pm4ngs/pm4ngs-chipexo/example/pm4ngs_chipexo_demo_sample_data.csv to /Users/veraalva/my_ngs_project/data/my_dataset_name/sample_table.csv
6 files loaded
Using table:
sample_name file condition replicate
0 SRR4011416 Exp_O2_growth_no_rifampicin 1
1 SRR4011417 Exp_O2_growth_no_rifampicin 2
2 SRR4011421 Exp_O2_growth_rifampicin 1
3 SRR4011425 Exp_O2_growth_rifampicin 2
4 SRR4011418 Stat_02_growth_no_rifampicin 1
5 SRR4011419 Stat_02_growth_no_rifampicin 2
Done
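The generated project layout can be inspected with the ``tree`` utility installed
earlier (output omitted here, since the exact layout depends on the PM4NGS version):
.. code-block:: bash
    (pm4ngs_venv) veraalva@perseo:~$ tree -L 2 pm4ngs-chipexo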
Running the Jupyter Server
--------------------------
Open a terminal and activate the **pm4ngs_venv** virtual environment
.. code-block:: bash
veraalva@perseo:~$ source pm4ngs_venv/bin/activate
(pm4ngs_venv) veraalva@perseo:~$ jupyter notebook --no-browser
[I 17:04:45.633 NotebookApp] Serving notebooks from local directory: /home/veraalva
[I 17:04:45.633 NotebookApp] Jupyter Notebook 6.1.4 is running at:
[I 17:04:45.634 NotebookApp] http://localhost:8888/?token=90bcbcda87e5421cf451e6a58d88bfa212355b36f0ed7f1a
[I 17:04:45.634 NotebookApp] or http://127.0.0.1:8888/?token=90bcbcda87e5421cf451e6a58d88bfa212355b36f0ed7f1a
[I 17:04:45.634 NotebookApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).
[C 17:04:45.637 NotebookApp]
To access the notebook, open this file in a browser:
file:///home/veraalva/.local/share/jupyter/runtime/nbserver-522-open.html
Or copy and paste one of these URLs:
http://localhost:8888/?token=90bcbcda87e5421cf451e6a58d88bfa212355b36f0ed7f1a
or http://127.0.0.1:8888/?token=90bcbcda87e5421cf451e6a58d88bfa212355b36f0ed7f1a
Copy the URL with localhost in a browser.
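If PM4NGS is running on a remote instance rather than your local machine, the Jupyter
port can first be forwarded over SSH before opening the URL (the host name below is a
placeholder):
.. code-block:: bash
    laptop$ ssh -L 8888:localhost:8888 veraalva@remote-instance
The same localhost URL can then be opened in a local browser.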
/waffle_hub-0.2.11-py3-none-any.whl/waffle_hub/dataset/adapter/coco.py
import logging
import warnings
from pathlib import Path
from typing import Union
import tqdm
from pycocotools.coco import COCO
from waffle_utils.file import io
from waffle_hub import TaskType
from waffle_hub.schema.fields import Annotation, Category, Image
from waffle_hub.utils.conversion import convert_rle_to_polygon
def _export_coco(
self,
export_dir: Path,
train_ids: list,
val_ids: list,
test_ids: list,
unlabeled_ids: list,
):
"""Export dataset to COCO format
Args:
export_dir (Path): Path to export directory
train_ids (list): List of train ids
val_ids (list): List of validation ids
test_ids (list): List of test ids
unlabeled_ids (list): List of unlabeled ids
"""
io.make_directory(export_dir)
image_dir = export_dir / "images"
for split, image_ids in zip(
["train", "val", "test", "unlabeled"],
[train_ids, val_ids, test_ids, unlabeled_ids],
):
if len(image_ids) == 0:
continue
coco = {
"categories": [
{
"id": category.category_id,
"name": category.name,
"supercategory": category.supercategory,
}
for category in self.get_categories()
],
"images": [],
"annotations": [],
}
for image in self.get_images(image_ids):
image_path = self.raw_image_dir / image.file_name
image_dst_path = image_dir / image.file_name
io.copy_file(image_path, image_dst_path, create_directory=True)
d = image.to_dict()
image_id = d.pop("image_id")
coco["images"].append({"id": image_id, **d})
annotations = self.get_annotations(image_id)
for annotation in annotations:
d = annotation.to_dict()
if d.get("segmentation", None):
if isinstance(d["segmentation"], dict):
d["segmentation"] = convert_rle_to_polygon(d["segmentation"])
annotation_id = d.pop("annotation_id")
coco["annotations"].append({"id": annotation_id, **d})
io.save_json(coco, export_dir / f"{split}.json", create_directory=True)
def export_coco(self, export_dir: Union[str, Path]) -> str:
"""Export dataset to COCO format
Args:
export_dir (str): Path to export directory
Returns:
str: Path to export directory
"""
export_dir = Path(export_dir)
train_ids, val_ids, test_ids, _ = self.get_split_ids()
if self.task == TaskType.CLASSIFICATION:
_export_coco(self, export_dir, train_ids, val_ids, test_ids, [])
elif self.task == TaskType.OBJECT_DETECTION:
_export_coco(self, export_dir, train_ids, val_ids, test_ids, [])
elif self.task == TaskType.INSTANCE_SEGMENTATION:
_export_coco(self, export_dir, train_ids, val_ids, test_ids, [])
else:
raise ValueError(f"Unsupported task type: {self.task}")
return str(export_dir)
def import_coco(self, coco_files: list[str], coco_root_dirs: list[str]):
"""
Import coco dataset
Args:
coco_files (list[str]): List of coco annotation files
coco_root_dirs (list[str]): List of coco root directories
"""
if len(coco_files) == 1:
set_names = [None]
elif len(coco_files) == 2:
set_names = ["train", "val"]
elif len(coco_files) == 3:
set_names = ["train", "val", "test"]
else:
raise ValueError("coco_file should have 1, 2, or 3 files.")
cocos = [COCO(coco_file) for coco_file in coco_files]
# categories should be same between coco files
categories = cocos[0].loadCats(cocos[0].getCatIds())
for coco in cocos[1:]:
if categories != coco.loadCats(coco.getCatIds()):
raise ValueError("categories should be same between coco files.")
coco_cat_id_to_waffle_cat_id = {}
for i, category in enumerate(categories, start=1):
coco_category_id = category.pop("id")
coco_cat_id_to_waffle_cat_id[coco_category_id] = i
self.add_categories([Category.from_dict({**category, "category_id": i}, task=self.task)])
# import coco dataset
total_length = sum([len(coco.getImgIds()) for coco in cocos])
logging.info(f"Importing coco dataset. Total length: {total_length}")
pgbar = tqdm.tqdm(total=total_length, desc="Importing coco dataset")
image_id = 1
annotation_id = 1
# parse coco annotation file
for coco, coco_root_dir, set_name in tqdm.tqdm(zip(cocos, coco_root_dirs, set_names)):
image_ids = []
for coco_image_id, annotation_dicts in coco.imgToAnns.items():
if len(annotation_dicts) == 0:
warnings.warn(f"image_id {coco_image_id} has no annotations.")
continue
image_dict = coco.loadImgs(coco_image_id)[0]
image_dict.pop("id")
file_name = image_dict.pop("file_name")
image_path = Path(coco_root_dir) / file_name
if not image_path.exists():
raise FileNotFoundError(f"{image_path} does not exist.")
if set_name:
file_name = f"{set_name}/{file_name}"
self.add_images(
[Image.from_dict({**image_dict, "image_id": image_id, "file_name": file_name})]
)
io.copy_file(image_path, self.raw_image_dir / file_name, create_directory=True)
for annotation_dict in annotation_dicts:
annotation_dict.pop("id")
self.add_annotations(
[
Annotation.from_dict(
{
**annotation_dict,
"image_id": image_id,
"annotation_id": annotation_id,
"category_id": coco_cat_id_to_waffle_cat_id[
annotation_dict["category_id"]
],
},
task=self.task,
)
]
)
annotation_id += 1
image_ids.append(image_id)
image_id += 1
pgbar.update(1)
if set_name:
io.save_json(image_ids, self.set_dir / f"{set_name}.json", create_directory=True)
pgbar.close()
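# Usage sketch (illustrative, not part of the original adapter): both functions take a
# dataset object (`self`) that provides get_images/get_annotations/raw_image_dir and
# related helpers; assuming such an object is available as `dataset` and already has
# train/val/test splits for export, a round trip looks like:
#
#   import_coco(dataset, ["annotations/train.json"], ["images/"])
#   export_coco(dataset, "exports/coco")   # writes per-split json files plus images/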
/CC_distributions-0.1.tar.gz/CC_distributions-0.1/CC_distributions/Gaussiandistribution.py
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev) | PypiClean |
/Pyrseas-0.9.1.tar.gz/Pyrseas-0.9.1/docs/rule.rst | Rules
=====
.. module:: pyrseas.dbobject.rule
The :mod:`rule` module defines two classes, :class:`Rule` and
:class:`RuleDict`, derived from :class:`DbSchemaObject` and
:class:`DbObjectDict`, respectively.
Rule
----
:class:`Rule` is derived from
:class:`~pyrseas.dbobject.DbSchemaObject` and represents a `Postgres
rewrite rule
<https://www.postgresql.org/docs/current/static/rules.html>`_.
.. autoclass:: Rule
.. automethod:: Rule.identifier
.. automethod:: Rule.to_map
.. automethod:: Rule.create
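For orientation, a rewrite rule of the kind these objects describe might be
created in SQL as follows (illustrative example only)::

    CREATE RULE notify_me AS ON UPDATE TO mytable
        DO ALSO NOTIFY mytable;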
Rule Dictionary
---------------
:class:`RuleDict` is derived from
:class:`~pyrseas.dbobject.DbObjectDict`. It is a dictionary that
represents the collection of rewrite rules in a database.
.. autoclass:: RuleDict
.. automethod:: RuleDict.from_map
| PypiClean |
/dsin100daysv32-6.0.1.tar.gz/dsin100daysv32-6.0.1/notebook/static/components/MathJax/localization/vi/MathML.js | MathJax.Localization.addTranslation("vi","MathML",{version:"2.7.7",isLoaded:true,strings:{BadMglyph:"mglyph h\u1ECFng: %1",BadMglyphFont:"Ph\u00F4ng ch\u1EEF h\u1ECFng: %1",MathPlayer:"MathJax kh\u00F4ng th\u1EC3 thi\u1EBFt l\u1EADp MathPlayer.\n\nN\u1EBFu MathPlayer ch\u01B0a \u0111\u01B0\u1EE3c c\u00E0i \u0111\u1EB7t, b\u1EA1n c\u1EA7n ph\u1EA3i c\u00E0i \u0111\u1EB7t n\u00F3 tr\u01B0\u1EDBc ti\u00EAn.\nN\u1EBFu kh\u00F4ng, c\u00E1c t\u00F9y ch\u1ECDn b\u1EA3o m\u1EADt c\u1EE7a b\u1EA1n c\u00F3 th\u1EC3 ng\u0103n tr\u1EDF c\u00E1c \u0111i\u1EC1u khi\u1EC3n ActiveX. H\u00E3y ch\u1ECDn T\u00F9y ch\u1ECDn Internet trong tr\u00ECnh \u0111\u01A1n C\u00F4ng c\u1EE5, qua th\u1EBB B\u1EA3o m\u1EADt, v\u00E0 b\u1EA5m n\u00FAt M\u1EE9c t\u00F9y ch\u1EC9nh. Ki\u1EC3m c\u00E1c h\u1ED9p \u201CCh\u1EA1y \u0111i\u1EC1u khi\u1EC3n ActiveX\u201D v\u00E0 \u201CH\u00E0nh vi nh\u1ECB ph\u00E2n v\u00E0 k\u1ECBch b\u1EA3n\u201D.\n\nHi\u1EC7n t\u1EA1i b\u1EA1n s\u1EBD g\u1EB7p c\u00E1c th\u00F4ng b\u00E1o l\u1ED7i thay v\u00EC to\u00E1n h\u1ECDc \u0111\u01B0\u1EE3c k\u1EBFt xu\u1EA5t.",CantCreateXMLParser:"MathJax kh\u00F4ng th\u1EC3 t\u1EA1o ra b\u1ED9 ph\u00E2n t\u00EDch XML cho MathML. H\u00E3y ch\u1ECDn T\u00F9y ch\u1ECDn Internet trong tr\u00ECnh \u0111\u01A1n C\u00F4ng c\u1EE5, qua th\u1EBB B\u1EA3o m\u1EADt, v\u00E0 b\u1EA5m n\u00FAt M\u1EE9c t\u00F9y ch\u1EC9nh. Ki\u1EC3m h\u1ED9p \u201CScript c\u00E1c \u0111i\u1EC1u khi\u1EC3n ActiveX \u0111\u01B0\u1EE3c \u0111\u00E1nh d\u1EA5u l\u00E0 an to\u00E0n\u201D.\n\nMathJax s\u1EBD kh\u00F4ng th\u1EC3 x\u1EED l\u00FD c\u00E1c ph\u01B0\u01A1ng tr\u00ECnh MathML.",UnknownNodeType:"Ki\u1EC3u n\u00FAt kh\u00F4ng r\u00F5: %1",UnexpectedTextNode:"N\u00FAt v\u0103n b\u1EA3n b\u1EA5t ng\u1EEB: %1",ErrorParsingMathML:"L\u1ED7i khi ph\u00E2n t\u00EDch MathML",ParsingError:"L\u1ED7i khi ph\u00E2n t\u00EDch MathML: %1",MathMLSingleElement:"MathML ph\u1EA3i ch\u1EC9 c\u00F3 m\u1ED9t ph\u1EA7n t\u1EED g\u1ED1c",MathMLRootElement:"Ph\u1EA7n t\u1EED g\u1ED1c c\u1EE7a MathML ph\u1EA3i l\u00E0 \u003Cmath\u003E, ch\u1EE9 kh\u00F4ng ph\u1EA3i %1"}});MathJax.Ajax.loadComplete("[MathJax]/localization/vi/MathML.js"); | PypiClean |
/howdou-3.0.6.tar.gz/howdou-3.0.6/README.md | Howdou
====================================================
This is a fork of Benjamin Gleitzman's excellent
[Howdoi](https://github.com/gleitz/howdoi) tool.
It's been extended to support a local indexed cache of answers using
[Elasticsearch](http://en.wikipedia.org/wiki/Elasticsearch) as the backend
search server. This allows faster searches and the ability to add custom answer
annotations and documentation via a local
[YAML](http://en.wikipedia.org/wiki/YAML) file.
I made this modification when I realized that howdoi is ideal for finding
common one-liners, whereas what I really needed was help finding less common
guides and solutions to subtler, more complex problems. In these cases, I found
that the answers howdoi provided still required me to do more research and
write notes, notes that I needed to store somewhere for later reference.
My solution was to organize my notes in a YAML file, index this file with
Elasticsearch, and modify howdoi to refer to my Elasticsearch index first.
Instant coding answers via the command line
-------------------------------------------
Are you a hack programmer? Do you find yourself constantly Googling for
how to do basic programming tasks?
Suppose you want to know how to format a date in bash. Why open your browser
and read through blogs (risking major distraction) when you can simply stay
in the console and ask howdou:
$ howdou format date bash
> DATE=`date +%Y-%m-%d`
howdou will answer all sorts of queries:
$ howdou print stack trace python
> import traceback
>
> try:
> 1/0
> except:
> print '>>> traceback <<<'
> traceback.print_exc()
> print '>>> end of traceback <<<'
> traceback.print_exc()
$ howdou convert mp4 to animated gif
> video=/path/to/video.avi
> outdir=/path/to/output.gif
> mplayer "$video" \
> -ao null \
> -ss "00:01:00" \ # starting point
> -endpos 10 \ # duration in second
> -vo gif89a:fps=13:output=$outdir \
> -vf scale=240:180
$ howdou create tar archive
> tar -cf backup.tar --exclude "www/subf3" www
Installation
------------
First, install Elasticsearch. On Ubuntu, this is simply:
sudo apt-get install default-jre elasticsearch
or:
cd /tmp
wget https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/deb/elasticsearch/2.0.0/elasticsearch-2.0.0.deb
sudo dpkg -i elasticsearch-2.0.0.deb
sudo service elasticsearch start
Enable inline script searches:
sudo sh -c "echo 'script.engine.groovy.inline.search: on' >> /etc/elasticsearch/elasticsearch.yml"
sudo service elasticsearch restart
You may also need to enable the service to start at boot with:
sudo update-rc.d elasticsearch defaults
sudo update-rc.d elasticsearch enable
Make sure that the version of ElasticSearch matches the version of the elasticsearch Python package installed in your virtualenv.
Then install howdou via pip with:
pip install howdou
or
pip install git+https://github.com/chrisspen/howdou.git#egg=howdou
or
python setup.py install
https://elasticsearch-py.readthedocs.org/en/master/
Usage
-----
The command line is pretty straight-forward:
usage: howdou.py [-h] [-p POS] [-a] [-l] [-c] [-n NUM_ANSWERS] QUERY [QUERY ...]
instant coding answers via the command line
positional arguments:
QUERY the question to answer
optional arguments:
-h, --help show this help message and exit
-p POS, --pos POS select answer in specified position (default: 1)
-a, --all display the full text of the answer
-l, --link display only the answer link
-c, --color enable colorized output
-n NUM_ANSWERS, --num-answers NUM_ANSWERS
number of answers to return
-C, --clear-cache clear the cache
To take full advantage of howdou, you'll need to maintain a local howdou.yml
file, which is a simple serialized list of QA-sets that look like:
- questions:
- format date bash
answers:
- weight: 1
date: 2014-5-14
source: http://stackoverflow.com/questions/1401482/yyyy-mm-dd-format-date-in-shell-script
formatter: bash
text: |-
DATE=`date +%Y-%m-%d`
Note that each item is an association of many questions to many answers.
This is because there are many ways to ask the same thing, and we want the
index to be as likely as possible to correctly match your question to an
answer.
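For example, a single entry can map several phrasings of the same question to
one shared answer (the values below are illustrative only):
- questions:
  - format date bash
  - current date variable bash
  answers:
  - weight: 1
    formatter: bash
    text: |-
      DATE=`date +%Y-%m-%d`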
There's also an explicit weight value, which will be incorporated into
Elasticsearch's own search weight to control the order that results are shown.
To automatically reindex your changes, checking every 5 minutes, run:
crontab -e
and add these lines:
# Do a quick frequent update.
*/5 * * * * . /home/yourusername/.bash_aliases; howdou --action=reindex
# Do a slower but more thorough update less frequently.
0 6 * * * . /home/yourusername/.bash_aliases; howdou --action=reindex --force
Elasticsearch
-------------
A caveat with the Elasticsearch backend is that the default configuration can be a huge memory hog.
You'll probably want to edit `/etc/elasticsearch/jvm.options` and drastically reduce the values for `Xms` and `Xmx`.
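For example, a modest heap for a local index might look like this in
`jvm.options` (the values are illustrative; size them for your machine):
-Xms256m
-Xmx256m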
Development
-----------
Tests require the Python development headers to be installed, which you can install on Ubuntu with:
sudo add-apt-repository ppa:fkrull/deadsnakes
sudo apt-get update
sudo apt-get install python-dev python3-dev python3.4-minimal python3.4-dev python3.5-minimal python3.5-dev pandoc
To run all [tests](http://tox.readthedocs.org/en/latest/):
export TESTNAME=; tox
To run tests for a specific environment (e.g. Python 2.7):
export TESTNAME=; tox -e py27
To run a specific test:
export TESTNAME=:HowdouTestCase.test_unicode_answer; tox -e py27
| PypiClean |
/tf_gpu-2.11.0.2301-cp38-cp38-manylinux2014_x86_64.whl/tensorflow/python/trackable/layer_utils.py | """Utilities related to layer/model functionality."""
# TODO(b/110718070): Move these functions back to tensorflow/python/keras/utils
# once __init__ files no longer require all of tf.keras to be imported together.
import collections
import functools
import weakref
from tensorflow.python.util import object_identity
try:
# typing module is only used for comment type annotations.
import typing # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
pass
def is_layer(obj):
"""Implicit check for Layer-like objects."""
# TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).
return hasattr(obj, "_is_layer") and not isinstance(obj, type)
def has_weights(obj):
"""Implicit check for Layer-like objects."""
# TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).
has_weight = (hasattr(type(obj), "trainable_weights")
and hasattr(type(obj), "non_trainable_weights"))
return has_weight and not isinstance(obj, type)
def invalidate_recursive_cache(key):
"""Convenience decorator to invalidate the cache when setting attributes."""
def outer(f):
@functools.wraps(f)
def wrapped(self, value):
sentinel = getattr(self, "_attribute_sentinel") # type: AttributeSentinel
sentinel.invalidate(key)
return f(self, value)
return wrapped
return outer
class MutationSentinel(object):
"""Container for tracking whether a property is in a cached state."""
_in_cached_state = False
def mark_as(self, value): # type: (MutationSentinel, bool) -> bool
may_affect_upstream = (value != self._in_cached_state)
self._in_cached_state = value
return may_affect_upstream
@property
def in_cached_state(self):
return self._in_cached_state
class AttributeSentinel(object):
"""Container for managing attribute cache state within a Layer.
The cache can be invalidated either on an individual basis (for instance when
an attribute is mutated) or a layer-wide basis (such as when a new dependency
is added).
"""
def __init__(self, always_propagate=False):
self._parents = weakref.WeakSet()
self.attributes = collections.defaultdict(MutationSentinel)
# The trackable data structure containers are simple pass throughs. They
# don't know or care about particular attributes. As a result, they will
# consider themselves to be in a cached state, so it's up to the Layer
# which contains them to terminate propagation.
self.always_propagate = always_propagate
def __repr__(self):
return "{}\n {}".format(
super(AttributeSentinel, self).__repr__(),
{k: v.in_cached_state for k, v in self.attributes.items()})
def add_parent(self, node):
# type: (AttributeSentinel, AttributeSentinel) -> None
# Properly tracking removal is quite challenging; however since this is only
# used to invalidate a cache it's alright to be overly conservative. We need
# to invalidate the cache of `node` (since it has implicitly gained a child)
# but we don't need to invalidate self since attributes should not depend on
# parent Layers.
self._parents.add(node)
node.invalidate_all()
def get(self, key):
# type: (AttributeSentinel, str) -> bool
return self.attributes[key].in_cached_state
def _set(self, key, value):
# type: (AttributeSentinel, str, bool) -> None
may_affect_upstream = self.attributes[key].mark_as(value)
if may_affect_upstream or self.always_propagate:
for node in self._parents: # type: AttributeSentinel
node.invalidate(key)
def mark_cached(self, key):
# type: (AttributeSentinel, str) -> None
self._set(key, True)
def invalidate(self, key):
# type: (AttributeSentinel, str) -> None
self._set(key, False)
def invalidate_all(self):
# Parents may have different keys than their children, so we locally
# invalidate but use the `invalidate_all` method of parents.
for key in self.attributes.keys():
self.attributes[key].mark_as(False)
for node in self._parents:
node.invalidate_all()
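# Illustrative sketch (not part of the original module): a Layer-like object could
# route attribute writes through the sentinel machinery above so cached properties
# are invalidated; the class and attribute names here are hypothetical.
#
#   class MyLayer(object):
#     def __init__(self):
#       self._attribute_sentinel = AttributeSentinel()
#
#     @invalidate_recursive_cache("trainable_weights")
#     def _set_trainable(self, value):
#       self._trainable = value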
def filter_empty_layer_containers(layer_list):
"""Filter out empty Layer-like containers and uniquify."""
# TODO(b/130381733): Make this an attribute in base_layer.Layer.
existing = object_identity.ObjectIdentitySet()
to_visit = layer_list[::-1]
while to_visit:
obj = to_visit.pop()
if obj in existing:
continue
existing.add(obj)
if is_layer(obj):
yield obj
else:
sub_layers = getattr(obj, "layers", None) or []
# Trackable data structures will not show up in ".layers" lists, but
# the layers they contain will.
to_visit.extend(sub_layers[::-1]) | PypiClean |
/gaboost-1.7.5.tar.gz/gaboost-1.7.5/funboost/utils/dependency_packages_in_pythonpath/func_timeout/StoppableThread.py | import os
import ctypes
import threading
__all__ = ('StoppableThread', 'JoinThread')
class StoppableThread(threading.Thread):
'''
StoppableThread - A thread that can be stopped by forcing an exception in the execution context.
This works both to interrupt code that is in C or in python code, at either the next call to a python function,
or the next line in python code.
It is recommended that if you call stop ( @see StoppableThread.stop ) that you use an exception that inherits BaseException, to ensure it likely isn't caught.
Also, beware unmarked exception handlers in your code. Code like this:
while True:
try:
doSomething()
except:
continue
will never be able to abort, because the exception you raise is immediately caught.
    The exception is raised over and over, with a specified delay (default 2.0 seconds)
'''
def _stopThread(self, exception, raiseEvery=2.0):
'''
_stopThread - @see StoppableThread.stop
'''
if self.is_alive() is False:
return True
self._stderr = open(os.devnull, 'w')
# Create "joining" thread which will raise the provided exception
# on a repeat, until the thread stops.
joinThread = JoinThread(self, exception, repeatEvery=raiseEvery)
        # Try to prevent spurious prints
joinThread._stderr = self._stderr
joinThread.start()
joinThread._stderr = self._stderr
def stop(self, exception, raiseEvery=2.0):
'''
Stops the thread by raising a given exception.
@param exception <Exception type> - Exception to throw. Likely, you want to use something
that inherits from BaseException (so except BaseException as e: continue; isn't a problem)
This should be a class/type, NOT an instance, i.e. MyExceptionType not MyExceptionType()
@param raiseEvery <float> Default 2.0 - We will keep raising this exception every #raiseEvery seconds,
until the thread terminates.
If your code traps a specific exception type, this will allow you #raiseEvery seconds to cleanup before exit.
If you're calling third-party code you can't control, which catches BaseException, set this to a low number
to break out of their exception handler.
@return <None>
'''
return self._stopThread(exception, raiseEvery)
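# Illustrative usage sketch (not part of the original module); the exception class
# and target function below are hypothetical:
#
#   class StopTask(BaseException):
#       pass
#
#   t = StoppableThread(target=some_long_running_function)
#   t.start()
#   t.stop(StopTask, raiseEvery=0.5)  # keep raising StopTask until the thread exits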
class JoinThread(threading.Thread):
'''
    JoinThread - The workhorse that stops the StoppableThread.
Takes an exception, and upon being started immediately raises that exception in the current context
of the thread's execution (so next line of python gets it, or next call to a python api function in C code ).
@see StoppableThread for more details
'''
def __init__(self, otherThread, exception, repeatEvery=2.0):
'''
__init__ - Create a JoinThread (don't forget to call .start() ! )
@param otherThread <threading.Thread> - A thread
@param exception <BaseException> - An exception. Should be a BaseException, to prevent "catch Exception as e: continue" type code
from never being terminated. If such code is unavoidable, you can try setting #repeatEvery to a very low number, like .00001,
and it will hopefully raise within the context of the catch, and be able to break free.
@param repeatEvery <float> Default 2.0 - After starting, the given exception is immediately raised. Then, every #repeatEvery seconds,
it is raised again, until the thread terminates.
'''
threading.Thread.__init__(self)
self.otherThread = otherThread
self.exception = exception
self.repeatEvery = repeatEvery
self.daemon = True
def run(self):
'''
run - The thread main. Will attempt to stop and join the attached thread.
'''
# Try to silence default exception printing.
self.otherThread._Thread__stderr = self._stderr
if hasattr(self.otherThread, '_Thread__stop'):
# If py2, call this first to start thread termination cleanly.
# Python3 does not need such ( nor does it provide.. )
self.otherThread._Thread__stop()
while self.otherThread.is_alive():
            # We loop, raising the exception in case it's caught; hopefully this breaks us out of any handler.
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(self.otherThread.ident), ctypes.py_object(self.exception))
self.otherThread.join(self.repeatEvery)
try:
self._stderr.close()
except:
pass
# vim: set ts=4 sw=4 expandtab : | PypiClean |
/zensols.medcat-1.3.0-py3-none-any.whl/medcat/utils/normalizers.py | import re
import spacy
from medcat.pipeline.pipe_runner import PipeRunner
CONTAINS_NUMBER = re.compile('[0-9]+')
class BasicSpellChecker(object):
    r'''
    Simple spell checker in the style of Peter Norvig's corrector, using token
    counts from the concept database (CDB) vocabulary to rank candidate corrections.
    '''
def __init__(self, cdb_vocab, config, data_vocab=None):
self.vocab = cdb_vocab
self.config = config
self.data_vocab = data_vocab
def P(self, word):
"Probability of `word`."
# use inverse of rank as proxy
# returns 0 if the word isn't in the dictionary
cnt = self.vocab.get(word, 0)
if cnt != 0:
return -1 / cnt
else:
return 0
def __contains__(self, word):
if word in self.vocab:
return True
elif self.data_vocab is not None and word in self.data_vocab:
return False
else:
return False
def fix(self, word):
"Most probable spelling correction for word."
fix = max(self.candidates(word), key=self.P)
if fix != word:
return fix
else:
return None
def candidates(self, word):
"Generate possible spelling corrections for word."
if self.config.general['spell_check_deep']:
# This will check a two letter edit distance
return self.known([word]) or self.known(self.edits1(word)) or self.known(self.edits2(word)) or [word]
else:
# Will check only one letter edit distance
return self.known([word]) or self.known(self.edits1(word)) or [word]
def known(self, words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if w in self.vocab)
def edits1(self, word):
"All edits that are one edit away from `word`."
letters = 'abcdefghijklmnopqrstuvwxyz'
if self.config.general['diacritics']:
letters += 'àáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edits2(self, word):
"All edits that are two edits away from `word`."
return (e2 for e1 in self.edits1(word) for e2 in self.edits1(e1))
def edits3(self, word):
"All edits that are two edits away from `word`."
# Do d3 edits
pass
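# Illustrative usage sketch (not part of the original module); the vocabulary and
# config objects below are hypothetical stand-ins:
#
# checker = BasicSpellChecker(cdb_vocab={'disease': 120, 'patient': 300}, config=config)
# checker.fix('diseese')  # -> 'disease' (one edit away and present in the vocabulary)
# checker.fix('disease')  # -> None (already a known word)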
class TokenNormalizer(PipeRunner):
r''' Will normalize all tokens in a spacy document.
Args:
config
spell_checker
'''
# Custom pipeline component name
name = 'token_normalizer'
# Override
def __init__(self, config, spell_checker=None):
self.config = config
self.spell_checker = spell_checker
self.nlp = spacy.load(config.general['spacy_model'], disable=config.general['spacy_disabled_components'])
super().__init__(self.config.general['workers'])
# Override
def __call__(self, doc):
for token in doc:
if len(token.lower_) < self.config.preprocessing['min_len_normalize']:
token._.norm = token.lower_
elif (self.config.preprocessing.get('do_not_normalize', set())) and token.tag_ is not None and \
token.tag_ in self.config.preprocessing.get('do_not_normalize'):
token._.norm = token.lower_
elif token.lemma_ == '-PRON-':
token._.norm = token.lemma_
token._.to_skip = True
else:
token._.norm = token.lemma_.lower()
if self.config.general['spell_check']:
# Fix the token if necessary
if len(token.text) >= self.config.general['spell_check_len_limit'] and not token._.is_punct \
and token.lower_ not in self.spell_checker and not CONTAINS_NUMBER.search(token.lower_):
fix = self.spell_checker.fix(token.lower_)
if fix is not None:
tmp = self.nlp(fix)[0]
if len(token.lower_) < self.config.preprocessing['min_len_normalize']:
token._.norm = tmp.lower_
else:
token._.norm = tmp.lemma_.lower()
return doc | PypiClean |
/cool_cache-0.3.6-py3-none-any.whl/cool_cache/__dependencies__/__sources__/file_system_py/settings/during_start/094_000_jeffs_git_shortcuts.sh |
git_checkout () {
#
# if its a branch, then use switch
#
__temp_var__branches="$(git branch -a | sed -e 's/remotes\/origin\/HEAD .*//' | sed -e 's/remotes\/origin\//origin /')"
printf '%s' "$__temp_var__branches" | grep "$1" 2>/dev/null 1>/dev/null && {
unset __temp_var__branches
git switch "$@"
return
}
# if second arg exists
if [ -n "$2" ]
then
printf '%s' "$__temp_var__branches" | grep "$2" 2>/dev/null 1>/dev/null && {
unset __temp_var__branches
git switch "$@"
return
}
unset __temp_var__branches
fi
#
# otherwise use checkout
#
git checkout "$@"
return
}
git_checkout_pr () {
pr_number="$1"
git_delete_branch '@__temp__/pull_request'
git fetch origin "pull/$pr_number/head:@__temp__/pull_request"
git checkout '@__temp__/pull_request'
}
git_commit_hashes () {
git log --reflog --oneline | sed -e 's/ .*//'
}
git_log () {
git log --first-parent --date=short --pretty=format:"%Cblue%ad %h%Cgreen %s %Creset%d"
}
git_current_commit_hash () {
# https://stackoverflow.com/questions/949314/how-to-retrieve-the-hash-for-the-current-commit-in-git
git rev-parse HEAD
}
git_oldest_commit_hash () {
git log --reverse --oneline | head -n1 | sed -e 's/ .*//'
}
git_squash_all () {
git reset --soft $(git_oldest_commit_hash)
}
git_squash_to () {
commit_hash="$1"
commit_message="$2"
git reset --soft "$commit_hash" && git add -A && git commit -m "$commit_message" && echo "squash complete"
}
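# example (illustrative): squash everything after abc1234 into a single commit
# git_squash_to abc1234 "collapse work-in-progress commits"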
git_squash () {
args="$@"
git reset --soft HEAD~2 && git add -A && git commit -m "$args" && echo "squash complete"
git_log | head -n5
}
#
# sync
#
git_sync () { # git push && git pull
args="$@"
if [[ "$args" = "" ]]; then
args="-"
fi
# https://stackoverflow.com/questions/3745135/push-git-commits-tags-simultaneously
git add -A; git commit -m "$args"; git pull --no-edit; git submodule update --init --recursive --progress && git push
}
git_force_push () {
args="$@"
git push origin $args --force
}
git_force_pull () {
# get the latest
git fetch --all
# delete changes
git_delete_changes &>/dev/null
# reset to match origin
git reset --hard "origin/$(git_current_branch_name)"
}
git_delete_changes () {
# reset all the submodules
git submodule foreach --recursive 'git stash save --keep-index --include-untracked'
git submodule foreach --recursive 'git reset --hard'
git submodule update --init --recursive # https://stackoverflow.com/questions/7882603/how-to-revert-a-git-submodule-pointer-to-the-commit-stored-in-the-containing-rep
# unstage everything
git reset --
__temp_var__result="$(git stash save --keep-index --include-untracked)"
# stash everything and delete stash
if [[ "$__temp_var__result" == "No local changes to save" ]]
then
echo "no changes to delete (just FYI)"
else
git stash drop
fi
unset __temp_var__result
}
git_keep_mine () {
git checkout --ours .
git add -u
git commit -m "_Keeping all existing changes $@"
}
git_keep_theirs () { # git keep theirs
git checkout --theirs .
git add -u
git commit -m "_Accepting all incoming changes $@"
}
git_add_upstream () {
remote_name="$1"
remote_url="$2"
if [ -z "$remote_name" ]
then
echo "what should the upstream source be called?"
read remote_name
fi
if [ -z "$remote_url" ]
then
echo "what is the url to the upstream source?"
read remote_url
fi
git remote add "$remote_name" "$remote_url"
}
git_change_origin () {
remote_url="$1"
if [ -z "$remote_url" ]
then
echo "what is the url to the upstream source?"
read remote_url
fi
# change origin
git remote set-url "origin" "$remote_url"
}
#
# Branch
#
git_current_branch_name () {
git rev-parse --abbrev-ref HEAD
}
git_new_branch () {
branch_name="$1"
git switch "$(git_current_branch_name)" && git checkout -b "$branch_name" && git push --set-upstream origin "$branch_name"
}
git_delete_branch () {
git push origin --delete "$@"
git branch -D "$@"
}
git_delete_local_branch () {
git branch -D "$@"
}
absolute_path () {
echo "$(builtin cd "$(dirname "$1")"; pwd)/$(basename "$1")"
}
git_folder_as_new_branch () {
new_branch_name="$1"
target_folder="$2"
if ! [ -d ".git" ]
then
echo "need to be in the same directory as the .git folder"
exit 1
fi
if ! [ -d "$target_folder" ]
then
echo "second argument needs to be a folder that you want to be a branch"
exit 1
fi
target_folder="$(absolute_path "$target_folder")"
# create an empty branch (actually quite a tricky task)
mkdir -p ./.cache/tmp/brancher
cp -r ".git" "./.cache/tmp/brancher/.git"
touch "./.cache/tmp/brancher/.keep"
cd "./.cache/tmp/brancher/"
git checkout --orphan "$new_branch_name"
git add -A
git commit -m "init"
git push --set-upstream origin "$new_branch_name"
# copy all the files
cp -R "$target_folder"/. .
# now add and push
git add -A
git commit -m "first real branch commit"
git push
cd ../../..
git fetch origin "$new_branch_name"
rm -rf "./.cache/tmp"
}
git_add_external_branch () {
# example:
# git_add_external_branch "slowfast" 'https://github.com/facebookresearch/SlowFast.git' 'master'
# git checkout 'slowfast/master'
__temp_var__name_for_repo="$1"
__temp_var__url_for_repo="$2"
__temp_var__branch_of_repo="$3"
if [[ -z "$__temp_var__branch_of_repo" ]]
then
__temp_var__branch_of_repo="master"
fi
git remote add "@$__temp_var__name_for_repo" "$__temp_var__url_for_repo"
git fetch "@$__temp_var__name_for_repo" "$__temp_var__branch_of_repo"
git checkout -b "@$__temp_var__name_for_repo/$__temp_var__branch_of_repo" "remotes/@$__temp_var__name_for_repo/$__temp_var__branch_of_repo"
echo "new branch is named: @$__temp_var__name_for_repo/$__temp_var__branch_of_repo"
}
git_steal_external_branch () {
# example:
# git_steal_external_branch "slowfast" 'https://github.com/facebookresearch/SlowFast.git' 'master'
# git checkout 'slowfast/master'
__temp_var__name_for_repo="$1"
__temp_var__url_for_repo="$2"
__temp_var__branch_of_repo="$3"
if [[ -z "$__temp_var__branch_of_repo" ]]
then
__temp_var__branch_of_repo="master"
fi
__temp_var__new_branch_name="$__temp_var__name_for_repo/$__temp_var__branch_of_repo"
#
# create the local/origin one
#
echo "create an empty local branch" && \
git checkout --orphan "$__temp_var__new_branch_name" && \
echo "ignoring any files from other branches" && \
echo "making initial commit, otherwise things break" && \
git reset && \
touch .keep && \
git add .keep && \
git commit -m 'init' && \
echo "creating upstream branch" && \
git push --set-upstream origin "$__temp_var__new_branch_name"
#
# create the external one with @
#
echo "pulling in the external data" && \
git remote add "@$__temp_var__name_for_repo" "$__temp_var__url_for_repo" && \
git fetch "@$__temp_var__name_for_repo" "$__temp_var__branch_of_repo" && \
git checkout -b "@$__temp_var__name_for_repo/$__temp_var__branch_of_repo" "remotes/@$__temp_var__name_for_repo/$__temp_var__branch_of_repo" && \
echo "merging external branch with local branch" && \
git switch "$__temp_var__new_branch_name" && \
git merge --allow-unrelated-histories --no-edit heads/"@$__temp_var__new_branch_name" && \
git push && \
git status && \
echo "you're now on branch: $__temp_var__new_branch_name" && \
echo "" && \
echo "you probably want to add all the untracked^ files to the .gitignore file"
}
git_steal_into_submodule () {
# example:
# git_steal_into_submodule "slowfast" 'https://github.com/facebookresearch/SlowFast.git' 'master' ./submodules/slow_fast
# git checkout '@slowfast/master'
__temp_var__name_for_repo="$1"
__temp_var__url_for_repo="$2"
__temp_var__branch_of_repo="$3"
__temp_var__path_to_submodule="$4"
if [[ -z "$__temp_var__branch_of_repo" ]]
then
__temp_var__branch_of_repo="master"
fi
__temp_var__branch_to_go_back_to="$(git_current_branch_name)"
# FIXME: follow the git_steal_external_branch pattern
# echo "#" && \
# echo "# adding remote as ""@$__temp_var__name_for_repo" && \
# echo "#" && \
# git remote add "@$__temp_var__name_for_repo" "$__temp_var__url_for_repo" && \
# echo "#" && \
# echo "# fetching that branch" && \
# echo "#" && \
# git fetch "@$__temp_var__name_for_repo" "$__temp_var__branch_of_repo" && \
# echo "#" && \
# echo "# creating our branch: ""@$__temp_var__name_for_repo/$__temp_var__branch_of_repo" && \
# echo "#" && \
# git checkout -b "@$__temp_var__name_for_repo/$__temp_var__branch_of_repo" "remotes/@$__temp_var__name_for_repo/$__temp_var__branch_of_repo" && \
# git push --set-upstream origin "@$__temp_var__name_for_repo/$__temp_var__branch_of_repo" --force && \
# echo "#" && \
# echo "# uploading their commits: ""@$__temp_var__name_for_repo/$__temp_var__branch_of_repo" && \
# echo "#" && \
# git push && \
# echo "#" && \
# echo "# switching back to original branch: $__temp_var__branch_to_go_back_to" && \
# echo "#" && \
# git checkout "$__temp_var__branch_to_go_back_to" && \
# echo "#" && \
# echo "# adding submodule: $__temp_var__path_to_submodule" && \
# echo "#" && \
# git submodule add -b "@$__temp_var__name_for_repo/$__temp_var__branch_of_repo" -- "$(git config --get remote.origin.url)" "$__temp_var__path_to_submodule"
}
#
# submodules
#
git_pull_submodules () {
git submodule update --init --recursive
git submodule update --recursive --remote
}
git_push_submodules () {
args="$@"
if [[ "$args" = "" ]]; then
args="-"
fi
git submodule foreach --recursive 'git add -A && git commit -m "'"$args"'"; git push'
}
#
# tags
#
git_new_tag () {
tag_name="$1"
# local
git tag "$tag_name"
# remote
git push origin "$tag_name"
}
git_move_tag () {
tag_name="$1"
new_commit_hash="$2"
if [[ -z "$2" ]]
then
new_commit_hash="$(git_current_commit_hash)"
fi
git tag -f "$tag_name" "$new_commit_hash"
git push --force origin "$tag_name"
}
git_delete_tag () {
tag_name="$1"
# global
git push --delete origin "$tag_name"
# local
git tag --delete "$tag_name"
}
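# examples (illustrative):
# git_new_tag v1.2.0
# git_move_tag v1.2.0 abc1234
# git_delete_tag v1.2.0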
#
# misc
#
git_delete_large_file () {
filepath="$1"
if [[ -z "$filepath" ]]
then
echo "what is the path to the file you want to permantly delete?"
read filepath
fi
# check if file exists
if ! [[ -f "$filepath" ]]
then
echo "That file doesn't seem to exist"
fi
echo
echo "PLEASE make a backup (copy whole folder to somewhere else)"
echo "this is a risky operation EVEN if you're sure you want to delete the file"
echo
echo "are you sure you want to continue?";read ANSWER;echo
if [[ ! "$ANSWER" =~ ^[Yy] ]]
then
exit 0
fi
oldest_commit_with_file="$(git log --all --pretty=format:%H -- "$filepath" | tail -n 1)"
echo "$oldest_commit_with_file"
rm -rf .git/refs/original/
FILTER_BRANCH_SQUELCH_WARNING=1 git filter-branch --index-filter "git rm -rf --cached --ignore-unmatch '$filepath'" "$oldest_commit_with_file"..HEAD
echo
echo "Now you need to destroy everyone else's progress by force pushing if you want remote to have the fix"
echo
}
git_mixin () {
url="$1"
branch="$2"
commit="$3"
if [[ -z "$url" ]]
then
echo "What is the url to the mixin?"
read url
fi
if [[ -z "$branch" ]]
then
echo "What is the branch you want to mixin? (default=master)"
read branch
if [[ -z "$branch" ]]
then
branch="master"
fi
fi
# remove any leftover ones (caused by git merge conflicts)
git remote remove "@__temp__" &>/dev/null
git remote add "@__temp__" "$url"
git fetch "@__temp__" "$branch"
# if there was a commit
if ! [[ -z "$commit" ]]
then
# only merge that one commit
git cherry-pick "$commit"
else
# merge the entire history
git pull --allow-unrelated-histories "@__temp__" "$branch"
fi
git submodule update --init --recursive
git remote remove "@__temp__" &>/dev/null
}
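# example (illustrative): merge the master branch of another repo into this one
# git_mixin https://github.com/someuser/somerepo.git master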
git_list_untracked () {
git add -A -n
}
git_list_untracked_or_ignored () {
git add -fAn
}
git_url_of_origin () {
git config --get remote.origin.url
}
# self submodule
# git submodule add -b jirl --name "jirl" -- https://github.com/jeff-hykin/model_racer.git ./source/jirl
#
# short names
#
alias gs="git status"
alias gl="git_log"
alias gp="git_sync"
alias gm="git merge"
alias gfpull="git_force_pull"
alias gfpush="git_force_push"
alias gc="git_checkout"
alias gb="git branch -a"
alias gnb="git_new_branch"
alias gd="git_delete_changes"
alias gcp="git add -A;git stash"
alias gct="git add -A;git stash"
alias gpst="git stash pop;git add -A"
alias gundo="git reset --soft HEAD~1"
alias gurl="git_url_of_origin" | PypiClean |
/tornado-gcp-6.1.3.tar.gz/tornado-gcp-6.1.3/tornado/_locale_data.py | LOCALE_NAMES = {
"af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"},
"am_ET": {"name_en": u"Amharic", "name": u"አማርኛ"},
"ar_AR": {"name_en": u"Arabic", "name": u"العربية"},
"bg_BG": {"name_en": u"Bulgarian", "name": u"Български"},
"bn_IN": {"name_en": u"Bengali", "name": u"বাংলা"},
"bs_BA": {"name_en": u"Bosnian", "name": u"Bosanski"},
"ca_ES": {"name_en": u"Catalan", "name": u"Català"},
"cs_CZ": {"name_en": u"Czech", "name": u"Čeština"},
"cy_GB": {"name_en": u"Welsh", "name": u"Cymraeg"},
"da_DK": {"name_en": u"Danish", "name": u"Dansk"},
"de_DE": {"name_en": u"German", "name": u"Deutsch"},
"el_GR": {"name_en": u"Greek", "name": u"Ελληνικά"},
"en_GB": {"name_en": u"English (UK)", "name": u"English (UK)"},
"en_US": {"name_en": u"English (US)", "name": u"English (US)"},
"es_ES": {"name_en": u"Spanish (Spain)", "name": u"Español (España)"},
"es_LA": {"name_en": u"Spanish", "name": u"Español"},
"et_EE": {"name_en": u"Estonian", "name": u"Eesti"},
"eu_ES": {"name_en": u"Basque", "name": u"Euskara"},
"fa_IR": {"name_en": u"Persian", "name": u"فارسی"},
"fi_FI": {"name_en": u"Finnish", "name": u"Suomi"},
"fr_CA": {"name_en": u"French (Canada)", "name": u"Français (Canada)"},
"fr_FR": {"name_en": u"French", "name": u"Français"},
"ga_IE": {"name_en": u"Irish", "name": u"Gaeilge"},
"gl_ES": {"name_en": u"Galician", "name": u"Galego"},
"he_IL": {"name_en": u"Hebrew", "name": u"עברית"},
"hi_IN": {"name_en": u"Hindi", "name": u"हिन्दी"},
"hr_HR": {"name_en": u"Croatian", "name": u"Hrvatski"},
"hu_HU": {"name_en": u"Hungarian", "name": u"Magyar"},
"id_ID": {"name_en": u"Indonesian", "name": u"Bahasa Indonesia"},
"is_IS": {"name_en": u"Icelandic", "name": u"Íslenska"},
"it_IT": {"name_en": u"Italian", "name": u"Italiano"},
"ja_JP": {"name_en": u"Japanese", "name": u"日本語"},
"ko_KR": {"name_en": u"Korean", "name": u"한국어"},
"lt_LT": {"name_en": u"Lithuanian", "name": u"Lietuvių"},
"lv_LV": {"name_en": u"Latvian", "name": u"Latviešu"},
"mk_MK": {"name_en": u"Macedonian", "name": u"Македонски"},
"ml_IN": {"name_en": u"Malayalam", "name": u"മലയാളം"},
"ms_MY": {"name_en": u"Malay", "name": u"Bahasa Melayu"},
"nb_NO": {"name_en": u"Norwegian (bokmal)", "name": u"Norsk (bokmål)"},
"nl_NL": {"name_en": u"Dutch", "name": u"Nederlands"},
"nn_NO": {"name_en": u"Norwegian (nynorsk)", "name": u"Norsk (nynorsk)"},
"pa_IN": {"name_en": u"Punjabi", "name": u"ਪੰਜਾਬੀ"},
"pl_PL": {"name_en": u"Polish", "name": u"Polski"},
"pt_BR": {"name_en": u"Portuguese (Brazil)", "name": u"Português (Brasil)"},
"pt_PT": {"name_en": u"Portuguese (Portugal)", "name": u"Português (Portugal)"},
"ro_RO": {"name_en": u"Romanian", "name": u"Română"},
"ru_RU": {"name_en": u"Russian", "name": u"Русский"},
"sk_SK": {"name_en": u"Slovak", "name": u"Slovenčina"},
"sl_SI": {"name_en": u"Slovenian", "name": u"Slovenščina"},
"sq_AL": {"name_en": u"Albanian", "name": u"Shqip"},
"sr_RS": {"name_en": u"Serbian", "name": u"Српски"},
"sv_SE": {"name_en": u"Swedish", "name": u"Svenska"},
"sw_KE": {"name_en": u"Swahili", "name": u"Kiswahili"},
"ta_IN": {"name_en": u"Tamil", "name": u"தமிழ்"},
"te_IN": {"name_en": u"Telugu", "name": u"తెలుగు"},
"th_TH": {"name_en": u"Thai", "name": u"ภาษาไทย"},
"tl_PH": {"name_en": u"Filipino", "name": u"Filipino"},
"tr_TR": {"name_en": u"Turkish", "name": u"Türkçe"},
"uk_UA": {"name_en": u"Ukraini ", "name": u"Українська"},
"vi_VN": {"name_en": u"Vietnamese", "name": u"Tiếng Việt"},
"zh_CN": {"name_en": u"Chinese (Simplified)", "name": u"中文(简体)"},
"zh_TW": {"name_en": u"Chinese (Traditional)", "name": u"中文(繁體)"},
} | PypiClean |
/DISPbind-1.0.2.tar.gz/DISPbind-1.0.2/dispbind/bam2bw.py | import sys
import os
import os.path
import numpy.core.multiarray
import pysam
import pybedtools
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
from helper import logger, which
__author__ = [
'Rui Dong ([email protected])'
]
__all__ = ['bam2bw']
#@logger
def bam2bw(options):
# check index files
    if not options['--bam']:
        sys.exit('Generating a BigWig requires a BAM file!')
    if not options['--gsize']:
        sys.exit('Generating a BigWig requires a genome size file!')
# check output directory
out_dir = check_outdir(options['--output'])
generate_bw(out_dir, options['--name'], options['--mquality'], options['--bam'], options['--gsize'])
def check_outdir(out_dir):
'''
Create directory if not exists
'''
print('Check output directory...')
# create output directory if not exist
if not os.path.exists(out_dir):
os.makedirs(out_dir)
dir_path = os.path.abspath(out_dir)
return dir_path
def generate_bw(out_dir, name, mquality, bam, gsize):
'''
Create BigWig file
'''
    # select high-quality reads
filter_bam = 'samtools view -b -F2308 -q '
filter_bam += ' %s %s > %s/%s ' % (mquality, bam, out_dir, name + '.filter.bam')
return_code = os.system(filter_bam) >> 8
if return_code:
sys.exit('Error: cannot filter bam file!')
# sort bam
sort_bam = 'samtools sort -T '
sort_bam += ' %s/%s -o %s/%s %s/%s ' % (out_dir, name + '.sorted', out_dir, name + '.sorted.bam', out_dir, name + '.filter.bam')
return_code = os.system(sort_bam) >> 8
if return_code:
sys.exit('Error: cannot sort bam file!')
# remove duplicate
rm_dup = 'samtools rmdup -s '
rm_dup += ' %s/%s %s/%s ' % (out_dir, name + '.sorted.bam', out_dir, name + '.sorted.deduped.bam')
return_code = os.system(rm_dup) >> 8
if return_code:
sys.exit('Error: cannot remove dup!')
# bam to bedpe and fill gaps
bam2bedpe = 'samtools view -b -f2 '
bam2bedpe += ' %s/%s | bedtools bamtobed -bedpe 2>/dev/null | cut -f 1,2,6 |sort -k1,1 -k2,2n > %s/%s' % (out_dir, name + '.sorted.deduped.bam', out_dir, name + '.bed')
return_code = os.system(bam2bedpe) >> 8
if return_code:
sys.exit('Error: cannot remove dup!')
# create Bigwig file
if which('bedGraphToBigWig') is not None:
print('Create BigWig file...')
map_bam_fname = '%s/%s' % (out_dir, name + '.sorted.deduped.bam')
# index bam if not exist
if not os.path.isfile(map_bam_fname + '.bai'):
pysam.index(map_bam_fname)
map_bam = pysam.AlignmentFile(map_bam_fname, 'rb')
# scale to HPB
mapped_reads = map_bam.mapped
s = 10000000.0 / mapped_reads
map_bed_fname = '%s/%s' % (out_dir, name + '.bed')
map_bed = pybedtools.BedTool(map_bed_fname)
bedgraph_fname = '%s/%s' % (out_dir,name + '.bg')
with open(bedgraph_fname, 'w') as bedgraph_f:
for line in map_bed.genome_coverage(bg=True,
g=gsize,
scale=s, split=True):
value = str(int(float(line[3]) + 0.5))
bedgraph_f.write('\t'.join(line[:3]) + '\t%s\n' % value)
# sort bedgraph
sort_bg = 'LC_COLLATE=C sort -k1,1 -k2,2n %s/%s > %s/%s ' % (out_dir,name + '.bg', out_dir,name + '.sorted.bg')
return_code = os.system(sort_bg) >> 8
if return_code:
sys.exit('Error: cannot sort bg!')
# bg 2 bigwig
bedgraph_sname = '%s/%s' % (out_dir,name + '.sorted.bg')
bigwig_fname = '%s/%s' % (out_dir,name + '.bw')
return_code = os.system('bedGraphToBigWig %s %s %s' %
(bedgraph_sname, gsize,
bigwig_fname)) >> 8
if return_code:
sys.exit('Error: cannot convert bedGraph to BigWig!')
file2rm = '%s/%s,%s/%s,%s/%s,%s/%s,%s/%s' % (out_dir, name + '.filter.bam', out_dir, name + '.sorted.bam', \
out_dir, name + '.bg', out_dir, name + '.sorted.bg', out_dir, name + '.bed')
for f in file2rm.strip().split(","):
os.remove(f)
else:
print('Could not find bedGraphToBigWig, so skip this step!') | PypiClean |
/nnisgf-0.4-py3-none-manylinux1_x86_64.whl/nnisgf-0.4.data/data/nni/node_modules/tar/lib/pack.js | 'use strict'
const Buffer = require('./buffer.js')
// A readable tar stream creator
// Technically, this is a transform stream that you write paths into,
// and tar format comes out of.
// The `add()` method is like `write()` but returns this,
// and end() returns `this` as well, so you can
// do `new Pack(opt).add('files').add('dir').end().pipe(output)`
// You could also do something like:
// streamOfPaths().pipe(new Pack()).pipe(new fs.WriteStream('out.tar'))
class PackJob {
constructor (path, absolute) {
this.path = path || './'
this.absolute = absolute
this.entry = null
this.stat = null
this.readdir = null
this.pending = false
this.ignore = false
this.piped = false
}
}
const MiniPass = require('minipass')
const zlib = require('minizlib')
const ReadEntry = require('./read-entry.js')
const WriteEntry = require('./write-entry.js')
const WriteEntrySync = WriteEntry.Sync
const WriteEntryTar = WriteEntry.Tar
const Yallist = require('yallist')
const EOF = Buffer.alloc(1024)
const ONSTAT = Symbol('onStat')
const ENDED = Symbol('ended')
const QUEUE = Symbol('queue')
const CURRENT = Symbol('current')
const PROCESS = Symbol('process')
const PROCESSING = Symbol('processing')
const PROCESSJOB = Symbol('processJob')
const JOBS = Symbol('jobs')
const JOBDONE = Symbol('jobDone')
const ADDFSENTRY = Symbol('addFSEntry')
const ADDTARENTRY = Symbol('addTarEntry')
const STAT = Symbol('stat')
const READDIR = Symbol('readdir')
const ONREADDIR = Symbol('onreaddir')
const PIPE = Symbol('pipe')
const ENTRY = Symbol('entry')
const ENTRYOPT = Symbol('entryOpt')
const WRITEENTRYCLASS = Symbol('writeEntryClass')
const WRITE = Symbol('write')
const ONDRAIN = Symbol('ondrain')
const fs = require('fs')
const path = require('path')
const warner = require('./warn-mixin.js')
const Pack = warner(class Pack extends MiniPass {
constructor (opt) {
super(opt)
opt = opt || Object.create(null)
this.opt = opt
this.cwd = opt.cwd || process.cwd()
this.maxReadSize = opt.maxReadSize
this.preservePaths = !!opt.preservePaths
this.strict = !!opt.strict
this.noPax = !!opt.noPax
this.prefix = (opt.prefix || '').replace(/(\\|\/)+$/, '')
this.linkCache = opt.linkCache || new Map()
this.statCache = opt.statCache || new Map()
this.readdirCache = opt.readdirCache || new Map()
this[WRITEENTRYCLASS] = WriteEntry
if (typeof opt.onwarn === 'function')
this.on('warn', opt.onwarn)
this.zip = null
if (opt.gzip) {
if (typeof opt.gzip !== 'object')
opt.gzip = {}
this.zip = new zlib.Gzip(opt.gzip)
this.zip.on('data', chunk => super.write(chunk))
this.zip.on('end', _ => super.end())
this.zip.on('drain', _ => this[ONDRAIN]())
this.on('resume', _ => this.zip.resume())
} else
this.on('drain', this[ONDRAIN])
this.portable = !!opt.portable
this.noDirRecurse = !!opt.noDirRecurse
this.follow = !!opt.follow
this.noMtime = !!opt.noMtime
this.mtime = opt.mtime || null
this.filter = typeof opt.filter === 'function' ? opt.filter : _ => true
this[QUEUE] = new Yallist
this[JOBS] = 0
this.jobs = +opt.jobs || 4
this[PROCESSING] = false
this[ENDED] = false
}
[WRITE] (chunk) {
return super.write(chunk)
}
add (path) {
this.write(path)
return this
}
end (path) {
if (path)
this.write(path)
this[ENDED] = true
this[PROCESS]()
return this
}
write (path) {
if (this[ENDED])
throw new Error('write after end')
if (path instanceof ReadEntry)
this[ADDTARENTRY](path)
else
this[ADDFSENTRY](path)
return this.flowing
}
[ADDTARENTRY] (p) {
const absolute = path.resolve(this.cwd, p.path)
if (this.prefix)
p.path = this.prefix + '/' + p.path.replace(/^\.(\/+|$)/, '')
// in this case, we don't have to wait for the stat
if (!this.filter(p.path, p))
p.resume()
else {
const job = new PackJob(p.path, absolute, false)
job.entry = new WriteEntryTar(p, this[ENTRYOPT](job))
job.entry.on('end', _ => this[JOBDONE](job))
this[JOBS] += 1
this[QUEUE].push(job)
}
this[PROCESS]()
}
[ADDFSENTRY] (p) {
const absolute = path.resolve(this.cwd, p)
if (this.prefix)
p = this.prefix + '/' + p.replace(/^\.(\/+|$)/, '')
this[QUEUE].push(new PackJob(p, absolute))
this[PROCESS]()
}
[STAT] (job) {
job.pending = true
this[JOBS] += 1
const stat = this.follow ? 'stat' : 'lstat'
fs[stat](job.absolute, (er, stat) => {
job.pending = false
this[JOBS] -= 1
if (er)
this.emit('error', er)
else
this[ONSTAT](job, stat)
})
}
[ONSTAT] (job, stat) {
this.statCache.set(job.absolute, stat)
job.stat = stat
// now we have the stat, we can filter it.
if (!this.filter(job.path, stat))
job.ignore = true
this[PROCESS]()
}
[READDIR] (job) {
job.pending = true
this[JOBS] += 1
fs.readdir(job.absolute, (er, entries) => {
job.pending = false
this[JOBS] -= 1
if (er)
return this.emit('error', er)
this[ONREADDIR](job, entries)
})
}
[ONREADDIR] (job, entries) {
this.readdirCache.set(job.absolute, entries)
job.readdir = entries
this[PROCESS]()
}
[PROCESS] () {
if (this[PROCESSING])
return
this[PROCESSING] = true
for (let w = this[QUEUE].head;
w !== null && this[JOBS] < this.jobs;
w = w.next) {
this[PROCESSJOB](w.value)
if (w.value.ignore) {
const p = w.next
this[QUEUE].removeNode(w)
w.next = p
}
}
this[PROCESSING] = false
if (this[ENDED] && !this[QUEUE].length && this[JOBS] === 0) {
if (this.zip)
this.zip.end(EOF)
else {
super.write(EOF)
super.end()
}
}
}
get [CURRENT] () {
return this[QUEUE] && this[QUEUE].head && this[QUEUE].head.value
}
[JOBDONE] (job) {
this[QUEUE].shift()
this[JOBS] -= 1
this[PROCESS]()
}
[PROCESSJOB] (job) {
if (job.pending)
return
if (job.entry) {
if (job === this[CURRENT] && !job.piped)
this[PIPE](job)
return
}
if (!job.stat) {
if (this.statCache.has(job.absolute))
this[ONSTAT](job, this.statCache.get(job.absolute))
else
this[STAT](job)
}
if (!job.stat)
return
// filtered out!
if (job.ignore)
return
if (!this.noDirRecurse && job.stat.isDirectory() && !job.readdir) {
if (this.readdirCache.has(job.absolute))
this[ONREADDIR](job, this.readdirCache.get(job.absolute))
else
this[READDIR](job)
if (!job.readdir)
return
}
// we know it doesn't have an entry, because that got checked above
job.entry = this[ENTRY](job)
if (!job.entry) {
job.ignore = true
return
}
if (job === this[CURRENT] && !job.piped)
this[PIPE](job)
}
[ENTRYOPT] (job) {
return {
onwarn: (msg, data) => {
this.warn(msg, data)
},
noPax: this.noPax,
cwd: this.cwd,
absolute: job.absolute,
preservePaths: this.preservePaths,
maxReadSize: this.maxReadSize,
strict: this.strict,
portable: this.portable,
linkCache: this.linkCache,
statCache: this.statCache,
noMtime: this.noMtime,
mtime: this.mtime
}
}
[ENTRY] (job) {
this[JOBS] += 1
try {
return new this[WRITEENTRYCLASS](job.path, this[ENTRYOPT](job))
.on('end', () => this[JOBDONE](job))
.on('error', er => this.emit('error', er))
} catch (er) {
this.emit('error', er)
}
}
[ONDRAIN] () {
if (this[CURRENT] && this[CURRENT].entry)
this[CURRENT].entry.resume()
}
// like .pipe() but using super, because our write() is special
[PIPE] (job) {
job.piped = true
if (job.readdir)
job.readdir.forEach(entry => {
const p = this.prefix ?
job.path.slice(this.prefix.length + 1) || './'
: job.path
const base = p === './' ? '' : p.replace(/\/*$/, '/')
this[ADDFSENTRY](base + entry)
})
const source = job.entry
const zip = this.zip
if (zip)
source.on('data', chunk => {
if (!zip.write(chunk))
source.pause()
})
else
source.on('data', chunk => {
if (!super.write(chunk))
source.pause()
})
}
pause () {
if (this.zip)
this.zip.pause()
return super.pause()
}
})
class PackSync extends Pack {
constructor (opt) {
super(opt)
this[WRITEENTRYCLASS] = WriteEntrySync
}
// pause/resume are no-ops in sync streams.
pause () {}
resume () {}
[STAT] (job) {
const stat = this.follow ? 'statSync' : 'lstatSync'
this[ONSTAT](job, fs[stat](job.absolute))
}
[READDIR] (job, stat) {
this[ONREADDIR](job, fs.readdirSync(job.absolute))
}
// gotta get it all in this tick
[PIPE] (job) {
const source = job.entry
const zip = this.zip
if (job.readdir)
job.readdir.forEach(entry => {
const p = this.prefix ?
job.path.slice(this.prefix.length + 1) || './'
: job.path
const base = p === './' ? '' : p.replace(/\/*$/, '/')
this[ADDFSENTRY](base + entry)
})
if (zip)
source.on('data', chunk => {
zip.write(chunk)
})
else
source.on('data', chunk => {
super[WRITE](chunk)
})
}
}
Pack.Sync = PackSync
module.exports = Pack | PypiClean |
/azureml_contrib_automl_dnn_forecasting-1.53.0-py3-none-any.whl/forecast/forecast/data/dataset.py |
import bisect
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
from torch.utils.data import Dataset
from forecast.data import (
FUTURE_DEP_KEY,
FUTURE_DEP_KEY_UTF,
FUTURE_IND_KEY,
FUTURE_IND_KEY_UTF,
PAST_DEP_KEY,
PAST_DEP_KEY_UTF,
PAST_IND_KEY,
PAST_IND_KEY_UTF,
)
from .transforms import AbstractTransform, TSample
class TimeSeriesDatasetBase(Dataset):
def __init__(
self,
*,
window_size: int,
forecast_horizon: int,
num_features: int,
targets: Sequence[int],
step: int = 1,
transform: Optional[AbstractTransform] = None,
future_regressors: Optional[Sequence[int]] = None,
include_untransformed: bool = False,
):
self._window_size = window_size
self._forecast_period = forecast_horizon
self._full_sample_size = self._window_size + self._forecast_period
self._step = step
self._transform = transform
self._targets = list(targets)
target_set = set(targets)
self._regressors = [i for i in range(num_features) if i not in target_set]
self._future_regressors = future_regressors if future_regressors else self._regressors
self._include_utf = include_untransformed
@property
def transform(self) -> Optional[AbstractTransform]:
return self._transform
class PrecomputedTimeSeriesDataset(TimeSeriesDatasetBase):
"""Provides a moving window view into a list of time series."""
def __init__(
self,
time_series: Union[np.ndarray, Sequence[np.ndarray]],
window_size: int,
forecast_horizon: int,
targets: List[int],
step: int = 1,
transform: Optional[AbstractTransform] = None,
future_regressors: Optional[List[int]] = None,
include_untransformed: bool = False,
):
"""Creates a time series dataset.
Parameters
----------
time_series: np.ndarray | Sequence[np.ndarray]
List of time series arrays
window_size: int
Number of samples used as input for forecasting.
forecast_horizon: int
Number of samples to forecast.
targets: List[int]
A list of row indices of the forecast targets
step: int
Number of samples between consecutive examples from the same time series.
transform: AbstractTransform, optional
A transform to apply to the data (defaults to None)
future_regressors: List[int], optional
The future regressors available for prediction (defaults to all non-targets)
include_untransformed: bool, optional
Determines whether untransformed values are also included in a sample (default is False)
"""
super().__init__(
window_size=window_size,
forecast_horizon=forecast_horizon,
num_features=time_series[0].shape[0],
targets=targets,
step=step,
transform=transform,
future_regressors=future_regressors,
include_untransformed=include_untransformed,
)
self._data = time_series
# store (ts_index, start_ind) in list
# __getitem__ will use this to slice the cached TS data
self._sample_ids: List[Tuple[int, int]] = []
n_dropped = 0
for i, ts in enumerate(self._data):
# convert a single time series into a series of sequential samples
if ts.shape[-1] < self._forecast_period:
# we can't forecast N samples if we have < N samples to serve as ground truth
n_dropped += 1
continue
elif ts.shape[-1] < self._full_sample_size:
# If the time series is too short, we will zero pad the input
# TODO: revisit whether we should pad
num_examples = 1
else:
# truncate incomplete samples at the end
num_examples = (ts.shape[-1] - self._full_sample_size + self._step) // self._step
# store (ts_index, start_ind)
for j in range(num_examples):
self._sample_ids.append((i, j * self._step))
# Inform user about time series that were too short
if n_dropped > 0:
print(f"Dropped {n_dropped} time series due to length.")
def __len__(self) -> int:
"""Provides the length of the dataset.
Returns
-------
int
The number of examples in the dataset
"""
return len(self._sample_ids)
def __getitem__(self, idx: int) -> TSample:
"""Retrieves an example from the dataset.
Parameters
----------
idx: int
The index of the example to retrieve
Returns
-------
The transformed sample
"""
# Get time series
ts_id, offset = self._sample_ids[idx]
ts = self._data[ts_id]
# Prepare input and target. Zero pad if necessary.
if ts.shape[-1] < self._full_sample_size:
# If the time series is too short, zero-pad on the left
# TODO: revisit whether we should pad
X_past = ts[self._regressors, : -self._forecast_period]
X_past = np.pad(
X_past,
pad_width=((0, 0), (self._window_size - X_past.shape[-1], 0)),
mode="constant",
constant_values=0,
)
y_past = ts[self._targets, : -self._forecast_period]
y_past = np.pad(
y_past,
pad_width=((0, 0), (self._window_size - y_past.shape[-1], 0)),
mode="constant",
constant_values=0,
)
X_fut = ts[self._future_regressors, -self._forecast_period :]
y_fut = ts[self._targets, -self._forecast_period :]
else:
X_past = ts[self._regressors, offset : offset + self._window_size]
y_past = ts[self._targets, offset : offset + self._window_size]
X_fut = ts[self._future_regressors, offset + self._window_size : offset + self._full_sample_size]
y_fut = ts[self._targets, offset + self._window_size : offset + self._full_sample_size]
# Create the input and output for the sample
# X_past: (num_features, window_size)
# y_past: (num_targets, window_size)
# X_fut: (num_fut_features, horizon)
# y_fut: (num_targets, horizon)
sample = {PAST_IND_KEY: X_past, PAST_DEP_KEY: y_past, FUTURE_IND_KEY: X_fut, FUTURE_DEP_KEY: y_fut}
if self.transform:
sample = self.transform(sample)
if self._include_utf:
sample[PAST_IND_KEY_UTF] = X_past
sample[PAST_DEP_KEY_UTF] = y_past
sample[FUTURE_IND_KEY_UTF] = X_fut
sample[FUTURE_DEP_KEY_UTF] = y_fut
return sample
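# Illustrative usage sketch (not part of the original module); shapes follow the
# slicing logic above, and the arrays/values are made up:
#
#   series = [np.random.randn(2, 500), np.random.randn(2, 350)]  # (num_features, length)
#   ds = PrecomputedTimeSeriesDataset(series, window_size=72, forecast_horizon=24, targets=[0])
#   sample = ds[0]
#   # sample[PAST_DEP_KEY].shape == (1, 72); sample[FUTURE_DEP_KEY].shape == (1, 24)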
class OnlineTimeSeriesDataset(TimeSeriesDatasetBase):
def __init__(
self,
time_series: np.ndarray,
window_size: int,
forecast_horizon: int,
targets: List[int],
*,
step: int = 1,
ts_id_idx: Optional[int] = None,
sample_offset: int = 0,
transform: Optional[AbstractTransform] = None,
future_regressors: Optional[List[int]] = None,
include_untransformed: bool = False,
):
"""Creates a time series dataset.
Parameters
----------
time_series: np.ndarray
A (potentially memmap'd) numpy array
window_size: int
Number of samples used as input for forecasting.
forecast_horizon: int
Number of samples to forecast.
targets: List[int]
A list of row indices of the forecast targets
step: int
Number of samples between consecutive examples from the same time series.
ts_id_idx: Optional[int]
The column corresponding to the time series id, defaults to None (single time series dataset)
transform: AbstractTransform, optional
A transform to apply to the data (defaults to None)
future_regressors: List[int], optional
The future regressors available for prediction (defaults to all non-targets)
include_untransformed: bool, optional
Determines whether untransformed values are also included in a sample (default is False)
"""
super().__init__(
window_size=window_size,
forecast_horizon=forecast_horizon,
num_features=time_series.shape[1],
targets=targets,
step=step,
transform=transform,
future_regressors=future_regressors,
include_untransformed=include_untransformed,
)
self._data = time_series
self._ts_id_idx = ts_id_idx
self._sample_offset = sample_offset
if self._ts_id_idx is not None:
# if we have >1 time series, we assume
# ts_id's range from min_series_id -> min_series_id + N_ts - 1
# rows are ordered by ts_id first and date second
# data is dense (no missing dates)
min_series_id = self._data[:, self._ts_id_idx].min()
num_series = self._data[:, self._ts_id_idx].max() - min_series_id + 1
ts_inds = np.arange(min_series_id, min_series_id + num_series)[None, :]
# get the first occurrence of the time series index
# index 0 --> min_series_id
self._series_start_row_ind = np.argmax(self._data[:, self._ts_id_idx][:, None] == ts_inds, axis=0)
assert len(self._series_start_row_ind) == num_series
# if there is only 1 series, np.diff returns a 1d array of length 0 which will concat with the final value
# as expected
num_elem_by_series = np.concatenate(
[np.diff(self._series_start_row_ind), np.array([self._data.shape[0] - self._series_start_row_ind[-1]])]
)
num_sample_by_series = (num_elem_by_series - self._full_sample_size + self._step) // self._step
assert len(num_sample_by_series) == num_series
num_empty = (num_sample_by_series == 0).sum()
if num_empty > 0:
print(f"Dropping {num_empty} series which lack {self._full_sample_size} time steps.")
if num_empty == num_series:
raise RuntimeError("All series lack enough time steps to generate a full sample.")
# compute the first sample of each series
# note: series with 0 samples will have the same start index as the next series. bisect_right will handle
# this though as it'll use the last instance in the 0-sample run. The only exception is if the dataset
# ends with 1 or more 0-sample runs. We handle this case explicitly.
cumul_sample = num_sample_by_series.cumsum()
# this also works correctly if cumul_sample is of length 1
self._series_start_sample_ind = np.concatenate([np.array([0]), cumul_sample[:-1]])
self._max_sample = cumul_sample[-1] + sample_offset
# manually override the start_sample_ind to inf for any trailing series that have 0 samples
# this ensures that bisect always stays to their left for all finite values
if num_sample_by_series[-1] == 0:
for i in range(1, num_series):
if num_sample_by_series[-i] == 0:
self._series_start_sample_ind[-i] = np.inf
else:
break
else:
# otherwise, we have a single time series and assume:
# rows are ordered by date
# data is dense (no missing dates)
self._series_start_row_ind = self._series_start_sample_ind = None
self._max_sample = (len(self._data) - self._full_sample_size + self._step) // self._step + sample_offset
def __len__(self) -> int:
# TODO: fix for sharded datasets
return self._max_sample
def __getitem__(self, idx: int) -> TSample:
"""Retrieves an example from the dataset.
Parameters
----------
idx: int
The index of the example to retrieve
Returns
-------
The transformed sample
"""
if idx < self._sample_offset or idx >= self._max_sample:
raise IndexError(f"Index {idx} if out of the bounds [{self._sample_offset}, {self._max_sample})")
idx -= self._sample_offset
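        # Locate the series containing this sample: _series_start_sample_ind
        # holds the index of each series' first sample in the flattened sample
        # ordering, so bisect_right(...) - 1 picks the last series whose first
        # sample index is <= idx, and the remainder (idx - start) times _step
        # gives the row offset of the sample within that series.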
if self._series_start_sample_ind is not None:
series_idx = bisect.bisect_right(self._series_start_sample_ind, idx) - 1
series_start_row_ind = self._series_start_row_ind[series_idx]
sample_start_row_ind = series_start_row_ind + (idx - self._series_start_sample_ind[series_idx]) * self._step
else:
sample_start_row_ind = idx * self._step
X_past = self._data[sample_start_row_ind : sample_start_row_ind + self._window_size, self._regressors]
y_past = self._data[sample_start_row_ind : sample_start_row_ind + self._window_size, self._targets]
X_fut = self._data[
sample_start_row_ind + self._window_size : sample_start_row_ind + self._full_sample_size,
self._future_regressors,
]
y_fut = self._data[
sample_start_row_ind + self._window_size : sample_start_row_ind + self._full_sample_size, self._targets
]
# Create the input and output for the sample
# X_past: (num_features, window_size)
# y_past: (num_targets, window_size)
# X_fut: (num_fut_features, horizon)
# y_fut: (num_targets, horizon)
sample = {PAST_IND_KEY: X_past.T, PAST_DEP_KEY: y_past.T, FUTURE_IND_KEY: X_fut.T, FUTURE_DEP_KEY: y_fut.T}
if self.transform:
sample = self.transform(sample)
if self._include_utf:
sample[PAST_IND_KEY_UTF] = X_past.T
sample[PAST_DEP_KEY_UTF] = y_past.T
sample[FUTURE_IND_KEY_UTF] = X_fut.T
sample[FUTURE_DEP_KEY_UTF] = y_fut.T
return sample
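# A minimal usage sketch for OnlineTimeSeriesDataset (illustrative only: the
# array contents, column indices and sizes below are assumptions, not values
# defined by this module):
#
#   import numpy as np
#   data = np.random.rand(1000, 5)             # rows ordered by date, 5 columns
#   ds = OnlineTimeSeriesDataset(
#       data, window_size=48, forecast_horizon=12, targets=[0],
#       step=1, future_regressors=[1, 2],
#   )
#   sample = ds[0]
#   # sample[PAST_DEP_KEY] follows the (num_targets, window_size) convention
#   # documented in __getitem__, i.e. (1, 48) for this configuration.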
# for backwards compatibility
TimeSeriesDataset = PrecomputedTimeSeriesDataset | PypiClean |
/fsleyes-plugin-mrs-0.1.0.tar.gz/fsleyes-plugin-mrs-0.1.0/fsleyes_plugin_mrs/mrsviewprofile.py | import wx
import fsl.data.image as fslimage
from fsleyes.profiles import plotprofile
from matplotlib.text import Text
class MRSViewProfile(plotprofile.PlotProfile):
"""The ``MRSViewProfile`` is a :class:`.PlotProfile` for use with the
:class:`.MRSView`.
In addition to the ``panzoom`` mode provided by the :class:`.PlotProfile`
class, the ``MRSViewProfile`` class implements a ``phasing`` mode, in
which the user is able to click/drag on a plot to change the
zero and first order phase for the currently selected overlay.
"""
@staticmethod
def supportedView():
"""Returns the :class:`.MRSView` class. """
from .plugin import MRSView
return MRSView
@staticmethod
def tempModes():
"""Returns the temporary mode map for the ``MRSViewProfile``,
which controls the use of modifier keys to temporarily enter other
interaction modes.
"""
return {('panzoom', wx.WXK_CONTROL): 'phasing0',
('panzoom', wx.WXK_SHIFT): 'phasing1',
('panzoom', (wx.WXK_CONTROL, wx.WXK_SHIFT)): 'phasingBoth',
('panzoom', (wx.WXK_ALT, wx.WXK_CONTROL)): 'phasing0_all',
('panzoom', (wx.WXK_ALT, wx.WXK_SHIFT)): 'phasing1_all',
('panzoom', (wx.WXK_ALT, wx.WXK_CONTROL, wx.WXK_SHIFT)): 'phasingBoth_all'}
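        # For example, holding Ctrl while dragging temporarily switches from
        # 'panzoom' to 'phasing0', and adding Alt applies the same adjustment
        # to every plotted data series ('phasing0_all').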
def __init__(self, viewPanel, overlayList, displayCtx):
"""Create a ``MRSViewProfile``.
:arg viewPanel: A :class:`.MRSView` instance.
:arg overlayList: The :class:`.OverlayList` instance.
:arg displayCtx: The :class:`.DisplayContext` instance.
"""
plotprofile.PlotProfile.__init__(self,
viewPanel,
overlayList,
displayCtx,
['phasing0', 'phasing1', 'phasing'])
self._guideText = None
self._p0 = 0
self._p1 = 0
self._tempmodetext = None
def __phasingModeCompatible(self):
"""Returns ``True`` if phasing can currently be carried out, ``False``
otherwise.
"""
overlay = self.displayCtx.getSelectedOverlay()
if not isinstance(overlay, fslimage.Image):
return False
if not overlay.iscomplex:
return False
if len(overlay.shape) < 4 or overlay.shape[3] == 1:
return False
return True
def __updatePhase(self, xvalue, yvalue, p0, p1, all):
"""Called by the ``phasing`` event handlers.
Updates the zeroth and first order phase.
:arg xvalue: Normalised x position (0 to 1)
:arg yvalue: Normalised y position (0 to 1)
:arg bool p0: If true then 0th order phase will be updated
:arg bool p1: If true then 1st order phase will be updated
:arg bool all: If true then all dataseries will be updated, else only the selected.
"""
if xvalue is None or yvalue is None:
return
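        # Map the normalised mouse position to a phase value: the full canvas
        # width spans roughly +/-198 degrees of zeroth-order phase
        # (1.1 * 360, centred on zero), and the full canvas height spans
        # +/-0.002 s of first-order phase (shown as +/-2 ms by the guide text).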
if p0:
self._p0 = 1.1 * 360 * (xvalue - 0.5)
if p1:
self._p1 = 0.004 * (yvalue - 0.5)
mrsPanel = self.viewPanel
if all:
# Loop through all dataseries to update the phase
for ds in mrsPanel.getDataSeriesToPlot():
ds.zeroOrderPhaseCorrection = self._p0
ds.firstOrderPhaseCorrection = self._p1
else:
# Identify the single spectrum to update.
overlay = self.displayCtx.getSelectedOverlay()
spec = mrsPanel.getDataSeries(overlay)
# Update
spec.zeroOrderPhaseCorrection = self._p0
spec.firstOrderPhaseCorrection = self._p1
def __createTextString(self):
'''Create the text for the on-screen guide'''
p1_ms = 1000 * self._p1
return f'Mode: {self._tempmodetext}\n'\
f'0th: {self._p0:0.1f} degrees, 1st: {p1_ms:0.2f} ms\n'\
'Use ctrl for 0th only (left/right).\n'\
'Use shift for 1st only (up/down).\n'\
'Use ctrl+shift for both (l/r/u/d).\n'\
'+ alt to phase all.'
def __createText(self, draw=True):
"""Create the on screen text guide.
:arg bool draw: If true then draw will be called at end. Defaults to True.
"""
text = self.__createTextString()
# Create text, use pixel positioning.
self._guideText = Text(20, 20, text, transform=None)
# Add to artists and draw
canvas = self.viewPanel.canvas
canvas.artists.append(self._guideText)
if draw:
canvas.drawArtists(immediate=True)
def __removeText(self, draw=True):
"""Remove the on screen text guide.
:arg bool draw: If true then draw will be called at end. Defaults to True.
"""
mpl_canvas = self.viewPanel.canvas
if self._guideText in mpl_canvas.artists:
mpl_canvas.artists.remove(self._guideText)
if draw:
mpl_canvas.drawArtists(immediate=True)
def __updateText(self):
"""Update (remove and recreate) the on screen text guide.
Draw not called, wait for an update.
"""
self.__removeText(draw=False)
self.__createText(draw=False)
def _phasingModeLeftMouseDown(self, ev, canvas, mousePos, canvasPos):
"""On first mouse down, draws on screen text guide. """
if not self.__phasingModeCompatible():
return
# Create text for first time
self.__createText()
def _phasingModeLeftMouseDrag(self, ev, canvas, mousePos, canvasPos, p0=True, p1=True, all=False):
"""Updates the phase of the selected overlay and on screen text guide. """
if mousePos is None:
xvalue, yvalue = None, None
else:
canvas_x, canvas_y = canvas.get_width_height()
xvalue = mousePos[0] / canvas_x
yvalue = mousePos[1] / canvas_y
self.__updatePhase(xvalue, yvalue, p0, p1, all)
self.__updateText()
def _phasingModeLeftMouseUp(self, ev, canvas, mousePos, canvasPos):
"""On final mouse up, removes on screen text guide. """
# Clear the temp mode text.
self._tempmodetext = None
# Remove text at end of phasing
self.__removeText()
# Handle all the temp mode options
# 0th order only - phasing0
def _phasing0ModeLeftMouseDown(self, *a, **kwa):
self._tempmodetext = '0th order only'
self._phasingModeLeftMouseDown(*a, **kwa)
def _phasing0ModeLeftMouseDrag(self, *a, **kwa):
self._phasingModeLeftMouseDrag(*a, p0=True, p1=False, **kwa)
def _phasing0ModeLeftMouseUp(self, *a, **kwa):
self._phasingModeLeftMouseUp(*a, **kwa)
# 1st order only - phasing1
def _phasing1ModeLeftMouseDown(self, *a, **kwa):
self._tempmodetext = '1st order only'
self._phasingModeLeftMouseDown(*a, **kwa)
def _phasing1ModeLeftMouseDrag(self, *a, **kwa):
self._phasingModeLeftMouseDrag(*a, p0=False, p1=True, **kwa)
def _phasing1ModeLeftMouseUp(self, *a, **kwa):
self._phasingModeLeftMouseUp(*a, **kwa)
# 0th and 1st order - phasingBoth
def _phasingBothModeLeftMouseDown(self, *a, **kwa):
self._tempmodetext = '0th & 1st order'
self._phasingModeLeftMouseDown(*a, **kwa)
def _phasingBothModeLeftMouseDrag(self, *a, **kwa):
self._phasingModeLeftMouseDrag(*a, **kwa)
def _phasingBothModeLeftMouseUp(self, *a, **kwa):
self._phasingModeLeftMouseUp(*a, **kwa)
# 0th order only, all dataseries - phasing0_all
def _phasing0_allModeLeftMouseDown(self, *a, **kwa):
self._tempmodetext = '0th order only, all'
self._phasingModeLeftMouseDown(*a, **kwa)
def _phasing0_allModeLeftMouseDrag(self, *a, **kwa):
self._phasingModeLeftMouseDrag(*a, p0=True, p1=False, all=True, **kwa)
def _phasing0_allModeLeftMouseUp(self, *a, **kwa):
self._phasingModeLeftMouseUp(*a, **kwa)
# 1st order only, all dataseries - phasing1_all
def _phasing1_allModeLeftMouseDown(self, *a, **kwa):
self._tempmodetext = '1st order only, all'
self._phasingModeLeftMouseDown(*a, **kwa)
def _phasing1_allModeLeftMouseDrag(self, *a, **kwa):
self._phasingModeLeftMouseDrag(*a, p0=False, p1=True, all=True, **kwa)
def _phasing1_allModeLeftMouseUp(self, *a, **kwa):
self._phasingModeLeftMouseUp(*a, **kwa)
# 0th and 1st order, all dataseries - phasingBoth_all
def _phasingBoth_allModeLeftMouseDown(self, *a, **kwa):
self._tempmodetext = '0th & 1st order, all'
self._phasingModeLeftMouseDown(*a, **kwa)
def _phasingBoth_allModeLeftMouseDrag(self, *a, **kwa):
self._phasingModeLeftMouseDrag(*a, all=True, **kwa)
def _phasingBoth_allModeLeftMouseUp(self, *a, **kwa):
self._phasingModeLeftMouseUp(*a, **kwa) | PypiClean |
/mkvbatchmultiplex-2.0.1-cp38-none-any.whl/MKVBatchMultiplex/jobs/JobQueue.py |
import copy
import logging
from collections import deque
from datetime import datetime
from time import time
from PySide2.QtCore import QObject, Slot, Signal
from vsutillib.mkv import MKVCommandParser
from .. import config
from ..models import TableProxyModel
from .jobKeys import JobStatus, JobKey
from .RunJobs import RunJobs
MODULELOG = logging.getLogger(__name__)
MODULELOG.addHandler(logging.NullHandler())
class JobInfo: # pylint: disable=too-many-instance-attributes
"""
JobInfo Information for a job
Args:
        jobRowNumber (int): row number of the job in the job table.
        jobRow (list): row from the job table dataset.
        tableModel: table model holding the job dataset.
        errors (list, optional): errors on job execution. Defaults to None.
        output (list, optional): job output. Defaults to None.
        log (bool, optional): enable logging for this job. Defaults to False.
"""
def __init__(
self, jobRowNumber, jobRow, tableModel, errors=None, output=None, log=False,
):
self.__jobRow = []
self.jobRowNumber = jobRowNumber
self.jobRow = jobRow
self.oCommand = copy.deepcopy(
tableModel.dataset.data[jobRowNumber][JobKey.Command].obj
)
if not self.oCommand:
command = tableModel.dataset[jobRowNumber, JobKey.Command]
self.oCommand = MKVCommandParser(command, log=log)
if log:
MODULELOG.debug(
"JBQ0001: Job %s- Bad MKVCommandParser object.", jobRow[JobKey.ID]
)
self.date = datetime.today()
self.addTime = time()
self.startTime = None
self.endTime = None
self.errors = [] if errors is None else errors
self.output = [] if output is None else output
@property
def jobRow(self):
return self.__jobRow
@jobRow.setter
def jobRow(self, value):
if isinstance(value, list):
self.__jobRow = []
for cell in value:
self.__jobRow.append(cell)
@property
def status(self):
return self.jobRow[JobKey.Status]
@status.setter
def status(self, value):
if isinstance(value, str):
self.jobRow[JobKey.Status] = value
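# A minimal construction sketch for JobInfo (illustrative; `rowNumber` and
# `model` stand for a job-table row index and its source table model supplied
# by the surrounding application, they are not defined here):
#
#   info = JobInfo(rowNumber, model.dataset[rowNumber, ], model)
#   info.status = JobStatus.Queue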
class JobQueue(QObject):
"""
    JobQueue - manage jobs
    Args:
        jobWorkQueue (collections.deque, optional): use an external deque as the work queue. Defaults to None.
"""
# Class logging state
__log = False
__firstRun = True
__jobID = 10
statusUpdateSignal = Signal(object, str)
runSignal = Signal()
addQueueItemSignal = Signal(object)
addWaitingItemSignal = Signal()
queueEmptiedSignal = Signal()
statusChangeSignal = Signal(object)
def __init__(
self,
parent,
proxyModel=None,
funcProgress=None,
jobWorkQueue=None,
controlQueue=None,
log=None,
):
super(JobQueue, self).__init__(parent)
self.__log = None
self.__progress = None
self.__model = None
self.__proxyModel = None
self.parent = parent
self.proxyModel = proxyModel
self.progress = funcProgress
self.controlQueue = controlQueue
if jobWorkQueue is None:
self._workQueue = deque()
else:
self._workQueue = jobWorkQueue
self.log = log
self.runJobs = RunJobs(
self, self, controlQueue=self.controlQueue, log=self.log
) # progress function is a late bind
self.statusUpdateSignal.connect(self.statusUpdate)
self.runSignal.connect(self.run)
jobID = config.data.get("JobID")
if jobID:
if jobID > 9999:
# Roll over
jobID = 1
self.__jobID = jobID
def __bool__(self):
if self._workQueue:
return True
return False
def __len__(self):
return len(self._workQueue)
@classmethod
def classLog(cls, setLogging=None):
"""
get/set logging at class level
every class instance will log
unless overwritten
Args:
setLogging (bool):
- True class will log
- False turn off logging
- None returns current Value
Returns:
bool:
returns the current value set
"""
if setLogging is not None:
if isinstance(setLogging, bool):
cls.__log = setLogging
return cls.__log
@property
def log(self):
"""
class property can be used to override the class global
logging setting
Returns:
bool:
True if logging is enable False otherwise
"""
if self.__log is not None:
return self.__log
return JobQueue.classLog()
@log.setter
def log(self, value):
"""set instance log variable"""
if isinstance(value, bool) or value is None:
self.__log = value
@property
def model(self):
return self.__model
@property
def proxyModel(self):
return self.__proxyModel
@proxyModel.setter
def proxyModel(self, value):
if isinstance(value, TableProxyModel):
self.__proxyModel = value
self.__model = value.sourceModel()
@property
def progress(self):
return self.__progress
@progress.setter
def progress(self, value):
self.__progress = value
@Slot(object, str)
def statusUpdate(self, job, status):
index = self.model.index(job.jobRowNumber, JobKey.Status)
self.model.setData(index, status)
def append(self, jobRow):
"""
append append job to queue
Args:
            jobRow (int): row number of the job in the dataset
Returns:
bool: True if append successful False otherwise
"""
status = self.model.dataset[jobRow,][JobKey.Status]
if status != JobStatus.AddToQueue:
if status == JobStatus.Waiting:
self.addWaitingItemSignal.emit()
return False
jobID = self.model.dataset[jobRow,][JobKey.ID]
jobIndex = self.model.index(jobRow, JobKey.ID)
if not jobID:
self.model.setData(jobIndex, self.__jobID)
self.__jobID += 1
config.data.set(config.ConfigKey.JobID, self.__jobID)
newJob = JobInfo(jobRow, self.model.dataset[jobRow,], self.model, log=self.log,)
self._workQueue.append(newJob)
index = self.model.index(jobRow, JobKey.Status)
self.model.setData(index, JobStatus.Queue)
if self._workQueue:
index = self.model.index(jobRow, JobKey.ID)
self.addQueueItemSignal.emit(index)
return True
return False
def clear(self):
"""Clear the job queue"""
while job := self.pop():
print("Clearing the way {}".format(job.job[JobKey.ID]))
def popLeft(self):
"""
        popLeft remove and return the next job in the queue
Returns:
JobInfo: next job in queue
"""
if self._workQueue:
element = self._workQueue.popleft()
self._checkEmptied()
return element
return None
def popRight(self):
"""
        popRight remove and return the last job in the queue
        Returns:
            JobInfo: last job in the queue
"""
if self._workQueue:
element = self._workQueue.pop()
self._checkEmptied()
return element
return None
def pop(self):
"""
pop return next job in queue
Returns:
JobInfo: next job in queue
"""
if self._workQueue:
element = self._workQueue.popleft()
self._checkEmptied()
return element
return None
def _checkEmptied(self):
if not self._workQueue:
self.queueEmptiedSignal.emit()
@Slot()
def run(self):
"""
        run - process the job queue in the worker thread
"""
self.runJobs.proxyModel = self.proxyModel
self.runJobs.progress = self.progress
self.runJobs.output = self.output
self.runJobs.log = self.log
if JobQueue.__firstRun:
self.parent.jobsOutput.setAsCurrentTab()
JobQueue.__firstRun = False
self.runJobs.run() | PypiClean |
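# A minimal wiring sketch for JobQueue (illustrative; `mainWindow`,
# `proxyModel` and `reportProgress` are assumptions about the host
# application, not objects created in this module):
#
#   queue = JobQueue(mainWindow, proxyModel=proxyModel,
#                    funcProgress=reportProgress)
#   if queue.append(jobRow=0):      # queue the job on table row 0
#       queue.runSignal.emit()      # start processing in the worker thread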
/simapy-4.4.3.tar.gz/simapy-4.4.3/src/sima/riflex/riflexeigenvaluecalculationparameters.py | from typing import Dict,Sequence,List
from dmt.blueprint import Blueprint
from .blueprints.riflexeigenvaluecalculationparameters import RIFLEXEigenvalueCalculationParametersBlueprint
from sima.riflex.eigenvalueanalysisparameters import EigenvalueAnalysisParameters
from sima.sima.moao import MOAO
from sima.sima.scriptablevalue import ScriptableValue
class RIFLEXEigenvalueCalculationParameters(MOAO):
"""
Keyword arguments
-----------------
description : str
(default "")
scriptableValues : List[ScriptableValue]
eigenvalueAnalysisParameters : EigenvalueAnalysisParameters
"""
def __init__(self , description="", **kwargs):
super().__init__(**kwargs)
self.description = description
self.scriptableValues = list()
self.eigenvalueAnalysisParameters = None
for key, value in kwargs.items():
if not isinstance(value, Dict):
setattr(self, key, value)
@property
def blueprint(self) -> Blueprint:
"""Return blueprint that this entity represents"""
return RIFLEXEigenvalueCalculationParametersBlueprint()
@property
def description(self) -> str:
""""""
return self.__description
@description.setter
def description(self, value: str):
"""Set description"""
self.__description = value
@property
def scriptableValues(self) -> List[ScriptableValue]:
""""""
return self.__scriptableValues
@scriptableValues.setter
def scriptableValues(self, value: List[ScriptableValue]):
"""Set scriptableValues"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__scriptableValues = value
@property
def eigenvalueAnalysisParameters(self) -> EigenvalueAnalysisParameters:
""""""
return self.__eigenvalueAnalysisParameters
@eigenvalueAnalysisParameters.setter
def eigenvalueAnalysisParameters(self, value: EigenvalueAnalysisParameters):
"""Set eigenvalueAnalysisParameters"""
self.__eigenvalueAnalysisParameters = value | PypiClean |
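# A minimal usage sketch (illustrative; it assumes EigenvalueAnalysisParameters
# can be default-constructed, which is not verified here):
#
#   params = RIFLEXEigenvalueCalculationParameters(description="modal analysis")
#   params.eigenvalueAnalysisParameters = EigenvalueAnalysisParameters()
#   blueprint = params.blueprint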