code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
---|---|---|
import collections
import decimal
import json
import logging
from django.apps import apps
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models import Count, F, Max, OuterRef, Subquery, Sum, Value
from django.db.models.functions import Cast, Coalesce
from django.db.models.signals import post_delete
from django.dispatch.dispatcher import receiver
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from wagtail.contrib.settings.models import BaseSetting, register_setting
from wagtail.core.fields import StreamField
from addressfield.fields import ADDRESS_FIELDS_ORDER
from hypha.apply.funds.models.mixins import AccessFormData
from hypha.apply.stream_forms.blocks import FormFieldsBlock
from hypha.apply.stream_forms.files import StreamFieldDataEncoder
from hypha.apply.stream_forms.models import BaseStreamForm
from hypha.apply.utils.storage import PrivateStorage
from .vendor import Vendor
logger = logging.getLogger(__name__)
def contract_path(instance, filename):
return f'projects/{instance.project_id}/contracts/{filename}'
def document_path(instance, filename):
return f'projects/{instance.project_id}/supporting_documents/{filename}'
COMMITTED = 'committed'
CONTRACTING = 'contracting'
IN_PROGRESS = 'in_progress'
CLOSING = 'closing'
COMPLETE = 'complete'
PROJECT_STATUS_CHOICES = [
(COMMITTED, _('Committed')),
(CONTRACTING, _('Contracting')),
(IN_PROGRESS, _('In Progress')),
(CLOSING, _('Closing')),
(COMPLETE, _('Complete')),
]
class ProjectQuerySet(models.QuerySet):
def active(self):
# Projects that are not finished.
return self.exclude(status=COMPLETE)
def in_progress(self):
# Projects that users need to interact with, submitting reports or payment request.
return self.filter(
status__in=(IN_PROGRESS, CLOSING,)
)
def complete(self):
return self.filter(status=COMPLETE)
def in_approval(self):
return self.filter(
is_locked=True,
status=COMMITTED,
approvals__isnull=True,
)
def by_end_date(self, desc=False):
order = getattr(F('proposed_end'), 'desc' if desc else 'asc')(nulls_last=True)
return self.order_by(order)
def with_amount_paid(self):
return self.annotate(
amount_paid=Coalesce(Sum('invoices__paid_value'), Value(0)),
)
def with_last_payment(self):
return self.annotate(
last_payment_request=Max('invoices__requested_at'),
)
def with_outstanding_reports(self):
Report = apps.get_model('application_projects', 'Report')
return self.annotate(
outstanding_reports=Subquery(
Report.objects.filter(
project=OuterRef('pk'),
).to_do().order_by().values('project').annotate(
count=Count('pk'),
).values('count'),
output_field=models.IntegerField(),
)
)
def with_start_date(self):
return self.annotate(
start=Cast(
Subquery(
Contract.objects.filter(
project=OuterRef('pk'),
).approved().order_by(
'approved_at'
).values('approved_at')[:1]
),
models.DateField(),
)
)
def for_table(self):
return self.with_amount_paid().with_last_payment().with_outstanding_reports().select_related(
'report_config',
'submission__page',
'lead',
)
class Project(BaseStreamForm, AccessFormData, models.Model):
lead = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL, related_name='lead_projects')
submission = models.OneToOneField("funds.ApplicationSubmission", on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, related_name='owned_projects')
title = models.TextField()
vendor = models.ForeignKey(
"application_projects.Vendor",
on_delete=models.SET_NULL,
null=True, blank=True, related_name='projects'
)
value = models.DecimalField(
default=0,
max_digits=10,
decimal_places=2,
validators=[MinValueValidator(decimal.Decimal('0.01'))],
)
proposed_start = models.DateTimeField(_('Proposed Start Date'), null=True)
proposed_end = models.DateTimeField(_('Proposed End Date'), null=True)
status = models.TextField(choices=PROJECT_STATUS_CHOICES, default=COMMITTED)
form_data = JSONField(encoder=StreamFieldDataEncoder, default=dict)
form_fields = StreamField(FormFieldsBlock(), null=True)
# tracks read/write state of the Project
is_locked = models.BooleanField(default=False)
# tracks updates to the Projects fields via the Project Application Form.
user_has_updated_details = models.BooleanField(default=False)
activities = GenericRelation(
'activity.Activity',
content_type_field='source_content_type',
object_id_field='source_object_id',
related_query_name='project',
)
created_at = models.DateTimeField(auto_now_add=True)
sent_to_compliance_at = models.DateTimeField(null=True)
objects = ProjectQuerySet.as_manager()
def __str__(self):
return self.title
@property
def status_display(self):
return self.get_status_display()
def get_address_display(self):
try:
address = json.loads(self.vendor.address)
except (json.JSONDecodeError, AttributeError):
return ''
else:
return ', '.join(
address.get(field)
for field in ADDRESS_FIELDS_ORDER
if address.get(field)
)
@classmethod
def create_from_submission(cls, submission):
"""
Create a Project from the given submission.
Returns a new Project or the given ApplicationSubmissions existing
Project.
"""
if not settings.PROJECTS_ENABLED:
logging.error(f'Tried to create a Project for Submission ID={submission.id} while projects are disabled')
return None
# OneToOne relations on the targetted model cannot be accessed without
# an exception when the relation doesn't exist (is None). Since we
# want to fail fast here, we can use hasattr instead.
if hasattr(submission, 'project'):
return submission.project
# See if there is a form field named "legal name", if not use user name.
legal_name = submission.get_answer_from_label('legal name') or submission.user.full_name
vendor, _ = Vendor.objects.get_or_create(
user=submission.user
)
vendor.name = legal_name
vendor.address = submission.form_data.get('address', '')
vendor.save()
return Project.objects.create(
submission=submission,
user=submission.user,
title=submission.title,
vendor=vendor,
value=submission.form_data.get('value', 0),
)
@property
def start_date(self):
# Assume project starts when OTF are happy with the first signed contract
first_approved_contract = self.contracts.approved().order_by('approved_at').first()
if not first_approved_contract:
return None
return first_approved_contract.approved_at.date()
@property
def end_date(self):
# Aiming for the proposed end date as the last day of the project
# If still ongoing assume today is the end
return max(
self.proposed_end.date(),
timezone.now().date(),
)
def paid_value(self):
return self.invoices.paid_value()
def unpaid_value(self):
return self.invoices.unpaid_value()
def clean(self):
if self.proposed_start is None:
return
if self.proposed_end is None:
return
if self.proposed_start > self.proposed_end:
raise ValidationError(_('Proposed End Date must be after Proposed Start Date'))
def save(self, *args, **kwargs):
creating = not self.pk
if creating:
files = self.extract_files()
else:
self.process_file_data(self.form_data)
super().save(*args, **kwargs)
if creating:
self.process_file_data(files)
def editable_by(self, user):
if self.editable:
return True
# Approver can edit it when they are approving
return user.is_approver and self.can_make_approval
@property
def editable(self):
if self.status not in (CONTRACTING, COMMITTED):
return True
# Someone has approved the project - consider it locked while with contracting
if self.approvals.exists():
return False
# Someone must lead the project to make changes
return self.lead and not self.is_locked
def get_absolute_url(self):
if settings.PROJECTS_ENABLED:
return reverse('apply:projects:detail', args=[self.id])
return '#'
@property
def can_make_approval(self):
return self.is_locked and self.status == COMMITTED
def can_request_funding(self):
"""
Should we show this Project's funding block?
"""
return self.status in (CLOSING, IN_PROGRESS)
@property
def can_send_for_approval(self):
"""
Wrapper to expose the pending approval state
We don't want to expose a "Sent for Approval" state to the end User so
we infer it from the current status being "Comitted" and the Project
being locked.
"""
correct_state = self.status == COMMITTED and not self.is_locked
return correct_state and self.user_has_updated_details
@property
def requires_approval(self):
return not self.approvals.exists()
def get_missing_document_categories(self):
"""
Get the number of documents required to meet each DocumentCategorys minimum
"""
# Count the number of documents in each category currently
existing_categories = DocumentCategory.objects.filter(packet_files__project=self)
counter = collections.Counter(existing_categories)
# Find the difference between the current count and recommended count
for category in DocumentCategory.objects.all():
current_count = counter[category]
difference = category.recommended_minimum - current_count
if difference > 0:
yield {
'category': category,
'difference': difference,
}
@property
def is_in_progress(self):
return self.status == IN_PROGRESS
@property
def has_deliverables(self):
return self.deliverables.exists()
# def send_to_compliance(self, request):
# """Notify Compliance about this Project."""
# messenger(
# MESSAGES.SENT_TO_COMPLIANCE,
# request=request,
# user=request.user,
# source=self,
# )
# self.sent_to_compliance_at = timezone.now()
# self.save(update_fields=['sent_to_compliance_at'])
@register_setting
class ProjectSettings(BaseSetting):
compliance_email = models.TextField("Compliance Email")
vendor_setup_required = models.BooleanField(default=True)
class Approval(models.Model):
project = models.ForeignKey("Project", on_delete=models.CASCADE, related_name="approvals")
by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="approvals")
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
unique_together = ['project', 'by']
def __str__(self):
return _('Approval of {project} by {user}').format(project=self.project, user=self.by)
class ContractQuerySet(models.QuerySet):
def approved(self):
return self.filter(is_signed=True, approver__isnull=False)
class Contract(models.Model):
approver = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL, related_name='contracts')
project = models.ForeignKey("Project", on_delete=models.CASCADE, related_name="contracts")
file = models.FileField(upload_to=contract_path, storage=PrivateStorage())
is_signed = models.BooleanField("Signed?", default=False)
created_at = models.DateTimeField(auto_now_add=True)
approved_at = models.DateTimeField(null=True)
objects = ContractQuerySet.as_manager()
@property
def state(self):
return _('Signed') if self.is_signed else _('Unsigned')
def __str__(self):
return _('Contract for {project} ({state})').format(project=self.project, state=self.state)
def get_absolute_url(self):
return reverse('apply:projects:contract', args=[self.project.pk, self.pk])
class PacketFile(models.Model):
category = models.ForeignKey("DocumentCategory", null=True, on_delete=models.CASCADE, related_name="packet_files")
project = models.ForeignKey("Project", on_delete=models.CASCADE, related_name="packet_files")
title = models.TextField()
document = models.FileField(upload_to=document_path, storage=PrivateStorage())
def __str__(self):
return _('Project file: {title}').format(title=self.title)
def get_remove_form(self):
"""
Get an instantiated RemoveDocumentForm with this class as `instance`.
This allows us to build instances of the RemoveDocumentForm for each
instance of PacketFile in the supporting documents template. The
standard Delegated View flow makes it difficult to create these forms
in the view or template.
"""
from ..forms import RemoveDocumentForm
return RemoveDocumentForm(instance=self)
@receiver(post_delete, sender=PacketFile)
def delete_packetfile_file(sender, instance, **kwargs):
# Remove the file and don't save the base model
instance.document.delete(False)
class DocumentCategory(models.Model):
name = models.CharField(max_length=254)
recommended_minimum = models.PositiveIntegerField()
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
verbose_name_plural = 'Document Categories'
class Deliverable(models.Model):
name = models.TextField()
available_to_invoice = models.IntegerField(default=1)
unit_price = models.DecimalField(
max_digits=10,
decimal_places=2,
validators=[MinValueValidator(decimal.Decimal('0.01'))],
)
project = models.ForeignKey(
Project,
null=True, blank=True,
on_delete=models.CASCADE,
related_name='deliverables'
)
def __str__(self):
return self.name
|
[
"logging.getLogger",
"django.db.models.DateField",
"django.contrib.postgres.fields.JSONField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.Count",
"django.utils.translation.gettext_lazy",
"django.urls.reverse",
"logging.error",
"django.db.models.Max",
"django.apps.apps.get_model",
"django.contrib.contenttypes.fields.GenericRelation",
"django.db.models.Sum",
"django.db.models.ForeignKey",
"django.db.models.F",
"django.utils.timezone.now",
"django.db.models.OuterRef",
"django.db.models.DateTimeField",
"django.db.models.Value",
"hypha.apply.utils.storage.PrivateStorage",
"django.db.models.CharField",
"django.db.models.OneToOneField",
"json.loads",
"hypha.apply.stream_forms.blocks.FormFieldsBlock",
"django.db.models.BooleanField",
"django.db.models.PositiveIntegerField",
"django.dispatch.dispatcher.receiver",
"collections.Counter",
"decimal.Decimal"
] |
[((1227, 1254), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1244, 1254), False, 'import logging\n'), ((14369, 14409), 'django.dispatch.dispatcher.receiver', 'receiver', (['post_delete'], {'sender': 'PacketFile'}), '(post_delete, sender=PacketFile)\n', (14377, 14409), False, 'from django.dispatch.dispatcher import receiver\n'), ((4016, 4132), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'null': '(True)', 'on_delete': 'models.SET_NULL', 'related_name': '"""lead_projects"""'}), "(settings.AUTH_USER_MODEL, null=True, on_delete=models.\n SET_NULL, related_name='lead_projects')\n", (4033, 4132), False, 'from django.db import models\n'), ((4145, 4222), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""funds.ApplicationSubmission"""'], {'on_delete': 'models.CASCADE'}), "('funds.ApplicationSubmission', on_delete=models.CASCADE)\n", (4165, 4222), False, 'from django.db import models\n'), ((4234, 4351), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.SET_NULL', 'null': '(True)', 'related_name': '"""owned_projects"""'}), "(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null\n =True, related_name='owned_projects')\n", (4251, 4351), False, 'from django.db import models\n'), ((4360, 4378), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (4376, 4378), False, 'from django.db import models\n'), ((4392, 4519), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""application_projects.Vendor"""'], {'on_delete': 'models.SET_NULL', 'null': '(True)', 'blank': '(True)', 'related_name': '"""projects"""'}), "('application_projects.Vendor', on_delete=models.SET_NULL,\n null=True, blank=True, related_name='projects')\n", (4409, 4519), False, 'from django.db import models\n'), ((4886, 4953), 'django.db.models.TextField', 'models.TextField', ([], {'choices': 'PROJECT_STATUS_CHOICES', 'default': 'COMMITTED'}), '(choices=PROJECT_STATUS_CHOICES, default=COMMITTED)\n', (4902, 4953), False, 'from django.db import models\n'), ((4971, 5026), 'django.contrib.postgres.fields.JSONField', 'JSONField', ([], {'encoder': 'StreamFieldDataEncoder', 'default': 'dict'}), '(encoder=StreamFieldDataEncoder, default=dict)\n', (4980, 5026), False, 'from django.contrib.postgres.fields import JSONField\n'), ((5149, 5183), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (5168, 5183), False, 'from django.db import models\n'), ((5294, 5328), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (5313, 5328), False, 'from django.db import models\n'), ((5347, 5500), 'django.contrib.contenttypes.fields.GenericRelation', 'GenericRelation', (['"""activity.Activity"""'], {'content_type_field': '"""source_content_type"""', 'object_id_field': '"""source_object_id"""', 'related_query_name': '"""project"""'}), "('activity.Activity', content_type_field=\n 'source_content_type', object_id_field='source_object_id',\n related_query_name='project')\n", (5362, 5500), False, 'from django.contrib.contenttypes.fields import GenericRelation\n'), ((5548, 5587), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (5568, 5587), False, 'from django.db import models\n'), ((5617, 5648), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (5637, 5648), False, 'from django.db import models\n'), ((11829, 11865), 'django.db.models.TextField', 'models.TextField', (['"""Compliance Email"""'], {}), "('Compliance Email')\n", (11845, 11865), False, 'from django.db import models\n'), ((11894, 11927), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (11913, 11927), False, 'from django.db import models\n'), ((11974, 12059), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Project"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""approvals"""'}), "('Project', on_delete=models.CASCADE, related_name='approvals'\n )\n", (11991, 12059), False, 'from django.db import models\n'), ((12064, 12163), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE', 'related_name': '"""approvals"""'}), "(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,\n related_name='approvals')\n", (12081, 12163), False, 'from django.db import models\n'), ((12178, 12217), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (12198, 12217), False, 'from django.db import models\n'), ((12579, 12691), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'null': '(True)', 'on_delete': 'models.SET_NULL', 'related_name': '"""contracts"""'}), "(settings.AUTH_USER_MODEL, null=True, on_delete=models.\n SET_NULL, related_name='contracts')\n", (12596, 12691), False, 'from django.db import models\n'), ((12701, 12786), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Project"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""contracts"""'}), "('Project', on_delete=models.CASCADE, related_name='contracts'\n )\n", (12718, 12786), False, 'from django.db import models\n'), ((12879, 12924), 'django.db.models.BooleanField', 'models.BooleanField', (['"""Signed?"""'], {'default': '(False)'}), "('Signed?', default=False)\n", (12898, 12924), False, 'from django.db import models\n'), ((12942, 12981), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (12962, 12981), False, 'from django.db import models\n'), ((13000, 13031), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (13020, 13031), False, 'from django.db import models\n'), ((13466, 13573), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""DocumentCategory"""'], {'null': '(True)', 'on_delete': 'models.CASCADE', 'related_name': '"""packet_files"""'}), "('DocumentCategory', null=True, on_delete=models.CASCADE,\n related_name='packet_files')\n", (13483, 13573), False, 'from django.db import models\n'), ((13584, 13672), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Project"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""packet_files"""'}), "('Project', on_delete=models.CASCADE, related_name=\n 'packet_files')\n", (13601, 13672), False, 'from django.db import models\n'), ((13681, 13699), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (13697, 13699), False, 'from django.db import models\n'), ((14605, 14637), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (14621, 14637), False, 'from django.db import models\n'), ((14664, 14693), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (14691, 14693), False, 'from django.db import models\n'), ((14887, 14905), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (14903, 14905), False, 'from django.db import models\n'), ((14933, 14963), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)'}), '(default=1)\n', (14952, 14963), False, 'from django.db import models\n'), ((15136, 15244), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Project'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.CASCADE', 'related_name': '"""deliverables"""'}), "(Project, null=True, blank=True, on_delete=models.CASCADE,\n related_name='deliverables')\n", (15153, 15244), False, 'from django.db import models\n'), ((1647, 1661), 'django.utils.translation.gettext_lazy', '_', (['"""Committed"""'], {}), "('Committed')\n", (1648, 1661), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1682, 1698), 'django.utils.translation.gettext_lazy', '_', (['"""Contracting"""'], {}), "('Contracting')\n", (1683, 1698), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1719, 1735), 'django.utils.translation.gettext_lazy', '_', (['"""In Progress"""'], {}), "('In Progress')\n", (1720, 1735), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1752, 1764), 'django.utils.translation.gettext_lazy', '_', (['"""Closing"""'], {}), "('Closing')\n", (1753, 1764), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1782, 1795), 'django.utils.translation.gettext_lazy', '_', (['"""Complete"""'], {}), "('Complete')\n", (1783, 1795), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2891, 2939), 'django.apps.apps.get_model', 'apps.get_model', (['"""application_projects"""', '"""Report"""'], {}), "('application_projects', 'Report')\n", (2905, 2939), False, 'from django.apps import apps\n'), ((4760, 4784), 'django.utils.translation.gettext_lazy', '_', (['"""Proposed Start Date"""'], {}), "('Proposed Start Date')\n", (4761, 4784), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4837, 4859), 'django.utils.translation.gettext_lazy', '_', (['"""Proposed End Date"""'], {}), "('Proposed End Date')\n", (4838, 4859), True, 'from django.utils.translation import gettext_lazy as _\n'), ((5057, 5074), 'hypha.apply.stream_forms.blocks.FormFieldsBlock', 'FormFieldsBlock', ([], {}), '()\n', (5072, 5074), False, 'from hypha.apply.stream_forms.blocks import FormFieldsBlock\n'), ((10737, 10777), 'collections.Counter', 'collections.Counter', (['existing_categories'], {}), '(existing_categories)\n', (10756, 10777), False, 'import collections\n'), ((13349, 13416), 'django.urls.reverse', 'reverse', (['"""apply:projects:contract"""'], {'args': '[self.project.pk, self.pk]'}), "('apply:projects:contract', args=[self.project.pk, self.pk])\n", (13356, 13416), False, 'from django.urls import reverse\n'), ((5900, 5931), 'json.loads', 'json.loads', (['self.vendor.address'], {}), '(self.vendor.address)\n', (5910, 5931), False, 'import json\n'), ((6480, 6595), 'logging.error', 'logging.error', (['f"""Tried to create a Project for Submission ID={submission.id} while projects are disabled"""'], {}), "(\n f'Tried to create a Project for Submission ID={submission.id} while projects are disabled'\n )\n", (6493, 6595), False, 'import logging\n'), ((9531, 9579), 'django.urls.reverse', 'reverse', (['"""apply:projects:detail"""'], {'args': '[self.id]'}), "('apply:projects:detail', args=[self.id])\n", (9538, 9579), False, 'from django.urls import reverse\n'), ((12844, 12860), 'hypha.apply.utils.storage.PrivateStorage', 'PrivateStorage', ([], {}), '()\n', (12858, 12860), False, 'from hypha.apply.utils.storage import PrivateStorage\n'), ((13128, 13139), 'django.utils.translation.gettext_lazy', '_', (['"""Signed"""'], {}), "('Signed')\n", (13129, 13139), True, 'from django.utils.translation import gettext_lazy as _\n'), ((13163, 13176), 'django.utils.translation.gettext_lazy', '_', (['"""Unsigned"""'], {}), "('Unsigned')\n", (13164, 13176), True, 'from django.utils.translation import gettext_lazy as _\n'), ((13765, 13781), 'hypha.apply.utils.storage.PrivateStorage', 'PrivateStorage', ([], {}), '()\n', (13779, 13781), False, 'from hypha.apply.utils.storage import PrivateStorage\n'), ((2449, 2466), 'django.db.models.F', 'F', (['"""proposed_end"""'], {}), "('proposed_end')\n", (2450, 2466), False, 'from django.db.models import Count, F, Max, OuterRef, Subquery, Sum, Value\n'), ((2792, 2821), 'django.db.models.Max', 'Max', (['"""invoices__requested_at"""'], {}), "('invoices__requested_at')\n", (2795, 2821), False, 'from django.db.models import Count, F, Max, OuterRef, Subquery, Sum, Value\n'), ((8511, 8567), 'django.utils.translation.gettext_lazy', '_', (['"""Proposed End Date must be after Proposed Start Date"""'], {}), "('Proposed End Date must be after Proposed Start Date')\n", (8512, 8567), True, 'from django.utils.translation import gettext_lazy as _\n'), ((12318, 12354), 'django.utils.translation.gettext_lazy', '_', (['"""Approval of {project} by {user}"""'], {}), "('Approval of {project} by {user}')\n", (12319, 12354), True, 'from django.utils.translation import gettext_lazy as _\n'), ((13216, 13253), 'django.utils.translation.gettext_lazy', '_', (['"""Contract for {project} ({state})"""'], {}), "('Contract for {project} ({state})')\n", (13217, 13253), True, 'from django.utils.translation import gettext_lazy as _\n'), ((13822, 13848), 'django.utils.translation.gettext_lazy', '_', (['"""Project file: {title}"""'], {}), "('Project file: {title}')\n", (13823, 13848), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2645, 2672), 'django.db.models.Sum', 'Sum', (['"""invoices__paid_value"""'], {}), "('invoices__paid_value')\n", (2648, 2672), False, 'from django.db.models import Count, F, Max, OuterRef, Subquery, Sum, Value\n'), ((2674, 2682), 'django.db.models.Value', 'Value', (['(0)'], {}), '(0)\n', (2679, 2682), False, 'from django.db.models import Count, F, Max, OuterRef, Subquery, Sum, Value\n'), ((3679, 3697), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (3695, 3697), False, 'from django.db import models\n'), ((4685, 4708), 'decimal.Decimal', 'decimal.Decimal', (['"""0.01"""'], {}), "('0.01')\n", (4700, 4708), False, 'import decimal\n'), ((8110, 8124), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (8122, 8124), False, 'from django.utils import timezone\n'), ((15089, 15112), 'decimal.Decimal', 'decimal.Decimal', (['"""0.01"""'], {}), "('0.01')\n", (15104, 15112), False, 'import decimal\n'), ((3263, 3284), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (3282, 3284), False, 'from django.db import models\n'), ((3186, 3197), 'django.db.models.Count', 'Count', (['"""pk"""'], {}), "('pk')\n", (3191, 3197), False, 'from django.db.models import Count, F, Max, OuterRef, Subquery, Sum, Value\n'), ((3499, 3513), 'django.db.models.OuterRef', 'OuterRef', (['"""pk"""'], {}), "('pk')\n", (3507, 3513), False, 'from django.db.models import Count, F, Max, OuterRef, Subquery, Sum, Value\n'), ((3079, 3093), 'django.db.models.OuterRef', 'OuterRef', (['"""pk"""'], {}), "('pk')\n", (3087, 3093), False, 'from django.db.models import Count, F, Max, OuterRef, Subquery, Sum, Value\n')]
|
import os
import matplotlib as mpl
import torch
import torchvision
from data_management import IPDataset, Jitter, SimulateMeasurements
from networks import IterativeNet, Tiramisu
from operators import Radon
# ----- load configuration -----
import config # isort:skip
# ----- global configuration -----
mpl.use("agg")
device = torch.device("cuda:0")
torch.cuda.set_device(0)
# ----- measurement configuration -----
theta = torch.linspace(0, 180, 61)[:-1] # 60 lines, exclude endpoint
OpA = Radon(config.n, theta)
# ----- network configuration -----
subnet_params = {
"in_channels": 1,
"out_channels": 1,
"drop_factor": 0.0,
"down_blocks": (5, 7, 9, 12, 15),
"up_blocks": (15, 12, 9, 7, 5),
"pool_factors": (2, 2, 2, 2, 2),
"bottleneck_layers": 20,
"growth_rate": 16,
"out_chans_first_conv": 16,
}
subnet = Tiramisu
it_net_params = {
"num_iter": 1,
"lam": 0.0,
"lam_learnable": False,
"final_dc": False,
"resnet_factor": 1.0,
"operator": OpA,
"inverter": OpA.inv,
}
# ----- training configuration -----
mseloss = torch.nn.MSELoss(reduction="sum")
def loss_func(pred, tar):
return mseloss(pred, tar) / pred.shape[0]
train_phases = 1
train_params = {
"num_epochs": [19],
"batch_size": [10],
"loss_func": loss_func,
"save_path": [
os.path.join(
config.RESULTS_PATH,
"Radon_Tiramisu_jitter_v6_"
"train_phase_{}".format((i + 1) % (train_phases + 1)),
)
for i in range(train_phases + 1)
],
"save_epochs": 1,
"optimizer": torch.optim.Adam,
"optimizer_params": [{"lr": 8e-5, "eps": 2e-4, "weight_decay": 5e-4}],
"scheduler": torch.optim.lr_scheduler.StepLR,
"scheduler_params": {"step_size": 1, "gamma": 1.0},
"acc_steps": [1],
"train_transform": torchvision.transforms.Compose(
[SimulateMeasurements(OpA), Jitter(5e2, 0.0, 1.0)]
),
"val_transform": torchvision.transforms.Compose(
[SimulateMeasurements(OpA)],
),
"train_loader_params": {"shuffle": True, "num_workers": 0},
"val_loader_params": {"shuffle": False, "num_workers": 0},
}
# ----- data configuration -----
train_data_params = {
"path": config.DATA_PATH,
"device": device,
}
train_data = IPDataset
val_data_params = {
"path": config.DATA_PATH,
"device": device,
}
val_data = IPDataset
# ------ save hyperparameters -------
os.makedirs(train_params["save_path"][-1], exist_ok=True)
with open(
os.path.join(train_params["save_path"][-1], "hyperparameters.txt"), "w"
) as file:
for key, value in subnet_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in it_net_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in train_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in train_data_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in val_data_params.items():
file.write(key + ": " + str(value) + "\n")
file.write("train_phases" + ": " + str(train_phases) + "\n")
# ------ construct network and train -----
subnet_tmp = subnet(**subnet_params).to(device)
it_net_tmp = IterativeNet(
subnet_tmp,
**{
"num_iter": 1,
"lam": 0.0,
"lam_learnable": False,
"final_dc": False,
"resnet_factor": 1.0,
"operator": OpA,
"inverter": OpA.inv,
}
).to(device)
it_net_tmp.load_state_dict(
torch.load(
"results/Radon_Tiramisu_jitter_v4_train_phase_1/model_weights.pt",
map_location=torch.device(device),
)
)
subnet = it_net_tmp.subnet
it_net = IterativeNet(subnet, **it_net_params).to(device)
train_data = train_data("train", **train_data_params)
val_data = val_data("val", **val_data_params)
for i in range(train_phases):
train_params_cur = {}
for key, value in train_params.items():
train_params_cur[key] = (
value[i] if isinstance(value, (tuple, list)) else value
)
print("Phase {}:".format(i + 1))
for key, value in train_params_cur.items():
print(key + ": " + str(value))
it_net.train_on(train_data, val_data, **train_params_cur)
|
[
"operators.Radon",
"os.makedirs",
"matplotlib.use",
"networks.IterativeNet",
"os.path.join",
"data_management.Jitter",
"torch.nn.MSELoss",
"data_management.SimulateMeasurements",
"torch.cuda.set_device",
"torch.linspace",
"torch.device"
] |
[((308, 322), 'matplotlib.use', 'mpl.use', (['"""agg"""'], {}), "('agg')\n", (315, 322), True, 'import matplotlib as mpl\n'), ((332, 354), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (344, 354), False, 'import torch\n'), ((355, 379), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(0)'], {}), '(0)\n', (376, 379), False, 'import torch\n'), ((497, 519), 'operators.Radon', 'Radon', (['config.n', 'theta'], {}), '(config.n, theta)\n', (502, 519), False, 'from operators import Radon\n'), ((1087, 1120), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (1103, 1120), False, 'import torch\n'), ((2422, 2479), 'os.makedirs', 'os.makedirs', (["train_params['save_path'][-1]"], {'exist_ok': '(True)'}), "(train_params['save_path'][-1], exist_ok=True)\n", (2433, 2479), False, 'import os\n'), ((429, 455), 'torch.linspace', 'torch.linspace', (['(0)', '(180)', '(61)'], {}), '(0, 180, 61)\n', (443, 455), False, 'import torch\n'), ((2495, 2561), 'os.path.join', 'os.path.join', (["train_params['save_path'][-1]", '"""hyperparameters.txt"""'], {}), "(train_params['save_path'][-1], 'hyperparameters.txt')\n", (2507, 2561), False, 'import os\n'), ((3233, 3400), 'networks.IterativeNet', 'IterativeNet', (['subnet_tmp'], {}), "(subnet_tmp, **{'num_iter': 1, 'lam': 0.0, 'lam_learnable': \n False, 'final_dc': False, 'resnet_factor': 1.0, 'operator': OpA,\n 'inverter': OpA.inv})\n", (3245, 3400), False, 'from networks import IterativeNet, Tiramisu\n'), ((3682, 3719), 'networks.IterativeNet', 'IterativeNet', (['subnet'], {}), '(subnet, **it_net_params)\n', (3694, 3719), False, 'from networks import IterativeNet, Tiramisu\n'), ((1870, 1895), 'data_management.SimulateMeasurements', 'SimulateMeasurements', (['OpA'], {}), '(OpA)\n', (1890, 1895), False, 'from data_management import IPDataset, Jitter, SimulateMeasurements\n'), ((1897, 1920), 'data_management.Jitter', 'Jitter', (['(500.0)', '(0.0)', '(1.0)'], {}), '(500.0, 0.0, 1.0)\n', (1903, 1920), False, 'from data_management import IPDataset, Jitter, SimulateMeasurements\n'), ((1989, 2014), 'data_management.SimulateMeasurements', 'SimulateMeasurements', (['OpA'], {}), '(OpA)\n', (2009, 2014), False, 'from data_management import IPDataset, Jitter, SimulateMeasurements\n'), ((3616, 3636), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (3628, 3636), False, 'import torch\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import time
import requests
from cleep.exception import CommandError, MissingParameter
from cleep.libs.internals.task import Task
from cleep.core import CleepModule
from cleep.common import CATEGORIES
__all__ = ["Openweathermap"]
class Openweathermap(CleepModule):
"""
OpenWeatherMap application.
Returns current weather conditions and forecast.
Note:
https://openweathermap.org/api
"""
MODULE_AUTHOR = "Cleep"
MODULE_VERSION = "1.2.3"
MODULE_DEPS = []
MODULE_CATEGORY = CATEGORIES.SERVICE
MODULE_DESCRIPTION = "Gets weather conditions using OpenWeatherMap service"
MODULE_LONGDESCRIPTION = (
"This application gets data from OpenWeatherMap online service and displays it directly on your device "
"dashboard.<br>OpenWeatherMap allows to get for free current weather condition and 5 days forecast.<br> "
"This application also broadcasts weather event on all your devices."
)
MODULE_TAGS = ["weather", "forecast"]
MODULE_URLINFO = "https://github.com/tangb/cleepapp-openweathermap"
MODULE_URLHELP = None
MODULE_URLSITE = "https://openweathermap.org/"
MODULE_URLBUGS = "https://github.com/tangb/cleepapp-openweathermap/issues"
MODULE_CONFIG_FILE = "openweathermap.conf"
DEFAULT_CONFIG = {"apikey": None}
OWM_WEATHER_URL = "https://api.openweathermap.org/data/2.5/weather"
OWM_FORECAST_URL = "https://api.openweathermap.org/data/2.5/forecast"
OWM_ICON_URL = "https://openweathermap.org/img/wn/%s.png"
OWM_TASK_DELAY = 900
OWM_PREVENT_FLOOD = 15
OWM_WEATHER_CODES = {
200: "Thunderstorm with light rain",
201: "Thunderstorm with rain",
202: "Thunderstorm with heavy rain",
210: "Light thunderstorm",
211: "Thunderstorm",
212: "Heavy thunderstorm",
221: "Ragged thunderstorm",
230: "Thunderstorm with light drizzle",
231: "Thunderstorm with drizzle",
232: "Thunderstorm with heavy drizzle",
300: "Light intensity drizzle",
301: "Drizzle",
302: "Heavy intensity drizzle",
310: "Light intensity drizzle rain",
311: "Drizzle rain",
312: "Heavy intensity drizzle rain",
313: "Shower rain and drizzle",
314: "Heavy shower rain and drizzle",
321: "Shower drizzle",
500: "Light rain",
501: "Moderate rain",
502: "Heavy intensity rain",
503: "Very heavy rain",
504: "Extreme rain",
511: "Freezing rain",
520: "Light intensity shower rain",
521: "Shower rain",
522: "Heavy intensity shower rain",
531: "Ragged shower rain",
600: "Light snow",
601: "Snow",
602: "Heavy snow",
611: "Sleet",
612: "Shower sleet",
615: "Light rain and snow",
616: "Rain and snow",
620: "Light shower snow",
621: "Shower snow",
622: "Heavy shower snow",
701: "Mist",
711: "Smoke",
721: "Haze",
731: "Sand, dust whirls",
741: "Fog",
751: "Sand",
761: "Dust",
762: "Volcanic ash",
771: "Squalls",
781: "Tornado",
800: "Clear sky",
801: "Few clouds",
802: "Scattered clouds",
803: "Broken clouds",
804: "Overcast clouds",
900: "Tornado",
901: "Tropical storm",
902: "Hurricane",
903: "Cold",
904: "Hot",
905: "Windy",
906: "Hail",
951: "Calm",
952: "Light breeze",
953: "Gentle breeze",
954: "Moderate breeze",
955: "Fresh breeze",
956: "Strong breeze",
957: "High wind, near gale",
958: "Gale",
959: "Severe gale",
960: "Storm",
961: "Violent storm",
962: "Hurricane",
}
OWM_WIND_DIRECTIONS = [
"N",
"NNE",
"NE",
"ENE",
"E",
"ESE",
"SE",
"SSE",
"S",
"SSW",
"SW",
"WSW",
"W",
"WNW",
"NW",
"NNW",
"N",
]
def __init__(self, bootstrap, debug_enabled):
"""
Constructor
Args:
bootstrap (dict): bootstrap objects
debug_enabled (bool): flag to set debug level to logger
"""
# init
CleepModule.__init__(self, bootstrap, debug_enabled)
# members
self.weather_task = None
self.__owm_uuid = None
self.__forecast = []
# events
self.openweathermap_weather_update = self._get_event(
"openweathermap.weather.update"
)
def _configure(self):
"""
Configure module
"""
# add openweathermap device
if self._get_device_count() == 0:
owm = {
"type": "openweathermap",
"name": "OpenWeatherMap",
"lastupdate": None,
"celsius": None,
"fahrenheit": None,
"humidity": None,
"pressure": None,
"windspeed": None,
"winddirection": None,
"code": None,
"condition": None,
"icon": None,
}
self._add_device(owm)
# get device uuid
devices = self.get_module_devices()
self.__owm_uuid = list(devices.keys())[0]
def _on_start(self):
"""
Module starts
"""
# update weather conditions
self._force_weather_update()
# start weather task
self._start_weather_task()
def _on_stop(self):
"""
Module stops
"""
self._stop_weather_task()
def _force_weather_update(self):
"""
Force weather update according to last update to not flood owm api
"""
# get devices if not provided
devices = self.get_module_devices()
last_update = devices[self.__owm_uuid].get("lastupdate")
if last_update is None or last_update + self.OWM_PREVENT_FLOOD < time.time():
self._weather_task()
def _start_weather_task(self):
"""
Start weather task
"""
if self.weather_task is None:
self.weather_task = Task(
self.OWM_TASK_DELAY, self._weather_task, self.logger
)
self.weather_task.start()
def _stop_weather_task(self):
"""
Stop weather task
"""
if self.weather_task is not None:
self.weather_task.stop()
def _restart_weather_task(self):
"""
Restart weather task
"""
self._stop_weather_task()
self._start_weather_task()
def _owm_request(self, url, params):
"""
Request OWM api
Args:
url (string): request url
params (dict): dict of parameters
Returns:
tuple: request response::
(
status (int): request status code,
data (dict): request response data
)
"""
status = None
resp_data = None
try:
self.logger.debug("Request params: %s" % params)
resp = requests.get(url, params=params)
resp_data = resp.json()
self.logger.debug("Response data: %s" % resp_data)
status = resp.status_code
if status != 200:
self.logger.error("OWM api response [%s]: %s" % (status, resp_data))
except Exception:
self.logger.exception("Error while requesting OWM API:")
return (status, resp_data)
def _get_weather(self, apikey):
"""
Get weather condition
Args:
apikey (string): OWM apikey
Returns:
dict: weather conditions (see http://openweathermap.org/current#parameter for output format)
Raises:
InvalidParameter: if input parameter is invalid
CommandError: if command failed
"""
# check parameter
self._check_parameters([{"name": "apikey", "value": apikey, "type": str}])
# get position infos from parameters app
resp = self.send_command("get_position", "parameters")
self.logger.debug("Get position from parameters module resp: %s" % resp)
if not resp or resp.error:
raise Exception(
"Unable to get device position (%s)" % resp.error
if resp
else "No response"
)
position = resp.data
# request api
(status, resp) = self._owm_request(
self.OWM_WEATHER_URL,
{
"appid": apikey,
"lat": position["latitude"],
"lon": position["longitude"],
"units": "metric",
"mode": "json",
},
)
self.logger.debug("OWM response: %s" % (resp))
# handle errors
if status == 401:
raise Exception("Invalid OWM api key")
if status != 200:
raise Exception("Error requesting openweathermap api (status %s)" % status)
if not isinstance(resp, dict) or "cod" not in resp:
raise Exception("Invalid OWM api response format. Is API changed?")
if resp["cod"] != 200: # cod is int for weather request
raise Exception(
resp["message"] if "message" in resp else "Unknown error from api"
)
return resp
def _get_forecast(self, apikey):
"""
Get forecast (5 days with 3 hours step)
Args:
apikey (string): OWM apikey
Returns:
dict: forecast (see http://openweathermap.org/forecast5 for output format)
Raises:
InvalidParameter: if input parameter is invalid
CommandError: if command failed
"""
# check parameter
self._check_parameters([{"name": "apikey", "value": apikey, "type": str}])
# get position infos from parameters app
resp = self.send_command("get_position", "parameters")
self.logger.debug("Get position from parameters module resp: %s" % resp)
if not resp or resp.error:
raise Exception(
"Unable to get device position (%s)" % resp.error
if resp
else "No response"
)
position = resp.data
# request api
(status, resp) = self._owm_request(
self.OWM_FORECAST_URL,
{
"appid": apikey,
"lat": position["latitude"],
"lon": position["longitude"],
"units": "metric",
"mode": "json",
},
)
self.logger.trace("OWM response: %s" % (resp))
# handle errors
if status == 401:
raise Exception("Invalid OWM api key")
if status != 200:
raise Exception("Error requesting openweathermap api (status %s)" % status)
if "cod" not in resp:
raise Exception("Invalid OWM api response format. Is API changed?")
if resp["cod"] != "200": # cod is string for forecast request
raise Exception(
"API message: %s" % resp["message"]
if "message" in resp
else "Unknown error from api"
)
if "list" not in resp or len(resp["list"]) == 0:
raise Exception("No forecast data retrieved")
return resp["list"]
def _weather_task(self):
"""
Weather task in charge to refresh weather condition every hours
Send openweathermap.weather.update event with following data::
{
lastupdate (int): timestamp,
icon (string): openweathermap icon,
condition (string): openweathermap condition string (english),
code (int): openweather condition code,
celsius (float): current temperature in celsius,
fahrenheit (float): current temperature in fahrenheit,
pressure (float): current pressure,
humidity (float): current humidity,
windspeed (float): current wind speed,
winddirection (string): current wind direction,
winddegrees (float): current wind degrees
}
"""
try:
self.logger.debug("Update weather conditions")
# get api key
config = self._get_config()
if config["apikey"] is None or len(config["apikey"]) == 0:
self.logger.debug("No apikey configured")
return
# apikey configured, get weather
weather = self._get_weather(config["apikey"])
self.logger.debug("Weather: %s" % weather)
self.__forecast = self._get_forecast(config["apikey"])
self.logger.debug("Forecast: %s" % self.__forecast)
# save current weather conditions
device = self._get_devices()[self.__owm_uuid]
device["lastupdate"] = int(time.time())
if "weather" in weather and len(weather["weather"]) > 0:
icon = weather["weather"][0].get("icon")
device["icon"] = self.OWM_ICON_URL % icon or "unknown"
wid = weather["weather"][0].get("id")
device["condition"] = self.OWM_WEATHER_CODES[wid] if wid else "?"
device["code"] = int(wid) if wid else 0
else:
device["icon"] = self.OWM_ICON_URL % "unknown"
device["condition"] = "?"
device["code"] = 0
if "main" in weather:
device["celsius"] = weather["main"].get("temp", 0.0)
device["fahrenheit"] = (
weather["main"].get("temp", 0.0) * 9.0 / 5.0 + 32.0
)
device["pressure"] = weather["main"].get("pressure", 0.0)
device["humidity"] = weather["main"].get("humidity", 0.0)
else:
device["celsius"] = 0.0
device["fahrenheit"] = 0.0
device["pressure"] = 0.0
device["humidity"] = 0.0
if "wind" in weather:
device["windspeed"] = weather["wind"].get("speed", 0.0)
device["winddegrees"] = weather["wind"].get("deg", 0)
index = int(round((weather["wind"].get("deg", 0) % 360) / 22.5) + 1)
device["winddirection"] = self.OWM_WIND_DIRECTIONS[
0 if index >= 17 else index
]
else:
device["windspeed"] = 0.0
device["winddegrees"] = 0.0
device["winddirection"] = "N"
self._update_device(self.__owm_uuid, device)
# and emit event
event_keys = [
"icon",
"condition",
"code",
"celsius",
"fahrenheit",
"pressure",
"humidity",
"windspeed",
"winddegrees",
"winddirection",
"lastupdate",
]
self.openweathermap_weather_update.send(
params={k: v for k, v in device.items() if k in event_keys},
device_id=self.__owm_uuid,
)
except Exception:
self.logger.exception("Exception during weather task:")
def set_apikey(self, apikey):
"""
Set openweathermap apikey
Args:
apikey (string): OWM apikey
Returns:
bool: True if apikey saved successfully
Raises:
CommandError: if error occured while using apikey to get current weather
"""
self._check_parameters([{"name": "apikey", "value": apikey, "type": str}])
# test apikey (should raise exception if error)
self._get_weather(apikey)
# test succeed, update weather right now and restart task
self._restart_weather_task()
self._force_weather_update()
# save config
return self._update_config({"apikey": apikey})
def get_weather(self):
"""
Return current weather conditions
Useful to use it in action script
Returns:
dict: device information
"""
return self._get_devices()[self.__owm_uuid]
def get_forecast(self):
"""
Return last forecast information.
May be empty if cleep just restarted.
Returns:
list: list of forecast data (every 3 hours)
"""
return self.__forecast
|
[
"cleep.core.CleepModule.__init__",
"time.time",
"cleep.libs.internals.task.Task",
"requests.get"
] |
[((4444, 4496), 'cleep.core.CleepModule.__init__', 'CleepModule.__init__', (['self', 'bootstrap', 'debug_enabled'], {}), '(self, bootstrap, debug_enabled)\n', (4464, 4496), False, 'from cleep.core import CleepModule\n'), ((6387, 6445), 'cleep.libs.internals.task.Task', 'Task', (['self.OWM_TASK_DELAY', 'self._weather_task', 'self.logger'], {}), '(self.OWM_TASK_DELAY, self._weather_task, self.logger)\n', (6391, 6445), False, 'from cleep.libs.internals.task import Task\n'), ((7371, 7403), 'requests.get', 'requests.get', (['url'], {'params': 'params'}), '(url, params=params)\n', (7383, 7403), False, 'import requests\n'), ((6184, 6195), 'time.time', 'time.time', ([], {}), '()\n', (6193, 6195), False, 'import time\n'), ((13300, 13311), 'time.time', 'time.time', ([], {}), '()\n', (13309, 13311), False, 'import time\n')]
|
from pywps import Service
from pywps.tests import assert_response_success
from .common import client_for, get_output
from emu.processes.wps_dummy import Dummy
def test_wps_dummy():
client = client_for(Service(processes=[Dummy()]))
datainputs = "input1=10;input2=2"
resp = client.get(
service='WPS', request='Execute', version='1.0.0',
identifier='dummyprocess',
datainputs=datainputs)
assert_response_success(resp)
assert get_output(resp.xml) == {'output1': '11', 'output2': '1'}
|
[
"pywps.tests.assert_response_success",
"emu.processes.wps_dummy.Dummy"
] |
[((428, 457), 'pywps.tests.assert_response_success', 'assert_response_success', (['resp'], {}), '(resp)\n', (451, 457), False, 'from pywps.tests import assert_response_success\n'), ((227, 234), 'emu.processes.wps_dummy.Dummy', 'Dummy', ([], {}), '()\n', (232, 234), False, 'from emu.processes.wps_dummy import Dummy\n')]
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
def talker():
pub = rospy.Publisher('chatter', String)
rospy.init_node('talker', anonymous=True)
while not rospy.is_shutdown():
str = "%s: hello world %s" % (rospy.get_name(), rospy.get_time())
rospy.loginfo(str)
pub.publish(String(str))
rospy.sleep(1.0)
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
|
[
"std_msgs.msg.String",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.get_time",
"rospy.get_name",
"rospy.sleep",
"rospy.Publisher",
"rospy.loginfo"
] |
[((93, 127), 'rospy.Publisher', 'rospy.Publisher', (['"""chatter"""', 'String'], {}), "('chatter', String)\n", (108, 127), False, 'import rospy\n'), ((132, 173), 'rospy.init_node', 'rospy.init_node', (['"""talker"""'], {'anonymous': '(True)'}), "('talker', anonymous=True)\n", (147, 173), False, 'import rospy\n'), ((188, 207), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (205, 207), False, 'import rospy\n'), ((291, 309), 'rospy.loginfo', 'rospy.loginfo', (['str'], {}), '(str)\n', (304, 309), False, 'import rospy\n'), ((351, 367), 'rospy.sleep', 'rospy.sleep', (['(1.0)'], {}), '(1.0)\n', (362, 367), False, 'import rospy\n'), ((330, 341), 'std_msgs.msg.String', 'String', (['str'], {}), '(str)\n', (336, 341), False, 'from std_msgs.msg import String\n'), ((247, 263), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (261, 263), False, 'import rospy\n'), ((265, 281), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (279, 281), False, 'import rospy\n')]
|
from sklearn import preprocessing, svm
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn import cross_validation
import pandas as pd
import numpy as np
import quandl
import math
df = quandl.get('WIKI/GOOGL')
df = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Low']) / df['Adj. Close'] * 100.0
df['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100.0
df = df[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]
forecast_col = 'Adj. Close'
df.fillna(-99999, inplace = True)
forecast_out = int(math.ceil(0.01 * len(df)))
print(forecast_out)
df['label'] = df[forecast_col].shift(-forecast_out)
df.dropna(inplace = True)
X = np.array(df.drop(['label'],1))
y = np.array(df['label'])
X = preprocessing.scale(X)
y = np.array(df['label'])
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y, test_size = 0.2)
clf = LinearRegression()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test,y_test)
print(accuracy)
|
[
"numpy.array",
"quandl.get",
"sklearn.cross_validation.train_test_split",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.scale"
] |
[((250, 274), 'quandl.get', 'quandl.get', (['"""WIKI/GOOGL"""'], {}), "('WIKI/GOOGL')\n", (260, 274), False, 'import quandl\n'), ((826, 847), 'numpy.array', 'np.array', (["df['label']"], {}), "(df['label'])\n", (834, 847), True, 'import numpy as np\n'), ((852, 874), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['X'], {}), '(X)\n', (871, 874), False, 'from sklearn import preprocessing, svm\n'), ((879, 900), 'numpy.array', 'np.array', (["df['label']"], {}), "(df['label'])\n", (887, 900), True, 'import numpy as np\n'), ((937, 991), 'sklearn.cross_validation.train_test_split', 'cross_validation.train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (970, 991), False, 'from sklearn import cross_validation\n'), ((1000, 1018), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1016, 1018), False, 'from sklearn.linear_model import LinearRegression\n')]
|
import datetime
import os
import yaml
import numpy as np
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
import plotly.graph_objs as go
ENV_FILE = '../env.yaml'
with open(ENV_FILE) as f:
params = yaml.load(f, Loader=yaml.FullLoader)
# Initialisation des chemins vers les fichiers
ROOT_DIR = os.path.dirname(os.path.abspath(ENV_FILE))
DATA_FILE = os.path.join(ROOT_DIR,
params['directories']['processed'],
params['files']['all_data'])
#Lecture du fihcier de données
epidemie_df = (pd.read_csv(DATA_FILE, parse_dates=['Last Update'])
.assign(day=lambda _df:_df['Last Update'].dt.date)
.drop_duplicates(subset=['Country/Region', 'Province/State', 'day'])
[lambda df: df['day'] <= datetime.date(2020,3,20)]
)
# replacing Mainland china with just China
cases = ['Confirmed', 'Deaths', 'Recovered']
# After 14/03/2020 the names of the countries are quite different
epidemie_df['Country/Region'] = epidemie_df['Country/Region'].replace('Mainland China', 'China')
# filling missing values
epidemie_df[['Province/State']] = epidemie_df[['Province/State']].fillna('')
epidemie_df[cases] = epidemie_df[cases].fillna(0)
countries=[{'label':c, 'value': c} for c in epidemie_df['Country/Region'].unique()]
app = dash.Dash('C0VID-19 Explorer')
app.layout = html.Div([
html.H1(['C0VID-19 Explorer'], style={'textAlign': 'center', 'color': 'navy', 'font-weight': 'bold'}),
dcc.Tabs([
dcc.Tab(label='Time', children=[
dcc.Markdown("""
Select a country:
""",style={'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'} ),
html.Div([
dcc.Dropdown(
id='country',
options=countries,
placeholder="Select a country...",
)
]),
html.Div([
dcc.Markdown("""You can select a second country:""",
style={'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'} ),
dcc.Dropdown(
id='country2',
options=countries,
placeholder="Select a country...",
)
]),
html.Div([dcc.Markdown("""Cases: """,
style={'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'} ),
dcc.RadioItems(
id='variable',
options=[
{'label':'Confirmed', 'value': 'Confirmed'},
{'label':'Deaths', 'value': 'Deaths'},
{'label':'Recovered', 'value': 'Recovered'}
],
value='Confirmed',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Graph(id='graph1')
])
]),
dcc.Tab(label='Map', children=[
#html.H6(['COVID-19 in numbers:']),
dcc.Markdown("""
**COVID-19**
This is a graph that shows the evolution of the COVID-19 around the world
** Cases:**
""", style={'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'} ),
dcc.Dropdown(id="value-selected", value='Confirmed',
options=[{'label': "Deaths ", 'value': 'Deaths'},
{'label': "Confirmed", 'value': 'Confirmed'},
{'label': "Recovered", 'value': 'Recovered'}],
placeholder="Select a country...",
style={"display": "inline-block", "margin-left": "auto", "margin-right": "auto",
"width": "70%"}, className="six columns"),
dcc.Graph(id='map1'),
dcc.Slider(
id='map_day',
min=0,
max=(epidemie_df['day'].max() - epidemie_df['day'].min()).days,
value=0,
marks={i:str(i) for i, date in enumerate(epidemie_df['day'].unique())}
)
]),
dcc.Tab(label='SIR Model', children=[
dcc.Markdown("""
**SIR model**
S(Susceptible)I(Infectious)R(Recovered) is a model describing the dynamics of infectious disease. The model divides the population into compartments. Each compartment is expected to have the same characteristics. SIR represents the three compartments segmented by the model.
**Select a country:**
""", style={'textAlign': 'left', 'color': 'navy'}),
html.Div([
dcc.Dropdown(
id='Country',
value='Portugal',
options=countries),
]),
dcc.Markdown("""Select:""", style={'textAlign': 'left', 'color': 'navy'}),
dcc.Dropdown(id='cases',
options=[
{'label': 'Confirmed', 'value': 'Confirmed'},
{'label': 'Deaths', 'value': 'Deaths'},
{'label': 'Recovered', 'value': 'Recovered'}],
value=['Confirmed','Deaths','Recovered'],
multi=True),
dcc.Markdown("""
**Select your paramaters:**
""", style={'textAlign': 'left', 'color': 'navy'}),
html.Label( style={'textAlign': 'left', 'color': 'navy', "width": "20%"}),
html.Div([
dcc.Markdown(""" Beta:
""", style={'textAlign': 'left', 'color': 'navy'}),
dcc.Input(
id='input-beta',
type ='number',
placeholder='Input Beta',
min =-50,
max =100,
step =0.01,
value=0.45
)
]),
html.Div([
dcc.Markdown(""" Gamma:
""", style={'textAlign': 'left', 'color': 'navy'}),
dcc.Input(
id='input-gamma',
type ='number',
placeholder='Input Gamma',
min =-50,
max =100,
step =0.01,
value=0.55
)
]),
html.Div([
dcc.Markdown(""" Population:
""", style={'textAlign': 'left', 'color': 'navy'}),
dcc.Input(
id='input-pop',placeholder='Population',
type ='number',
min =1000,
max =1000000000000000,
step =1000,
value=1000,
)
]),
html.Div([
dcc.RadioItems(id='variable2',
options=[
{'label':'Optimize','value':'optimize'}],
value='Confirmed',
labelStyle={'display':'inline-block','color': 'navy', "width": "20%"})
]),
html.Div([
dcc.Graph(id='graph2')
]),
])
]),
])
@app.callback(
Output('graph1', 'figure'),
[
Input('country','value'),
Input('country2','value'),
Input('variable','value'),
]
)
def update_graph(country, country2, variable):
print(country)
if country is None:
graph_df = epidemie_df.groupby('day').agg({variable:'sum'}).reset_index()
else:
graph_df=(epidemie_df[epidemie_df['Country/Region'] == country]
.groupby(['Country/Region', 'day'])
.agg({variable:'sum'})
.reset_index()
)
if country2 is not None:
graph2_df=(epidemie_df[epidemie_df['Country/Region'] == country2]
.groupby(['Country/Region', 'day'])
.agg({variable:'sum'})
.reset_index()
)
return {
'data':[
dict(
x=graph_df['day'],
y=graph_df[variable],
type='line',
name=country if country is not None else 'Total'
)
] + ([
dict(
x=graph2_df['day'],
y=graph2_df[variable],
type='line',
name=country2
)
] if country2 is not None else [])
}
@app.callback(
Output('map1', 'figure'),
[
Input('map_day','value'),
Input("value-selected", "value")
]
)
def update_map(map_day,selected):
day= epidemie_df['day'].sort_values(ascending=False).unique()[map_day]
map_df = (epidemie_df[epidemie_df['day'] == day]
.groupby(['Country/Region'])
.agg({selected:'sum', 'Latitude': 'mean', 'Longitude': 'mean'})
.reset_index()
)
return {
'data':[
dict(
type='scattergeo',
lon=map_df['Longitude'],
lat=map_df['Latitude'],
text=map_df.apply(lambda r: r['Country/Region'] + '(' + str(r[selected]) + ')', axis=1),
mode='markers',
marker=dict(
size=np.maximum(map_df[selected]/ 1_000, 10)
)
)
],
'layout': dict(
title=str(day),
geo=dict(showland=True)
)
}
@app.callback(
Output('graph2', 'figure'),
[
Input('input-beta', 'value'),
Input('input-gamma','value'),
Input('input-pop','value'),
Input('Country','value')
#Input('variable2','value')
]
)
def update_model(beta, gamma, population, Country):
print(Country)
country=Country
country_df = (epidemie_df[epidemie_df['Country/Region'] == country]
.groupby(['Country/Region', 'day'])
.agg({'Confirmed': 'sum', 'Deaths': 'sum', 'Recovered': 'sum'})
.reset_index())
country_df['Infected'] = country_df['Confirmed'].diff()
steps = len(country_df['Infected'])
def SIR(t, y):
S = y[0]; I = y[1]; R = y[2]
return([-beta*S*I, beta*S*I-gamma*I, gamma*I])
solution = solve_ivp(SIR, [0, steps], [population, 1, 0], t_eval=np.arange(0, steps, 1))
#def sumsq_error(parameters):
#beta, gamma = parameters
#def SIR(t,y):
#S=y[0]
#I=y[1]
#R=y[2]
#return([-beta*S*I, beta*S*I-gamma*I, gamma*I])
#solution = solve_ivp(SIR,[0,nb_steps-1],[total_population,1,0],t_eval=np.arange(0,nb_steps,1))
#return(sum((solution.y[1]-infected_population)**2))
#msol = minimize(sumsq_error,[0.001,0.1],method='Nelder-Mead')
#if variable2 == 'optimize':
#gamma,beta == msol.x
return {
'data': [
dict(
x=solution.t,
y=solution.y[0],
type='line',
name=country+': Susceptible')
] + ([
dict(
x=solution.t,
y=solution.y[1],
type='line',
name=country+': Infected')
]) + ([
dict(
x=solution.t,
y=solution.y[2],
type='line',
name=country+': Recovered')
]) + ([
dict(
x=solution.t,
y=country_df['Infected'],
type='line',
name=country+': Original Data(Infected)')
])
}
if __name__ == '__main__':
app.run_server(debug=True)
|
[
"pandas.read_csv",
"numpy.arange",
"dash_core_components.RadioItems",
"dash_core_components.Input",
"dash.dependencies.Output",
"os.path.join",
"yaml.load",
"dash.dependencies.Input",
"dash_core_components.Dropdown",
"dash_html_components.Label",
"datetime.date",
"dash_core_components.Markdown",
"dash_html_components.H1",
"os.path.abspath",
"numpy.maximum",
"dash.Dash",
"dash_core_components.Graph"
] |
[((530, 622), 'os.path.join', 'os.path.join', (['ROOT_DIR', "params['directories']['processed']", "params['files']['all_data']"], {}), "(ROOT_DIR, params['directories']['processed'], params['files'][\n 'all_data'])\n", (542, 622), False, 'import os\n'), ((1497, 1527), 'dash.Dash', 'dash.Dash', (['"""C0VID-19 Explorer"""'], {}), "('C0VID-19 Explorer')\n", (1506, 1527), False, 'import dash\n'), ((379, 415), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (388, 415), False, 'import yaml\n'), ((491, 516), 'os.path.abspath', 'os.path.abspath', (['ENV_FILE'], {}), '(ENV_FILE)\n', (506, 516), False, 'import os\n'), ((7634, 7660), 'dash.dependencies.Output', 'Output', (['"""graph1"""', '"""figure"""'], {}), "('graph1', 'figure')\n", (7640, 7660), False, 'from dash.dependencies import Input, Output\n'), ((8901, 8925), 'dash.dependencies.Output', 'Output', (['"""map1"""', '"""figure"""'], {}), "('map1', 'figure')\n", (8907, 8925), False, 'from dash.dependencies import Input, Output\n'), ((9914, 9940), 'dash.dependencies.Output', 'Output', (['"""graph2"""', '"""figure"""'], {}), "('graph2', 'figure')\n", (9920, 9940), False, 'from dash.dependencies import Input, Output\n'), ((1557, 1662), 'dash_html_components.H1', 'html.H1', (["['C0VID-19 Explorer']"], {'style': "{'textAlign': 'center', 'color': 'navy', 'font-weight': 'bold'}"}), "(['C0VID-19 Explorer'], style={'textAlign': 'center', 'color':\n 'navy', 'font-weight': 'bold'})\n", (1564, 1662), True, 'import dash_html_components as html\n'), ((7676, 7701), 'dash.dependencies.Input', 'Input', (['"""country"""', '"""value"""'], {}), "('country', 'value')\n", (7681, 7701), False, 'from dash.dependencies import Input, Output\n'), ((7710, 7736), 'dash.dependencies.Input', 'Input', (['"""country2"""', '"""value"""'], {}), "('country2', 'value')\n", (7715, 7736), False, 'from dash.dependencies import Input, Output\n'), ((7745, 7771), 'dash.dependencies.Input', 'Input', (['"""variable"""', '"""value"""'], {}), "('variable', 'value')\n", (7750, 7771), False, 'from dash.dependencies import Input, Output\n'), ((8941, 8966), 'dash.dependencies.Input', 'Input', (['"""map_day"""', '"""value"""'], {}), "('map_day', 'value')\n", (8946, 8966), False, 'from dash.dependencies import Input, Output\n'), ((8975, 9007), 'dash.dependencies.Input', 'Input', (['"""value-selected"""', '"""value"""'], {}), "('value-selected', 'value')\n", (8980, 9007), False, 'from dash.dependencies import Input, Output\n'), ((9956, 9984), 'dash.dependencies.Input', 'Input', (['"""input-beta"""', '"""value"""'], {}), "('input-beta', 'value')\n", (9961, 9984), False, 'from dash.dependencies import Input, Output\n'), ((9994, 10023), 'dash.dependencies.Input', 'Input', (['"""input-gamma"""', '"""value"""'], {}), "('input-gamma', 'value')\n", (9999, 10023), False, 'from dash.dependencies import Input, Output\n'), ((10032, 10059), 'dash.dependencies.Input', 'Input', (['"""input-pop"""', '"""value"""'], {}), "('input-pop', 'value')\n", (10037, 10059), False, 'from dash.dependencies import Input, Output\n'), ((10068, 10093), 'dash.dependencies.Input', 'Input', (['"""Country"""', '"""value"""'], {}), "('Country', 'value')\n", (10073, 10093), False, 'from dash.dependencies import Input, Output\n'), ((957, 983), 'datetime.date', 'datetime.date', (['(2020)', '(3)', '(20)'], {}), '(2020, 3, 20)\n', (970, 983), False, 'import datetime\n'), ((10788, 10810), 'numpy.arange', 'np.arange', (['(0)', 'steps', '(1)'], {}), '(0, steps, 1)\n', (10797, 10810), True, 'import numpy as np\n'), ((715, 766), 'pandas.read_csv', 'pd.read_csv', (['DATA_FILE'], {'parse_dates': "['Last Update']"}), "(DATA_FILE, parse_dates=['Last Update'])\n", (726, 766), True, 'import pandas as pd\n'), ((1728, 1888), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""\n Select a country:\n \n """'], {'style': "{'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'}"}), '(\n """\n Select a country:\n \n """,\n style={\'textAlign\': \'left\', \'color\': \'navy\', \'font-weight\': \'bold\'})\n', (1740, 1888), True, 'import dash_core_components as dcc\n'), ((3275, 3549), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""\n **COVID-19**\n This is a graph that shows the evolution of the COVID-19 around the world \n \n ** Cases:**\n """'], {'style': "{'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'}"}), '(\n """\n **COVID-19**\n This is a graph that shows the evolution of the COVID-19 around the world \n \n ** Cases:**\n """\n , style={\'textAlign\': \'left\', \'color\': \'navy\', \'font-weight\': \'bold\'})\n', (3287, 3549), True, 'import dash_core_components as dcc\n'), ((3554, 3927), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""value-selected"""', 'value': '"""Confirmed"""', 'options': "[{'label': 'Deaths ', 'value': 'Deaths'}, {'label': 'Confirmed', 'value':\n 'Confirmed'}, {'label': 'Recovered', 'value': 'Recovered'}]", 'placeholder': '"""Select a country..."""', 'style': "{'display': 'inline-block', 'margin-left': 'auto', 'margin-right': 'auto',\n 'width': '70%'}", 'className': '"""six columns"""'}), "(id='value-selected', value='Confirmed', options=[{'label':\n 'Deaths ', 'value': 'Deaths'}, {'label': 'Confirmed', 'value':\n 'Confirmed'}, {'label': 'Recovered', 'value': 'Recovered'}],\n placeholder='Select a country...', style={'display': 'inline-block',\n 'margin-left': 'auto', 'margin-right': 'auto', 'width': '70%'},\n className='six columns')\n", (3566, 3927), True, 'import dash_core_components as dcc\n'), ((4109, 4129), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""map1"""'}), "(id='map1')\n", (4118, 4129), True, 'import dash_core_components as dcc\n'), ((4489, 4951), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""\n **SIR model**\n S(Susceptible)I(Infectious)R(Recovered) is a model describing the dynamics of infectious disease. The model divides the population into compartments. Each compartment is expected to have the same characteristics. SIR represents the three compartments segmented by the model.\n \n **Select a country:**\n """'], {'style': "{'textAlign': 'left', 'color': 'navy'}"}), '(\n """\n **SIR model**\n S(Susceptible)I(Infectious)R(Recovered) is a model describing the dynamics of infectious disease. The model divides the population into compartments. Each compartment is expected to have the same characteristics. SIR represents the three compartments segmented by the model.\n \n **Select a country:**\n """\n , style={\'textAlign\': \'left\', \'color\': \'navy\'})\n', (4501, 4951), True, 'import dash_core_components as dcc\n'), ((5136, 5205), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""Select:"""'], {'style': "{'textAlign': 'left', 'color': 'navy'}"}), "('Select:', style={'textAlign': 'left', 'color': 'navy'})\n", (5148, 5205), True, 'import dash_core_components as dcc\n'), ((5223, 5457), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""cases"""', 'options': "[{'label': 'Confirmed', 'value': 'Confirmed'}, {'label': 'Deaths', 'value':\n 'Deaths'}, {'label': 'Recovered', 'value': 'Recovered'}]", 'value': "['Confirmed', 'Deaths', 'Recovered']", 'multi': '(True)'}), "(id='cases', options=[{'label': 'Confirmed', 'value':\n 'Confirmed'}, {'label': 'Deaths', 'value': 'Deaths'}, {'label':\n 'Recovered', 'value': 'Recovered'}], value=['Confirmed', 'Deaths',\n 'Recovered'], multi=True)\n", (5235, 5457), True, 'import dash_core_components as dcc\n'), ((5581, 5739), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""\n \n **Select your paramaters:**\n \n """'], {'style': "{'textAlign': 'left', 'color': 'navy'}"}), '(\n """\n \n **Select your paramaters:**\n \n """\n , style={\'textAlign\': \'left\', \'color\': \'navy\'})\n', (5593, 5739), True, 'import dash_core_components as dcc\n'), ((5743, 5815), 'dash_html_components.Label', 'html.Label', ([], {'style': "{'textAlign': 'left', 'color': 'navy', 'width': '20%'}"}), "(style={'textAlign': 'left', 'color': 'navy', 'width': '20%'})\n", (5753, 5815), True, 'import dash_html_components as html\n'), ((9699, 9738), 'numpy.maximum', 'np.maximum', (['(map_df[selected] / 1000)', '(10)'], {}), '(map_df[selected] / 1000, 10)\n', (9709, 9738), True, 'import numpy as np\n'), ((1920, 2005), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""country"""', 'options': 'countries', 'placeholder': '"""Select a country..."""'}), "(id='country', options=countries, placeholder='Select a country...'\n )\n", (1932, 2005), True, 'import dash_core_components as dcc\n'), ((2135, 2256), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""You can select a second country:"""'], {'style': "{'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'}"}), "('You can select a second country:', style={'textAlign': 'left',\n 'color': 'navy', 'font-weight': 'bold'})\n", (2147, 2256), True, 'import dash_core_components as dcc\n'), ((2305, 2391), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""country2"""', 'options': 'countries', 'placeholder': '"""Select a country..."""'}), "(id='country2', options=countries, placeholder=\n 'Select a country...')\n", (2317, 2391), True, 'import dash_core_components as dcc\n'), ((2505, 2601), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""Cases: """'], {'style': "{'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'}"}), "('Cases: ', style={'textAlign': 'left', 'color': 'navy',\n 'font-weight': 'bold'})\n", (2517, 2601), True, 'import dash_core_components as dcc\n'), ((2650, 2893), 'dash_core_components.RadioItems', 'dcc.RadioItems', ([], {'id': '"""variable"""', 'options': "[{'label': 'Confirmed', 'value': 'Confirmed'}, {'label': 'Deaths', 'value':\n 'Deaths'}, {'label': 'Recovered', 'value': 'Recovered'}]", 'value': '"""Confirmed"""', 'labelStyle': "{'display': 'inline-block'}"}), "(id='variable', options=[{'label': 'Confirmed', 'value':\n 'Confirmed'}, {'label': 'Deaths', 'value': 'Deaths'}, {'label':\n 'Recovered', 'value': 'Recovered'}], value='Confirmed', labelStyle={\n 'display': 'inline-block'})\n", (2664, 2893), True, 'import dash_core_components as dcc\n'), ((3125, 3147), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""graph1"""'}), "(id='graph1')\n", (3134, 3147), True, 'import dash_core_components as dcc\n'), ((4982, 5045), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""Country"""', 'value': '"""Portugal"""', 'options': 'countries'}), "(id='Country', value='Portugal', options=countries)\n", (4994, 5045), True, 'import dash_core_components as dcc\n'), ((5858, 5953), 'dash_core_components.Markdown', 'dcc.Markdown', (['""" Beta: \n """'], {'style': "{'textAlign': 'left', 'color': 'navy'}"}), "(' Beta: \\n ', style={'textAlign': 'left',\n 'color': 'navy'})\n", (5870, 5953), True, 'import dash_core_components as dcc\n'), ((5970, 6082), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""input-beta"""', 'type': '"""number"""', 'placeholder': '"""Input Beta"""', 'min': '(-50)', 'max': '(100)', 'step': '(0.01)', 'value': '(0.45)'}), "(id='input-beta', type='number', placeholder='Input Beta', min=-50,\n max=100, step=0.01, value=0.45)\n", (5979, 6082), True, 'import dash_core_components as dcc\n'), ((6303, 6399), 'dash_core_components.Markdown', 'dcc.Markdown', (['""" Gamma: \n """'], {'style': "{'textAlign': 'left', 'color': 'navy'}"}), "(' Gamma: \\n ', style={'textAlign': 'left',\n 'color': 'navy'})\n", (6315, 6399), True, 'import dash_core_components as dcc\n'), ((6416, 6531), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""input-gamma"""', 'type': '"""number"""', 'placeholder': '"""Input Gamma"""', 'min': '(-50)', 'max': '(100)', 'step': '(0.01)', 'value': '(0.55)'}), "(id='input-gamma', type='number', placeholder='Input Gamma', min=-\n 50, max=100, step=0.01, value=0.55)\n", (6425, 6531), True, 'import dash_core_components as dcc\n'), ((6747, 6848), 'dash_core_components.Markdown', 'dcc.Markdown', (['""" Population: \n """'], {'style': "{'textAlign': 'left', 'color': 'navy'}"}), "(' Population: \\n ', style={'textAlign':\n 'left', 'color': 'navy'})\n", (6759, 6848), True, 'import dash_core_components as dcc\n'), ((6865, 6990), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""input-pop"""', 'placeholder': '"""Population"""', 'type': '"""number"""', 'min': '(1000)', 'max': '(1000000000000000)', 'step': '(1000)', 'value': '(1000)'}), "(id='input-pop', placeholder='Population', type='number', min=1000,\n max=1000000000000000, step=1000, value=1000)\n", (6874, 6990), True, 'import dash_core_components as dcc\n'), ((7187, 7371), 'dash_core_components.RadioItems', 'dcc.RadioItems', ([], {'id': '"""variable2"""', 'options': "[{'label': 'Optimize', 'value': 'optimize'}]", 'value': '"""Confirmed"""', 'labelStyle': "{'display': 'inline-block', 'color': 'navy', 'width': '20%'}"}), "(id='variable2', options=[{'label': 'Optimize', 'value':\n 'optimize'}], value='Confirmed', labelStyle={'display': 'inline-block',\n 'color': 'navy', 'width': '20%'})\n", (7201, 7371), True, 'import dash_core_components as dcc\n'), ((7539, 7561), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""graph2"""'}), "(id='graph2')\n", (7548, 7561), True, 'import dash_core_components as dcc\n')]
|
import pytest
from astropy.io import fits
import numpy as np
from lightkurve.io.kepseismic import read_kepseismic_lightcurve
from lightkurve.io.detect import detect_filetype
@pytest.mark.remote_data
def test_detect_kepseismic():
"""Can we detect the correct format for KEPSEISMIC files?"""
url = "https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"
f = fits.open(url)
assert detect_filetype(f) == "KEPSEISMIC"
@pytest.mark.remote_data
def test_read_kepseismic():
"""Can we read KEPSEISMIC files?"""
url = "https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"
with fits.open(url, mode="readonly") as hdulist:
fluxes = hdulist[1].data["FLUX"]
lc = read_kepseismic_lightcurve(url)
flux_lc = lc.flux.value
# print(flux_lc, fluxes)
assert np.sum(fluxes) == np.sum(flux_lc)
|
[
"lightkurve.io.kepseismic.read_kepseismic_lightcurve",
"lightkurve.io.detect.detect_filetype",
"numpy.sum",
"astropy.io.fits.open"
] |
[((461, 475), 'astropy.io.fits.open', 'fits.open', (['url'], {}), '(url)\n', (470, 475), False, 'from astropy.io import fits\n'), ((878, 909), 'lightkurve.io.kepseismic.read_kepseismic_lightcurve', 'read_kepseismic_lightcurve', (['url'], {}), '(url)\n', (904, 909), False, 'from lightkurve.io.kepseismic import read_kepseismic_lightcurve\n'), ((488, 506), 'lightkurve.io.detect.detect_filetype', 'detect_filetype', (['f'], {}), '(f)\n', (503, 506), False, 'from lightkurve.io.detect import detect_filetype\n'), ((783, 814), 'astropy.io.fits.open', 'fits.open', (['url'], {'mode': '"""readonly"""'}), "(url, mode='readonly')\n", (792, 814), False, 'from astropy.io import fits\n'), ((980, 994), 'numpy.sum', 'np.sum', (['fluxes'], {}), '(fluxes)\n', (986, 994), True, 'import numpy as np\n'), ((998, 1013), 'numpy.sum', 'np.sum', (['flux_lc'], {}), '(flux_lc)\n', (1004, 1013), True, 'import numpy as np\n')]
|
from ggplot import aes, geom_point, ggplot, mtcars
import matplotlib.pyplot as plt
from pandas import DataFrame
from bokeh import mpl
from bokeh.plotting import output_file, show
g = ggplot(mtcars, aes(x='wt', y='mpg', color='qsec')) + geom_point()
g.make()
plt.title("Point ggplot-based plot in Bokeh.")
output_file("ggplot_point.html", title="ggplot_point.py example")
show(mpl.to_bokeh())
|
[
"bokeh.mpl.to_bokeh",
"bokeh.plotting.output_file",
"matplotlib.pyplot.title",
"ggplot.aes",
"ggplot.geom_point"
] |
[((262, 308), 'matplotlib.pyplot.title', 'plt.title', (['"""Point ggplot-based plot in Bokeh."""'], {}), "('Point ggplot-based plot in Bokeh.')\n", (271, 308), True, 'import matplotlib.pyplot as plt\n'), ((310, 375), 'bokeh.plotting.output_file', 'output_file', (['"""ggplot_point.html"""'], {'title': '"""ggplot_point.py example"""'}), "('ggplot_point.html', title='ggplot_point.py example')\n", (321, 375), False, 'from bokeh.plotting import output_file, show\n'), ((239, 251), 'ggplot.geom_point', 'geom_point', ([], {}), '()\n', (249, 251), False, 'from ggplot import aes, geom_point, ggplot, mtcars\n'), ((382, 396), 'bokeh.mpl.to_bokeh', 'mpl.to_bokeh', ([], {}), '()\n', (394, 396), False, 'from bokeh import mpl\n'), ((201, 235), 'ggplot.aes', 'aes', ([], {'x': '"""wt"""', 'y': '"""mpg"""', 'color': '"""qsec"""'}), "(x='wt', y='mpg', color='qsec')\n", (204, 235), False, 'from ggplot import aes, geom_point, ggplot, mtcars\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import random
import sys
from io import open
import numpy as np
import torch
import json
from torch.utils.data import (DataLoader, SequentialSampler, RandomSampler, TensorDataset)
from tqdm import tqdm, trange
import ray
from ray import tune
from ray.tune.schedulers import HyperBandScheduler
from models.modeling_bert import QuestionAnswering, Config
from utils.optimization import AdamW, WarmupLinearSchedule
from utils.tokenization import BertTokenizer
from utils.korquad_utils import (read_squad_examples, convert_examples_to_features, RawResult, write_predictions)
from debug.evaluate_korquad import evaluate as korquad_eval
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
# In[2]:
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
# In[3]:
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
# In[4]:
from ray import tune
from ray.tune import track
from ray.tune.schedulers import HyperBandScheduler
from ray.tune.suggest.bayesopt import BayesOptSearch
ray.shutdown()
ray.init(webui_host='127.0.0.1')
# In[5]:
search_space = {
"max_seq_length": 512,
"doc_stride": 128,
"max_query_length": tune.sample_from(lambda _: int(np.random.uniform(50, 100))), #tune.uniform(50, 100),
"train_batch_size": 32,
"learning_rate": tune.loguniform(5e-4, 5e-7, 10),
"num_train_epochs": tune.grid_search([4, 8, 12, 16]),
"max_grad_norm": 1.0,
"adam_epsilon": 1e-6,
"warmup_proportion": 0.1,
"n_best_size": tune.sample_from(lambda _: int(np.random.uniform(50, 100))), #tune.uniform(50, 100),
"max_answer_length": tune.sample_from(lambda _: int(np.random.uniform(12, 25))), #tune.uniform(12, 25),
"seed": tune.sample_from(lambda _: int(np.random.uniform(1e+6, 1e+8)))
}
# In[ ]:
def load_and_cache_examples(predict_file, max_seq_length, doc_stride, max_query_length, tokenizer):
# Load data features from cache or dataset file
examples = read_squad_examples(input_file=predict_file,
is_training=False,
version_2_with_negative=False)
features = convert_examples_to_features(examples=examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=False)
return examples, features
# In[ ]:
def evaluate(predict_file, batch_size, device, output_dir, n_best_size, max_answer_length, model, eval_examples, eval_features):
""" Eval """
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
sampler = SequentialSampler(dataset)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=batch_size)
logger.info("***** Evaluating *****")
logger.info(" Num features = %d", len(dataset))
logger.info(" Batch size = %d", batch_size)
model.eval()
all_results = []
# set_seed(args) # Added here for reproductibility (even between python 2 and 3)
logger.info("Start evaluating!")
for input_ids, input_mask, segment_ids, example_indices in tqdm(dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
with torch.no_grad():
batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask)
for i, example_index in enumerate(example_indices):
start_logits = batch_start_logits[i].detach().cpu().tolist()
end_logits = batch_end_logits[i].detach().cpu().tolist()
eval_feature = eval_features[example_index.item()]
unique_id = int(eval_feature.unique_id)
all_results.append(RawResult(unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
output_prediction_file = os.path.join(output_dir, "predictions.json")
output_nbest_file = os.path.join(output_dir, "nbest_predictions.json")
write_predictions(eval_examples, eval_features, all_results,
n_best_size, max_answer_length,
False, output_prediction_file, output_nbest_file,
None, False, False, 0.0)
expected_version = 'KorQuAD_v1.0'
with open(predict_file) as dataset_file:
dataset_json = json.load(dataset_file)
read_version = "_".join(dataset_json['version'].split("_")[:-1])
if (read_version != expected_version):
logger.info('Evaluation expects ' + expected_version + ', but got dataset with ' + read_version, file=sys.stderr)
dataset = dataset_json['data']
with open(os.path.join(output_dir, "predictions.json")) as prediction_file:
predictions = json.load(prediction_file)
_eval = korquad_eval(dataset, predictions)
logger.info(json.dumps(_eval))
return _eval
# In[6]:
def train_korquad(train_config):
# setup
basepath = '/jupyterhome/enpline_bert_competition/korquad-challenge/src'
logger.info("train_config : %s" % str(train_config))
output_dir='output'
checkpoint=os.path.join(basepath,'data/bert_small_ckpt.bin')
model_config=os.path.join(basepath,'data/bert_small.json')
vocab_file=os.path.join(basepath,'data/ko_vocab_32k.txt')
train_file=os.path.join(basepath, 'data/KorQuAD_v1.0_train.json')
predict_file=os.path.join(basepath, 'data/KorQuAD_v1.0_dev.json')
null_score_diff_threshold = 0.0
no_cuda = False
verbose_logging = False
fp16 = True
fp16_opt_level = 'O2'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print("device: {} n_gpu: {}, 16-bits training: {}".format(device, n_gpu, fp16))
random.seed(train_config['seed'])
np.random.seed(train_config['seed'])
torch.manual_seed(train_config['seed'])
if n_gpu > 0:
torch.cuda.manual_seed_all(train_config['seed'])
if not os.path.exists(output_dir):
os.makedirs(output_dir)
tokenizer = BertTokenizer(vocab_file, max_len=train_config['max_seq_length'], do_basic_tokenize=True)
# Prepare model
config = Config.from_json_file(model_config)
model = QuestionAnswering(config)
model.bert.load_state_dict(torch.load(checkpoint))
num_params = count_parameters(model)
logger.info("Total Parameter: %d" % num_params)
logger.info("Hyper-parameters: %s" % str(train_config))
paramfile_path = os.path.join(output_dir, 'hyperparameters.txt')
with open(paramfile_path, "w") as paramfile:
logger.info("writing hyperparameters at",paramfile_path)
paramfile.write("%s" % str(train_config))
model.to(device)
cached_train_features_file = train_file + '_{0}_{1}_{2}'.format(str(train_config['max_seq_length']), str(train_config['doc_stride']),
str(train_config['max_query_length']))
train_examples = read_squad_examples(input_file=train_file, is_training=True, version_2_with_negative=False)
try:
with open(cached_train_features_file, "rb") as reader:
train_features = pickle.load(reader)
except:
train_features = convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=train_config['max_seq_length'],
doc_stride=train_config['doc_stride'],
max_query_length=train_config['max_query_length'],
is_training=True)
logger.info(" Saving train features into cached file %s", cached_train_features_file)
with open(cached_train_features_file, "wb") as writer:
pickle.dump(train_features, writer)
num_train_optimization_steps = int(len(train_features) / train_config['train_batch_size']) * train_config['num_train_epochs']
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=train_config['learning_rate'],
eps=train_config['adam_epsilon'])
scheduler = WarmupLinearSchedule(optimizer,
warmup_steps=num_train_optimization_steps*0.1,
t_total=num_train_optimization_steps)
if fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=fp16_opt_level)
logger.info("***** Running training *****")
logger.info(" Num orig examples = %d", len(train_examples))
logger.info(" Num split examples = %d", len(train_features))
logger.info(" Batch size = %d", train_config['train_batch_size'])
logger.info(" Num steps = %d", num_train_optimization_steps)
num_train_step = num_train_optimization_steps
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_start_positions, all_end_positions)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=train_config['train_batch_size'])
model.train()
global_step = 0
epoch = 0
output_model_file = ''
# training
# for epoch_idx in trange(int(train_config['num_train_epochs'])):
# iter_bar = tqdm(train_dataloader, desc="Train(XX Epoch) Step(XX/XX) (Mean loss=X.X) (loss=X.X)")
for epoch_idx in range(int(train_config['num_train_epochs'])):
tr_step, total_loss, mean_loss = 0, 0., 0.
for step, batch in enumerate(train_dataloader):
if n_gpu == 1:
batch = tuple(t.to(device) for t in batch) # multi-gpu does scattering it-self
input_ids, input_mask, segment_ids, start_positions, end_positions = batch
loss = model(input_ids, segment_ids, input_mask, start_positions, end_positions)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), train_config['max_grad_norm'])
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), train_config['max_grad_norm'])
scheduler.step()
optimizer.step()
optimizer.zero_grad()
global_step += 1
tr_step += 1
total_loss += loss
mean_loss = total_loss / tr_step
# iter_bar.set_description("Train Step(%d / %d) (Mean loss=%5.5f) (loss=%5.5f)" %
# (global_step, num_train_step, mean_loss, loss.item()))
epoch += 1
logger.info("** ** * Saving file * ** **")
model_checkpoint = "korquad_%d.bin" % (epoch)
logger.info(model_checkpoint)
#save the last model
output_model_file = os.path.join(output_dir, model_checkpoint)
if n_gpu > 1:
torch.save(model.module.state_dict(), output_model_file)
else:
torch.save(model.state_dict(), output_model_file)
# Evaluate with final model
examples, features = load_and_cache_examples(predict_file, train_config['max_seq_length'], train_config['doc_stride'],
train_config['max_query_length'], tokenizer)
eval = evaluate(predict_file=predict_file, batch_size=16, device=device, output_dir=output_dir, n_best_size=train_config['n_best_size'], max_answer_length=train_config['max_answer_length'],
model=model, eval_examples=examples, eval_features=features)
logger.info("-" * 16, 'evaltion', "-" * 16)
logger.info(eval)
track.log(f1 = eval['f1'])
# In[ ]:
analysis = tune.run(train_korquad, config=search_space, scheduler=HyperBandScheduler(metric='f1', mode='max'), resources_per_trial={'gpu':1})
# In[ ]:
dfs = analysis.trial_dataframes
# In[ ]:
# ax = None
# for d in dfs.values():
# ax = d.mean_loss.plot(ax=ax, legend=True)
# ax.set_xlabel("Epochs")
# ax.set_ylabel("Mean Loss")
|
[
"logging.getLogger",
"models.modeling_bert.Config.from_json_file",
"ray.tune.track.log",
"apex.amp.scale_loss",
"utils.korquad_utils.RawResult",
"torch.cuda.device_count",
"io.open",
"ray.tune.grid_search",
"apex.amp.initialize",
"torch.cuda.is_available",
"ray.init",
"os.path.exists",
"json.dumps",
"utils.tokenization.BertTokenizer",
"ray.tune.loguniform",
"utils.korquad_utils.write_predictions",
"numpy.random.seed",
"apex.amp.master_params",
"ray.tune.schedulers.HyperBandScheduler",
"utils.optimization.AdamW",
"torch.utils.data.SequentialSampler",
"pickle.load",
"torch.utils.data.TensorDataset",
"numpy.random.uniform",
"utils.optimization.WarmupLinearSchedule",
"models.modeling_bert.QuestionAnswering",
"logging.basicConfig",
"torch.manual_seed",
"torch.cuda.manual_seed_all",
"pickle.dump",
"ray.shutdown",
"os.makedirs",
"torch.load",
"tqdm.tqdm",
"os.path.join",
"random.seed",
"torch.utils.data.RandomSampler",
"debug.evaluate_korquad.evaluate",
"utils.korquad_utils.read_squad_examples",
"utils.korquad_utils.convert_examples_to_features",
"torch.tensor",
"torch.utils.data.DataLoader",
"json.load",
"torch.no_grad"
] |
[((887, 1030), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (906, 1030), False, 'import logging\n'), ((1070, 1097), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1087, 1097), False, 'import logging\n'), ((1380, 1394), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (1392, 1394), False, 'import ray\n'), ((1395, 1427), 'ray.init', 'ray.init', ([], {'webui_host': '"""127.0.0.1"""'}), "(webui_host='127.0.0.1')\n", (1403, 1427), False, 'import ray\n'), ((1666, 1700), 'ray.tune.loguniform', 'tune.loguniform', (['(0.0005)', '(5e-07)', '(10)'], {}), '(0.0005, 5e-07, 10)\n', (1681, 1700), False, 'from ray import tune\n'), ((1723, 1755), 'ray.tune.grid_search', 'tune.grid_search', (['[4, 8, 12, 16]'], {}), '([4, 8, 12, 16])\n', (1739, 1755), False, 'from ray import tune\n'), ((2309, 2407), 'utils.korquad_utils.read_squad_examples', 'read_squad_examples', ([], {'input_file': 'predict_file', 'is_training': '(False)', 'version_2_with_negative': '(False)'}), '(input_file=predict_file, is_training=False,\n version_2_with_negative=False)\n', (2328, 2407), False, 'from utils.korquad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions\n'), ((2494, 2679), 'utils.korquad_utils.convert_examples_to_features', 'convert_examples_to_features', ([], {'examples': 'examples', 'tokenizer': 'tokenizer', 'max_seq_length': 'max_seq_length', 'doc_stride': 'doc_stride', 'max_query_length': 'max_query_length', 'is_training': '(False)'}), '(examples=examples, tokenizer=tokenizer,\n max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=\n max_query_length, is_training=False)\n', (2522, 2679), False, 'from utils.korquad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions\n'), ((3100, 3168), 'torch.tensor', 'torch.tensor', (['[f.input_ids for f in eval_features]'], {'dtype': 'torch.long'}), '([f.input_ids for f in eval_features], dtype=torch.long)\n', (3112, 3168), False, 'import torch\n'), ((3190, 3259), 'torch.tensor', 'torch.tensor', (['[f.input_mask for f in eval_features]'], {'dtype': 'torch.long'}), '([f.input_mask for f in eval_features], dtype=torch.long)\n', (3202, 3259), False, 'import torch\n'), ((3282, 3352), 'torch.tensor', 'torch.tensor', (['[f.segment_ids for f in eval_features]'], {'dtype': 'torch.long'}), '([f.segment_ids for f in eval_features], dtype=torch.long)\n', (3294, 3352), False, 'import torch\n'), ((3445, 3530), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_mask', 'all_segment_ids', 'all_example_index'], {}), '(all_input_ids, all_input_mask, all_segment_ids, all_example_index\n )\n', (3458, 3530), False, 'from torch.utils.data import DataLoader, SequentialSampler, RandomSampler, TensorDataset\n'), ((3540, 3566), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['dataset'], {}), '(dataset)\n', (3557, 3566), False, 'from torch.utils.data import DataLoader, SequentialSampler, RandomSampler, TensorDataset\n'), ((3584, 3643), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'sampler': 'sampler', 'batch_size': 'batch_size'}), '(dataset, sampler=sampler, batch_size=batch_size)\n', (3594, 3643), False, 'from torch.utils.data import DataLoader, SequentialSampler, RandomSampler, TensorDataset\n'), ((4014, 4049), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'desc': '"""Evaluating"""'}), "(dataloader, desc='Evaluating')\n", (4018, 4049), False, 'from tqdm import tqdm, trange\n'), ((4844, 4888), 'os.path.join', 'os.path.join', (['output_dir', '"""predictions.json"""'], {}), "(output_dir, 'predictions.json')\n", (4856, 4888), False, 'import os\n'), ((4913, 4963), 'os.path.join', 'os.path.join', (['output_dir', '"""nbest_predictions.json"""'], {}), "(output_dir, 'nbest_predictions.json')\n", (4925, 4963), False, 'import os\n'), ((4968, 5143), 'utils.korquad_utils.write_predictions', 'write_predictions', (['eval_examples', 'eval_features', 'all_results', 'n_best_size', 'max_answer_length', '(False)', 'output_prediction_file', 'output_nbest_file', 'None', '(False)', '(False)', '(0.0)'], {}), '(eval_examples, eval_features, all_results, n_best_size,\n max_answer_length, False, output_prediction_file, output_nbest_file,\n None, False, False, 0.0)\n', (4985, 5143), False, 'from utils.korquad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions\n'), ((5759, 5793), 'debug.evaluate_korquad.evaluate', 'korquad_eval', (['dataset', 'predictions'], {}), '(dataset, predictions)\n', (5771, 5793), True, 'from debug.evaluate_korquad import evaluate as korquad_eval\n'), ((6093, 6143), 'os.path.join', 'os.path.join', (['basepath', '"""data/bert_small_ckpt.bin"""'], {}), "(basepath, 'data/bert_small_ckpt.bin')\n", (6105, 6143), False, 'import os\n'), ((6160, 6206), 'os.path.join', 'os.path.join', (['basepath', '"""data/bert_small.json"""'], {}), "(basepath, 'data/bert_small.json')\n", (6172, 6206), False, 'import os\n'), ((6221, 6268), 'os.path.join', 'os.path.join', (['basepath', '"""data/ko_vocab_32k.txt"""'], {}), "(basepath, 'data/ko_vocab_32k.txt')\n", (6233, 6268), False, 'import os\n'), ((6283, 6337), 'os.path.join', 'os.path.join', (['basepath', '"""data/KorQuAD_v1.0_train.json"""'], {}), "(basepath, 'data/KorQuAD_v1.0_train.json')\n", (6295, 6337), False, 'import os\n'), ((6355, 6407), 'os.path.join', 'os.path.join', (['basepath', '"""data/KorQuAD_v1.0_dev.json"""'], {}), "(basepath, 'data/KorQuAD_v1.0_dev.json')\n", (6367, 6407), False, 'import os\n'), ((6636, 6661), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6659, 6661), False, 'import torch\n'), ((6751, 6784), 'random.seed', 'random.seed', (["train_config['seed']"], {}), "(train_config['seed'])\n", (6762, 6784), False, 'import random\n'), ((6789, 6825), 'numpy.random.seed', 'np.random.seed', (["train_config['seed']"], {}), "(train_config['seed'])\n", (6803, 6825), True, 'import numpy as np\n'), ((6830, 6869), 'torch.manual_seed', 'torch.manual_seed', (["train_config['seed']"], {}), "(train_config['seed'])\n", (6847, 6869), False, 'import torch\n'), ((7034, 7127), 'utils.tokenization.BertTokenizer', 'BertTokenizer', (['vocab_file'], {'max_len': "train_config['max_seq_length']", 'do_basic_tokenize': '(True)'}), "(vocab_file, max_len=train_config['max_seq_length'],\n do_basic_tokenize=True)\n", (7047, 7127), False, 'from utils.tokenization import BertTokenizer\n'), ((7162, 7197), 'models.modeling_bert.Config.from_json_file', 'Config.from_json_file', (['model_config'], {}), '(model_config)\n', (7183, 7197), False, 'from models.modeling_bert import QuestionAnswering, Config\n'), ((7210, 7235), 'models.modeling_bert.QuestionAnswering', 'QuestionAnswering', (['config'], {}), '(config)\n', (7227, 7235), False, 'from models.modeling_bert import QuestionAnswering, Config\n'), ((7465, 7512), 'os.path.join', 'os.path.join', (['output_dir', '"""hyperparameters.txt"""'], {}), "(output_dir, 'hyperparameters.txt')\n", (7477, 7512), False, 'import os\n'), ((7976, 8071), 'utils.korquad_utils.read_squad_examples', 'read_squad_examples', ([], {'input_file': 'train_file', 'is_training': '(True)', 'version_2_with_negative': '(False)'}), '(input_file=train_file, is_training=True,\n version_2_with_negative=False)\n', (7995, 8071), False, 'from utils.korquad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions\n'), ((9284, 9392), 'utils.optimization.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': "train_config['learning_rate']", 'eps': "train_config['adam_epsilon']"}), "(optimizer_grouped_parameters, lr=train_config['learning_rate'], eps=\n train_config['adam_epsilon'])\n", (9289, 9392), False, 'from utils.optimization import AdamW, WarmupLinearSchedule\n'), ((9448, 9570), 'utils.optimization.WarmupLinearSchedule', 'WarmupLinearSchedule', (['optimizer'], {'warmup_steps': '(num_train_optimization_steps * 0.1)', 't_total': 'num_train_optimization_steps'}), '(optimizer, warmup_steps=num_train_optimization_steps *\n 0.1, t_total=num_train_optimization_steps)\n', (9468, 9570), False, 'from utils.optimization import AdamW, WarmupLinearSchedule\n'), ((10349, 10418), 'torch.tensor', 'torch.tensor', (['[f.input_ids for f in train_features]'], {'dtype': 'torch.long'}), '([f.input_ids for f in train_features], dtype=torch.long)\n', (10361, 10418), False, 'import torch\n'), ((10440, 10510), 'torch.tensor', 'torch.tensor', (['[f.input_mask for f in train_features]'], {'dtype': 'torch.long'}), '([f.input_mask for f in train_features], dtype=torch.long)\n', (10452, 10510), False, 'import torch\n'), ((10533, 10604), 'torch.tensor', 'torch.tensor', (['[f.segment_ids for f in train_features]'], {'dtype': 'torch.long'}), '([f.segment_ids for f in train_features], dtype=torch.long)\n', (10545, 10604), False, 'import torch\n'), ((10631, 10705), 'torch.tensor', 'torch.tensor', (['[f.start_position for f in train_features]'], {'dtype': 'torch.long'}), '([f.start_position for f in train_features], dtype=torch.long)\n', (10643, 10705), False, 'import torch\n'), ((10730, 10802), 'torch.tensor', 'torch.tensor', (['[f.end_position for f in train_features]'], {'dtype': 'torch.long'}), '([f.end_position for f in train_features], dtype=torch.long)\n', (10742, 10802), False, 'import torch\n'), ((10820, 10925), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_mask', 'all_segment_ids', 'all_start_positions', 'all_end_positions'], {}), '(all_input_ids, all_input_mask, all_segment_ids,\n all_start_positions, all_end_positions)\n', (10833, 10925), False, 'from torch.utils.data import DataLoader, SequentialSampler, RandomSampler, TensorDataset\n'), ((10974, 10999), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_data'], {}), '(train_data)\n', (10987, 10999), False, 'from torch.utils.data import DataLoader, SequentialSampler, RandomSampler, TensorDataset\n'), ((11023, 11118), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'sampler': 'train_sampler', 'batch_size': "train_config['train_batch_size']"}), "(train_data, sampler=train_sampler, batch_size=train_config[\n 'train_batch_size'])\n", (11033, 11118), False, 'from torch.utils.data import DataLoader, SequentialSampler, RandomSampler, TensorDataset\n'), ((12999, 13041), 'os.path.join', 'os.path.join', (['output_dir', 'model_checkpoint'], {}), '(output_dir, model_checkpoint)\n', (13011, 13041), False, 'import os\n'), ((13799, 13823), 'ray.tune.track.log', 'track.log', ([], {'f1': "eval['f1']"}), "(f1=eval['f1'])\n", (13808, 13823), False, 'from ray.tune import track\n'), ((5250, 5268), 'io.open', 'open', (['predict_file'], {}), '(predict_file)\n', (5254, 5268), False, 'from io import open\n'), ((5309, 5332), 'json.load', 'json.load', (['dataset_file'], {}), '(dataset_file)\n', (5318, 5332), False, 'import json\n'), ((5720, 5746), 'json.load', 'json.load', (['prediction_file'], {}), '(prediction_file)\n', (5729, 5746), False, 'import json\n'), ((5810, 5827), 'json.dumps', 'json.dumps', (['_eval'], {}), '(_eval)\n', (5820, 5827), False, 'import json\n'), ((6896, 6944), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (["train_config['seed']"], {}), "(train_config['seed'])\n", (6922, 6944), False, 'import torch\n'), ((6957, 6983), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (6971, 6983), False, 'import os\n'), ((6993, 7016), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (7004, 7016), False, 'import os\n'), ((7267, 7289), 'torch.load', 'torch.load', (['checkpoint'], {}), '(checkpoint)\n', (7277, 7289), False, 'import torch\n'), ((7527, 7552), 'io.open', 'open', (['paramfile_path', '"""w"""'], {}), "(paramfile_path, 'w')\n", (7531, 7552), False, 'from io import open\n'), ((9902, 9960), 'apex.amp.initialize', 'amp.initialize', (['model', 'optimizer'], {'opt_level': 'fp16_opt_level'}), '(model, optimizer, opt_level=fp16_opt_level)\n', (9916, 9960), False, 'from apex import amp\n'), ((13910, 13953), 'ray.tune.schedulers.HyperBandScheduler', 'HyperBandScheduler', ([], {'metric': '"""f1"""', 'mode': '"""max"""'}), "(metric='f1', mode='max')\n", (13928, 13953), False, 'from ray.tune.schedulers import HyperBandScheduler\n'), ((4193, 4208), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4206, 4208), False, 'import torch\n'), ((5632, 5676), 'os.path.join', 'os.path.join', (['output_dir', '"""predictions.json"""'], {}), "(output_dir, 'predictions.json')\n", (5644, 5676), False, 'import os\n'), ((6586, 6611), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6609, 6611), False, 'import torch\n'), ((8095, 8133), 'io.open', 'open', (['cached_train_features_file', '"""rb"""'], {}), "(cached_train_features_file, 'rb')\n", (8099, 8133), False, 'from io import open\n'), ((8174, 8193), 'pickle.load', 'pickle.load', (['reader'], {}), '(reader)\n', (8185, 8193), False, 'import pickle\n'), ((8231, 8473), 'utils.korquad_utils.convert_examples_to_features', 'convert_examples_to_features', ([], {'examples': 'train_examples', 'tokenizer': 'tokenizer', 'max_seq_length': "train_config['max_seq_length']", 'doc_stride': "train_config['doc_stride']", 'max_query_length': "train_config['max_query_length']", 'is_training': '(True)'}), "(examples=train_examples, tokenizer=tokenizer,\n max_seq_length=train_config['max_seq_length'], doc_stride=train_config[\n 'doc_stride'], max_query_length=train_config['max_query_length'],\n is_training=True)\n", (8259, 8473), False, 'from utils.korquad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions\n'), ((1563, 1589), 'numpy.random.uniform', 'np.random.uniform', (['(50)', '(100)'], {}), '(50, 100)\n', (1580, 1589), True, 'import numpy as np\n'), ((1889, 1915), 'numpy.random.uniform', 'np.random.uniform', (['(50)', '(100)'], {}), '(50, 100)\n', (1906, 1915), True, 'import numpy as np\n'), ((1999, 2024), 'numpy.random.uniform', 'np.random.uniform', (['(12)', '(25)'], {}), '(12, 25)\n', (2016, 2024), True, 'import numpy as np\n'), ((2095, 2136), 'numpy.random.uniform', 'np.random.uniform', (['(1000000.0)', '(100000000.0)'], {}), '(1000000.0, 100000000.0)\n', (2112, 2136), True, 'import numpy as np\n'), ((4651, 4736), 'utils.korquad_utils.RawResult', 'RawResult', ([], {'unique_id': 'unique_id', 'start_logits': 'start_logits', 'end_logits': 'end_logits'}), '(unique_id=unique_id, start_logits=start_logits, end_logits=end_logits\n )\n', (4660, 4736), False, 'from utils.korquad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions\n'), ((8642, 8680), 'io.open', 'open', (['cached_train_features_file', '"""wb"""'], {}), "(cached_train_features_file, 'wb')\n", (8646, 8680), False, 'from io import open\n'), ((8704, 8739), 'pickle.dump', 'pickle.dump', (['train_features', 'writer'], {}), '(train_features, writer)\n', (8715, 8739), False, 'import pickle\n'), ((12012, 12043), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optimizer'], {}), '(loss, optimizer)\n', (12026, 12043), False, 'from apex import amp\n'), ((12150, 12178), 'apex.amp.master_params', 'amp.master_params', (['optimizer'], {}), '(optimizer)\n', (12167, 12178), False, 'from apex import amp\n')]
|
from datetime import date
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from phonenumber_field.formfields import PhoneNumberField
from .models import Profile
from .options import STATE_CHOICES, YEARS
from .utils import AgeValidator
class UserRegisterForm(UserCreationForm):
first_name = forms.CharField(
max_length=30, widget=forms.TextInput(attrs={"placeholder": "Given name"})
)
middle_name = forms.CharField(
max_length=30,
required=False,
widget=forms.TextInput(attrs={"placeholder": "Middle name"}),
)
last_name = forms.CharField(
max_length=30, widget=forms.TextInput(attrs={"placeholder": "Surname"})
)
date_of_birth = forms.DateField(
label="Date of Birth",
initial=date.today(),
required=True,
help_text="Age must be above 16",
validators=[AgeValidator],
widget=forms.SelectDateWidget(years=YEARS),
)
email = forms.EmailField(
max_length=150,
widget=forms.TextInput(attrs={"placeholder": "e.g. <EMAIL>"}),
)
address1 = forms.CharField(
max_length=100,
help_text="Street, District",
widget=forms.TextInput(attrs={"placeholder": "Street, District"}),
)
address2 = forms.CharField(
max_length=100,
help_text="State",
widget=forms.Select(attrs={"placeholder": "State"}, choices=STATE_CHOICES),
)
phone = PhoneNumberField(
required=False,
initial="+977",
help_text="Phone number must contain country calling code (e.g. +97798XXYYZZSS)",
)
class Meta:
model = User
fields = [
"first_name",
"middle_name",
"last_name",
"date_of_birth",
"username",
"email",
"phone",
"<PASSWORD>",
"<PASSWORD>",
"address1",
"address2",
]
# widget={
# 'username': forms.TextInput(attrs={'placeholder': 'Enter desired username.'}),
# }
class UserUpdateForm(forms.ModelForm):
class Meta:
model = User
fields = [
"username",
]
class ProfileUpdateForm(forms.ModelForm):
first_name = forms.CharField(
max_length=30, widget=forms.TextInput(attrs={"placeholder": "Given name"})
)
middle_name = forms.CharField(
max_length=30,
required=False,
widget=forms.TextInput(attrs={"placeholder": "Middle name"}),
)
last_name = forms.CharField(
max_length=30, widget=forms.TextInput(attrs={"placeholder": "Surname"})
)
email = forms.EmailField(
max_length=150,
widget=forms.TextInput(attrs={"placeholder": "e.g. <EMAIL>"}),
)
address1 = forms.CharField(
max_length=100,
help_text="Street, District",
widget=forms.TextInput(attrs={"placeholder": "Street, District"}),
)
address2 = forms.CharField(
max_length=100,
help_text="State",
widget=forms.Select(attrs={"placeholder": "State"}, choices=STATE_CHOICES),
)
phone = PhoneNumberField(
required=False,
help_text="Phone number must contain country calling code (e.g. +97798XXYYZZSS)",
)
class Meta:
model = Profile
fields = [
"first_name",
"middle_name",
"last_name",
"email",
"address1",
"address2",
"phone",
"image",
]
|
[
"django.forms.Select",
"phonenumber_field.formfields.PhoneNumberField",
"django.forms.SelectDateWidget",
"django.forms.TextInput",
"datetime.date.today"
] |
[((1503, 1638), 'phonenumber_field.formfields.PhoneNumberField', 'PhoneNumberField', ([], {'required': '(False)', 'initial': '"""+977"""', 'help_text': '"""Phone number must contain country calling code (e.g. +97798XXYYZZSS)"""'}), "(required=False, initial='+977', help_text=\n 'Phone number must contain country calling code (e.g. +97798XXYYZZSS)')\n", (1519, 1638), False, 'from phonenumber_field.formfields import PhoneNumberField\n'), ((3195, 3314), 'phonenumber_field.formfields.PhoneNumberField', 'PhoneNumberField', ([], {'required': '(False)', 'help_text': '"""Phone number must contain country calling code (e.g. +97798XXYYZZSS)"""'}), "(required=False, help_text=\n 'Phone number must contain country calling code (e.g. +97798XXYYZZSS)')\n", (3211, 3314), False, 'from phonenumber_field.formfields import PhoneNumberField\n'), ((420, 472), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'Given name'}"}), "(attrs={'placeholder': 'Given name'})\n", (435, 472), False, 'from django import forms\n'), ((576, 629), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'Middle name'}"}), "(attrs={'placeholder': 'Middle name'})\n", (591, 629), False, 'from django import forms\n'), ((700, 749), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'Surname'}"}), "(attrs={'placeholder': 'Surname'})\n", (715, 749), False, 'from django import forms\n'), ((840, 852), 'datetime.date.today', 'date.today', ([], {}), '()\n', (850, 852), False, 'from datetime import date\n'), ((969, 1004), 'django.forms.SelectDateWidget', 'forms.SelectDateWidget', ([], {'years': 'YEARS'}), '(years=YEARS)\n', (991, 1004), False, 'from django import forms\n'), ((1081, 1135), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'e.g. <EMAIL>'}"}), "(attrs={'placeholder': 'e.g. <EMAIL>'})\n", (1096, 1135), False, 'from django import forms\n'), ((1252, 1310), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'Street, District'}"}), "(attrs={'placeholder': 'Street, District'})\n", (1267, 1310), False, 'from django import forms\n'), ((1416, 1483), 'django.forms.Select', 'forms.Select', ([], {'attrs': "{'placeholder': 'State'}", 'choices': 'STATE_CHOICES'}), "(attrs={'placeholder': 'State'}, choices=STATE_CHOICES)\n", (1428, 1483), False, 'from django import forms\n'), ((2368, 2420), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'Given name'}"}), "(attrs={'placeholder': 'Given name'})\n", (2383, 2420), False, 'from django import forms\n'), ((2524, 2577), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'Middle name'}"}), "(attrs={'placeholder': 'Middle name'})\n", (2539, 2577), False, 'from django import forms\n'), ((2648, 2697), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'Surname'}"}), "(attrs={'placeholder': 'Surname'})\n", (2663, 2697), False, 'from django import forms\n'), ((2773, 2827), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'e.g. <EMAIL>'}"}), "(attrs={'placeholder': 'e.g. <EMAIL>'})\n", (2788, 2827), False, 'from django import forms\n'), ((2944, 3002), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'Street, District'}"}), "(attrs={'placeholder': 'Street, District'})\n", (2959, 3002), False, 'from django import forms\n'), ((3108, 3175), 'django.forms.Select', 'forms.Select', ([], {'attrs': "{'placeholder': 'State'}", 'choices': 'STATE_CHOICES'}), "(attrs={'placeholder': 'State'}, choices=STATE_CHOICES)\n", (3120, 3175), False, 'from django import forms\n')]
|
from django.urls import path
from . import views
app_name = 'trip'
urlpatterns = [
path('', views.index, name='index'),
path('tripblog/', views.AllTrip.as_view(), name="tripplan"),
path('likereview/', views.like_comment_view, name="like_comment"),
path('tripdetail/<int:pk>/', views.trip_detail, name="tripdetail"),
path('addpost/', views.add_post, name="addpost"),
path('likepost/', views.like_post, name="like_trip"),
path('tripdetail/edit/<int:pk>', views.edit_post, name='editpost'),
path('tripdetail/<int:pk>/remove', views.delete_post, name='deletepost'),
path('category/<category>', views.CatsListView.as_view(), name='category'),
path('addcomment/', views.post_comment, name="add_comment"),
path('action/gettripqueries', views.get_trip_queries, name='get-trip-query'),
# 127.0.0.1/domnfoironkwe_0394
path('place/<str:place_id>/', views.place_info, name='place-detail'),
path('place/<str:place_id>/like', views.place_like, name='place-like'),
path('place/<str:place_id>/dislike', views.place_dislike, name='place-dislike'),
path('place/<str:place_id>/addreview', views.place_review, name='place-review'),
path('place/<str:place_id>/removereview', views.place_remove_review, name='place-remove-review'),
]
|
[
"django.urls.path"
] |
[((88, 123), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (92, 123), False, 'from django.urls import path\n'), ((194, 259), 'django.urls.path', 'path', (['"""likereview/"""', 'views.like_comment_view'], {'name': '"""like_comment"""'}), "('likereview/', views.like_comment_view, name='like_comment')\n", (198, 259), False, 'from django.urls import path\n'), ((265, 331), 'django.urls.path', 'path', (['"""tripdetail/<int:pk>/"""', 'views.trip_detail'], {'name': '"""tripdetail"""'}), "('tripdetail/<int:pk>/', views.trip_detail, name='tripdetail')\n", (269, 331), False, 'from django.urls import path\n'), ((337, 385), 'django.urls.path', 'path', (['"""addpost/"""', 'views.add_post'], {'name': '"""addpost"""'}), "('addpost/', views.add_post, name='addpost')\n", (341, 385), False, 'from django.urls import path\n'), ((391, 443), 'django.urls.path', 'path', (['"""likepost/"""', 'views.like_post'], {'name': '"""like_trip"""'}), "('likepost/', views.like_post, name='like_trip')\n", (395, 443), False, 'from django.urls import path\n'), ((449, 515), 'django.urls.path', 'path', (['"""tripdetail/edit/<int:pk>"""', 'views.edit_post'], {'name': '"""editpost"""'}), "('tripdetail/edit/<int:pk>', views.edit_post, name='editpost')\n", (453, 515), False, 'from django.urls import path\n'), ((521, 593), 'django.urls.path', 'path', (['"""tripdetail/<int:pk>/remove"""', 'views.delete_post'], {'name': '"""deletepost"""'}), "('tripdetail/<int:pk>/remove', views.delete_post, name='deletepost')\n", (525, 593), False, 'from django.urls import path\n'), ((679, 738), 'django.urls.path', 'path', (['"""addcomment/"""', 'views.post_comment'], {'name': '"""add_comment"""'}), "('addcomment/', views.post_comment, name='add_comment')\n", (683, 738), False, 'from django.urls import path\n'), ((744, 820), 'django.urls.path', 'path', (['"""action/gettripqueries"""', 'views.get_trip_queries'], {'name': '"""get-trip-query"""'}), "('action/gettripqueries', views.get_trip_queries, name='get-trip-query')\n", (748, 820), False, 'from django.urls import path\n'), ((861, 929), 'django.urls.path', 'path', (['"""place/<str:place_id>/"""', 'views.place_info'], {'name': '"""place-detail"""'}), "('place/<str:place_id>/', views.place_info, name='place-detail')\n", (865, 929), False, 'from django.urls import path\n'), ((935, 1005), 'django.urls.path', 'path', (['"""place/<str:place_id>/like"""', 'views.place_like'], {'name': '"""place-like"""'}), "('place/<str:place_id>/like', views.place_like, name='place-like')\n", (939, 1005), False, 'from django.urls import path\n'), ((1011, 1090), 'django.urls.path', 'path', (['"""place/<str:place_id>/dislike"""', 'views.place_dislike'], {'name': '"""place-dislike"""'}), "('place/<str:place_id>/dislike', views.place_dislike, name='place-dislike')\n", (1015, 1090), False, 'from django.urls import path\n'), ((1096, 1175), 'django.urls.path', 'path', (['"""place/<str:place_id>/addreview"""', 'views.place_review'], {'name': '"""place-review"""'}), "('place/<str:place_id>/addreview', views.place_review, name='place-review')\n", (1100, 1175), False, 'from django.urls import path\n'), ((1181, 1282), 'django.urls.path', 'path', (['"""place/<str:place_id>/removereview"""', 'views.place_remove_review'], {'name': '"""place-remove-review"""'}), "('place/<str:place_id>/removereview', views.place_remove_review, name=\n 'place-remove-review')\n", (1185, 1282), False, 'from django.urls import path\n')]
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.build_graph.address import Address
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target
from pants_test.base_test import BaseTest
class CmdLineSpecParserTest(BaseTest):
@property
def alias_groups(self):
return BuildFileAliases(
targets={
'generic': Target
}
)
def setUp(self):
super(CmdLineSpecParserTest, self).setUp()
def add_target(path, name):
self.add_to_build_file(path, 'generic(name="{name}")\n'.format(name=name))
add_target('BUILD', 'root')
add_target('a', 'a')
add_target('a', 'b')
add_target('a/b', 'b')
add_target('a/b', 'c')
self.spec_parser = CmdLineSpecParser(self.build_root, self.address_mapper)
def test_normal(self):
self.assert_parsed(cmdline_spec=':root', expected=[':root'])
self.assert_parsed(cmdline_spec='//:root', expected=[':root'])
self.assert_parsed(cmdline_spec='a', expected=['a'])
self.assert_parsed(cmdline_spec='a:a', expected=['a'])
self.assert_parsed(cmdline_spec='a/b', expected=['a/b'])
self.assert_parsed(cmdline_spec='a/b:b', expected=['a/b'])
self.assert_parsed(cmdline_spec='a/b:c', expected=['a/b:c'])
def test_sibling(self):
self.assert_parsed(cmdline_spec=':', expected=[':root'])
self.assert_parsed(cmdline_spec='//:', expected=[':root'])
self.assert_parsed(cmdline_spec='a:', expected=['a', 'a:b'])
self.assert_parsed(cmdline_spec='//a:', expected=['a', 'a:b'])
self.assert_parsed(cmdline_spec='a/b:', expected=['a/b', 'a/b:c'])
self.assert_parsed(cmdline_spec='//a/b:', expected=['a/b', 'a/b:c'])
def test_sibling_or_descendents(self):
self.assert_parsed(cmdline_spec='::', expected=[':root', 'a', 'a:b', 'a/b', 'a/b:c'])
self.assert_parsed(cmdline_spec='//::', expected=[':root', 'a', 'a:b', 'a/b', 'a/b:c'])
self.assert_parsed(cmdline_spec='a::', expected=['a', 'a:b', 'a/b', 'a/b:c'])
self.assert_parsed(cmdline_spec='//a::', expected=['a', 'a:b', 'a/b', 'a/b:c'])
self.assert_parsed(cmdline_spec='a/b::', expected=['a/b', 'a/b:c'])
self.assert_parsed(cmdline_spec='//a/b::', expected=['a/b', 'a/b:c'])
def test_absolute(self):
self.assert_parsed(cmdline_spec=os.path.join(self.build_root, 'a'), expected=['a'])
self.assert_parsed(cmdline_spec=os.path.join(self.build_root, 'a:a'), expected=['a'])
self.assert_parsed(cmdline_spec=os.path.join(self.build_root, 'a:'), expected=['a', 'a:b'])
self.assert_parsed(cmdline_spec=os.path.join(self.build_root, 'a::'),
expected=['a', 'a:b', 'a/b', 'a/b:c'])
double_absolute = '/' + os.path.join(self.build_root, 'a')
self.assertEquals('//', double_absolute[:2],
'A sanity check we have a leading-// absolute spec')
with self.assertRaises(self.spec_parser.BadSpecError):
self.spec_parser.parse_addresses(double_absolute).next()
with self.assertRaises(self.spec_parser.BadSpecError):
self.spec_parser.parse_addresses('/not/the/buildroot/a').next()
def test_cmd_line_affordances(self):
self.assert_parsed(cmdline_spec='./:root', expected=[':root'])
self.assert_parsed(cmdline_spec='//./:root', expected=[':root'])
self.assert_parsed(cmdline_spec='//./a/../:root', expected=[':root'])
self.assert_parsed(cmdline_spec=os.path.join(self.build_root, './a/../:root'),
expected=[':root'])
self.assert_parsed(cmdline_spec='a/', expected=['a'])
self.assert_parsed(cmdline_spec='./a/', expected=['a'])
self.assert_parsed(cmdline_spec=os.path.join(self.build_root, './a/'), expected=['a'])
self.assert_parsed(cmdline_spec='a/b/:b', expected=['a/b'])
self.assert_parsed(cmdline_spec='./a/b/:b', expected=['a/b'])
self.assert_parsed(cmdline_spec=os.path.join(self.build_root, './a/b/:b'), expected=['a/b'])
def test_cmd_line_spec_list(self):
self.assert_parsed_list(cmdline_spec_list=['a', 'a/b'], expected=['a', 'a/b'])
self.assert_parsed_list(cmdline_spec_list=['::'], expected=[':root', 'a', 'a:b', 'a/b', 'a/b:c'])
def test_does_not_exist(self):
with self.assertRaises(self.spec_parser.BadSpecError):
self.spec_parser.parse_addresses('c').next()
with self.assertRaises(self.spec_parser.BadSpecError):
self.spec_parser.parse_addresses('c:').next()
with self.assertRaises(self.spec_parser.BadSpecError):
self.spec_parser.parse_addresses('c::').next()
def assert_parsed(self, cmdline_spec, expected):
def sort(addresses):
return sorted(addresses, key=lambda address: address.spec)
self.assertEqual(sort(Address.parse(addr) for addr in expected),
sort(self.spec_parser.parse_addresses(cmdline_spec)))
def assert_parsed_list(self, cmdline_spec_list, expected):
def sort(addresses):
return sorted(addresses, key=lambda address: address.spec)
self.assertEqual(sort(Address.parse(addr) for addr in expected),
sort(self.spec_parser.parse_addresses(cmdline_spec_list)))
def test_spec_excludes(self):
expected_specs = [':root', 'a', 'a:b', 'a/b', 'a/b:c']
# This bogus BUILD file gets in the way of parsing.
self.add_to_build_file('some/dir', 'COMPLETELY BOGUS BUILDFILE)\n')
with self.assertRaises(CmdLineSpecParser.BadSpecError):
self.assert_parsed_list(cmdline_spec_list=['::'], expected=expected_specs)
# Test absolute path in spec_excludes.
self.spec_parser = CmdLineSpecParser(self.build_root, self.address_mapper,
spec_excludes=[os.path.join(self.build_root, 'some')])
self.assert_parsed_list(cmdline_spec_list=['::'], expected=expected_specs)
# Test relative path in spec_excludes.
self.spec_parser = CmdLineSpecParser(self.build_root, self.address_mapper,
spec_excludes=['some'])
self.assert_parsed_list(cmdline_spec_list=['::'], expected=expected_specs)
def test_exclude_target_regexps(self):
expected_specs = [':root', 'a', 'a:b', 'a/b', 'a/b:c']
# This bogus BUILD file gets in the way of parsing.
self.add_to_build_file('some/dir', 'COMPLETELY BOGUS BUILDFILE)\n')
with self.assertRaises(CmdLineSpecParser.BadSpecError):
self.assert_parsed_list(cmdline_spec_list=['::'], expected=expected_specs)
self.spec_parser = CmdLineSpecParser(self.build_root, self.address_mapper,
exclude_target_regexps=[r'.*some/dir.*'])
self.assert_parsed_list(cmdline_spec_list=['::'], expected=expected_specs)
class CmdLineSpecParserBadBuildTest(BaseTest):
def setUp(self):
super(CmdLineSpecParserBadBuildTest, self).setUp()
self.add_to_build_file('bad/a', 'a_is_bad')
self.add_to_build_file('bad/b', 'b_is_bad')
self.spec_parser = CmdLineSpecParser(self.build_root, self.address_mapper)
self.NO_FAIL_FAST_RE = re.compile(r"""^--------------------
.*
Exception message: name 'a_is_bad' is not defined
while executing BUILD file BuildFile\((/[^/]+)*/bad/a/BUILD, FileSystemProjectTree\(.*\)\)
Loading addresses from 'bad/a' failed\.
.*
Exception message: name 'b_is_bad' is not defined
while executing BUILD file BuildFile\((/[^/]+)*T/bad/b/BUILD, FileSystemProjectTree\(.*\)\)
Loading addresses from 'bad/b' failed\.
Invalid BUILD files for \[::\]$""", re.DOTALL)
self.FAIL_FAST_RE = """^name 'a_is_bad' is not defined
while executing BUILD file BuildFile\((/[^/]+)*/bad/a/BUILD\, FileSystemProjectTree\(.*\)\)
Loading addresses from 'bad/a' failed.$"""
def test_bad_build_files(self):
with self.assertRaisesRegexp(self.spec_parser.BadSpecError, self.NO_FAIL_FAST_RE):
list(self.spec_parser.parse_addresses('::'))
def test_bad_build_files_fail_fast(self):
with self.assertRaisesRegexp(self.spec_parser.BadSpecError, self.FAIL_FAST_RE):
list(self.spec_parser.parse_addresses('::', True))
|
[
"pants.build_graph.address.Address.parse",
"re.compile",
"pants.base.cmd_line_spec_parser.CmdLineSpecParser",
"os.path.join",
"pants.build_graph.build_file_aliases.BuildFileAliases"
] |
[((672, 717), 'pants.build_graph.build_file_aliases.BuildFileAliases', 'BuildFileAliases', ([], {'targets': "{'generic': Target}"}), "(targets={'generic': Target})\n", (688, 717), False, 'from pants.build_graph.build_file_aliases import BuildFileAliases\n'), ((1088, 1143), 'pants.base.cmd_line_spec_parser.CmdLineSpecParser', 'CmdLineSpecParser', (['self.build_root', 'self.address_mapper'], {}), '(self.build_root, self.address_mapper)\n', (1105, 1143), False, 'from pants.base.cmd_line_spec_parser import CmdLineSpecParser\n'), ((6176, 6255), 'pants.base.cmd_line_spec_parser.CmdLineSpecParser', 'CmdLineSpecParser', (['self.build_root', 'self.address_mapper'], {'spec_excludes': "['some']"}), "(self.build_root, self.address_mapper, spec_excludes=['some'])\n", (6193, 6255), False, 'from pants.base.cmd_line_spec_parser import CmdLineSpecParser\n'), ((6771, 6871), 'pants.base.cmd_line_spec_parser.CmdLineSpecParser', 'CmdLineSpecParser', (['self.build_root', 'self.address_mapper'], {'exclude_target_regexps': "['.*some/dir.*']"}), "(self.build_root, self.address_mapper,\n exclude_target_regexps=['.*some/dir.*'])\n", (6788, 6871), False, 'from pants.base.cmd_line_spec_parser import CmdLineSpecParser\n'), ((7234, 7289), 'pants.base.cmd_line_spec_parser.CmdLineSpecParser', 'CmdLineSpecParser', (['self.build_root', 'self.address_mapper'], {}), '(self.build_root, self.address_mapper)\n', (7251, 7289), False, 'from pants.base.cmd_line_spec_parser import CmdLineSpecParser\n'), ((7318, 7795), 're.compile', 're.compile', (['"""^--------------------\n.*\nException message: name \'a_is_bad\' is not defined\n while executing BUILD file BuildFile\\\\((/[^/]+)*/bad/a/BUILD, FileSystemProjectTree\\\\(.*\\\\)\\\\)\n Loading addresses from \'bad/a\' failed\\\\.\n.*\nException message: name \'b_is_bad\' is not defined\n while executing BUILD file BuildFile\\\\((/[^/]+)*T/bad/b/BUILD, FileSystemProjectTree\\\\(.*\\\\)\\\\)\n Loading addresses from \'bad/b\' failed\\\\.\nInvalid BUILD files for \\\\[::\\\\]$"""', 're.DOTALL'], {}), '(\n """^--------------------\n.*\nException message: name \'a_is_bad\' is not defined\n while executing BUILD file BuildFile\\\\((/[^/]+)*/bad/a/BUILD, FileSystemProjectTree\\\\(.*\\\\)\\\\)\n Loading addresses from \'bad/a\' failed\\\\.\n.*\nException message: name \'b_is_bad\' is not defined\n while executing BUILD file BuildFile\\\\((/[^/]+)*T/bad/b/BUILD, FileSystemProjectTree\\\\(.*\\\\)\\\\)\n Loading addresses from \'bad/b\' failed\\\\.\nInvalid BUILD files for \\\\[::\\\\]$"""\n , re.DOTALL)\n', (7328, 7795), False, 'import re\n'), ((3043, 3077), 'os.path.join', 'os.path.join', (['self.build_root', '"""a"""'], {}), "(self.build_root, 'a')\n", (3055, 3077), False, 'import os\n'), ((2640, 2674), 'os.path.join', 'os.path.join', (['self.build_root', '"""a"""'], {}), "(self.build_root, 'a')\n", (2652, 2674), False, 'import os\n'), ((2728, 2764), 'os.path.join', 'os.path.join', (['self.build_root', '"""a:a"""'], {}), "(self.build_root, 'a:a')\n", (2740, 2764), False, 'import os\n'), ((2818, 2853), 'os.path.join', 'os.path.join', (['self.build_root', '"""a:"""'], {}), "(self.build_root, 'a:')\n", (2830, 2853), False, 'import os\n'), ((2914, 2950), 'os.path.join', 'os.path.join', (['self.build_root', '"""a::"""'], {}), "(self.build_root, 'a::')\n", (2926, 2950), False, 'import os\n'), ((3740, 3785), 'os.path.join', 'os.path.join', (['self.build_root', '"""./a/../:root"""'], {}), "(self.build_root, './a/../:root')\n", (3752, 3785), False, 'import os\n'), ((3985, 4022), 'os.path.join', 'os.path.join', (['self.build_root', '"""./a/"""'], {}), "(self.build_root, './a/')\n", (3997, 4022), False, 'import os\n'), ((4207, 4248), 'os.path.join', 'os.path.join', (['self.build_root', '"""./a/b/:b"""'], {}), "(self.build_root, './a/b/:b')\n", (4219, 4248), False, 'import os\n'), ((5029, 5048), 'pants.build_graph.address.Address.parse', 'Address.parse', (['addr'], {}), '(addr)\n', (5042, 5048), False, 'from pants.build_graph.address import Address\n'), ((5326, 5345), 'pants.build_graph.address.Address.parse', 'Address.parse', (['addr'], {}), '(addr)\n', (5339, 5345), False, 'from pants.build_graph.address import Address\n'), ((5990, 6027), 'os.path.join', 'os.path.join', (['self.build_root', '"""some"""'], {}), "(self.build_root, 'some')\n", (6002, 6027), False, 'import os\n')]
|
import os
import json
from torchblocks.metrics import SequenceLabelingScore
from torchblocks.trainer import SequenceLabelingTrainer
from torchblocks.callback import TrainLogger
from torchblocks.processor import SequenceLabelingProcessor, InputExample
from torchblocks.utils import seed_everything, dict_to_text, build_argparse
from torchblocks.utils import prepare_device, get_checkpoints
from torchblocks.data import CNTokenizer
from torchblocks.data import Vocabulary, VOCAB_NAME
from torchblocks.models.nn.lstm_crf import LSTMCRF
from torchblocks.models.bases import TrainConfig
from torchblocks.models.bases import WEIGHTS_NAME
MODEL_CLASSES = {
'lstm-crf': (TrainConfig, LSTMCRF, CNTokenizer)
}
def build_vocab(data_dir, vocab_dir):
'''
构建vocab
'''
vocab = Vocabulary()
vocab_path = os.path.join(vocab_dir, VOCAB_NAME)
if os.path.exists(vocab_path):
vocab.load_vocab(str(vocab_path))
else:
files = ["train.json", "dev.json", "test.json"]
for file in files:
with open(os.path.join(data_dir, file), 'r') as fr:
for line in fr:
line = json.loads(line.strip())
text = line['text']
vocab.update(list(text))
vocab.build_vocab()
vocab.save_vocab(vocab_path)
print("vocab size: ", len(vocab))
class CluenerProcessor(SequenceLabelingProcessor):
def get_labels(self):
"""See base class."""
# 默认第一个为X
return ["X", "B-address", "B-book", "B-company", 'B-game', 'B-government', 'B-movie', 'B-name',
'B-organization', 'B-position', 'B-scene', "I-address",
"I-book", "I-company", 'I-game', 'I-government', 'I-movie', 'I-name',
'I-organization', 'I-position', 'I-scene',
"S-address", "S-book", "S-company", 'S-game', 'S-government', 'S-movie',
'S-name', 'S-organization', 'S-position',
'S-scene', 'O', "[START]", "[END]"]
def read_data(self, input_file):
"""Reads a json list file."""
lines = []
with open(input_file, 'r') as f:
for line in f:
line = json.loads(line.strip())
text = line['text']
label_entities = line.get('label', None)
labels = ['O'] * len(text)
if label_entities is not None:
for key, value in label_entities.items():
for sub_name, sub_index in value.items():
for start_index, end_index in sub_index:
assert text[start_index:end_index + 1] == sub_name
if start_index == end_index:
labels[start_index] = 'S-' + key
else:
labels[start_index] = 'B-' + key
labels[start_index + 1:end_index + 1] = ['I-' + key] * (len(sub_name) - 1)
lines.append({"text": text, "labels": labels})
return lines
def create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line['text']
labels = line['labels']
examples.append(InputExample(guid=guid, texts=[text_a, None], label_ids=labels))
return examples
def main():
parser = build_argparse()
parser.add_argument('--markup', type=str, default='bios', choices=['bios', 'bio'])
parser.add_argument('--use_crf', action='store_true', default=True)
args = parser.parse_args()
# output dir
if args.model_name is None:
args.model_name = args.model_path.split("/")[-1]
args.output_dir = args.output_dir + '{}'.format(args.model_name)
os.makedirs(args.output_dir, exist_ok=True)
# logging
prefix = "_".join([args.model_name, args.task_name])
logger = TrainLogger(log_dir=args.output_dir, prefix=prefix)
# device
logger.info("initializing device")
args.device, args.n_gpu = prepare_device(args.gpu, args.local_rank)
# build vocab
build_vocab(args.data_dir, vocab_dir=args.model_path)
seed_everything(args.seed)
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
# data processor
logger.info("initializing data processor")
tokenizer = tokenizer_class.from_pretrained(args.model_path, do_lower_case=args.do_lower_case)
processor = CluenerProcessor(data_dir=args.data_dir, tokenizer=tokenizer, prefix=prefix,add_special_tokens=False)
label_list = processor.get_labels()
num_labels = len(label_list)
id2label = {i: label for i, label in enumerate(label_list)}
args.id2label = id2label
args.num_labels = num_labels
# model
logger.info("initializing model and config")
config = config_class.from_pretrained(args.model_path, num_labels=num_labels,
cache_dir=args.cache_dir if args.cache_dir else None)
model = model_class.from_pretrained(args.model_path, config=config)
model.to(args.device)
# Trainer
logger.info("initializing traniner")
trainer = SequenceLabelingTrainer(args=args, logger=logger, collate_fn=processor.collate_fn,
input_keys=processor.get_input_keys(),
metrics=[SequenceLabelingScore(id2label, markup=args.markup)])
# do train
if args.do_train:
train_dataset = processor.create_dataset(args.train_max_seq_length, 'train.json', 'train', )
eval_dataset = processor.create_dataset(args.eval_max_seq_length, 'dev.json', 'dev')
trainer.train(model, train_dataset=train_dataset, eval_dataset=eval_dataset)
# do eval
if args.do_eval and args.local_rank in [-1, 0]:
results = {}
eval_dataset = processor.create_dataset(args.eval_max_seq_length, 'dev.json', 'dev')
checkpoints = [args.output_dir]
if args.eval_all_checkpoints or args.checkpoint_number > 0:
checkpoints = get_checkpoints(args.output_dir, args.checkpoint_number, WEIGHTS_NAME)
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("/")[-1].split("-")[-1]
model = model_class.from_pretrained(checkpoint, config=config)
model.to(args.device)
trainer.evaluate(model, eval_dataset, save_preds=True, prefix=str(global_step))
if global_step:
result = {"{}_{}".format(global_step, k): v for k, v in trainer.records['result'].items()}
results.update(result)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
dict_to_text(output_eval_file, results)
# do predict
if args.do_predict:
test_dataset = processor.create_dataset(args.eval_max_seq_length, 'test.json', 'test')
if args.checkpoint_number == 0:
raise ValueError("checkpoint number should > 0,but get %d", args.checkpoint_number)
checkpoints = get_checkpoints(args.output_dir, args.checkpoint_number, WEIGHTS_NAME)
for checkpoint in checkpoints:
global_step = checkpoint.split("/")[-1].split("-")[-1]
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
trainer.predict(model, test_dataset=test_dataset, prefix=str(global_step))
if __name__ == "__main__":
main()
|
[
"os.path.exists",
"torchblocks.data.Vocabulary",
"torchblocks.metrics.SequenceLabelingScore",
"os.makedirs",
"torchblocks.utils.dict_to_text",
"os.path.join",
"torchblocks.callback.TrainLogger",
"torchblocks.utils.seed_everything",
"torchblocks.utils.get_checkpoints",
"torchblocks.processor.InputExample",
"torchblocks.utils.prepare_device",
"torchblocks.utils.build_argparse"
] |
[((808, 820), 'torchblocks.data.Vocabulary', 'Vocabulary', ([], {}), '()\n', (818, 820), False, 'from torchblocks.data import Vocabulary, VOCAB_NAME\n'), ((839, 874), 'os.path.join', 'os.path.join', (['vocab_dir', 'VOCAB_NAME'], {}), '(vocab_dir, VOCAB_NAME)\n', (851, 874), False, 'import os\n'), ((883, 909), 'os.path.exists', 'os.path.exists', (['vocab_path'], {}), '(vocab_path)\n', (897, 909), False, 'import os\n'), ((3631, 3647), 'torchblocks.utils.build_argparse', 'build_argparse', ([], {}), '()\n', (3645, 3647), False, 'from torchblocks.utils import seed_everything, dict_to_text, build_argparse\n'), ((4027, 4070), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (4038, 4070), False, 'import os\n'), ((4160, 4211), 'torchblocks.callback.TrainLogger', 'TrainLogger', ([], {'log_dir': 'args.output_dir', 'prefix': 'prefix'}), '(log_dir=args.output_dir, prefix=prefix)\n', (4171, 4211), False, 'from torchblocks.callback import TrainLogger\n'), ((4299, 4340), 'torchblocks.utils.prepare_device', 'prepare_device', (['args.gpu', 'args.local_rank'], {}), '(args.gpu, args.local_rank)\n', (4313, 4340), False, 'from torchblocks.utils import prepare_device, get_checkpoints\n'), ((4424, 4450), 'torchblocks.utils.seed_everything', 'seed_everything', (['args.seed'], {}), '(args.seed)\n', (4439, 4450), False, 'from torchblocks.utils import seed_everything, dict_to_text, build_argparse\n'), ((7060, 7109), 'os.path.join', 'os.path.join', (['args.output_dir', '"""eval_results.txt"""'], {}), "(args.output_dir, 'eval_results.txt')\n", (7072, 7109), False, 'import os\n'), ((7119, 7158), 'torchblocks.utils.dict_to_text', 'dict_to_text', (['output_eval_file', 'results'], {}), '(output_eval_file, results)\n', (7131, 7158), False, 'from torchblocks.utils import seed_everything, dict_to_text, build_argparse\n'), ((7459, 7529), 'torchblocks.utils.get_checkpoints', 'get_checkpoints', (['args.output_dir', 'args.checkpoint_number', 'WEIGHTS_NAME'], {}), '(args.output_dir, args.checkpoint_number, WEIGHTS_NAME)\n', (7474, 7529), False, 'from torchblocks.utils import prepare_device, get_checkpoints\n'), ((6396, 6466), 'torchblocks.utils.get_checkpoints', 'get_checkpoints', (['args.output_dir', 'args.checkpoint_number', 'WEIGHTS_NAME'], {}), '(args.output_dir, args.checkpoint_number, WEIGHTS_NAME)\n', (6411, 6466), False, 'from torchblocks.utils import prepare_device, get_checkpoints\n'), ((3510, 3573), 'torchblocks.processor.InputExample', 'InputExample', ([], {'guid': 'guid', 'texts': '[text_a, None]', 'label_ids': 'labels'}), '(guid=guid, texts=[text_a, None], label_ids=labels)\n', (3522, 3573), False, 'from torchblocks.processor import SequenceLabelingProcessor, InputExample\n'), ((5700, 5751), 'torchblocks.metrics.SequenceLabelingScore', 'SequenceLabelingScore', (['id2label'], {'markup': 'args.markup'}), '(id2label, markup=args.markup)\n', (5721, 5751), False, 'from torchblocks.metrics import SequenceLabelingScore\n'), ((1073, 1101), 'os.path.join', 'os.path.join', (['data_dir', 'file'], {}), '(data_dir, file)\n', (1085, 1101), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 20 01:33:18 2019
@author: iqbalsublime
"""
from Customer import Customer
from Restaurent import Restaurent
from Reserve import Reserve
from Menu import Menu
from Order import Order
cust1= Customer(1,"Iqbal", "0167****671")
rest1= Restaurent(1,"Farmgate", "102 Kazi Nazrul Islam Ave, Dhaka")
reserve1=Reserve(1, "20-11-2019",cust1, rest1)
"""
print("******Reservation*******")
print("Reserve ID:{}, Date: {} Customer Name: {}, Mobile:{}, Branch: {}".format(reserve1.reserveid,
reserve1.date, reserve1.customer.name, reserve1.customer.mobile, reserve1.restaurent.bname))
#print(reserve1.description())
print("******Reservation*******")
"""
menu1= Menu(1,"Burger", 160,"Fast Food",4)
menu2= Menu(2,"Pizza", 560,"Fast Food",2)
menu3= Menu(3,"Biriani", 220,"Indian",1)
menu4= Menu(4,"Pitha", 50,"Bangla",5)
order1= Order(1,"20-11-2019", cust1)
order1.addMenu(menu1)
order1.addMenu(menu2)
order1.addMenu(menu3)
order1.addMenu(menu4)
print("******Invoice*******")
print("Order ID:{}, Date: {} Customer Name: {}, Mobile:{}".format(order1.oid,
order1.date, order1.Customer.name, order1.Customer.mobile))
totalBill=0.0
serial=1
print("SL---Food----Price---Qy----total")
for order in order1.menus:
print(serial,order.name, order.price, order.quantity, (order.price*order.quantity))
totalBill=totalBill+(order.price*order.quantity)
print("Grand Total :", totalBill)
print("******Invoice*******")
|
[
"Customer.Customer",
"Order.Order",
"Menu.Menu",
"Reserve.Reserve",
"Restaurent.Restaurent"
] |
[((240, 275), 'Customer.Customer', 'Customer', (['(1)', '"""Iqbal"""', '"""0167****671"""'], {}), "(1, 'Iqbal', '0167****671')\n", (248, 275), False, 'from Customer import Customer\n'), ((282, 343), 'Restaurent.Restaurent', 'Restaurent', (['(1)', '"""Farmgate"""', '"""102 Kazi Nazrul Islam Ave, Dhaka"""'], {}), "(1, 'Farmgate', '102 Kazi Nazrul Islam Ave, Dhaka')\n", (292, 343), False, 'from Restaurent import Restaurent\n'), ((352, 390), 'Reserve.Reserve', 'Reserve', (['(1)', '"""20-11-2019"""', 'cust1', 'rest1'], {}), "(1, '20-11-2019', cust1, rest1)\n", (359, 390), False, 'from Reserve import Reserve\n'), ((704, 742), 'Menu.Menu', 'Menu', (['(1)', '"""Burger"""', '(160)', '"""Fast Food"""', '(4)'], {}), "(1, 'Burger', 160, 'Fast Food', 4)\n", (708, 742), False, 'from Menu import Menu\n'), ((747, 784), 'Menu.Menu', 'Menu', (['(2)', '"""Pizza"""', '(560)', '"""Fast Food"""', '(2)'], {}), "(2, 'Pizza', 560, 'Fast Food', 2)\n", (751, 784), False, 'from Menu import Menu\n'), ((789, 825), 'Menu.Menu', 'Menu', (['(3)', '"""Biriani"""', '(220)', '"""Indian"""', '(1)'], {}), "(3, 'Biriani', 220, 'Indian', 1)\n", (793, 825), False, 'from Menu import Menu\n'), ((830, 863), 'Menu.Menu', 'Menu', (['(4)', '"""Pitha"""', '(50)', '"""Bangla"""', '(5)'], {}), "(4, 'Pitha', 50, 'Bangla', 5)\n", (834, 863), False, 'from Menu import Menu\n'), ((871, 900), 'Order.Order', 'Order', (['(1)', '"""20-11-2019"""', 'cust1'], {}), "(1, '20-11-2019', cust1)\n", (876, 900), False, 'from Order import Order\n')]
|
# -*- test-case-name: twisted.logger.test.test_io -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
File-like object that logs.
"""
import sys
from typing import AnyStr, Iterable, Optional
from constantly import NamedConstant
from incremental import Version
from twisted.python.deprecate import deprecatedProperty
from ._levels import LogLevel
from ._logger import Logger
class LoggingFile:
"""
File-like object that turns C{write()} calls into logging events.
Note that because event formats are L{str}, C{bytes} received via C{write()}
are converted to C{str}, which is the opposite of what C{file} does.
@ivar softspace: Attribute to make this class more file-like under Python 2;
value is zero or one. Do not use.
"""
_softspace = 0
@deprecatedProperty(Version("Twisted", 21, 2, 0))
def softspace(self):
return self._softspace
@softspace.setter # type: ignore[no-redef]
def softspace(self, value):
self._softspace = value
def __init__(
self,
logger: Logger,
level: NamedConstant = LogLevel.info,
encoding: Optional[str] = None,
) -> None:
"""
@param logger: the logger to log through.
@param level: the log level to emit events with.
@param encoding: The encoding to expect when receiving bytes via
C{write()}. If L{None}, use C{sys.getdefaultencoding()}.
"""
self.level = level
self.log = logger
if encoding is None:
self._encoding = sys.getdefaultencoding()
else:
self._encoding = encoding
self._buffer = ""
self._closed = False
@property
def closed(self) -> bool:
"""
Read-only property. Is the file closed?
@return: true if closed, otherwise false.
"""
return self._closed
@property
def encoding(self) -> str:
"""
Read-only property. File encoding.
@return: an encoding.
"""
return self._encoding
@property
def mode(self) -> str:
"""
Read-only property. File mode.
@return: "w"
"""
return "w"
@property
def newlines(self) -> None:
"""
Read-only property. Types of newlines encountered.
@return: L{None}
"""
return None
@property
def name(self) -> str:
"""
The name of this file; a repr-style string giving information about its
namespace.
@return: A file name.
"""
return "<{} {}#{}>".format(
self.__class__.__name__,
self.log.namespace,
self.level.name,
)
def close(self) -> None:
"""
Close this file so it can no longer be written to.
"""
self._closed = True
def flush(self) -> None:
"""
No-op; this file does not buffer.
"""
pass
def fileno(self) -> int:
"""
Returns an invalid file descriptor, since this is not backed by an FD.
@return: C{-1}
"""
return -1
def isatty(self) -> bool:
"""
A L{LoggingFile} is not a TTY.
@return: C{False}
"""
return False
def write(self, message: AnyStr) -> None:
"""
Log the given message.
@param message: The message to write.
"""
if self._closed:
raise ValueError("I/O operation on closed file")
if isinstance(message, bytes):
text = message.decode(self._encoding)
else:
text = message
lines = (self._buffer + text).split("\n")
self._buffer = lines[-1]
lines = lines[0:-1]
for line in lines:
self.log.emit(self.level, format="{log_io}", log_io=line)
def writelines(self, lines: Iterable[AnyStr]) -> None:
"""
Log each of the given lines as a separate message.
@param lines: Data to write.
"""
for line in lines:
self.write(line)
def _unsupported(self, *args: object) -> None:
"""
Template for unsupported operations.
@param args: Arguments.
"""
raise OSError("unsupported operation")
read = _unsupported
next = _unsupported
readline = _unsupported
readlines = _unsupported
xreadlines = _unsupported
seek = _unsupported
tell = _unsupported
truncate = _unsupported
|
[
"sys.getdefaultencoding",
"incremental.Version"
] |
[((838, 866), 'incremental.Version', 'Version', (['"""Twisted"""', '(21)', '(2)', '(0)'], {}), "('Twisted', 21, 2, 0)\n", (845, 866), False, 'from incremental import Version\n'), ((1581, 1605), 'sys.getdefaultencoding', 'sys.getdefaultencoding', ([], {}), '()\n', (1603, 1605), False, 'import sys\n')]
|
from dataclasses import dataclass
# from pprint import pprint
import aiohttp
import discord
from discord.ext import commands
from bot import constants
API_URL = "https://livescore6.p.rapidapi.com/matches/v2/"
LIVE_MATCHES_URL = API_URL + "list-live"
HEADERS = {
"x-rapidapi-key": constants.RAPIDAPI_KEY,
"x-rapidapi-host": constants.RAPIDAPI_LIVESCORE6_HOST,
}
@dataclass
class CricketMatch:
format: str
match_no: str
teams: tuple[str, str]
summary: str
scores: dict
status: str
_eid: str
class Cricket(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@staticmethod
def get_live_matches_list_embed(matches: list[CricketMatch]) -> discord.Embed:
embed = discord.Embed(title="Current Live Matches:", colour=discord.Colour.random())
for match in matches:
match_info = f"""\
{match.teams[0]}: {match.scores['T1I1']}
{match.teams[1]}: {match.scores['T2I1']}
"""
if "test" in match.format.lower():
match_info += f"""\
{match.teams[0]}: {match.scores['T1I2']}
{match.teams[1]}: {match.scores['T2I2']}
"""
match_info += f"""\
{match.summary}
{match.status}
"""
embed.add_field(
name="{} vs {}: {}".format(*match.teams, match.match_no or match.format), value=match_info, inline=False
)
return embed
@commands.command()
async def live_scores(self, ctx: commands.Context) -> None:
"""Sends information about ongoing cricket matches."""
querystring = {"Category": "cricket"}
async with aiohttp.ClientSession() as session:
async with session.get(
LIVE_MATCHES_URL, headers=HEADERS, params=querystring
) as response:
response = await response.json()
# pprint(response)
if not response:
await ctx.send("No matches in progress currently!")
return
matches = [
CricketMatch(
format=match["EtTx"],
teams=(
match["T1"][0]["Nm"],
match["T2"][0]["Nm"],
),
summary=match["ECo"],
_eid=match["Eid"],
status=match["EpsL"],
scores={
"T1I1": f"{match.get('Tr1C1', '-')}/"
f"{match.get('Tr1CW1', '-')} "
f"({match.get('Tr1CO1', '-')})",
"T2I1": f"{match.get('Tr2C1', '-')}/"
f"{match.get('Tr2CW1', '-')} "
f"({match.get('Tr2CO1', '-')})",
"T1I2": f"{match.get('Tr1C2', '-')}/"
f"{match.get('Tr1CW2', '-')} "
f"({match.get('Tr1CO2', '-')})",
"T2I2": f"{match.get('Tr2C2', '-')}/"
f"{match.get('Tr2CW2', '-')} "
f"({match.get('Tr2CO2', '-')})",
},
match_no=match.get("ErnInf", ""),
)
for match in map(lambda m: m["Events"][0], response["Stages"])
]
await ctx.send(embed=self.get_live_matches_list_embed(matches))
def setup(bot: commands.Bot):
"""Add Cricket Cog."""
bot.add_cog(Cricket(bot))
|
[
"aiohttp.ClientSession",
"discord.ext.commands.command",
"discord.Colour.random"
] |
[((1400, 1418), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (1416, 1418), False, 'from discord.ext import commands\n'), ((1612, 1635), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (1633, 1635), False, 'import aiohttp\n'), ((798, 821), 'discord.Colour.random', 'discord.Colour.random', ([], {}), '()\n', (819, 821), False, 'import discord\n')]
|
import os
import os.path
from datetime import datetime
import time
from stat import *
import pathlib
import json
def generateFileManifest(filename, manifest_filename=None):
string = ""
data = {}
if os.path.isfile(filename):
f = pathlib.Path(filename)
data[os.path.abspath(filename)] = {
'ctime': [str(f.stat().st_ctime), str(datetime.fromtimestamp(f.stat().st_ctime))],
'mtime':[str(f.stat().st_mtime), str(datetime.fromtimestamp(f.stat().st_mtime))]
}
json_data = json.dumps(data)
if manifest_filename != None:
with open(manifest_filename, "w+") as manifest_file:
manifest_file.write(json_data)
else:
print ("skipping bad filename: {}".format(filename))
return data
|
[
"os.path.isfile",
"json.dumps",
"os.path.abspath",
"pathlib.Path"
] |
[((212, 236), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (226, 236), False, 'import os\n'), ((250, 272), 'pathlib.Path', 'pathlib.Path', (['filename'], {}), '(filename)\n', (262, 272), False, 'import pathlib\n'), ((565, 581), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (575, 581), False, 'import json\n'), ((287, 312), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (302, 312), False, 'import os\n')]
|
import unittest
import os
from six import StringIO
from package_manager import util
CHECKSUM_TXT = "1915adb697103d42655711e7b00a7dbe398a33d7719d6370c01001273010d069"
DEBIAN_JESSIE_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
VERSION_ID="8"
VERSION="Debian GNU/Linux 8 (jessie)"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
DEBIAN_STRETCH_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
VERSION_ID="9"
VERSION="Debian GNU/Linux 9 (stretch)"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
DEBIAN_BUSTER_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
VERSION_ID="10"
VERSION="Debian GNU/Linux 10 (buster)"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
# VERSION and VERSION_ID aren't set on unknown distros
DEBIAN_UNKNOWN_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
osReleaseForDistro = {
"jessie": DEBIAN_JESSIE_OS_RELEASE,
"stretch": DEBIAN_STRETCH_OS_RELEASE,
"buster": DEBIAN_BUSTER_OS_RELEASE,
"???": DEBIAN_UNKNOWN_OS_RELEASE,
}
class TestUtil(unittest.TestCase):
def test_sha256(self):
current_dir = os.path.dirname(__file__)
filename = os.path.join(current_dir, 'testdata', 'checksum.txt')
actual = util.sha256_checksum(filename)
self.assertEqual(CHECKSUM_TXT, actual)
def test_generate_debian_os_release(self):
for distro, expected_output in osReleaseForDistro.items():
output_file = StringIO()
util.generate_os_release(distro, output_file)
self.assertEqual(expected_output, output_file.getvalue())
if __name__ == '__main__':
unittest.main()
|
[
"os.path.join",
"package_manager.util.sha256_checksum",
"os.path.dirname",
"package_manager.util.generate_os_release",
"six.StringIO",
"unittest.main"
] |
[((2461, 2476), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2474, 2476), False, 'import unittest\n'), ((1955, 1980), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1970, 1980), False, 'import os\n'), ((2000, 2053), 'os.path.join', 'os.path.join', (['current_dir', '"""testdata"""', '"""checksum.txt"""'], {}), "(current_dir, 'testdata', 'checksum.txt')\n", (2012, 2053), False, 'import os\n'), ((2071, 2101), 'package_manager.util.sha256_checksum', 'util.sha256_checksum', (['filename'], {}), '(filename)\n', (2091, 2101), False, 'from package_manager import util\n'), ((2290, 2300), 'six.StringIO', 'StringIO', ([], {}), '()\n', (2298, 2300), False, 'from six import StringIO\n'), ((2313, 2358), 'package_manager.util.generate_os_release', 'util.generate_os_release', (['distro', 'output_file'], {}), '(distro, output_file)\n', (2337, 2358), False, 'from package_manager import util\n')]
|
# Generated by Django 2.2.1 on 2022-02-25 15:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mutational_landscape', '0002_auto_20180117_1457'),
]
operations = [
migrations.RemoveField(
model_name='diseasemutations',
name='protein',
),
migrations.RemoveField(
model_name='diseasemutations',
name='residue',
),
migrations.DeleteModel(
name='CancerMutations',
),
migrations.DeleteModel(
name='DiseaseMutations',
),
]
|
[
"django.db.migrations.DeleteModel",
"django.db.migrations.RemoveField"
] |
[((240, 309), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""diseasemutations"""', 'name': '"""protein"""'}), "(model_name='diseasemutations', name='protein')\n", (262, 309), False, 'from django.db import migrations\n'), ((354, 423), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""diseasemutations"""', 'name': '"""residue"""'}), "(model_name='diseasemutations', name='residue')\n", (376, 423), False, 'from django.db import migrations\n'), ((468, 514), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""CancerMutations"""'}), "(name='CancerMutations')\n", (490, 514), False, 'from django.db import migrations\n'), ((547, 594), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""DiseaseMutations"""'}), "(name='DiseaseMutations')\n", (569, 594), False, 'from django.db import migrations\n')]
|
# Apache License Version 2.0
#
# Copyright (c) 2021., Redis Labs
# All rights reserved.
#
# This attribute is the only one place that the version number is written down,
# so there is only one place to change it when the version number changes.
import pkg_resources
PKG_NAME = "redis-benchmarks-specification"
try:
__version__ = pkg_resources.get_distribution(PKG_NAME).version
except (pkg_resources.DistributionNotFound, AttributeError):
__version__ = "99.99.99" # like redis
|
[
"pkg_resources.get_distribution"
] |
[((338, 378), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['PKG_NAME'], {}), '(PKG_NAME)\n', (368, 378), False, 'import pkg_resources\n')]
|
import requests
from utils import loginFile, dataAnalysis
import os
import datetime
from dateutil.relativedelta import relativedelta
import json
from utils.logCls import Logger
dirpath = os.path.dirname(__file__)
cookieFile = f"{dirpath}/utils/cookies.txt"
dataFile = f"{dirpath}/datas"
class DevopsProject:
def __init__(self, logFileName):
# 初始化搜索起始与截止时间
self.endDate = datetime.datetime.today().date()
self.startDate = self.endDate - relativedelta(months=+1)
# log日志
self.logger = Logger("[告警信息通报({}-{})]".format(self.startDate, self.endDate), logFileName)
def _load_cookies(self):
print("----------_load_cookies----------")
# 加载cookie
if not os.path.exists(cookieFile):
return False
# 3、判断cookies是否过期
try:
with open(cookieFile, "r")as f:
cookies = f.read()
if self.login_check(cookies):
return cookies
else:
return
except Exception as e:
print(e.args)
os.remove(cookieFile)
self.logger.get_log().debug("[cookies过期]")
return False
def login_check(self, cookies):
# cookie验证是否有效
self.logger.get_log().debug("[正在验证cookie]")
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh-TW;q=0.9,zh;q=0.8,en-US;q=0.7,en;q=0.6',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Cookie': cookies,
'Host': 'xt.devops123.net',
'Referer': 'http://xt.devops123.net/Welcome/login/',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'
}
checkUrl = "http://xt.devops123.net/portal/substation_list/991"
response = requests.get(checkUrl, headers=headers)
if response.status_code == 200:
if "管理面板" in response.text:
self.logger.get_log().debug("[加载cookie成功]")
return True
else:
self.logger.get_log().debug("[加载失败, 正在进行登录]")
return False
raise response.raise_for_status()
def login(self):
# 登录
cookies = self._load_cookies()
if cookies:
return cookies
cookies = loginFile.loginDevops().login()
return cookies
def getReportData(self, cookies):
self.logger.get_log().debug("[正在搜索告警信息]")
self.searchTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# 搜索告警信息
downloadUrl = "http://xt.devops123.net/alarm?selCity=&selCounty=0&selSubstation=&selRoom=&level=1&selDevModel=&selStatus%5B%5D=unresolved&reportDate={}%E8%87%B3{}&selSignalName=&substationType%5B%5D=A%E7%BA%A7%E5%B1%80%E7%AB%99&substationType%5B%5D=B%E7%BA%A7%E5%B1%80%E7%AB%99&substationType%5B%5D=C%E7%BA%A7%E5%B1%80%E7%AB%99&substationType%5B%5D=D%E7%BA%A7%E5%B1%80%E7%AB%99&substationType%5B%5D=D1%E7%BA%A7%E5%B1%80%E7%AB%99&word=&export=exporttoexcel"
headers = {
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Referer': 'http://xt.devops123.net/alarm?level=1',
'Cookie': cookies,
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh-TW;q=0.9,zh;q=0.8,en-US;q=0.7,en;q=0.6'
}
response = requests.get(downloadUrl.format(str(self.startDate), str(self.endDate)), headers=headers)
return response.text
def getDingDingInfo(self, cityName):
# 加载钉钉机器人信息
with open("utils/dingdingRobotInfo.json", "r", encoding="utf-8")as f:
robotInfo = json.loads(f.read())
if cityName in list(robotInfo.keys()):
SECRET = robotInfo.get(cityName)[0]
WEBHOOK = robotInfo.get(cityName)[1]
return SECRET, WEBHOOK
else:
self.logger.get_log().debug("[没有该{}对应的钉钉信息,请检查dingdingRobotInfo.json文件]".format(cityName))
return
def detail_data(self, dataList, monitorInfo, warn5=False, byhour=False):
if warn5:
for data in dataList:
k, group = data
SECRET, WEBHOOK = self.getDingDingInfo(k)
htmlPath = dataAnalysis.data2html(k, group, dataFile, k2="超过5天告警信息汇总")
imgFile = dataAnalysis.html2image(htmlPath)
imgUrl = dataAnalysis.img2url(imgFile)
sendTitle = f"{k}-{'超过5天告警信息汇总'}\n\n- 数据提取时间:{self.searchTime}\n- 上报时间段:\t{self.startDate}至{self.endDate} \n"
sendText = sendTitle + "\n".join(
[f"- {k}:\t{v}条" for k, v in group.groupby("信号名称")["信号名称"].count().sort_values(ascending=False).to_dict().items()])
yield k, SECRET, WEBHOOK, imgUrl, sendText
else:
for data in dataList:
k, group = data
if byhour:
group = group.loc[group["信号名称"].isin(monitorInfo)]
SECRET, WEBHOOK = self.getDingDingInfo(k)
htmlPath = dataAnalysis.data2html(k, group, dataFile)
imgFile = dataAnalysis.html2image(htmlPath)
imgUrl = dataAnalysis.img2url(imgFile)
sendText = "\n".join([f"- {k}:\t{v}条" for k, v in group.groupby("区域")["区域"].count().to_dict().items()])
yield k, SECRET, WEBHOOK, imgUrl, sendText
def reportTotal(self, totalInfo, monitorInfo):
self.logger.get_log().debug("正在汇总信息...")
cityNames = ["乌鲁木齐", "昌吉", "吐鲁番", "奎屯", "博州", "哈密", "塔城", "阿勒泰", "伊犁", "巴州",
"和田", "阿克苏", "石河子", "喀什", "克州", "克拉玛依"]
totalSendTextByCity = {}
summaryInfo = dataAnalysis.dataSummary(totalInfo)
for city in cityNames:
summaryText = "\n".join([f"- {k} : {v}条" for k, v in summaryInfo.get(city, {}).items() if k in monitorInfo])
if summaryText:
totalSendText = f"{self.startDate}至{self.endDate}\n- #告警消息汇总#\n- 数据提取时间:{self.searchTime}\n- #按照信号名称汇总如下#\n" + summaryText
else:
totalSendText = f"{self.startDate}至{self.endDate}\n- 数据提取时间:{self.searchTime}\n" + "无告警信息."
totalSendTextByCity[city] = totalSendText
return totalSendTextByCity
def monitorByHour(self):
try:
monitorInfo = ["通信状态", "烟感", "温度", "交流输入停电警告", "交流输入停电告警", "蓄电池组总电压过低", "水浸", "电池熔丝故障告警", "蓄电池总电压过高"]
self.logger.get_log().debug("[正在登录]")
new_cookie = self.login()
# 获取excel的xml
self.logger.get_log().debug("[进入【温度】【交流输入停电告警】【蓄电池组总电压过低】监控...(监控频率:每小时一次)]")
xmlData = self.getReportData(new_cookie)
# 分析xml
if dataAnalysis.parseData(xmlData, dataFile):
totalInfo, warn5days, dataList = dataAnalysis.parseData(xmlData, dataFile, byhour=True)
totalSendTextByCity = self.reportTotal(totalInfo, monitorInfo)
self.logger.get_log().debug("[发送告警信息]")
for k, SECRET, WEBHOOK, imgUrl, sendText in self.detail_data(dataList, monitorInfo, byhour=True):
totalSendText = totalSendTextByCity.get(k)
if "无告警信息" in totalSendText:
dataAnalysis.sendMessage(SECRET, WEBHOOK, totalSendText, imgUrl="")
self.logger.get_log().debug(totalSendText)
else:
sendTextTotal = f"{totalSendText}\n{'- #按照县汇总如下#'}\n{sendText}"
dataAnalysis.sendMessage(SECRET, WEBHOOK, sendTextTotal, imgUrl)
self.logger.get_log().debug(sendTextTotal)
self.logger.get_log().debug("[告警信息发送结束]")
dataAnalysis.clearDir(dataFile)
except Exception as e:
self.logger.get_log().debug(e.args)
def monitorByDay(self):
try:
self.logger.get_log().debug("[进入【通信状态】【烟感】【水浸】【电池熔丝故障告警】【蓄电池总电压过高】【手动控制状态】【启动电池电压低】监控...(监控频率:每天一次)]")
monitorInfo = ["通信状态", "烟感", "水浸", "电池熔丝故障告警", "蓄电池总电压过高", "手动控制状态", "启动电池电压低", "交流输入停电警告", "交流输入停电告警", "温度",
"蓄电池组总电压过低"]
new_cookie = self.login()
# 获取excel的xml
xmlData = self.getReportData(new_cookie)
# 分析xml
if dataAnalysis.parseData(xmlData, dataFile):
totalInfo, warn5days, dataList = dataAnalysis.parseData(xmlData, dataFile)
totalSendTextByCity = self.reportTotal(totalInfo, monitorInfo)
self.logger.get_log().debug("[汇总告警时间超过5天的信息]")
for k, SECRET, WEBHOOK, imgUrl, sendText in self.detail_data(warn5days, monitorInfo, warn5=True):
self.logger.get_log().debug(sendText)
dataAnalysis.sendMessage(SECRET, WEBHOOK, sendText, imgUrl)
self.logger.get_log().debug("[汇总告警信息]")
for k1, SECRET, WEBHOOK, imgUrl, sendText in self.detail_data(dataList, monitorInfo):
totalSendText = totalSendTextByCity.get(k1)
if "无告警信息" in totalSendText:
dataAnalysis.sendMessage(SECRET, WEBHOOK, totalSendText, imgUrl="")
self.logger.get_log().debug(totalSendText)
else:
sendTextTotal = f"{totalSendText}\n{'- #按照县汇总如下#'}\n{sendText}"
self.logger.get_log().debug(sendTextTotal)
dataAnalysis.sendMessage(SECRET, WEBHOOK, sendTextTotal, imgUrl)
self.logger.get_log().debug("告警信息发送结束")
except Exception as e:
self.logger.get_log().debug(e.args)
def main(self):
# 主函数
self.monitorByDay()
# self.monitorByHour()
if __name__ == '__main__':
demo = DevopsProject("test")
demo.main()
|
[
"os.path.exists",
"dateutil.relativedelta.relativedelta",
"utils.dataAnalysis.sendMessage",
"utils.dataAnalysis.dataSummary",
"requests.get",
"os.path.dirname",
"datetime.datetime.now",
"utils.dataAnalysis.parseData",
"utils.loginFile.loginDevops",
"utils.dataAnalysis.data2html",
"utils.dataAnalysis.img2url",
"datetime.datetime.today",
"utils.dataAnalysis.html2image",
"utils.dataAnalysis.clearDir",
"os.remove"
] |
[((188, 213), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (203, 213), False, 'import os\n'), ((2102, 2141), 'requests.get', 'requests.get', (['checkUrl'], {'headers': 'headers'}), '(checkUrl, headers=headers)\n', (2114, 2141), False, 'import requests\n'), ((6279, 6314), 'utils.dataAnalysis.dataSummary', 'dataAnalysis.dataSummary', (['totalInfo'], {}), '(totalInfo)\n', (6303, 6314), False, 'from utils import loginFile, dataAnalysis\n'), ((469, 493), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(+1)'}), '(months=+1)\n', (482, 493), False, 'from dateutil.relativedelta import relativedelta\n'), ((723, 749), 'os.path.exists', 'os.path.exists', (['cookieFile'], {}), '(cookieFile)\n', (737, 749), False, 'import os\n'), ((7301, 7342), 'utils.dataAnalysis.parseData', 'dataAnalysis.parseData', (['xmlData', 'dataFile'], {}), '(xmlData, dataFile)\n', (7323, 7342), False, 'from utils import loginFile, dataAnalysis\n'), ((8305, 8336), 'utils.dataAnalysis.clearDir', 'dataAnalysis.clearDir', (['dataFile'], {}), '(dataFile)\n', (8326, 8336), False, 'from utils import loginFile, dataAnalysis\n'), ((8888, 8929), 'utils.dataAnalysis.parseData', 'dataAnalysis.parseData', (['xmlData', 'dataFile'], {}), '(xmlData, dataFile)\n', (8910, 8929), False, 'from utils import loginFile, dataAnalysis\n'), ((396, 421), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (419, 421), False, 'import datetime\n'), ((1078, 1099), 'os.remove', 'os.remove', (['cookieFile'], {}), '(cookieFile)\n', (1087, 1099), False, 'import os\n'), ((2602, 2625), 'utils.loginFile.loginDevops', 'loginFile.loginDevops', ([], {}), '()\n', (2623, 2625), False, 'from utils import loginFile, dataAnalysis\n'), ((2773, 2796), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2794, 2796), False, 'import datetime\n'), ((4830, 4889), 'utils.dataAnalysis.data2html', 'dataAnalysis.data2html', (['k', 'group', 'dataFile'], {'k2': '"""超过5天告警信息汇总"""'}), "(k, group, dataFile, k2='超过5天告警信息汇总')\n", (4852, 4889), False, 'from utils import loginFile, dataAnalysis\n'), ((4916, 4949), 'utils.dataAnalysis.html2image', 'dataAnalysis.html2image', (['htmlPath'], {}), '(htmlPath)\n', (4939, 4949), False, 'from utils import loginFile, dataAnalysis\n'), ((4975, 5004), 'utils.dataAnalysis.img2url', 'dataAnalysis.img2url', (['imgFile'], {}), '(imgFile)\n', (4995, 5004), False, 'from utils import loginFile, dataAnalysis\n'), ((5640, 5682), 'utils.dataAnalysis.data2html', 'dataAnalysis.data2html', (['k', 'group', 'dataFile'], {}), '(k, group, dataFile)\n', (5662, 5682), False, 'from utils import loginFile, dataAnalysis\n'), ((5709, 5742), 'utils.dataAnalysis.html2image', 'dataAnalysis.html2image', (['htmlPath'], {}), '(htmlPath)\n', (5732, 5742), False, 'from utils import loginFile, dataAnalysis\n'), ((5768, 5797), 'utils.dataAnalysis.img2url', 'dataAnalysis.img2url', (['imgFile'], {}), '(imgFile)\n', (5788, 5797), False, 'from utils import loginFile, dataAnalysis\n'), ((7393, 7447), 'utils.dataAnalysis.parseData', 'dataAnalysis.parseData', (['xmlData', 'dataFile'], {'byhour': '(True)'}), '(xmlData, dataFile, byhour=True)\n', (7415, 7447), False, 'from utils import loginFile, dataAnalysis\n'), ((8980, 9021), 'utils.dataAnalysis.parseData', 'dataAnalysis.parseData', (['xmlData', 'dataFile'], {}), '(xmlData, dataFile)\n', (9002, 9021), False, 'from utils import loginFile, dataAnalysis\n'), ((9357, 9416), 'utils.dataAnalysis.sendMessage', 'dataAnalysis.sendMessage', (['SECRET', 'WEBHOOK', 'sendText', 'imgUrl'], {}), '(SECRET, WEBHOOK, sendText, imgUrl)\n', (9381, 9416), False, 'from utils import loginFile, dataAnalysis\n'), ((7833, 7900), 'utils.dataAnalysis.sendMessage', 'dataAnalysis.sendMessage', (['SECRET', 'WEBHOOK', 'totalSendText'], {'imgUrl': '""""""'}), "(SECRET, WEBHOOK, totalSendText, imgUrl='')\n", (7857, 7900), False, 'from utils import loginFile, dataAnalysis\n'), ((8106, 8170), 'utils.dataAnalysis.sendMessage', 'dataAnalysis.sendMessage', (['SECRET', 'WEBHOOK', 'sendTextTotal', 'imgUrl'], {}), '(SECRET, WEBHOOK, sendTextTotal, imgUrl)\n', (8130, 8170), False, 'from utils import loginFile, dataAnalysis\n'), ((9713, 9780), 'utils.dataAnalysis.sendMessage', 'dataAnalysis.sendMessage', (['SECRET', 'WEBHOOK', 'totalSendText'], {'imgUrl': '""""""'}), "(SECRET, WEBHOOK, totalSendText, imgUrl='')\n", (9737, 9780), False, 'from utils import loginFile, dataAnalysis\n'), ((10053, 10117), 'utils.dataAnalysis.sendMessage', 'dataAnalysis.sendMessage', (['SECRET', 'WEBHOOK', 'sendTextTotal', 'imgUrl'], {}), '(SECRET, WEBHOOK, sendTextTotal, imgUrl)\n', (10077, 10117), False, 'from utils import loginFile, dataAnalysis\n')]
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright (c) 2018 - huwei <<EMAIL>>
"""
This is a python script for the ezalor tools which is used to io monitor.
You can use the script to open or off the switch, or point the package name which you want to monitor it only.
The core function is to export data what ezalor is record.
"""
import os
import re
import sys, getopt
import sqlite3
import subprocess
import xlsxwriter as xw
from markhelper import MarkHelper
from record import Record
from style import Style
from datetime import datetime
DB_NAME_REG = "^ezalor_{0}(.*).db$"
tableheaders = ["path", "process", "thread", "processId", "threadId",
"readCount", "readBytes", "readTime", "writeCount", "writeBytes", "writeTime", "stacktrace",
"openTime", "closeTime", "mark"]
envDir = "/sdcard/ezalor/"
AUTOCOLUMN_WIDTH_INDEXS = [0, 1, 2, 12, 13, 14]
def print_help_and_exit():
print("\n"
"This is a python script for the ezalor tools which is used to io monitor.You can use the script to open or\n"
"off the switch, or point the package name which you want to monitor it only.The core function is to export\n"
"data what ezalor is record.\n"
"\n"
"Usage : ezalor [Options] [Args]\n"
"\n"
" Options:\n"
" -h, --help :Print the message and exit\n"
" -e, --export [packageName] [exportPath] :export a html to the path\n"
"\n"
" Examples:\n"
" ezalor -e com.wellerv.ezalor.sample export excel\n"
)
sys.exit(0)
def write_to_file(path, content):
if ("." == path):
htmlPath = "export.html"
else:
htmlPath = path + "export.html"
fo = open(htmlPath, "w")
fo.write(content)
fo.close()
return htmlPath
def export(packageName, path):
print("export to path:" + path + " begin.")
workbook = xw.Workbook(path)
# style
style = Style(workbook)
worksheet = workbook.add_worksheet("ioHistory")
# get process by packageName
processes = get_process_by_package(packageName)
# init column_max_width_array
column_max_width_array = [0] * len(AUTOCOLUMN_WIDTH_INDEXS)
# loop create table group by process
row = 0
for process in processes:
row = create_table(worksheet, style, process, row, get_data_by_process(packageName, process),
column_max_width_array)
# auto fit column width
auto_fit_column_width(worksheet, column_max_width_array)
workbook.close()
print("\nexport successful:" + path)
def auto_fit_column_width(worksheet, column_max_width_array):
# set column width
for j in range(len(column_max_width_array)):
worksheet.set_column(AUTOCOLUMN_WIDTH_INDEXS[j], AUTOCOLUMN_WIDTH_INDEXS[j], column_max_width_array[j])
def get_data_by_process(packageName, process):
# pull db file from mobile
os.system("adb pull /sdcard/ezalor/" + packageName + "/ezalor_" + process + ".db ezalor.db")
# fetch data from db file
cursor = get_cursor("ezalor.db")
cursor.execute("select * from iohistory")
results = cursor.fetchall()
# clear db file
os.remove("ezalor.db")
return results
def create_table(worksheet, style, process, row, data, column_max_width_array):
# write a title of table
worksheet.set_row(row, 24)
worksheet.merge_range(row, 0, row, 14, process + " ioHistory", style.title)
row += 1
# write headers of table
for index, item in enumerate(tableheaders):
worksheet.write(row, index, tableheaders[index], style.table_headers)
row += 1
for recordFieldValues in data:
# fill the mark
record = Record(recordFieldValues)
mark = MarkHelper.get_io_mark(record, style)
for column, columnValue in enumerate(recordFieldValues):
value = get_value(column, recordFieldValues)
worksheet.write(row, column, value, mark.style)
# get max width
if (column in AUTOCOLUMN_WIDTH_INDEXS):
i = AUTOCOLUMN_WIDTH_INDEXS.index(column)
column_max_width_array[i] = max(column_max_width_array[i], len(value))
# write mark
column += 1
if (column in AUTOCOLUMN_WIDTH_INDEXS):
i = AUTOCOLUMN_WIDTH_INDEXS.index(column)
column_max_width_array[i] = max(column_max_width_array[i], len(mark.message))
worksheet.write(row, column, mark.message, mark.style)
row += 1
return row
def get_value(column, record):
if column == 13 or column == 12:
java_timestamp = record[column]
return datetime.fromtimestamp(java_timestamp / 1000.0).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
return record[column]
def get_process_by_package(packageName):
# exec adb shell ls
dbDir = envDir + packageName
results = subprocess.getstatusoutput("adb shell ls " + dbDir)
# get db fileName by reg
files = []
if (results[0] == 0):
for file in results[1].split("\n"):
print(file)
if (re.match(DB_NAME_REG.format(packageName), file)):
files.append(re.findall(r"ezalor_(.+?).db", file)[0])
return files
# os.system("rm " + path + "ezalor.db")
def get_cursor(dbpath):
conn = sqlite3.connect(dbpath)
return conn.cursor()
def main(argv):
try:
opts, args = getopt.getopt(argv, "hs:e:", ["help", "switch", "export"])
except getopt.GetoptError:
print_help_and_exit()
if len(opts) == 0:
print_help_and_exit()
return
for opt, arg in opts:
if opt in ("-h", "--help"):
print_help_and_exit()
elif opt in ("-e", "--export"):
if (len(arg) == 0):
print_help_and_exit()
packageName = arg
filename = packageName + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ".xlsx"
outPath = filename if len(args) == 0 \
else args[0] + "/" + filename
export(packageName, outPath)
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"record.Record",
"markhelper.MarkHelper.get_io_mark",
"getopt.getopt",
"datetime.datetime.fromtimestamp",
"sqlite3.connect",
"subprocess.getstatusoutput",
"style.Style",
"datetime.datetime.now",
"sys.exit",
"os.system",
"re.findall",
"xlsxwriter.Workbook",
"os.remove"
] |
[((1644, 1655), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1652, 1655), False, 'import sys, getopt\n'), ((1982, 1999), 'xlsxwriter.Workbook', 'xw.Workbook', (['path'], {}), '(path)\n', (1993, 1999), True, 'import xlsxwriter as xw\n'), ((2024, 2039), 'style.Style', 'Style', (['workbook'], {}), '(workbook)\n', (2029, 2039), False, 'from style import Style\n'), ((2998, 3094), 'os.system', 'os.system', (["('adb pull /sdcard/ezalor/' + packageName + '/ezalor_' + process +\n '.db ezalor.db')"], {}), "('adb pull /sdcard/ezalor/' + packageName + '/ezalor_' + process +\n '.db ezalor.db')\n", (3007, 3094), False, 'import os\n'), ((3260, 3282), 'os.remove', 'os.remove', (['"""ezalor.db"""'], {}), "('ezalor.db')\n", (3269, 3282), False, 'import os\n'), ((4949, 5000), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (["('adb shell ls ' + dbDir)"], {}), "('adb shell ls ' + dbDir)\n", (4975, 5000), False, 'import subprocess\n'), ((5371, 5394), 'sqlite3.connect', 'sqlite3.connect', (['dbpath'], {}), '(dbpath)\n', (5386, 5394), False, 'import sqlite3\n'), ((3782, 3807), 'record.Record', 'Record', (['recordFieldValues'], {}), '(recordFieldValues)\n', (3788, 3807), False, 'from record import Record\n'), ((3823, 3860), 'markhelper.MarkHelper.get_io_mark', 'MarkHelper.get_io_mark', (['record', 'style'], {}), '(record, style)\n', (3845, 3860), False, 'from markhelper import MarkHelper\n'), ((5468, 5526), 'getopt.getopt', 'getopt.getopt', (['argv', '"""hs:e:"""', "['help', 'switch', 'export']"], {}), "(argv, 'hs:e:', ['help', 'switch', 'export'])\n", (5481, 5526), False, 'import sys, getopt\n'), ((4723, 4770), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(java_timestamp / 1000.0)'], {}), '(java_timestamp / 1000.0)\n', (4745, 4770), False, 'from datetime import datetime\n'), ((5234, 5269), 're.findall', 're.findall', (['"""ezalor_(.+?).db"""', 'file'], {}), "('ezalor_(.+?).db', file)\n", (5244, 5269), False, 'import re\n'), ((5931, 5945), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5943, 5945), False, 'from datetime import datetime\n')]
|
import pytest
from moto import mock_ec2
from moto.ec2.models import AMIS
from aec.command.ami import delete, describe, share
@pytest.fixture
def mock_aws_config():
mock = mock_ec2()
mock.start()
return {
"region": "ap-southeast-2",
}
def test_describe_images(mock_aws_config):
# describe images defined by moto
# see https://github.com/spulec/moto/blob/master/moto/ec2/resources/amis.json
canonical_account_id = "099720109477"
mock_aws_config["describe_images_owners"] = canonical_account_id
images = describe(config=mock_aws_config)
assert len(images) == 2
assert images[0]["Name"] == "ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-amd64-server-20170727"
assert images[1]["Name"] == "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20170721"
def test_describe_images_name_match(mock_aws_config):
# describe images defined by moto
# see https://github.com/spulec/moto/blob/master/moto/ec2/resources/amis.json
canonical_account_id = "099720109477"
mock_aws_config["describe_images_owners"] = canonical_account_id
images = describe(config=mock_aws_config, name_match="*trusty*")
assert len(images) == 1
assert images[0]["Name"] == "ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-amd64-server-20170727"
def test_delete_image(mock_aws_config):
delete(mock_aws_config, AMIS[0]["ami_id"])
def test_share_image(mock_aws_config):
share(mock_aws_config, AMIS[0]["ami_id"], "123456789012")
|
[
"moto.mock_ec2",
"aec.command.ami.describe",
"aec.command.ami.delete",
"aec.command.ami.share"
] |
[((178, 188), 'moto.mock_ec2', 'mock_ec2', ([], {}), '()\n', (186, 188), False, 'from moto import mock_ec2\n'), ((551, 583), 'aec.command.ami.describe', 'describe', ([], {'config': 'mock_aws_config'}), '(config=mock_aws_config)\n', (559, 583), False, 'from aec.command.ami import delete, describe, share\n'), ((1109, 1164), 'aec.command.ami.describe', 'describe', ([], {'config': 'mock_aws_config', 'name_match': '"""*trusty*"""'}), "(config=mock_aws_config, name_match='*trusty*')\n", (1117, 1164), False, 'from aec.command.ami import delete, describe, share\n'), ((1338, 1380), 'aec.command.ami.delete', 'delete', (['mock_aws_config', "AMIS[0]['ami_id']"], {}), "(mock_aws_config, AMIS[0]['ami_id'])\n", (1344, 1380), False, 'from aec.command.ami import delete, describe, share\n'), ((1426, 1483), 'aec.command.ami.share', 'share', (['mock_aws_config', "AMIS[0]['ami_id']", '"""123456789012"""'], {}), "(mock_aws_config, AMIS[0]['ami_id'], '123456789012')\n", (1431, 1483), False, 'from aec.command.ami import delete, describe, share\n')]
|
import torch
from lib.utils import is_parallel
import numpy as np
np.set_printoptions(threshold=np.inf)
import cv2
from sklearn.cluster import DBSCAN
def build_targets(cfg, predictions, targets, model, bdd=True):
'''
predictions
[16, 3, 32, 32, 85]
[16, 3, 16, 16, 85]
[16, 3, 8, 8, 85]
torch.tensor(predictions[i].shape)[[3, 2, 3, 2]]
[32,32,32,32]
[16,16,16,16]
[8,8,8,8]
targets[3,x,7]
t [index, class, x, y, w, h, head_index]
'''
# Build targets for compute_loss(), input targets(image,class,x,y,w,h)
if bdd:
if is_parallel(model):
det = model.module.det_out_bdd
else:
det = model.det_out_bdd
else:
if is_parallel(model):
det = model.module.det_out_bosch
else:
det = model.det_out_bosch
# print(type(model))
# det = model.model[model.detector_index]
# print(type(det))
na, nt = det.na, targets.shape[0] # number of anchors, targets
tcls, tbox, indices, anch = [], [], [], []
gain = torch.ones(7, device=targets.device) # normalized to gridspace gain
ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)
targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices
g = 0.5 # bias
off = torch.tensor([[0, 0],
[1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m
# [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
], device=targets.device).float() * g # offsets
for i in range(det.nl):
anchors = det.anchors[i] #[3,2]
gain[2:6] = torch.tensor(predictions[i].shape)[[3, 2, 3, 2]] # xyxy gain
# Match targets to anchors
t = targets * gain
if nt:
# Matches
r = t[:, :, 4:6] / anchors[:, None] # wh ratio
j = torch.max(r, 1. / r).max(2)[0] < cfg.TRAIN.ANCHOR_THRESHOLD # compare
# j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
t = t[j] # filter
# Offsets
gxy = t[:, 2:4] # grid xy
gxi = gain[[2, 3]] - gxy # inverse
j, k = ((gxy % 1. < g) & (gxy > 1.)).T
l, m = ((gxi % 1. < g) & (gxi > 1.)).T
j = torch.stack((torch.ones_like(j), j, k, l, m))
t = t.repeat((5, 1, 1))[j]
offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
else:
t = targets[0]
offsets = 0
# Define
b, c = t[:, :2].long().T # image, class
gxy = t[:, 2:4] # grid xy
gwh = t[:, 4:6] # grid wh
gij = (gxy - offsets).long()
gi, gj = gij.T # grid xy indices
# Append
a = t[:, 6].long() # anchor indices
indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices
tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
anch.append(anchors[a]) # anchors
tcls.append(c) # class
return tcls, tbox, indices, anch
def morphological_process(image, kernel_size=5, func_type=cv2.MORPH_CLOSE):
"""
morphological process to fill the hole in the binary segmentation result
:param image:
:param kernel_size:
:return:
"""
if len(image.shape) == 3:
raise ValueError('Binary segmentation result image should be a single channel image')
if image.dtype is not np.uint8:
image = np.array(image, np.uint8)
kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(kernel_size, kernel_size))
# close operation fille hole
closing = cv2.morphologyEx(image, func_type, kernel, iterations=1)
return closing
def connect_components_analysis(image):
"""
connect components analysis to remove the small components
:param image:
:return:
"""
if len(image.shape) == 3:
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
gray_image = image
# print(gray_image.dtype)
return cv2.connectedComponentsWithStats(gray_image, connectivity=8, ltype=cv2.CV_32S)
def if_y(samples_x):
for sample_x in samples_x:
if len(sample_x):
# if len(sample_x) != (sample_x[-1] - sample_x[0] + 1) or sample_x[-1] == sample_x[0]:
if sample_x[-1] == sample_x[0]:
return False
return True
def fitlane(mask, sel_labels, labels, stats):
H, W = mask.shape
for label_group in sel_labels:
states = [stats[k] for k in label_group]
x, y, w, h, _ = states[0]
# if len(label_group) > 1:
# print('in')
# for m in range(len(label_group)-1):
# labels[labels == label_group[m+1]] = label_group[0]
t = label_group[0]
# samples_y = np.linspace(y, H-1, 30)
# else:
samples_y = np.linspace(y, y+h-1, 30)
samples_x = [np.where(labels[int(sample_y)]==t)[0] for sample_y in samples_y]
if if_y(samples_x):
samples_x = [int(np.mean(sample_x)) if len(sample_x) else -1 for sample_x in samples_x]
samples_x = np.array(samples_x)
samples_y = np.array(samples_y)
samples_y = samples_y[samples_x != -1]
samples_x = samples_x[samples_x != -1]
func = np.polyfit(samples_y, samples_x, 2)
x_limits = np.polyval(func, H-1)
# if (y_max + h - 1) >= 720:
if x_limits < 0 or x_limits > W:
# if (y_max + h - 1) > 720:
# draw_y = np.linspace(y, 720-1, 720-y)
draw_y = np.linspace(y, y+h-1, h)
else:
# draw_y = np.linspace(y, y+h-1, y+h-y)
draw_y = np.linspace(y, H-1, H-y)
draw_x = np.polyval(func, draw_y)
# draw_y = draw_y[draw_x < W]
# draw_x = draw_x[draw_x < W]
draw_points = (np.asarray([draw_x, draw_y]).T).astype(np.int32)
cv2.polylines(mask, [draw_points], False, 1, thickness=15)
else:
# if ( + w - 1) >= 1280:
samples_x = np.linspace(x, W-1, 30)
# else:
# samples_x = np.linspace(x, x_max+w-1, 30)
samples_y = [np.where(labels[:, int(sample_x)]==t)[0] for sample_x in samples_x]
samples_y = [int(np.mean(sample_y)) if len(sample_y) else -1 for sample_y in samples_y]
samples_x = np.array(samples_x)
samples_y = np.array(samples_y)
samples_x = samples_x[samples_y != -1]
samples_y = samples_y[samples_y != -1]
try:
func = np.polyfit(samples_x, samples_y, 2)
except:
pass
# y_limits = np.polyval(func, 0)
# if y_limits > 720 or y_limits < 0:
# if (x + w - 1) >= 1280:
# draw_x = np.linspace(x, 1280-1, 1280-x)
# else:
y_limits = np.polyval(func, 0)
if y_limits >= H or y_limits < 0:
draw_x = np.linspace(x, x+w-1, w+x-x)
else:
y_limits = np.polyval(func, W-1)
if y_limits >= H or y_limits < 0:
draw_x = np.linspace(x, x+w-1, w+x-x)
# if x+w-1 < 640:
# draw_x = np.linspace(0, x+w-1, w+x-x)
else:
draw_x = np.linspace(x, W-1, W-x)
draw_y = np.polyval(func, draw_x)
draw_points = (np.asarray([draw_x, draw_y]).T).astype(np.int32)
cv2.polylines(mask, [draw_points], False, 1, thickness=15)
return mask
def connect_lane(image, shadow_height=0):
if len(image.shape) == 3:
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
gray_image = image
if shadow_height:
image[:shadow_height] = 0
mask = np.zeros((image.shape[0], image.shape[1]), np.uint8)
num_labels, labels, stats, centers = cv2.connectedComponentsWithStats(gray_image, connectivity=8, ltype=cv2.CV_32S)
# ratios = []
selected_label = []
for t in range(1, num_labels, 1):
_, _, _, _, area = stats[t]
if area > 400:
selected_label.append(t)
if len(selected_label) == 0:
return mask
else:
split_labels = [[label,] for label in selected_label]
mask_post = fitlane(mask, split_labels, labels, stats)
return mask_post
|
[
"lib.utils.is_parallel",
"numpy.polyfit",
"torch.max",
"numpy.array",
"torch.arange",
"numpy.mean",
"numpy.asarray",
"numpy.linspace",
"numpy.polyval",
"torch.zeros_like",
"torch.ones_like",
"cv2.polylines",
"cv2.morphologyEx",
"cv2.cvtColor",
"torch.cat",
"numpy.set_printoptions",
"torch.tensor",
"numpy.zeros",
"cv2.connectedComponentsWithStats",
"cv2.getStructuringElement",
"torch.ones"
] |
[((66, 103), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (85, 103), True, 'import numpy as np\n'), ((1059, 1095), 'torch.ones', 'torch.ones', (['(7)'], {'device': 'targets.device'}), '(7, device=targets.device)\n', (1069, 1095), False, 'import torch\n'), ((3616, 3704), 'cv2.getStructuringElement', 'cv2.getStructuringElement', ([], {'shape': 'cv2.MORPH_ELLIPSE', 'ksize': '(kernel_size, kernel_size)'}), '(shape=cv2.MORPH_ELLIPSE, ksize=(kernel_size,\n kernel_size))\n', (3641, 3704), False, 'import cv2\n'), ((3749, 3805), 'cv2.morphologyEx', 'cv2.morphologyEx', (['image', 'func_type', 'kernel'], {'iterations': '(1)'}), '(image, func_type, kernel, iterations=1)\n', (3765, 3805), False, 'import cv2\n'), ((4146, 4224), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['gray_image'], {'connectivity': '(8)', 'ltype': 'cv2.CV_32S'}), '(gray_image, connectivity=8, ltype=cv2.CV_32S)\n', (4178, 4224), False, 'import cv2\n'), ((7970, 8022), 'numpy.zeros', 'np.zeros', (['(image.shape[0], image.shape[1])', 'np.uint8'], {}), '((image.shape[0], image.shape[1]), np.uint8)\n', (7978, 8022), True, 'import numpy as np\n'), ((8069, 8147), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['gray_image'], {'connectivity': '(8)', 'ltype': 'cv2.CV_32S'}), '(gray_image, connectivity=8, ltype=cv2.CV_32S)\n', (8101, 8147), False, 'import cv2\n'), ((587, 605), 'lib.utils.is_parallel', 'is_parallel', (['model'], {}), '(model)\n', (598, 605), False, 'from lib.utils import is_parallel\n'), ((721, 739), 'lib.utils.is_parallel', 'is_parallel', (['model'], {}), '(model)\n', (732, 739), False, 'from lib.utils import is_parallel\n'), ((3576, 3601), 'numpy.array', 'np.array', (['image', 'np.uint8'], {}), '(image, np.uint8)\n', (3584, 3601), True, 'import numpy as np\n'), ((4028, 4067), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (4040, 4067), False, 'import cv2\n'), ((4973, 5002), 'numpy.linspace', 'np.linspace', (['y', '(y + h - 1)', '(30)'], {}), '(y, y + h - 1, 30)\n', (4984, 5002), True, 'import numpy as np\n'), ((7826, 7865), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (7838, 7865), False, 'import cv2\n'), ((1710, 1744), 'torch.tensor', 'torch.tensor', (['predictions[i].shape'], {}), '(predictions[i].shape)\n', (1722, 1744), False, 'import torch\n'), ((3022, 3052), 'torch.cat', 'torch.cat', (['(gxy - gij, gwh)', '(1)'], {}), '((gxy - gij, gwh), 1)\n', (3031, 3052), False, 'import torch\n'), ((5247, 5266), 'numpy.array', 'np.array', (['samples_x'], {}), '(samples_x)\n', (5255, 5266), True, 'import numpy as np\n'), ((5291, 5310), 'numpy.array', 'np.array', (['samples_y'], {}), '(samples_y)\n', (5299, 5310), True, 'import numpy as np\n'), ((5432, 5467), 'numpy.polyfit', 'np.polyfit', (['samples_y', 'samples_x', '(2)'], {}), '(samples_y, samples_x, 2)\n', (5442, 5467), True, 'import numpy as np\n'), ((5491, 5514), 'numpy.polyval', 'np.polyval', (['func', '(H - 1)'], {}), '(func, H - 1)\n', (5501, 5514), True, 'import numpy as np\n'), ((5890, 5914), 'numpy.polyval', 'np.polyval', (['func', 'draw_y'], {}), '(func, draw_y)\n', (5900, 5914), True, 'import numpy as np\n'), ((6087, 6145), 'cv2.polylines', 'cv2.polylines', (['mask', '[draw_points]', '(False)', '(1)'], {'thickness': '(15)'}), '(mask, [draw_points], False, 1, thickness=15)\n', (6100, 6145), False, 'import cv2\n'), ((6221, 6246), 'numpy.linspace', 'np.linspace', (['x', '(W - 1)', '(30)'], {}), '(x, W - 1, 30)\n', (6232, 6246), True, 'import numpy as np\n'), ((6542, 6561), 'numpy.array', 'np.array', (['samples_x'], {}), '(samples_x)\n', (6550, 6561), True, 'import numpy as np\n'), ((6586, 6605), 'numpy.array', 'np.array', (['samples_y'], {}), '(samples_y)\n', (6594, 6605), True, 'import numpy as np\n'), ((7058, 7077), 'numpy.polyval', 'np.polyval', (['func', '(0)'], {}), '(func, 0)\n', (7068, 7077), True, 'import numpy as np\n'), ((7544, 7568), 'numpy.polyval', 'np.polyval', (['func', 'draw_x'], {}), '(func, draw_x)\n', (7554, 7568), True, 'import numpy as np\n'), ((7657, 7715), 'cv2.polylines', 'cv2.polylines', (['mask', '[draw_points]', '(False)', '(1)'], {'thickness': '(15)'}), '(mask, [draw_points], False, 1, thickness=15)\n', (7670, 7715), False, 'import cv2\n'), ((1376, 1455), 'torch.tensor', 'torch.tensor', (['[[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1]]'], {'device': 'targets.device'}), '([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1]], device=targets.device)\n', (1388, 1455), False, 'import torch\n'), ((5720, 5748), 'numpy.linspace', 'np.linspace', (['y', '(y + h - 1)', 'h'], {}), '(y, y + h - 1, h)\n', (5731, 5748), True, 'import numpy as np\n'), ((5844, 5872), 'numpy.linspace', 'np.linspace', (['y', '(H - 1)', '(H - y)'], {}), '(y, H - 1, H - y)\n', (5855, 5872), True, 'import numpy as np\n'), ((6748, 6783), 'numpy.polyfit', 'np.polyfit', (['samples_x', 'samples_y', '(2)'], {}), '(samples_x, samples_y, 2)\n', (6758, 6783), True, 'import numpy as np\n'), ((7149, 7185), 'numpy.linspace', 'np.linspace', (['x', '(x + w - 1)', '(w + x - x)'], {}), '(x, x + w - 1, w + x - x)\n', (7160, 7185), True, 'import numpy as np\n'), ((7223, 7246), 'numpy.polyval', 'np.polyval', (['func', '(W - 1)'], {}), '(func, W - 1)\n', (7233, 7246), True, 'import numpy as np\n'), ((2400, 2418), 'torch.ones_like', 'torch.ones_like', (['j'], {}), '(j)\n', (2415, 2418), False, 'import torch\n'), ((7324, 7360), 'numpy.linspace', 'np.linspace', (['x', '(x + w - 1)', '(w + x - x)'], {}), '(x, x + w - 1, w + x - x)\n', (7335, 7360), True, 'import numpy as np\n'), ((7498, 7526), 'numpy.linspace', 'np.linspace', (['x', '(W - 1)', '(W - x)'], {}), '(x, W - 1, W - x)\n', (7509, 7526), True, 'import numpy as np\n'), ((2495, 2516), 'torch.zeros_like', 'torch.zeros_like', (['gxy'], {}), '(gxy)\n', (2511, 2516), False, 'import torch\n'), ((5152, 5169), 'numpy.mean', 'np.mean', (['sample_x'], {}), '(sample_x)\n', (5159, 5169), True, 'import numpy as np\n'), ((6026, 6054), 'numpy.asarray', 'np.asarray', (['[draw_x, draw_y]'], {}), '([draw_x, draw_y])\n', (6036, 6054), True, 'import numpy as np\n'), ((6447, 6464), 'numpy.mean', 'np.mean', (['sample_y'], {}), '(sample_y)\n', (6454, 6464), True, 'import numpy as np\n'), ((7596, 7624), 'numpy.asarray', 'np.asarray', (['[draw_x, draw_y]'], {}), '([draw_x, draw_y])\n', (7606, 7624), True, 'import numpy as np\n'), ((1137, 1176), 'torch.arange', 'torch.arange', (['na'], {'device': 'targets.device'}), '(na, device=targets.device)\n', (1149, 1176), False, 'import torch\n'), ((1948, 1969), 'torch.max', 'torch.max', (['r', '(1.0 / r)'], {}), '(r, 1.0 / r)\n', (1957, 1969), False, 'import torch\n')]
|
from django.views.generic import View
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
return render(request, 'treasurehunt/treasurehunt_index.html')
|
[
"django.shortcuts.render"
] |
[((143, 198), 'django.shortcuts.render', 'render', (['request', '"""treasurehunt/treasurehunt_index.html"""'], {}), "(request, 'treasurehunt/treasurehunt_index.html')\n", (149, 198), False, 'from django.shortcuts import render\n')]
|
# Copyright 2022 @ReneFreingruber
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import utils
import tagging_engine.tagging as tagging
from tagging_engine.tagging import Tag
import mutators.testcase_mutators_helpers as testcase_mutators_helpers
def mutation_change_proto(content, state):
# utils.dbg_msg("Mutation operation: Change proto")
tagging.add_tag(Tag.MUTATION_CHANGE_PROTO1)
# TODO
# Currently I don't return in "lhs" or "rhs" the __proto__ of a function
# So code like this:
# Math.abs.__proto__ = Math.sign.__proto__
# Can currently not be created. Is this required?
# => has such code an effect?
random_line_number = testcase_mutators_helpers.get_random_line_number_to_insert_code(state)
(start_line_with, end_line_with) = testcase_mutators_helpers.get_start_and_end_line_symbols(state, random_line_number, content)
(lhs, code_possibilities) = testcase_mutators_helpers.get_proto_change_lhs(state, random_line_number)
rhs = testcase_mutators_helpers.get_proto_change_rhs(state, random_line_number, code_possibilities)
new_code_line = "%s%s.__proto__ = %s%s" % (start_line_with, lhs, rhs, end_line_with)
# Now just insert the new line to the testcase & state
lines = content.split("\n")
lines.insert(random_line_number, new_code_line)
new_content = "\n".join(lines)
state.state_insert_line(random_line_number, new_content, new_code_line)
return new_content, state
|
[
"mutators.testcase_mutators_helpers.get_proto_change_rhs",
"tagging_engine.tagging.add_tag",
"mutators.testcase_mutators_helpers.get_random_line_number_to_insert_code",
"mutators.testcase_mutators_helpers.get_proto_change_lhs",
"mutators.testcase_mutators_helpers.get_start_and_end_line_symbols"
] |
[((849, 892), 'tagging_engine.tagging.add_tag', 'tagging.add_tag', (['Tag.MUTATION_CHANGE_PROTO1'], {}), '(Tag.MUTATION_CHANGE_PROTO1)\n', (864, 892), True, 'import tagging_engine.tagging as tagging\n'), ((1168, 1238), 'mutators.testcase_mutators_helpers.get_random_line_number_to_insert_code', 'testcase_mutators_helpers.get_random_line_number_to_insert_code', (['state'], {}), '(state)\n', (1231, 1238), True, 'import mutators.testcase_mutators_helpers as testcase_mutators_helpers\n'), ((1278, 1374), 'mutators.testcase_mutators_helpers.get_start_and_end_line_symbols', 'testcase_mutators_helpers.get_start_and_end_line_symbols', (['state', 'random_line_number', 'content'], {}), '(state,\n random_line_number, content)\n', (1334, 1374), True, 'import mutators.testcase_mutators_helpers as testcase_mutators_helpers\n'), ((1404, 1477), 'mutators.testcase_mutators_helpers.get_proto_change_lhs', 'testcase_mutators_helpers.get_proto_change_lhs', (['state', 'random_line_number'], {}), '(state, random_line_number)\n', (1450, 1477), True, 'import mutators.testcase_mutators_helpers as testcase_mutators_helpers\n'), ((1488, 1585), 'mutators.testcase_mutators_helpers.get_proto_change_rhs', 'testcase_mutators_helpers.get_proto_change_rhs', (['state', 'random_line_number', 'code_possibilities'], {}), '(state, random_line_number,\n code_possibilities)\n', (1534, 1585), True, 'import mutators.testcase_mutators_helpers as testcase_mutators_helpers\n')]
|
"""
ckwg +31
Copyright 2016 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Interface to VITAL camera_intrinsics objects
"""
import collections
import ctypes
import numpy
from vital.types.eigen import EigenArray
from vital.util import VitalErrorHandle, VitalObject
class CameraIntrinsics (VitalObject):
def __init__(self, focal_length=1., principle_point=(0, 0),
aspect_ratio=1., skew=0., dist_coeffs=(), from_cptr=None):
"""
:param focal_length: Focal length (default=1.0)
:type focal_length: float
:param principle_point: Principle point (default: [0,0]).
Values are copied into this structure.
:type principle_point: collections.Sequence[float]
:param aspect_ratio: Aspect ratio (default: 1.0)
:type aspect_ratio: float
:param skew: Skew (default: 0.0)
:type skew: float
:param dist_coeffs: Existing distortion coefficients (Default: empty).
Values are copied into this structure.
:type dist_coeffs: collections.Sequence[float]
"""
super(CameraIntrinsics, self).__init__(from_cptr, focal_length,
principle_point, aspect_ratio,
skew, dist_coeffs)
def _new(self, focal_length, principle_point, aspect_ratio, skew,
dist_coeffs):
"""
Construct a new vital::camera_intrinsics instance
:type focal_length: float
:type principle_point: collections.Sequence[float]
:type aspect_ratio: float
:type skew: float
:type dist_coeffs: collections.Sequence[float]
"""
ci_new = self.VITAL_LIB['vital_camera_intrinsics_new']
ci_new.argtypes = [
ctypes.c_double,
EigenArray.c_ptr_type(2, 1, ctypes.c_double),
ctypes.c_double,
ctypes.c_double,
EigenArray.c_ptr_type('X', 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR,
]
ci_new.restype = self.C_TYPE_PTR
# Make "vectors"
pp = EigenArray.from_iterable(principle_point, target_shape=(2, 1))
dc = EigenArray(len(dist_coeffs), dynamic_rows=True)
if len(dist_coeffs):
dc.T[:] = dist_coeffs
with VitalErrorHandle() as eh:
return ci_new(focal_length, pp, aspect_ratio, skew, dc, eh)
def _destroy(self):
ci_dtor = self.VITAL_LIB['vital_camera_intrinsics_destroy']
ci_dtor.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
with VitalErrorHandle() as eh:
ci_dtor(self, eh)
@property
def focal_length(self):
f = self.VITAL_LIB['vital_camera_intrinsics_get_focal_length']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = ctypes.c_double
with VitalErrorHandle() as eh:
return f(self, eh)
@property
def principle_point(self):
f = self.VITAL_LIB['vital_camera_intrinsics_get_principle_point']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
with VitalErrorHandle() as eh:
m_ptr = f(self, eh)
return EigenArray(2, from_cptr=m_ptr, owns_data=True)
@property
def aspect_ratio(self):
f = self.VITAL_LIB['vital_camera_intrinsics_get_aspect_ratio']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = ctypes.c_double
with VitalErrorHandle() as eh:
return f(self, eh)
@property
def skew(self):
f = self.VITAL_LIB['vital_camera_intrinsics_get_skew']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = ctypes.c_double
with VitalErrorHandle() as eh:
return f(self, eh)
@property
def dist_coeffs(self):
""" Get the distortion coefficients array """
f = self.VITAL_LIB['vital_camera_intrinsics_get_dist_coeffs']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type('X', 1, ctypes.c_double)
with VitalErrorHandle() as eh:
m_ptr = f(self, eh)
return EigenArray(dynamic_rows=1, from_cptr=m_ptr, owns_data=True)
def __eq__(self, other):
if isinstance(other, CameraIntrinsics):
return (
self.focal_length == other.focal_length and
numpy.allclose(self.principle_point, other.principle_point) and
self.aspect_ratio == other.aspect_ratio and
self.skew == other.skew and
numpy.allclose(self.dist_coeffs, other.dist_coeffs)
)
return False
def __ne__(self, other):
return not (self == other)
def as_matrix(self):
"""
Access the intrinsics as an upper triangular matrix
**Note:** *This matrix includes the focal length, principal point,
aspect ratio, and skew, but does not model distortion.*
:return: 3x3 upper triangular matrix
"""
f = self.VITAL_LIB['vital_camera_intrinsics_as_matrix']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(3, 3, ctypes.c_double)
with VitalErrorHandle() as eh:
m_ptr = f(self, eh)
return EigenArray(3, 3, from_cptr=m_ptr, owns_data=True)
def map_2d(self, norm_pt):
"""
Map normalized image coordinates into actual image coordinates
This function applies both distortion and application of the
calibration matrix to map into actual image coordinates.
:param norm_pt: Normalized image coordinate to map to an image
coordinate (2-element sequence).
:type norm_pt: collections.Sequence[float]
:return: Mapped 2D image coordinate
:rtype: EigenArray[float]
"""
assert len(norm_pt) == 2, "Input sequence was not of length 2"
f = self.VITAL_LIB['vital_camera_intrinsics_map_2d']
f.argtypes = [self.C_TYPE_PTR,
EigenArray.c_ptr_type(2, 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
p = EigenArray(2)
p.T[:] = norm_pt
with VitalErrorHandle() as eh:
m_ptr = f(self, p, eh)
return EigenArray(2, 1, from_cptr=m_ptr, owns_data=True)
def map_3d(self, norm_hpt):
"""
Map a 3D point in camera coordinates into actual image coordinates
:param norm_hpt: Normalized coordinate to map to an image coordinate
(3-element sequence)
:type norm_hpt: collections.Sequence[float]
:return: Mapped 2D image coordinate
:rtype: EigenArray[float]
"""
assert len(norm_hpt) == 3, "Input sequence was not of length 3"
f = self.VITAL_LIB['vital_camera_intrinsics_map_3d']
f.argtypes = [self.C_TYPE_PTR,
EigenArray.c_ptr_type(3, 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
p = EigenArray(3)
p.T[:] = norm_hpt
with VitalErrorHandle() as eh:
m_ptr = f(self, p, eh)
return EigenArray(2, 1, from_cptr=m_ptr, owns_data=True)
def unmap_2d(self, pt):
"""
Unmap actual image coordinates back into normalized image coordinates
This function applies both application of the inverse calibration matrix
and undistortion of the normalized coordinates
:param pt: Actual image 2D point to un-map.
:return: Un-mapped normalized image coordinate.
"""
assert len(pt) == 2, "Input sequence was not of length 2"
f = self.VITAL_LIB['vital_camera_intrinsics_unmap_2d']
f.argtypes = [self.C_TYPE_PTR,
EigenArray.c_ptr_type(2, 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
p = EigenArray(2)
p.T[:] = pt
with VitalErrorHandle() as eh:
m_ptr = f(self, p, eh)
return EigenArray(2, 1, from_cptr=m_ptr, owns_data=True)
def distort_2d(self, norm_pt):
"""
Map normalized image coordinates into distorted coordinates
:param norm_pt: Normalized 2D image coordinate.
:return: Distorted 2D coordinate.
"""
assert len(norm_pt) == 2, "Input sequence was not of length 2"
f = self.VITAL_LIB['vital_camera_intrinsics_distort_2d']
f.argtypes = [self.C_TYPE_PTR,
EigenArray.c_ptr_type(2, 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
p = EigenArray(2)
p.T[:] = norm_pt
with VitalErrorHandle() as eh:
m_ptr = f(self, p, eh)
return EigenArray(2, 1, from_cptr=m_ptr, owns_data=True)
def undistort_2d(self, dist_pt):
"""
Unmap distorted normalized coordinates into normalized coordinates
:param dist_pt: Distorted 2D coordinate to un-distort.
:return: Normalized 2D image coordinate.
"""
assert len(dist_pt) == 2, "Input sequence was not of length 2"
f = self.VITAL_LIB['vital_camera_intrinsics_undistort_2d']
f.argtypes = [self.C_TYPE_PTR,
EigenArray.c_ptr_type(2, 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
p = EigenArray(2)
p.T[:] = dist_pt
with VitalErrorHandle() as eh:
m_ptr = f(self, p, eh)
return EigenArray(2, 1, from_cptr=m_ptr, owns_data=True)
|
[
"vital.types.eigen.EigenArray.from_iterable",
"numpy.allclose",
"vital.types.eigen.EigenArray.c_ptr_type",
"vital.types.eigen.EigenArray",
"vital.util.VitalErrorHandle"
] |
[((3614, 3676), 'vital.types.eigen.EigenArray.from_iterable', 'EigenArray.from_iterable', (['principle_point'], {'target_shape': '(2, 1)'}), '(principle_point, target_shape=(2, 1))\n', (3638, 3676), False, 'from vital.types.eigen import EigenArray\n'), ((4645, 4689), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (4666, 4689), False, 'from vital.types.eigen import EigenArray\n'), ((5641, 5687), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['"""X"""', '(1)', 'ctypes.c_double'], {}), "('X', 1, ctypes.c_double)\n", (5662, 5687), False, 'from vital.types.eigen import EigenArray\n'), ((6798, 6842), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(3)', '(3)', 'ctypes.c_double'], {}), '(3, 3, ctypes.c_double)\n', (6819, 6842), False, 'from vital.types.eigen import EigenArray\n'), ((7803, 7847), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (7824, 7847), False, 'from vital.types.eigen import EigenArray\n'), ((7860, 7873), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)'], {}), '(2)\n', (7870, 7873), False, 'from vital.types.eigen import EigenArray\n'), ((8728, 8772), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (8749, 8772), False, 'from vital.types.eigen import EigenArray\n'), ((8785, 8798), 'vital.types.eigen.EigenArray', 'EigenArray', (['(3)'], {}), '(3)\n', (8795, 8798), False, 'from vital.types.eigen import EigenArray\n'), ((9654, 9698), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (9675, 9698), False, 'from vital.types.eigen import EigenArray\n'), ((9711, 9724), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)'], {}), '(2)\n', (9721, 9724), False, 'from vital.types.eigen import EigenArray\n'), ((10431, 10475), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (10452, 10475), False, 'from vital.types.eigen import EigenArray\n'), ((10488, 10501), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)'], {}), '(2)\n', (10498, 10501), False, 'from vital.types.eigen import EigenArray\n'), ((11238, 11282), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (11259, 11282), False, 'from vital.types.eigen import EigenArray\n'), ((11295, 11308), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)'], {}), '(2)\n', (11305, 11308), False, 'from vital.types.eigen import EigenArray\n'), ((3320, 3364), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (3341, 3364), False, 'from vital.types.eigen import EigenArray\n'), ((3436, 3482), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['"""X"""', '(1)', 'ctypes.c_double'], {}), "('X', 1, ctypes.c_double)\n", (3457, 3482), False, 'from vital.types.eigen import EigenArray\n'), ((3815, 3833), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (3831, 3833), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((4093, 4111), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (4109, 4111), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((4380, 4398), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (4396, 4398), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((4703, 4721), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (4719, 4721), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((4780, 4826), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)'], {'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(2, from_cptr=m_ptr, owns_data=True)\n', (4790, 4826), False, 'from vital.types.eigen import EigenArray\n'), ((5058, 5076), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (5074, 5076), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((5330, 5348), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (5346, 5348), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((5701, 5719), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (5717, 5719), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((5778, 5837), 'vital.types.eigen.EigenArray', 'EigenArray', ([], {'dynamic_rows': '(1)', 'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(dynamic_rows=1, from_cptr=m_ptr, owns_data=True)\n', (5788, 5837), False, 'from vital.types.eigen import EigenArray\n'), ((6856, 6874), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (6872, 6874), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((6933, 6982), 'vital.types.eigen.EigenArray', 'EigenArray', (['(3)', '(3)'], {'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(3, 3, from_cptr=m_ptr, owns_data=True)\n', (6943, 6982), False, 'from vital.types.eigen import EigenArray\n'), ((7686, 7730), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (7707, 7730), False, 'from vital.types.eigen import EigenArray\n'), ((7912, 7930), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (7928, 7930), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((7992, 8041), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)', '(1)'], {'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(2, 1, from_cptr=m_ptr, owns_data=True)\n', (8002, 8041), False, 'from vital.types.eigen import EigenArray\n'), ((8611, 8655), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(3)', '(1)', 'ctypes.c_double'], {}), '(3, 1, ctypes.c_double)\n', (8632, 8655), False, 'from vital.types.eigen import EigenArray\n'), ((8838, 8856), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (8854, 8856), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((8918, 8967), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)', '(1)'], {'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(2, 1, from_cptr=m_ptr, owns_data=True)\n', (8928, 8967), False, 'from vital.types.eigen import EigenArray\n'), ((9537, 9581), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (9558, 9581), False, 'from vital.types.eigen import EigenArray\n'), ((9758, 9776), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (9774, 9776), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((9838, 9887), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)', '(1)'], {'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(2, 1, from_cptr=m_ptr, owns_data=True)\n', (9848, 9887), False, 'from vital.types.eigen import EigenArray\n'), ((10314, 10358), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (10335, 10358), False, 'from vital.types.eigen import EigenArray\n'), ((10540, 10558), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (10556, 10558), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((10620, 10669), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)', '(1)'], {'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(2, 1, from_cptr=m_ptr, owns_data=True)\n', (10630, 10669), False, 'from vital.types.eigen import EigenArray\n'), ((11121, 11165), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (11142, 11165), False, 'from vital.types.eigen import EigenArray\n'), ((11347, 11365), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (11363, 11365), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((11427, 11476), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)', '(1)'], {'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(2, 1, from_cptr=m_ptr, owns_data=True)\n', (11437, 11476), False, 'from vital.types.eigen import EigenArray\n'), ((6013, 6072), 'numpy.allclose', 'numpy.allclose', (['self.principle_point', 'other.principle_point'], {}), '(self.principle_point, other.principle_point)\n', (6027, 6072), False, 'import numpy\n'), ((6197, 6248), 'numpy.allclose', 'numpy.allclose', (['self.dist_coeffs', 'other.dist_coeffs'], {}), '(self.dist_coeffs, other.dist_coeffs)\n', (6211, 6248), False, 'import numpy\n')]
|
import numpy as np
import util.data
def ndcg(X_test, y_test, y_pred, ):
Xy_pred = X_test.copy([['srch_id', 'prop_id', 'score']])
Xy_pred['score_pred'] = y_pred
Xy_pred['score'] = y_test
Xy_pred.sort_values(['srch_id', 'score_pred'], ascending=[True, False])
dcg_test = DCG_dict(Xy_pred)
ndcg = np.mean(np.array(list(dcg_test.values())))
return ndcg
def sort_pred_test(x_test, y_test, y_pred):
# calculate dcg of test set per srch_id
Xy_pred = util.data.Xy_pred(x_test, y_pred)
# put true y values on indexes, do not sort !
Xy_true = util.data.Xy_pred(x_test, y_test)
return Xy_pred, Xy_true
def dcg_at_k(r, k, method=0):
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0.
def ndcg_at_k(r, k, method=0):
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
def DCG_dict(data):
DCG = {}
# for id in data['srch_id']:
# rows = rows_srch_id(data, id)
# r = relevance_scores(rows)
r = []
prev_srch_id = -1
position = 0
for i in data.index.tolist():
if prev_srch_id == -1:
row = data.loc[i]
cur_srch_id = row.srch_id
prev_srch_id = 0
row = data.loc[i]
next_id = row.srch_id
score = row.score
# compute position
if cur_srch_id != next_id:
DCG[cur_srch_id] = ndcg_at_k(r, k=len(r))
cur_srch_id = next_id
r = []
r.append(score)
position += 1
else:
r.append(score)
position += 1
DCG[cur_srch_id] = ndcg_at_k(r, k=len(r))
return DCG
|
[
"numpy.asfarray",
"numpy.arange"
] |
[((683, 697), 'numpy.asfarray', 'np.asfarray', (['r'], {}), '(r)\n', (694, 697), True, 'import numpy as np\n'), ((790, 814), 'numpy.arange', 'np.arange', (['(2)', '(r.size + 1)'], {}), '(2, r.size + 1)\n', (799, 814), True, 'import numpy as np\n'), ((881, 905), 'numpy.arange', 'np.arange', (['(2)', '(r.size + 2)'], {}), '(2, r.size + 2)\n', (890, 905), True, 'import numpy as np\n')]
|
# Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Resource object."""
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
import retrying
import six
from heat.common import crypt
from heat.common import exception
from heat.common.i18n import _
from heat.db import api as db_api
from heat.objects import base as heat_base
from heat.objects import fields as heat_fields
from heat.objects import resource_data
cfg.CONF.import_opt('encrypt_parameters_and_properties', 'heat.common.config')
def retry_on_conflict(func):
def is_conflict(ex):
return isinstance(ex, exception.ConcurrentTransaction)
wrapper = retrying.retry(stop_max_attempt_number=11,
wait_random_min=0.0, wait_random_max=2.0,
retry_on_exception=is_conflict)
return wrapper(func)
class Resource(
heat_base.HeatObject,
base.VersionedObjectDictCompat,
base.ComparableVersionedObject,
):
fields = {
'id': fields.IntegerField(),
'uuid': fields.StringField(),
'stack_id': fields.StringField(),
'created_at': fields.DateTimeField(read_only=True),
'updated_at': fields.DateTimeField(nullable=True),
'physical_resource_id': fields.StringField(nullable=True),
'name': fields.StringField(nullable=True),
'status': fields.StringField(nullable=True),
'status_reason': fields.StringField(nullable=True),
'action': fields.StringField(nullable=True),
'rsrc_metadata': heat_fields.JsonField(nullable=True),
'properties_data': heat_fields.JsonField(nullable=True),
'properties_data_encrypted': fields.BooleanField(default=False),
'data': fields.ListOfObjectsField(
resource_data.ResourceData,
nullable=True
),
'engine_id': fields.StringField(nullable=True),
'atomic_key': fields.IntegerField(nullable=True),
'current_template_id': fields.IntegerField(),
'needed_by': heat_fields.ListField(nullable=True, default=None),
'requires': heat_fields.ListField(nullable=True, default=None),
'replaces': fields.IntegerField(nullable=True),
'replaced_by': fields.IntegerField(nullable=True),
'root_stack_id': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(resource, context, db_resource):
if db_resource is None:
return None
for field in resource.fields:
if field == 'data':
resource['data'] = [resource_data.ResourceData._from_db_object(
resource_data.ResourceData(context), resd
) for resd in db_resource.data]
else:
resource[field] = db_resource[field]
if resource.properties_data_encrypted and resource.properties_data:
properties_data = {}
for prop_name, prop_value in resource.properties_data.items():
method, value = prop_value
decrypted_value = crypt.decrypt(method, value)
prop_string = jsonutils.loads(decrypted_value)
properties_data[prop_name] = prop_string
resource.properties_data = properties_data
resource._context = context
resource.obj_reset_changes()
return resource
@classmethod
def get_obj(cls, context, resource_id, refresh=False):
resource_db = db_api.resource_get(context, resource_id,
refresh=refresh)
return cls._from_db_object(cls(context), context, resource_db)
@classmethod
def get_all(cls, context):
resources_db = db_api.resource_get_all(context)
resources = [
(
resource_name,
cls._from_db_object(cls(context), context, resource_db)
)
for resource_name, resource_db in six.iteritems(resources_db)
]
return dict(resources)
@classmethod
def create(cls, context, values):
return cls._from_db_object(cls(context), context,
db_api.resource_create(context, values))
@classmethod
def delete(cls, context, resource_id):
db_api.resource_delete(context, resource_id)
@classmethod
def exchange_stacks(cls, context, resource_id1, resource_id2):
return db_api.resource_exchange_stacks(
context,
resource_id1,
resource_id2)
@classmethod
def get_all_by_stack(cls, context, stack_id, filters=None):
resources_db = db_api.resource_get_all_by_stack(context, stack_id,
filters)
return cls._resources_to_dict(context, resources_db)
@classmethod
def _resources_to_dict(cls, context, resources_db):
resources = [
(
resource_name,
cls._from_db_object(cls(context), context, resource_db)
)
for resource_name, resource_db in six.iteritems(resources_db)
]
return dict(resources)
@classmethod
def get_all_active_by_stack(cls, context, stack_id):
resources_db = db_api.resource_get_all_active_by_stack(context,
stack_id)
resources = [
(
resource_id,
cls._from_db_object(cls(context), context, resource_db)
)
for resource_id, resource_db in six.iteritems(resources_db)
]
return dict(resources)
@classmethod
def get_all_by_root_stack(cls, context, stack_id, filters):
resources_db = db_api.resource_get_all_by_root_stack(
context,
stack_id,
filters)
return cls._resources_to_dict(context, resources_db)
@classmethod
def purge_deleted(cls, context, stack_id):
return db_api.resource_purge_deleted(context, stack_id)
@classmethod
def get_by_name_and_stack(cls, context, resource_name, stack_id):
resource_db = db_api.resource_get_by_name_and_stack(
context,
resource_name,
stack_id)
return cls._from_db_object(cls(context), context, resource_db)
@classmethod
def get_by_physical_resource_id(cls, context, physical_resource_id):
resource_db = db_api.resource_get_by_physical_resource_id(
context,
physical_resource_id)
return cls._from_db_object(cls(context), context, resource_db)
@classmethod
def update_by_id(cls, context, resource_id, values):
db_api.resource_update_and_save(context, resource_id, values)
def update_and_save(self, values):
db_api.resource_update_and_save(self._context, self.id, values)
def select_and_update(self, values, expected_engine_id=None,
atomic_key=0):
return db_api.resource_update(self._context, self.id, values,
atomic_key=atomic_key,
expected_engine_id=expected_engine_id)
def refresh(self):
resource_db = db_api.resource_get(self._context, self.id, refresh=True)
return self.__class__._from_db_object(
self,
self._context,
resource_db)
@staticmethod
def encrypt_properties_data(data):
if cfg.CONF.encrypt_parameters_and_properties and data:
result = {}
for prop_name, prop_value in data.items():
prop_string = jsonutils.dumps(prop_value)
encrypted_value = crypt.encrypt(prop_string)
result[prop_name] = encrypted_value
return (True, result)
return (False, data)
def update_metadata(self, metadata):
if self.rsrc_metadata != metadata:
rows_updated = self.select_and_update(
{'rsrc_metadata': metadata}, self.engine_id, self.atomic_key)
if not rows_updated:
action = _('metadata setting for resource %s') % self.name
raise exception.ConcurrentTransaction(action=action)
|
[
"heat.objects.resource_data.ResourceData",
"oslo_versionedobjects.fields.ListOfObjectsField",
"heat.db.api.resource_delete",
"heat.db.api.resource_purge_deleted",
"heat.db.api.resource_get_all_by_stack",
"oslo_serialization.jsonutils.dumps",
"heat.objects.fields.JsonField",
"heat.db.api.resource_get",
"heat.db.api.resource_create",
"oslo_versionedobjects.fields.IntegerField",
"heat.db.api.resource_update",
"heat.common.crypt.decrypt",
"retrying.retry",
"oslo_versionedobjects.fields.StringField",
"heat.objects.fields.ListField",
"oslo_config.cfg.CONF.import_opt",
"heat.db.api.resource_get_all_by_root_stack",
"heat.common.crypt.encrypt",
"heat.db.api.resource_get_all",
"heat.db.api.resource_update_and_save",
"heat.common.exception.ConcurrentTransaction",
"oslo_versionedobjects.fields.BooleanField",
"oslo_serialization.jsonutils.loads",
"six.iteritems",
"heat.db.api.resource_get_by_name_and_stack",
"heat.db.api.resource_get_by_physical_resource_id",
"heat.common.i18n._",
"heat.db.api.resource_get_all_active_by_stack",
"oslo_versionedobjects.fields.DateTimeField",
"heat.db.api.resource_exchange_stacks"
] |
[((1033, 1111), 'oslo_config.cfg.CONF.import_opt', 'cfg.CONF.import_opt', (['"""encrypt_parameters_and_properties"""', '"""heat.common.config"""'], {}), "('encrypt_parameters_and_properties', 'heat.common.config')\n", (1052, 1111), False, 'from oslo_config import cfg\n'), ((1245, 1365), 'retrying.retry', 'retrying.retry', ([], {'stop_max_attempt_number': '(11)', 'wait_random_min': '(0.0)', 'wait_random_max': '(2.0)', 'retry_on_exception': 'is_conflict'}), '(stop_max_attempt_number=11, wait_random_min=0.0,\n wait_random_max=2.0, retry_on_exception=is_conflict)\n', (1259, 1365), False, 'import retrying\n'), ((1593, 1614), 'oslo_versionedobjects.fields.IntegerField', 'fields.IntegerField', ([], {}), '()\n', (1612, 1614), False, 'from oslo_versionedobjects import fields\n'), ((1632, 1652), 'oslo_versionedobjects.fields.StringField', 'fields.StringField', ([], {}), '()\n', (1650, 1652), False, 'from oslo_versionedobjects import fields\n'), ((1674, 1694), 'oslo_versionedobjects.fields.StringField', 'fields.StringField', ([], {}), '()\n', (1692, 1694), False, 'from oslo_versionedobjects import fields\n'), ((1718, 1754), 'oslo_versionedobjects.fields.DateTimeField', 'fields.DateTimeField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (1738, 1754), False, 'from oslo_versionedobjects import fields\n'), ((1778, 1813), 'oslo_versionedobjects.fields.DateTimeField', 'fields.DateTimeField', ([], {'nullable': '(True)'}), '(nullable=True)\n', (1798, 1813), False, 'from oslo_versionedobjects import fields\n'), ((1847, 1880), 'oslo_versionedobjects.fields.StringField', 'fields.StringField', ([], {'nullable': '(True)'}), '(nullable=True)\n', (1865, 1880), False, 'from oslo_versionedobjects import fields\n'), ((1898, 1931), 'oslo_versionedobjects.fields.StringField', 'fields.StringField', ([], {'nullable': '(True)'}), '(nullable=True)\n', (1916, 1931), False, 'from oslo_versionedobjects import fields\n'), ((1951, 1984), 'oslo_versionedobjects.fields.StringField', 'fields.StringField', ([], {'nullable': '(True)'}), '(nullable=True)\n', (1969, 1984), False, 'from oslo_versionedobjects import fields\n'), ((2011, 2044), 'oslo_versionedobjects.fields.StringField', 'fields.StringField', ([], {'nullable': '(True)'}), '(nullable=True)\n', (2029, 2044), False, 'from oslo_versionedobjects import fields\n'), ((2064, 2097), 'oslo_versionedobjects.fields.StringField', 'fields.StringField', ([], {'nullable': '(True)'}), '(nullable=True)\n', (2082, 2097), False, 'from oslo_versionedobjects import fields\n'), ((2124, 2160), 'heat.objects.fields.JsonField', 'heat_fields.JsonField', ([], {'nullable': '(True)'}), '(nullable=True)\n', (2145, 2160), True, 'from heat.objects import fields as heat_fields\n'), ((2189, 2225), 'heat.objects.fields.JsonField', 'heat_fields.JsonField', ([], {'nullable': '(True)'}), '(nullable=True)\n', (2210, 2225), True, 'from heat.objects import fields as heat_fields\n'), ((2264, 2298), 'oslo_versionedobjects.fields.BooleanField', 'fields.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2283, 2298), False, 'from oslo_versionedobjects import fields\n'), ((2316, 2384), 'oslo_versionedobjects.fields.ListOfObjectsField', 'fields.ListOfObjectsField', (['resource_data.ResourceData'], {'nullable': '(True)'}), '(resource_data.ResourceData, nullable=True)\n', (2341, 2384), False, 'from oslo_versionedobjects import fields\n'), ((2441, 2474), 'oslo_versionedobjects.fields.StringField', 'fields.StringField', ([], {'nullable': '(True)'}), '(nullable=True)\n', (2459, 2474), False, 'from oslo_versionedobjects import fields\n'), ((2498, 2532), 'oslo_versionedobjects.fields.IntegerField', 'fields.IntegerField', ([], {'nullable': '(True)'}), '(nullable=True)\n', (2517, 2532), False, 'from oslo_versionedobjects import fields\n'), ((2565, 2586), 'oslo_versionedobjects.fields.IntegerField', 'fields.IntegerField', ([], {}), '()\n', (2584, 2586), False, 'from oslo_versionedobjects import fields\n'), ((2609, 2659), 'heat.objects.fields.ListField', 'heat_fields.ListField', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (2630, 2659), True, 'from heat.objects import fields as heat_fields\n'), ((2681, 2731), 'heat.objects.fields.ListField', 'heat_fields.ListField', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (2702, 2731), True, 'from heat.objects import fields as heat_fields\n'), ((2753, 2787), 'oslo_versionedobjects.fields.IntegerField', 'fields.IntegerField', ([], {'nullable': '(True)'}), '(nullable=True)\n', (2772, 2787), False, 'from oslo_versionedobjects import fields\n'), ((2812, 2846), 'oslo_versionedobjects.fields.IntegerField', 'fields.IntegerField', ([], {'nullable': '(True)'}), '(nullable=True)\n', (2831, 2846), False, 'from oslo_versionedobjects import fields\n'), ((2873, 2906), 'oslo_versionedobjects.fields.StringField', 'fields.StringField', ([], {'nullable': '(True)'}), '(nullable=True)\n', (2891, 2906), False, 'from oslo_versionedobjects import fields\n'), ((4040, 4098), 'heat.db.api.resource_get', 'db_api.resource_get', (['context', 'resource_id'], {'refresh': 'refresh'}), '(context, resource_id, refresh=refresh)\n', (4059, 4098), True, 'from heat.db import api as db_api\n'), ((4284, 4316), 'heat.db.api.resource_get_all', 'db_api.resource_get_all', (['context'], {}), '(context)\n', (4307, 4316), True, 'from heat.db import api as db_api\n'), ((4844, 4888), 'heat.db.api.resource_delete', 'db_api.resource_delete', (['context', 'resource_id'], {}), '(context, resource_id)\n', (4866, 4888), True, 'from heat.db import api as db_api\n'), ((4989, 5057), 'heat.db.api.resource_exchange_stacks', 'db_api.resource_exchange_stacks', (['context', 'resource_id1', 'resource_id2'], {}), '(context, resource_id1, resource_id2)\n', (5020, 5057), True, 'from heat.db import api as db_api\n'), ((5200, 5260), 'heat.db.api.resource_get_all_by_stack', 'db_api.resource_get_all_by_stack', (['context', 'stack_id', 'filters'], {}), '(context, stack_id, filters)\n', (5232, 5260), True, 'from heat.db import api as db_api\n'), ((5818, 5876), 'heat.db.api.resource_get_all_active_by_stack', 'db_api.resource_get_all_active_by_stack', (['context', 'stack_id'], {}), '(context, stack_id)\n', (5857, 5876), True, 'from heat.db import api as db_api\n'), ((6309, 6374), 'heat.db.api.resource_get_all_by_root_stack', 'db_api.resource_get_all_by_root_stack', (['context', 'stack_id', 'filters'], {}), '(context, stack_id, filters)\n', (6346, 6374), True, 'from heat.db import api as db_api\n'), ((6553, 6601), 'heat.db.api.resource_purge_deleted', 'db_api.resource_purge_deleted', (['context', 'stack_id'], {}), '(context, stack_id)\n', (6582, 6601), True, 'from heat.db import api as db_api\n'), ((6712, 6783), 'heat.db.api.resource_get_by_name_and_stack', 'db_api.resource_get_by_name_and_stack', (['context', 'resource_name', 'stack_id'], {}), '(context, resource_name, stack_id)\n', (6749, 6783), True, 'from heat.db import api as db_api\n'), ((7005, 7079), 'heat.db.api.resource_get_by_physical_resource_id', 'db_api.resource_get_by_physical_resource_id', (['context', 'physical_resource_id'], {}), '(context, physical_resource_id)\n', (7048, 7079), True, 'from heat.db import api as db_api\n'), ((7259, 7320), 'heat.db.api.resource_update_and_save', 'db_api.resource_update_and_save', (['context', 'resource_id', 'values'], {}), '(context, resource_id, values)\n', (7290, 7320), True, 'from heat.db import api as db_api\n'), ((7369, 7432), 'heat.db.api.resource_update_and_save', 'db_api.resource_update_and_save', (['self._context', 'self.id', 'values'], {}), '(self._context, self.id, values)\n', (7400, 7432), True, 'from heat.db import api as db_api\n'), ((7555, 7676), 'heat.db.api.resource_update', 'db_api.resource_update', (['self._context', 'self.id', 'values'], {'atomic_key': 'atomic_key', 'expected_engine_id': 'expected_engine_id'}), '(self._context, self.id, values, atomic_key=\n atomic_key, expected_engine_id=expected_engine_id)\n', (7577, 7676), True, 'from heat.db import api as db_api\n'), ((7794, 7851), 'heat.db.api.resource_get', 'db_api.resource_get', (['self._context', 'self.id'], {'refresh': '(True)'}), '(self._context, self.id, refresh=True)\n', (7813, 7851), True, 'from heat.db import api as db_api\n'), ((4734, 4773), 'heat.db.api.resource_create', 'db_api.resource_create', (['context', 'values'], {}), '(context, values)\n', (4756, 4773), True, 'from heat.db import api as db_api\n'), ((3639, 3667), 'heat.common.crypt.decrypt', 'crypt.decrypt', (['method', 'value'], {}), '(method, value)\n', (3652, 3667), False, 'from heat.common import crypt\n'), ((3698, 3730), 'oslo_serialization.jsonutils.loads', 'jsonutils.loads', (['decrypted_value'], {}), '(decrypted_value)\n', (3713, 3730), False, 'from oslo_serialization import jsonutils\n'), ((4516, 4543), 'six.iteritems', 'six.iteritems', (['resources_db'], {}), '(resources_db)\n', (4529, 4543), False, 'import six\n'), ((5651, 5678), 'six.iteritems', 'six.iteritems', (['resources_db'], {}), '(resources_db)\n', (5664, 5678), False, 'import six\n'), ((6135, 6162), 'six.iteritems', 'six.iteritems', (['resources_db'], {}), '(resources_db)\n', (6148, 6162), False, 'import six\n'), ((8200, 8227), 'oslo_serialization.jsonutils.dumps', 'jsonutils.dumps', (['prop_value'], {}), '(prop_value)\n', (8215, 8227), False, 'from oslo_serialization import jsonutils\n'), ((8262, 8288), 'heat.common.crypt.encrypt', 'crypt.encrypt', (['prop_string'], {}), '(prop_string)\n', (8275, 8288), False, 'from heat.common import crypt\n'), ((8748, 8794), 'heat.common.exception.ConcurrentTransaction', 'exception.ConcurrentTransaction', ([], {'action': 'action'}), '(action=action)\n', (8779, 8794), False, 'from heat.common import exception\n'), ((8676, 8713), 'heat.common.i18n._', '_', (['"""metadata setting for resource %s"""'], {}), "('metadata setting for resource %s')\n", (8677, 8713), False, 'from heat.common.i18n import _\n'), ((3216, 3251), 'heat.objects.resource_data.ResourceData', 'resource_data.ResourceData', (['context'], {}), '(context)\n', (3242, 3251), False, 'from heat.objects import resource_data\n')]
|
import struct
from six import binary_type
from capnpy import ptr
from capnpy.packing import mychr
from capnpy.printer import print_buffer
class SegmentBuilder(object):
def __init__(self, length=None):
self.buf = bytearray()
def get_length(self):
return len(self.buf)
def as_string(self):
return binary_type(self.buf)
def _print(self):
print_buffer(self.as_string())
def write_generic(self, ifmt, i, value):
struct.pack_into(mychr(ifmt), self.buf, i, value)
def write_int8(self, i, value):
struct.pack_into('b', self.buf, i, value)
def write_uint8(self, i, value):
struct.pack_into('B', self.buf, i, value)
def write_int16(self, i, value):
struct.pack_into('h', self.buf, i, value)
def write_uint16(self, i, value):
struct.pack_into('H', self.buf, i, value)
def write_int32(self, i, value):
struct.pack_into('i', self.buf, i, value)
def write_uint32(self, i, value):
struct.pack_into('I', self.buf, i, value)
def write_int64(self, i, value):
struct.pack_into('q', self.buf, i, value)
def write_uint64(self, i, value):
struct.pack_into('Q', self.buf, i, value)
def write_float32(self, i, value):
struct.pack_into('f', self.buf, i, value)
def write_float64(self, i, value):
struct.pack_into('d', self.buf, i, value)
def write_bool(self, byteoffset, bitoffset, value):
current = struct.unpack_from('B', self.buf, byteoffset)[0]
current |= (value << bitoffset)
struct.pack_into('B', self.buf, byteoffset, current)
def write_slice(self, i, src, start, n):
self.buf[i:i+n] = src.buf[start:start+n]
def allocate(self, length):
# XXX: check whether there is a better method to zero-extend the array in PyPy
result = len(self.buf)
self.buf += b'\x00'*length
return result
def alloc_struct(self, pos, data_size, ptrs_size):
"""
Allocate a new struct of the given size, and write the resulting pointer
at position i. Return the newly allocated position.
"""
length = (data_size+ptrs_size) * 8
result = self.allocate(length)
offet = result - (pos+8)
p = ptr.new_struct(offet//8, data_size, ptrs_size)
self.write_int64(pos, p)
return result
def alloc_list(self, pos, size_tag, item_count, body_length):
"""
Allocate a new list of the given size, and write the resulting pointer
at position i. Return the newly allocated position.
"""
body_length = ptr.round_up_to_word(body_length)
result = self.allocate(body_length)
offet = result - (pos+8)
p = ptr.new_list(offet//8, size_tag, item_count)
self.write_int64(pos, p)
return result
def alloc_text(self, pos, s, trailing_zero=1):
if s is None:
self.write_int64(pos, 0)
return -1
n = len(s)
nn = n + trailing_zero
result = self.alloc_list(pos, ptr.LIST_SIZE_8, nn, nn)
self.buf[result:result+n] = s
# there is no need to write the trailing 0 as the byte is already
# guaranteed to be 0
return result
def alloc_data(self, pos, s):
return self.alloc_text(pos, s, trailing_zero=0)
def copy_from_struct(self, dst_pos, structcls, value):
if value is None:
self.write_int64(dst_pos, 0)
return
if not isinstance(value, structcls):
raise TypeError("Expected %s instance, got %s" %
(structcls.__class__.__name__, value))
self.copy_from_pointer(dst_pos, value._seg, value._as_pointer(0), 0)
def copy_from_pointer(self, dst_pos, src, p, src_pos):
return copy_pointer(src, p, src_pos, self, dst_pos)
def copy_inline_struct(self, dst_pos, src, p, src_pos):
"""
Similar to copy_from_pointer but:
1. it assumes that p is a pointer to a struct
2. it does NOT allocate a new struct in dst_pos: instead, it writes
the struct directly into dst_pos
"""
return _copy_struct_inline(src, p, src_pos, self, dst_pos)
def copy_from_list(self, pos, item_type, lst):
return copy_from_list(self, pos, item_type, lst)
from capnpy.segment._copy_pointer import copy_pointer, _copy_struct_inline
from capnpy.segment._copy_list import copy_from_list
|
[
"capnpy.segment._copy_pointer.copy_pointer",
"capnpy.segment._copy_list.copy_from_list",
"capnpy.ptr.new_struct",
"capnpy.ptr.new_list",
"six.binary_type",
"capnpy.segment._copy_pointer._copy_struct_inline",
"struct.pack_into",
"capnpy.ptr.round_up_to_word",
"capnpy.packing.mychr",
"struct.unpack_from"
] |
[((336, 357), 'six.binary_type', 'binary_type', (['self.buf'], {}), '(self.buf)\n', (347, 357), False, 'from six import binary_type\n'), ((569, 610), 'struct.pack_into', 'struct.pack_into', (['"""b"""', 'self.buf', 'i', 'value'], {}), "('b', self.buf, i, value)\n", (585, 610), False, 'import struct\n'), ((657, 698), 'struct.pack_into', 'struct.pack_into', (['"""B"""', 'self.buf', 'i', 'value'], {}), "('B', self.buf, i, value)\n", (673, 698), False, 'import struct\n'), ((745, 786), 'struct.pack_into', 'struct.pack_into', (['"""h"""', 'self.buf', 'i', 'value'], {}), "('h', self.buf, i, value)\n", (761, 786), False, 'import struct\n'), ((834, 875), 'struct.pack_into', 'struct.pack_into', (['"""H"""', 'self.buf', 'i', 'value'], {}), "('H', self.buf, i, value)\n", (850, 875), False, 'import struct\n'), ((922, 963), 'struct.pack_into', 'struct.pack_into', (['"""i"""', 'self.buf', 'i', 'value'], {}), "('i', self.buf, i, value)\n", (938, 963), False, 'import struct\n'), ((1011, 1052), 'struct.pack_into', 'struct.pack_into', (['"""I"""', 'self.buf', 'i', 'value'], {}), "('I', self.buf, i, value)\n", (1027, 1052), False, 'import struct\n'), ((1099, 1140), 'struct.pack_into', 'struct.pack_into', (['"""q"""', 'self.buf', 'i', 'value'], {}), "('q', self.buf, i, value)\n", (1115, 1140), False, 'import struct\n'), ((1188, 1229), 'struct.pack_into', 'struct.pack_into', (['"""Q"""', 'self.buf', 'i', 'value'], {}), "('Q', self.buf, i, value)\n", (1204, 1229), False, 'import struct\n'), ((1278, 1319), 'struct.pack_into', 'struct.pack_into', (['"""f"""', 'self.buf', 'i', 'value'], {}), "('f', self.buf, i, value)\n", (1294, 1319), False, 'import struct\n'), ((1368, 1409), 'struct.pack_into', 'struct.pack_into', (['"""d"""', 'self.buf', 'i', 'value'], {}), "('d', self.buf, i, value)\n", (1384, 1409), False, 'import struct\n'), ((1582, 1634), 'struct.pack_into', 'struct.pack_into', (['"""B"""', 'self.buf', 'byteoffset', 'current'], {}), "('B', self.buf, byteoffset, current)\n", (1598, 1634), False, 'import struct\n'), ((2286, 2334), 'capnpy.ptr.new_struct', 'ptr.new_struct', (['(offet // 8)', 'data_size', 'ptrs_size'], {}), '(offet // 8, data_size, ptrs_size)\n', (2300, 2334), False, 'from capnpy import ptr\n'), ((2640, 2673), 'capnpy.ptr.round_up_to_word', 'ptr.round_up_to_word', (['body_length'], {}), '(body_length)\n', (2660, 2673), False, 'from capnpy import ptr\n'), ((2763, 2809), 'capnpy.ptr.new_list', 'ptr.new_list', (['(offet // 8)', 'size_tag', 'item_count'], {}), '(offet // 8, size_tag, item_count)\n', (2775, 2809), False, 'from capnpy import ptr\n'), ((3834, 3878), 'capnpy.segment._copy_pointer.copy_pointer', 'copy_pointer', (['src', 'p', 'src_pos', 'self', 'dst_pos'], {}), '(src, p, src_pos, self, dst_pos)\n', (3846, 3878), False, 'from capnpy.segment._copy_pointer import copy_pointer, _copy_struct_inline\n'), ((4202, 4253), 'capnpy.segment._copy_pointer._copy_struct_inline', '_copy_struct_inline', (['src', 'p', 'src_pos', 'self', 'dst_pos'], {}), '(src, p, src_pos, self, dst_pos)\n', (4221, 4253), False, 'from capnpy.segment._copy_pointer import copy_pointer, _copy_struct_inline\n'), ((4321, 4362), 'capnpy.segment._copy_list.copy_from_list', 'copy_from_list', (['self', 'pos', 'item_type', 'lst'], {}), '(self, pos, item_type, lst)\n', (4335, 4362), False, 'from capnpy.segment._copy_list import copy_from_list\n'), ((491, 502), 'capnpy.packing.mychr', 'mychr', (['ifmt'], {}), '(ifmt)\n', (496, 502), False, 'from capnpy.packing import mychr\n'), ((1485, 1530), 'struct.unpack_from', 'struct.unpack_from', (['"""B"""', 'self.buf', 'byteoffset'], {}), "('B', self.buf, byteoffset)\n", (1503, 1530), False, 'import struct\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-14 07:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('test_app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='data',
name='address',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='data',
name='age',
field=models.IntegerField(default=22),
),
migrations.AlterField(
model_name='data',
name='name',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='data',
name='price',
field=models.IntegerField(default=10),
),
migrations.AlterField(
model_name='data',
name='type',
field=models.IntegerField(default=0),
),
]
|
[
"django.db.models.TextField",
"django.db.models.IntegerField"
] |
[((389, 417), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (405, 417), False, 'from django.db import migrations, models\n'), ((534, 565), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(22)'}), '(default=22)\n', (553, 565), False, 'from django.db import migrations, models\n'), ((683, 711), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (699, 711), False, 'from django.db import migrations, models\n'), ((830, 861), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(10)'}), '(default=10)\n', (849, 861), False, 'from django.db import migrations, models\n'), ((979, 1009), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (998, 1009), False, 'from django.db import migrations, models\n')]
|
import unittest.mock as mock
from app import utils
@mock.patch("app.utils.get_current_timestamp")
def test_generate_filename_generates_formatted_timestamp(mock_timestamp):
mock_timestamp.return_value = 1_555_555_555.555_555
filename = utils.generate_filename()
assert mock_timestamp.called is True
assert filename == "20190417194555.json"
|
[
"app.utils.generate_filename",
"unittest.mock.patch"
] |
[((55, 100), 'unittest.mock.patch', 'mock.patch', (['"""app.utils.get_current_timestamp"""'], {}), "('app.utils.get_current_timestamp')\n", (65, 100), True, 'import unittest.mock as mock\n'), ((247, 272), 'app.utils.generate_filename', 'utils.generate_filename', ([], {}), '()\n', (270, 272), False, 'from app import utils\n')]
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WikipediaToxicitySubtypes from Jigsaw Toxic Comment Classification Challenge."""
import csv
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@inproceedings{10.1145/3038912.3052591,
author = {<NAME> and <NAME> and <NAME>},
title = {Ex Machina: Personal Attacks Seen at Scale},
year = {2017},
isbn = {9781450349130},
publisher = {International World Wide Web Conferences Steering Committee},
address = {Republic and Canton of Geneva, CHE},
url = {https://doi.org/10.1145/3038912.3052591},
doi = {10.1145/3038912.3052591},
booktitle = {Proceedings of the 26th International Conference on World Wide Web},
pages = {1391-1399},
numpages = {9},
keywords = {online discussions, wikipedia, online harassment},
location = {Perth, Australia},
series = {WWW '17}
}
"""
_DESCRIPTION = """
This version of the Wikipedia Toxicity Subtypes dataset provides access to the
primary toxicity label, as well the five toxicity subtype labels annotated by
crowd workers. The toxicity and toxicity subtype labels are binary values
(0 or 1) indicating whether the majority of annotators assigned that
attributes to the comment text.
The comments in this dataset come from an archive of Wikipedia talk pages
comments. These have been annotated by Jigsaw for toxicity, as well as a variety
of toxicity subtypes, including severe toxicity, obscenity, threatening
language, insulting language, and identity attacks. This dataset is a replica of
the data released for the Jigsaw Toxic Comment Classification Challenge on
Kaggle, with the training set unchanged, and the test dataset merged with the
test_labels released after the end of the competition. Test data not used for
scoring has been dropped. This dataset is released under CC0, as is the
underlying comment text.
See the Kaggle documentation or
https://figshare.com/articles/Wikipedia_Talk_Labels_Toxicity/4563973 for more
details.
"""
_DOWNLOAD_URL = 'https://storage.googleapis.com/jigsaw-unintended-bias-in-toxicity-classification/wikipedia_toxicity_subtypes.zip'
class WikipediaToxicitySubtypes(tfds.core.GeneratorBasedBuilder):
"""Classification of 220K Wikipedia talk page comments for types of toxicity.
This version of the Wikipedia Toxicity Subtypes dataset provides access to the
primary toxicity label, as well the five toxicity subtype labels annotated by
crowd workers. The toxicity and toxicity subtype labels are binary values
(0 or 1) indicating whether the majority of annotators assigned that
attributes to the comment text.
See the Kaggle documentation or
https://figshare.com/articles/Wikipedia_Talk_Labels_Toxicity/4563973 for more
details.
"""
VERSION = tfds.core.Version('0.2.0')
RELEASE_NOTES = {
'0.2.0': 'Updated features for consistency with CivilComments dataset.',
}
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'text': tfds.features.Text(),
'toxicity': tf.float32,
'severe_toxicity': tf.float32,
'obscene': tf.float32,
'threat': tf.float32,
'insult': tf.float32,
'identity_attack': tf.float32,
}),
supervised_keys=('text', 'toxicity'),
homepage='https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/data',
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
'filename': os.path.join(dl_path, 'wikidata_train.csv')
},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={'filename': os.path.join(dl_path, 'wikidata_test.csv')},
),
]
def _generate_examples(self, filename):
"""Yields examples.
Each example contains a text input and then six annotation labels.
Args:
filename: the path of the file to be read for this split.
Yields:
A dictionary of features, all floating point except the input text.
"""
with tf.io.gfile.GFile(filename) as f:
reader = csv.DictReader(f)
for row in reader:
example = {}
example['text'] = row['comment_text']
example['toxicity'] = float(row['toxic'])
example['severe_toxicity'] = float(row['severe_toxic'])
example['identity_attack'] = float(row['identity_hate'])
for label in ['obscene', 'threat', 'insult']:
example[label] = float(row[label])
yield row['id'], example
|
[
"csv.DictReader",
"tensorflow_datasets.public_api.features.Text",
"os.path.join",
"tensorflow_datasets.public_api.core.Version",
"tensorflow.compat.v2.io.gfile.GFile"
] |
[((3341, 3367), 'tensorflow_datasets.public_api.core.Version', 'tfds.core.Version', (['"""0.2.0"""'], {}), "('0.2.0')\n", (3358, 3367), True, 'import tensorflow_datasets.public_api as tfds\n'), ((4911, 4938), 'tensorflow.compat.v2.io.gfile.GFile', 'tf.io.gfile.GFile', (['filename'], {}), '(filename)\n', (4928, 4938), True, 'import tensorflow.compat.v2 as tf\n'), ((4960, 4977), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (4974, 4977), False, 'import csv\n'), ((3647, 3667), 'tensorflow_datasets.public_api.features.Text', 'tfds.features.Text', ([], {}), '()\n', (3665, 3667), True, 'import tensorflow_datasets.public_api as tfds\n'), ((4357, 4400), 'os.path.join', 'os.path.join', (['dl_path', '"""wikidata_train.csv"""'], {}), "(dl_path, 'wikidata_train.csv')\n", (4369, 4400), False, 'import os\n'), ((4531, 4573), 'os.path.join', 'os.path.join', (['dl_path', '"""wikidata_test.csv"""'], {}), "(dl_path, 'wikidata_test.csv')\n", (4543, 4573), False, 'import os\n')]
|
import os
from functools import partial
from django.conf.urls.i18n import i18n_patterns
from django.urls import include, path, re_path
from django.utils.translation import gettext_lazy as _
from django.views import defaults, i18n, static
from . import views
base_dir = os.path.dirname(os.path.abspath(__file__))
media_dir = os.path.join(base_dir, "media")
locale_dir = os.path.join(base_dir, "locale")
urlpatterns = [
path("", views.index_page),
# Default views
path("nonexistent_url/", partial(defaults.page_not_found, exception=None)),
path("server_error/", defaults.server_error),
# a view that raises an exception for the debug view
path("raises/", views.raises),
path("raises400/", views.raises400),
path("raises400_bad_request/", views.raises400_bad_request),
path("raises403/", views.raises403),
path("raises404/", views.raises404),
path("raises500/", views.raises500),
path("custom_reporter_class_view/", views.custom_reporter_class_view),
path("technical404/", views.technical404, name="my404"),
path("classbased404/", views.Http404View.as_view()),
# i18n views
path("i18n/", include("django.conf.urls.i18n")),
path("jsi18n/", i18n.JavaScriptCatalog.as_view(packages=["view_tests"])),
path("jsi18n/app1/", i18n.JavaScriptCatalog.as_view(packages=["view_tests.app1"])),
path("jsi18n/app2/", i18n.JavaScriptCatalog.as_view(packages=["view_tests.app2"])),
path("jsi18n/app5/", i18n.JavaScriptCatalog.as_view(packages=["view_tests.app5"])),
path(
"jsi18n_english_translation/",
i18n.JavaScriptCatalog.as_view(packages=["view_tests.app0"]),
),
path(
"jsi18n_multi_packages1/",
i18n.JavaScriptCatalog.as_view(packages=["view_tests.app1", "view_tests.app2"]),
),
path(
"jsi18n_multi_packages2/",
i18n.JavaScriptCatalog.as_view(packages=["view_tests.app3", "view_tests.app4"]),
),
path(
"jsi18n_admin/",
i18n.JavaScriptCatalog.as_view(packages=["django.contrib.admin", "view_tests"]),
),
path("jsi18n_template/", views.jsi18n),
path("jsi18n_multi_catalogs/", views.jsi18n_multi_catalogs),
path("jsoni18n/", i18n.JSONCatalog.as_view(packages=["view_tests"])),
# Static views
re_path(
r"^site_media/(?P<path>.*)$",
static.serve,
{"document_root": media_dir, "show_indexes": True},
),
]
urlpatterns += i18n_patterns(
re_path(_(r"^translated/$"), views.index_page, name="i18n_prefixed"),
)
urlpatterns += [
path("template_exception/", views.template_exception, name="template_exception"),
path(
"raises_template_does_not_exist/<path:path>",
views.raises_template_does_not_exist,
name="raises_template_does_not_exist",
),
path("render_no_template/", views.render_no_template, name="render_no_template"),
re_path(
r"^test-setlang/(?P<parameter>[^/]+)/$",
views.with_parameter,
name="with_parameter",
),
# Patterns to test the technical 404.
re_path(r"^regex-post/(?P<pk>[0-9]+)/$", views.index_page, name="regex-post"),
path("path-post/<int:pk>/", views.index_page, name="path-post"),
]
|
[
"django.urls.include",
"django.utils.translation.gettext_lazy",
"os.path.join",
"django.views.i18n.JSONCatalog.as_view",
"django.views.i18n.JavaScriptCatalog.as_view",
"functools.partial",
"os.path.abspath",
"django.urls.re_path",
"django.urls.path"
] |
[((327, 358), 'os.path.join', 'os.path.join', (['base_dir', '"""media"""'], {}), "(base_dir, 'media')\n", (339, 358), False, 'import os\n'), ((372, 404), 'os.path.join', 'os.path.join', (['base_dir', '"""locale"""'], {}), "(base_dir, 'locale')\n", (384, 404), False, 'import os\n'), ((288, 313), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (303, 313), False, 'import os\n'), ((426, 452), 'django.urls.path', 'path', (['""""""', 'views.index_page'], {}), "('', views.index_page)\n", (430, 452), False, 'from django.urls import include, path, re_path\n'), ((558, 602), 'django.urls.path', 'path', (['"""server_error/"""', 'defaults.server_error'], {}), "('server_error/', defaults.server_error)\n", (562, 602), False, 'from django.urls import include, path, re_path\n'), ((665, 694), 'django.urls.path', 'path', (['"""raises/"""', 'views.raises'], {}), "('raises/', views.raises)\n", (669, 694), False, 'from django.urls import include, path, re_path\n'), ((700, 735), 'django.urls.path', 'path', (['"""raises400/"""', 'views.raises400'], {}), "('raises400/', views.raises400)\n", (704, 735), False, 'from django.urls import include, path, re_path\n'), ((741, 800), 'django.urls.path', 'path', (['"""raises400_bad_request/"""', 'views.raises400_bad_request'], {}), "('raises400_bad_request/', views.raises400_bad_request)\n", (745, 800), False, 'from django.urls import include, path, re_path\n'), ((806, 841), 'django.urls.path', 'path', (['"""raises403/"""', 'views.raises403'], {}), "('raises403/', views.raises403)\n", (810, 841), False, 'from django.urls import include, path, re_path\n'), ((847, 882), 'django.urls.path', 'path', (['"""raises404/"""', 'views.raises404'], {}), "('raises404/', views.raises404)\n", (851, 882), False, 'from django.urls import include, path, re_path\n'), ((888, 923), 'django.urls.path', 'path', (['"""raises500/"""', 'views.raises500'], {}), "('raises500/', views.raises500)\n", (892, 923), False, 'from django.urls import include, path, re_path\n'), ((929, 998), 'django.urls.path', 'path', (['"""custom_reporter_class_view/"""', 'views.custom_reporter_class_view'], {}), "('custom_reporter_class_view/', views.custom_reporter_class_view)\n", (933, 998), False, 'from django.urls import include, path, re_path\n'), ((1004, 1059), 'django.urls.path', 'path', (['"""technical404/"""', 'views.technical404'], {'name': '"""my404"""'}), "('technical404/', views.technical404, name='my404')\n", (1008, 1059), False, 'from django.urls import include, path, re_path\n'), ((2073, 2111), 'django.urls.path', 'path', (['"""jsi18n_template/"""', 'views.jsi18n'], {}), "('jsi18n_template/', views.jsi18n)\n", (2077, 2111), False, 'from django.urls import include, path, re_path\n'), ((2117, 2176), 'django.urls.path', 'path', (['"""jsi18n_multi_catalogs/"""', 'views.jsi18n_multi_catalogs'], {}), "('jsi18n_multi_catalogs/', views.jsi18n_multi_catalogs)\n", (2121, 2176), False, 'from django.urls import include, path, re_path\n'), ((2275, 2381), 'django.urls.re_path', 're_path', (['"""^site_media/(?P<path>.*)$"""', 'static.serve', "{'document_root': media_dir, 'show_indexes': True}"], {}), "('^site_media/(?P<path>.*)$', static.serve, {'document_root':\n media_dir, 'show_indexes': True})\n", (2282, 2381), False, 'from django.urls import include, path, re_path\n'), ((2542, 2627), 'django.urls.path', 'path', (['"""template_exception/"""', 'views.template_exception'], {'name': '"""template_exception"""'}), "('template_exception/', views.template_exception, name='template_exception'\n )\n", (2546, 2627), False, 'from django.urls import include, path, re_path\n'), ((2628, 2760), 'django.urls.path', 'path', (['"""raises_template_does_not_exist/<path:path>"""', 'views.raises_template_does_not_exist'], {'name': '"""raises_template_does_not_exist"""'}), "('raises_template_does_not_exist/<path:path>', views.\n raises_template_does_not_exist, name='raises_template_does_not_exist')\n", (2632, 2760), False, 'from django.urls import include, path, re_path\n'), ((2792, 2877), 'django.urls.path', 'path', (['"""render_no_template/"""', 'views.render_no_template'], {'name': '"""render_no_template"""'}), "('render_no_template/', views.render_no_template, name='render_no_template'\n )\n", (2796, 2877), False, 'from django.urls import include, path, re_path\n'), ((2878, 2975), 'django.urls.re_path', 're_path', (['"""^test-setlang/(?P<parameter>[^/]+)/$"""', 'views.with_parameter'], {'name': '"""with_parameter"""'}), "('^test-setlang/(?P<parameter>[^/]+)/$', views.with_parameter, name=\n 'with_parameter')\n", (2885, 2975), False, 'from django.urls import include, path, re_path\n'), ((3050, 3126), 'django.urls.re_path', 're_path', (['"""^regex-post/(?P<pk>[0-9]+)/$"""', 'views.index_page'], {'name': '"""regex-post"""'}), "('^regex-post/(?P<pk>[0-9]+)/$', views.index_page, name='regex-post')\n", (3057, 3126), False, 'from django.urls import include, path, re_path\n'), ((3133, 3196), 'django.urls.path', 'path', (['"""path-post/<int:pk>/"""', 'views.index_page'], {'name': '"""path-post"""'}), "('path-post/<int:pk>/', views.index_page, name='path-post')\n", (3137, 3196), False, 'from django.urls import include, path, re_path\n'), ((503, 551), 'functools.partial', 'partial', (['defaults.page_not_found'], {'exception': 'None'}), '(defaults.page_not_found, exception=None)\n', (510, 551), False, 'from functools import partial\n'), ((1153, 1185), 'django.urls.include', 'include', (['"""django.conf.urls.i18n"""'], {}), "('django.conf.urls.i18n')\n", (1160, 1185), False, 'from django.urls import include, path, re_path\n'), ((1208, 1263), 'django.views.i18n.JavaScriptCatalog.as_view', 'i18n.JavaScriptCatalog.as_view', ([], {'packages': "['view_tests']"}), "(packages=['view_tests'])\n", (1238, 1263), False, 'from django.views import defaults, i18n, static\n'), ((1291, 1351), 'django.views.i18n.JavaScriptCatalog.as_view', 'i18n.JavaScriptCatalog.as_view', ([], {'packages': "['view_tests.app1']"}), "(packages=['view_tests.app1'])\n", (1321, 1351), False, 'from django.views import defaults, i18n, static\n'), ((1379, 1439), 'django.views.i18n.JavaScriptCatalog.as_view', 'i18n.JavaScriptCatalog.as_view', ([], {'packages': "['view_tests.app2']"}), "(packages=['view_tests.app2'])\n", (1409, 1439), False, 'from django.views import defaults, i18n, static\n'), ((1467, 1527), 'django.views.i18n.JavaScriptCatalog.as_view', 'i18n.JavaScriptCatalog.as_view', ([], {'packages': "['view_tests.app5']"}), "(packages=['view_tests.app5'])\n", (1497, 1527), False, 'from django.views import defaults, i18n, static\n'), ((1587, 1647), 'django.views.i18n.JavaScriptCatalog.as_view', 'i18n.JavaScriptCatalog.as_view', ([], {'packages': "['view_tests.app0']"}), "(packages=['view_tests.app0'])\n", (1617, 1647), False, 'from django.views import defaults, i18n, static\n'), ((1709, 1788), 'django.views.i18n.JavaScriptCatalog.as_view', 'i18n.JavaScriptCatalog.as_view', ([], {'packages': "['view_tests.app1', 'view_tests.app2']"}), "(packages=['view_tests.app1', 'view_tests.app2'])\n", (1739, 1788), False, 'from django.views import defaults, i18n, static\n'), ((1850, 1929), 'django.views.i18n.JavaScriptCatalog.as_view', 'i18n.JavaScriptCatalog.as_view', ([], {'packages': "['view_tests.app3', 'view_tests.app4']"}), "(packages=['view_tests.app3', 'view_tests.app4'])\n", (1880, 1929), False, 'from django.views import defaults, i18n, static\n'), ((1981, 2060), 'django.views.i18n.JavaScriptCatalog.as_view', 'i18n.JavaScriptCatalog.as_view', ([], {'packages': "['django.contrib.admin', 'view_tests']"}), "(packages=['django.contrib.admin', 'view_tests'])\n", (2011, 2060), False, 'from django.views import defaults, i18n, static\n'), ((2200, 2249), 'django.views.i18n.JSONCatalog.as_view', 'i18n.JSONCatalog.as_view', ([], {'packages': "['view_tests']"}), "(packages=['view_tests'])\n", (2224, 2249), False, 'from django.views import defaults, i18n, static\n'), ((2456, 2474), 'django.utils.translation.gettext_lazy', '_', (['"""^translated/$"""'], {}), "('^translated/$')\n", (2457, 2474), True, 'from django.utils.translation import gettext_lazy as _\n')]
|
import sys
import os
import tempfile
from pathlib import Path
import pytest
sys.path.insert(1, os.path.join(sys.path[0], "../../"))
import rips
import dataroot
@pytest.mark.skipif(
sys.platform.startswith("linux"),
reason="Brugge is currently exceptionally slow on Linux",
)
def test_create_and_export_surface(rips_instance, initialize_test):
case_path = dataroot.PATH + "/Case_with_10_timesteps/Real0/BRUGGE_0000.EGRID"
case = rips_instance.project.load_case(path=case_path)
assert len(case.grids()) == 1
surface_collection = rips_instance.project.descendants(rips.SurfaceCollection)[0]
surface = surface_collection.new_surface(case, 5)
assert surface
with tempfile.TemporaryDirectory(prefix="rips") as tmpdirname:
path = Path(tmpdirname, "mysurface.ts")
print("Temporary folder: ", path.as_posix())
fname = surface.export_to_file(path.as_posix())
assert len(fname.values) == 1
assert path.exists()
|
[
"tempfile.TemporaryDirectory",
"os.path.join",
"sys.platform.startswith",
"pathlib.Path"
] |
[((96, 131), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""../../"""'], {}), "(sys.path[0], '../../')\n", (108, 131), False, 'import os\n'), ((189, 221), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (212, 221), False, 'import sys\n'), ((701, 743), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'prefix': '"""rips"""'}), "(prefix='rips')\n", (728, 743), False, 'import tempfile\n'), ((774, 806), 'pathlib.Path', 'Path', (['tmpdirname', '"""mysurface.ts"""'], {}), "(tmpdirname, 'mysurface.ts')\n", (778, 806), False, 'from pathlib import Path\n')]
|
"""
Entry point for the CLI
"""
import logging
import click
from samcli import __version__
from .options import debug_option
from .context import Context
from .command import BaseCommand
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
pass_context = click.make_pass_decorator(Context)
def common_options(f):
"""
Common CLI options used by all commands. Ex: --debug
:param f: Callback function passed by Click
:return: Callback function
"""
f = debug_option(f)
return f
@click.command(cls=BaseCommand)
@common_options
@click.version_option(version=__version__, prog_name="SAM CLI")
@pass_context
def cli(ctx):
"""
AWS Serverless Application Model (SAM) CLI
The AWS Serverless Application Model extends AWS CloudFormation to provide a simplified way of defining the
Amazon API Gateway APIs, AWS Lambda functions, and Amazon DynamoDB tables needed by your serverless application.
You can find more in-depth guide about the SAM specification here:
https://github.com/awslabs/serverless-application-model.
"""
pass
|
[
"logging.getLogger",
"logging.basicConfig",
"click.make_pass_decorator",
"click.version_option",
"click.command"
] |
[((199, 226), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (216, 226), False, 'import logging\n'), ((227, 333), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(level=logging.INFO, format='%(asctime)s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n", (246, 333), False, 'import logging\n'), ((347, 381), 'click.make_pass_decorator', 'click.make_pass_decorator', (['Context'], {}), '(Context)\n', (372, 381), False, 'import click\n'), ((599, 629), 'click.command', 'click.command', ([], {'cls': 'BaseCommand'}), '(cls=BaseCommand)\n', (612, 629), False, 'import click\n'), ((647, 709), 'click.version_option', 'click.version_option', ([], {'version': '__version__', 'prog_name': '"""SAM CLI"""'}), "(version=__version__, prog_name='SAM CLI')\n", (667, 709), False, 'import click\n')]
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/4 0004 2:09
# @Author : Gpp
# @File : obtain_url.py
from app.web import api
from flask_restful import Resource
from flask import make_response, send_from_directory, jsonify
from app.helper.encrypt import two_encrypting
from app.crud.proxy_crud import ProtocolCrud
from app.helper.get_one_encrypt import get_one_encrypt_data
from app.helper.update_subscribe import add_proxy
# @api.resource('/generate')
# class Generate(Resource):
# def get(self):
# proxies = ProtocolCrud.get_all_share()
# one_encrypt = get_one_encrypt_data(proxies)
# result = add_proxy(two_encrypting(''.join(one_encrypt)))
# return jsonify(result)
@api.resource('/generate/<proxy_information>')
class GetUrl(Resource):
def get(self, proxy_information):
# 获取代理元数据
proxies = ProtocolCrud.get_all_share()
one_encrypt = get_one_encrypt_data(proxies)
add_proxy(two_encrypting(''.join(one_encrypt)))
# 获取代理实时信息
# 获取prometheus数据存入别名
# 生成订阅链接
response = make_response(send_from_directory('url_file', f'{proxy_information}.txt', as_attachment=True))
response.headers["Content-Disposition"] = f"attachment; filename={proxy_information}.txt"
return response
|
[
"flask.send_from_directory",
"app.crud.proxy_crud.ProtocolCrud.get_all_share",
"app.web.api.resource",
"app.helper.get_one_encrypt.get_one_encrypt_data"
] |
[((735, 780), 'app.web.api.resource', 'api.resource', (['"""/generate/<proxy_information>"""'], {}), "('/generate/<proxy_information>')\n", (747, 780), False, 'from app.web import api\n'), ((879, 907), 'app.crud.proxy_crud.ProtocolCrud.get_all_share', 'ProtocolCrud.get_all_share', ([], {}), '()\n', (905, 907), False, 'from app.crud.proxy_crud import ProtocolCrud\n'), ((930, 959), 'app.helper.get_one_encrypt.get_one_encrypt_data', 'get_one_encrypt_data', (['proxies'], {}), '(proxies)\n', (950, 959), False, 'from app.helper.get_one_encrypt import get_one_encrypt_data\n'), ((1115, 1194), 'flask.send_from_directory', 'send_from_directory', (['"""url_file"""', 'f"""{proxy_information}.txt"""'], {'as_attachment': '(True)'}), "('url_file', f'{proxy_information}.txt', as_attachment=True)\n", (1134, 1194), False, 'from flask import make_response, send_from_directory, jsonify\n')]
|
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.contrib.auth import login
from django.shortcuts import redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from Aluno.views.utils import aluno_exist
from annoying.decorators import render_to
from django.contrib.auth.models import User
from Avaliacao.models import *
from Aluno.models import *
@render_to('avaliacao/exibir.html')
@aluno_exist
def exibir(request,template_id):
aluno = request.user.aluno_set.get()
avaliacao=Avaliacao.objects.get(pk=template_id)
questoes=avaliacao.questoes.all()
return locals()
|
[
"annoying.decorators.render_to"
] |
[((405, 439), 'annoying.decorators.render_to', 'render_to', (['"""avaliacao/exibir.html"""'], {}), "('avaliacao/exibir.html')\n", (414, 439), False, 'from annoying.decorators import render_to\n')]
|
from __future__ import division
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import gsd
import gsd.fl
import numpy as np
import os
import sys
import datetime
import time
import pickle
from shutil import copyfile
import inspect
import md_tools27 as md_tools
from multiprocessing import Pool
"""
This script plots diffusion vs Gamma in log(D)-log(Gamma) or log(D)-gamma format. The data from a .dat file is used, must be precalculated by plotDiff_pG_parallel.py.
Arguments: --cmfree, --cmfixed for the free-moving center of mass regime, and v_cm subtracted respectively.
--sf <fubfolder>: subfolder to process (e.g. p32)
--NP <number>: number of subprocesses to use for parallelization. Very efficient acceleration by a factor of <number>.
"""
#Use LaTex for text
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Computer Modern Roman']})
rc('text', usetex=True)
def read_log(path):
coulomb_status = ''
with open(path + '/log.txt', 'r') as f:
for i, line in enumerate(f.readlines()):
if i == 0:
timestamp = line.rstrip()
if line[:10] == '# Periodic':
words = line.split(' ')
p = int(words[9])
A = float(words[6])
if line[:4] == '# a ':
words = line.split(' ')
repeat_x = int(words[6])
repeat_y = int(words[9])
Np = 2*repeat_x*repeat_y
if line[:7] == '# Gamma':
words = line.split(' ')
dt = float(words[9])
if line[:9] == '# Coulomb':
words = line.split(' ')
coulomb_status = words[-1]
if line[:9] == '# N_therm':
words = line.split(' ')
snap_period = int(float(words[5]))
# T_gamma = 31.8265130646
if line[:9] == '# T_gamma':
words = line.split(' ')
T_gamma = float(words[3])
return {'timestamp': timestamp,'A':A, 'p':p, 'Np': Np, 'coulomb_status':coulomb_status, 'snap_period':snap_period,\
'dt':dt, 'T_gamma':T_gamma}
def OLS(x, y):
'''OLS: x must be a vertical two-dimensional array'''
X = np.hstack((np.reshape(np.ones(x.shape[0]), (-1,1)), x))#.transpose()
Xpr = X.transpose()
beta = np.dot(np.dot(np.linalg.inv(np.dot(Xpr, X)), Xpr), y)
#Estimate errors
sigma_sq = np.dot(y - np.dot(X, beta), y - np.dot(X, beta))/(len(y) - 1.)
sigma_beta_sq = sigma_sq*np.linalg.inv(np.dot(Xpr, X))
return beta, sigma_beta_sq # = [f_0, df/d(A^2)]
def diffusion_from_transport_gsd(folder_path, f_name, center_fixed = True, useframes = -1):
"""
Diffusion constant D is calculated from 4Dt = <(r(t) - r(0))^2>, or 2D_x*t = <(x(t) - x(0))^2>.
The average is calculated over all particles and over different time origins.
Time origins go from 0 to n_frames/2, and t goes from 0 to n_frames/2. This way,
the data are always within the trajectory.
center_fixed = True: eliminate oveall motion of center of mass
return D_x, D_y
D_x, D_y diffusion for x- and y-coordinates;
"""
params = read_log(folder_path)
if folder_path[-1] != '/':
folder_path = folder_path + '/'
with gsd.fl.GSDFile(folder_path + f_name, 'rb') as f:
n_frames = f.nframes
box = f.read_chunk(frame=0, name='configuration/box')
half_frames = int(n_frames/2) - 1 #sligtly less than half to avoid out of bound i
if useframes < 1 or useframes > half_frames:
useframes = half_frames
t_step = f.read_chunk(frame=0, name='configuration/step')
n_p = f.read_chunk(frame=0, name='particles/N')
x_sq_av = np.zeros(useframes)
y_sq_av = np.zeros(useframes)
for t_origin in range(n_frames - useframes - 1):
pos_0 = f.read_chunk(frame=t_origin, name='particles/position')
mean_pos_0 = np.mean(pos_0, axis = 0)
pos = pos_0
pos_raw = pos_0
for j_frame in range(useframes):
pos_m1 = pos
pos_m1_raw = pos_raw
pos_raw = f.read_chunk(frame=j_frame + t_origin, name='particles/position') - pos_0
pos = md_tools.correct_jumps(pos_raw, pos_m1, pos_m1_raw, box[0], box[1])
if center_fixed:
pos -= np.mean(pos, axis = 0) - mean_pos_0 #correct for center of mass movement
x_sq_av[j_frame] += np.mean(pos[:,0]**2)
y_sq_av[j_frame] += np.mean(pos[:,1]**2)
x_sq_av /= (n_frames - useframes - 1)
y_sq_av /= (n_frames - useframes - 1)
# OLS estimate for beta_x[0] + beta_x[1]*t = <|x_i(t) - x_i(0)|^2>
a = np.ones((useframes, 2)) # matrix a = ones(half_frames) | (0; dt; 2dt; 3dt; ...)
a[:,1] = params['snap_period']*params['dt']*np.cumsum(np.ones(useframes), axis = 0) - params['dt']
b_cutoff = int(useframes/10) #cutoff to get only linear part of x_sq_av, makes results a bit more clean
beta_x = np.linalg.lstsq(a[b_cutoff:, :], x_sq_av[b_cutoff:], rcond=-1)
beta_y = np.linalg.lstsq(a[b_cutoff:, :], y_sq_av[b_cutoff:], rcond=-1)
fig, ax = plt.subplots(1,1, figsize=(7,5))
ax.scatter(a[:,1], x_sq_av, label='$\\langle x^2\\rangle$')
ax.scatter(a[:,1], y_sq_av, label='$\\langle y^2\\rangle$')
ax.legend(loc=7)
ax.set_xlabel('$t$')
ax.set_ylabel('$\\langle r_i^2 \\rangle$')
if center_fixed:
center_fixed_str = 'cm_fixed'
else:
center_fixed_str = 'cm_free'
fig.savefig(folder_path + 'r2_diff_' + f_name +'_' + center_fixed_str + '.png')
plt.close('all')
D_x = beta_x[0][1]/2
D_y = beta_y[0][1]/2
print('D_x = {}'.format(D_x))
print('D_y = {}'.format(D_y))
return (D_x, D_y)
def diffusion_helper(arg_dict):
return diffusion_from_transport_gsd(arg_dict['sf'], arg_dict['fname'], center_fixed=arg_dict['center_fixed'], useframes = arg_dict['useframes'])
def Teff_from_gsd(args):
fpath = args['sf'] + '/' + args['fname']
with gsd.fl.GSDFile(fpath, 'rb') as f:
n_frames = f.nframes
N = f.read_chunk(frame=0, name='particles/N')
v = np.zeros((n_frames, int(N), 2))
for t in range(n_frames):
v_t = f.read_chunk(frame=t, name='particles/velocity')
v[t, :, 0] = v_t[:,0]
v[t, :, 1] = v_t[:,1]
#v_cm = np.mean(v, axis=1)
#mean_v_cmx = np.mean(v_cm[:,0])
#print("mean v_cm = {}".format(mean_v_cmx))
#sigma_v_cmx = np.sqrt(np.mean((v_cm[:,0] - mean_v_cmx)**2))/np.sqrt(n_frames)
#print("error = {}".format(sigma_v_cmx))
#mean_v_cmy = np.mean(v_cm[:,1])
#print("mean v_cm_y = {}".format(mean_v_cmy))
#sigma_v_cmy = np.sqrt(np.mean((v_cm[:,1] - mean_v_cmy)**2))/np.sqrt(n_frames)
#print("error_y = {}".format(sigma_v_cmy))
#v_rel = np.swapaxes(v, 0,1) - v_cm
v_swap = np.swapaxes(v, 0,1)
#T_eff = 0.5*np.mean(v_rel[:,:,0]**2 + v_rel[:,:,1]**2, axis = 0)
T_eff = 0.5*np.mean(v_swap[:,:,0]**2 + v_swap[:,:,1]**2, axis = 0)
print('T_eff = {}'.format(np.mean(T_eff)))
return np.mean(T_eff)
def print_help():
print('This script plots diffusion vs Gamma for data taken in diffusion measurements.')
print('===========================================================')
print('Usage: python plotDiff_pG.py diffusion_data/a32x32_* [--options]')
print('This will process all folders that match mobility_data/a32x32_*')
print('===========================================================')
print('Options:')
print('\t--cmfixed will subtract the displacement of the center of mass in diffusion calculation (default behavior)')
print('\t--cmfree will NOT subtract the displacement of the center of mass in diffusion calculation (default behavior)')
print('\t--showtext will print text info on the plots')
print('\t--NP N - will use N parallel processes in the calculations')
print('\t--sf [subfolder] - will only process the specified subfolder in all folders')
print('\t--help or -h will print this help')
## =======================================================================
# Units
unit_M = 9.10938356e-31 # kg, electron mass
unit_D = 1e-6 # m, micron
unit_E = 1.38064852e-23 # m^2*kg/s^2
unit_t = np.sqrt(unit_M*unit_D**2/unit_E) # = 2.568638150515e-10 s
epsilon_0 = 8.854187817e-12 # F/m = C^2/(J*m), vacuum permittivity
hbar = 1.0545726e-27/(unit_E*1e7)/unit_t
m_e = 9.10938356e-31/unit_M
unit_Q = np.sqrt(unit_E*1e7*unit_D*1e2) # Coulombs
unit_Qe = unit_Q/4.8032068e-10 # e, unit charge in units of elementary charge e
e_charge = 1/unit_Qe # electron charge in units of unit_Q
curr_fname = inspect.getfile(inspect.currentframe())
curr_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
##=======================================================================
# Make a list of folders we want to process
cm_fixed = True #default that can be changed by --cmfree
cm_fixed_str = 'cm_fixed'
show_text = False
Nproc = 1
selected_subfolders = []
folder_list = []
for i in range(len(sys.argv)):
if os.path.isdir(sys.argv[i]):
folder_list.append(sys.argv[i])
elif sys.argv[i] == '--sf':
try:
selected_subfolders.append(sys.argv[i+1])
except:
raise RuntimeError('Could not recognize the value of --sf. argv={}'.format(argv))
elif sys.argv[i] == '--showtext':
show_text = True
elif sys.argv[i] == '--GC':
gamma_c = float(sys.argv[i+1])
elif sys.argv[i] == '--help' or sys.argv[i] == '-h':
print_help()
exit()
try:
print('Gamma_c = {}'.format(gamma_c))
except:
raise RuntimeError('Gamma_c not specified. Use --GC argument.')
print('Selected subfolders: {}'.format(selected_subfolders))
# Make a list of subfolders p### in each folders
subfolder_lists = []
for folder in folder_list:
sf_list = []
for item in os.walk(folder):
# subfolder name and contained files
sf_list.append((item[0], item[2]))
sf_list = sf_list[1:]
subfolder_lists.append(sf_list)
##=======================================================================
for ifold, folder in enumerate(folder_list):
print('==========================================================')
print(folder)
print('==========================================================')
# Keep only selected subfolders in the list is there is selection
if len(selected_subfolders) > 0:
sf_lists_to_go = []
for isf, sf in enumerate(subfolder_lists[ifold]):
sf_words = sf[0].split('/')
if sf_words[-1] in selected_subfolders:
sf_lists_to_go.append(sf)
else:
sf_lists_to_go = subfolder_lists[ifold]
for isf, sf in enumerate(sf_lists_to_go):
sf_words = sf[0].split('/')
print(sf_words[-1])
if sf_words[-1][0] != 'p':
raise ValueError("Expected subfolder name to start with `p`, in {}".format(fname))
log_data = read_log(sf[0])
folder_name = folder.split('/')[-1]
if sf[0][-1] == '/':
sf[0] = sf[0][:-1]
sf_name = sf[0].split('/')[-1]
#Read Dx Dy vs Gamma from the .dat file
#DxDy_data = {'Dx_arr':Dx_arr, 'Dy_arr':Dy_arr, 'Dx_arr_gauss': Dx_arr*cm2s_convert, 'Dy_arr_gauss':Dy_arr*cm2s_convert, \
# 'gamma_arr':gamma_arr, 'gamma_eff_arr':gamma_eff_arr}
cm_fixed_str = 'cm_fixed'
with open(sf[0] + '/DxDy_data_' + cm_fixed_str + '_' + sf_name + '_' + folder_name + '.dat', 'r') as ff:
DxDy_data = pickle.load(ff)
Dx_arr = DxDy_data['Dx_arr']
Dy_arr = DxDy_data['Dy_arr']
gamma_eff_arr = DxDy_data['gamma_eff_arr']
# Remove points where gamma > gamma_c
clip_ind = np.where(gamma_eff_arr < gamma_c)[0]
Dx_arr_clip = Dx_arr[clip_ind]
Dy_arr_clip = Dy_arr[clip_ind]
gamma_arr_clip = gamma_eff_arr[clip_ind]
print('Dx_arr = {}'.format(Dx_arr_clip))
print('Dy_arr = {}'.format(Dy_arr_clip))
## ======================================================================
## Plot Dx,Dy vs effective G (calculated from data rather then read from the log)
# in Gaussian units
labelfont = 28
tickfont = labelfont - 4
legendfont = labelfont - 4
cm2s_convert = unit_D**2/unit_t*1e4
fig, ax1 = plt.subplots(1,1, figsize=(7,6))
scatter1 = ax1.scatter(gamma_arr_clip, np.log(Dx_arr_clip*cm2s_convert), label='$D_\\perp$', color = 'green', marker='o')
ax1.set_xlabel('$\\Gamma$', fontsize=labelfont)
ax1.set_ylabel('$\\log(D/D_0)$', fontsize=labelfont)
scatter2 = ax1.scatter(gamma_arr_clip, np.log(Dy_arr_clip*cm2s_convert), label='$D_\\parallel$', color = 'red', marker='s')
#ax1.set_xlim([np.min(gamma_eff_arr) - 2, np.max(gamma_eff_arr) + 2])
ax1.legend(loc=1, fontsize=legendfont)
ax1.tick_params(labelsize= tickfont)
ax1.locator_params(nbins=6, axis='y')
formatter = mticker.ScalarFormatter(useMathText=True)
formatter.set_powerlimits((-3,2))
ax1.yaxis.set_major_formatter(formatter)
#Place text
if show_text:
text_list = ['$\\Gamma_c = {:.1f}$'.format(gamma_c)]
y_lim = ax1.get_ylim()
x_lim = ax1.get_xlim()
h = y_lim[1] - y_lim[0]
w = x_lim[1] - x_lim[0]
text_x = x_lim[0] + 0.5*w
text_y = y_lim[1] - 0.05*h
if type(text_list) == list:
n_str = len(text_list)
for i_fig in range(n_str):
ax1.text(text_x, text_y - 0.05*h*i_fig, text_list[i_fig])
elif type(text_list) == str:
ax1.text(text_x, text_y, text_list)
else:
raise TypeError('text_list must be a list of strings or a string')
#fig.patch.set_alpha(alpha=1)
plt.tight_layout()
fig.savefig(folder + '/' + 'DxDy_G_log_' + sf_name + '_' + folder_name + '_{:.2f}'.format(gamma_c) + '.pdf')
#fig.savefig(sf[0] + '/' + 'DxDy_Geff_' + cm_fixed_str + '_' + sf_name + '_' + folder_name + '.png')
#fig.savefig(sf[0] + '/' + 'DxDy_Geff_' + cm_fixed_str + '_' + sf_name + '_' + folder_name + '.eps')
#fig.savefig(sf[0] + '/' + 'DxDy_Geff_' + cm_fixed_str + '_' + sf_name + '_' + folder_name + '.pdf')
plt.close('all')
|
[
"numpy.sqrt",
"numpy.log",
"matplotlib.ticker.ScalarFormatter",
"matplotlib.rc",
"os.walk",
"numpy.mean",
"md_tools27.correct_jumps",
"numpy.where",
"matplotlib.pyplot.close",
"numpy.dot",
"os.path.isdir",
"numpy.linalg.lstsq",
"numpy.ones",
"matplotlib.use",
"pickle.load",
"inspect.currentframe",
"numpy.swapaxes",
"gsd.fl.GSDFile",
"numpy.zeros",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots"
] |
[((59, 73), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (66, 73), True, 'import matplotlib as mpl\n'), ((880, 949), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'family': 'serif', 'serif': ['Computer Modern Roman']})\n", (882, 949), False, 'from matplotlib import rc\n'), ((947, 970), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (949, 970), False, 'from matplotlib import rc\n'), ((8605, 8643), 'numpy.sqrt', 'np.sqrt', (['(unit_M * unit_D ** 2 / unit_E)'], {}), '(unit_M * unit_D ** 2 / unit_E)\n', (8612, 8643), True, 'import numpy as np\n'), ((8812, 8857), 'numpy.sqrt', 'np.sqrt', (['(unit_E * 10000000.0 * unit_D * 100.0)'], {}), '(unit_E * 10000000.0 * unit_D * 100.0)\n', (8819, 8857), True, 'import numpy as np\n'), ((4963, 4986), 'numpy.ones', 'np.ones', (['(useframes, 2)'], {}), '((useframes, 2))\n', (4970, 4986), True, 'import numpy as np\n'), ((5270, 5332), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['a[b_cutoff:, :]', 'x_sq_av[b_cutoff:]'], {'rcond': '(-1)'}), '(a[b_cutoff:, :], x_sq_av[b_cutoff:], rcond=-1)\n', (5285, 5332), True, 'import numpy as np\n'), ((5347, 5409), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['a[b_cutoff:, :]', 'y_sq_av[b_cutoff:]'], {'rcond': '(-1)'}), '(a[b_cutoff:, :], y_sq_av[b_cutoff:], rcond=-1)\n', (5362, 5409), True, 'import numpy as np\n'), ((5431, 5465), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(7, 5)'}), '(1, 1, figsize=(7, 5))\n', (5443, 5465), True, 'import matplotlib.pyplot as plt\n'), ((5890, 5906), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5899, 5906), True, 'import matplotlib.pyplot as plt\n'), ((7189, 7209), 'numpy.swapaxes', 'np.swapaxes', (['v', '(0)', '(1)'], {}), '(v, 0, 1)\n', (7200, 7209), True, 'import numpy as np\n'), ((7412, 7426), 'numpy.mean', 'np.mean', (['T_eff'], {}), '(T_eff)\n', (7419, 7426), True, 'import numpy as np\n'), ((9030, 9052), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (9050, 9052), False, 'import inspect\n'), ((9462, 9488), 'os.path.isdir', 'os.path.isdir', (['sys.argv[i]'], {}), '(sys.argv[i])\n', (9475, 9488), False, 'import os\n'), ((10307, 10322), 'os.walk', 'os.walk', (['folder'], {}), '(folder)\n', (10314, 10322), False, 'import os\n'), ((3473, 3515), 'gsd.fl.GSDFile', 'gsd.fl.GSDFile', (['(folder_path + f_name)', '"""rb"""'], {}), "(folder_path + f_name, 'rb')\n", (3487, 3515), False, 'import gsd\n'), ((3940, 3959), 'numpy.zeros', 'np.zeros', (['useframes'], {}), '(useframes)\n', (3948, 3959), True, 'import numpy as np\n'), ((3979, 3998), 'numpy.zeros', 'np.zeros', (['useframes'], {}), '(useframes)\n', (3987, 3998), True, 'import numpy as np\n'), ((6325, 6352), 'gsd.fl.GSDFile', 'gsd.fl.GSDFile', (['fpath', '"""rb"""'], {}), "(fpath, 'rb')\n", (6339, 6352), False, 'import gsd\n'), ((7297, 7357), 'numpy.mean', 'np.mean', (['(v_swap[:, :, 0] ** 2 + v_swap[:, :, 1] ** 2)'], {'axis': '(0)'}), '(v_swap[:, :, 0] ** 2 + v_swap[:, :, 1] ** 2, axis=0)\n', (7304, 7357), True, 'import numpy as np\n'), ((12908, 12942), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(7, 6)'}), '(1, 1, figsize=(7, 6))\n', (12920, 12942), True, 'import matplotlib.pyplot as plt\n'), ((13577, 13618), 'matplotlib.ticker.ScalarFormatter', 'mticker.ScalarFormatter', ([], {'useMathText': '(True)'}), '(useMathText=True)\n', (13600, 13618), True, 'import matplotlib.ticker as mticker\n'), ((14540, 14558), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14556, 14558), True, 'import matplotlib.pyplot as plt\n'), ((15020, 15036), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (15029, 15036), True, 'import matplotlib.pyplot as plt\n'), ((2635, 2649), 'numpy.dot', 'np.dot', (['Xpr', 'X'], {}), '(Xpr, X)\n', (2641, 2649), True, 'import numpy as np\n'), ((4160, 4182), 'numpy.mean', 'np.mean', (['pos_0'], {'axis': '(0)'}), '(pos_0, axis=0)\n', (4167, 4182), True, 'import numpy as np\n'), ((7383, 7397), 'numpy.mean', 'np.mean', (['T_eff'], {}), '(T_eff)\n', (7390, 7397), True, 'import numpy as np\n'), ((9115, 9137), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (9135, 9137), False, 'import inspect\n'), ((12043, 12058), 'pickle.load', 'pickle.load', (['ff'], {}), '(ff)\n', (12054, 12058), False, 'import pickle\n'), ((12254, 12287), 'numpy.where', 'np.where', (['(gamma_eff_arr < gamma_c)'], {}), '(gamma_eff_arr < gamma_c)\n', (12262, 12287), True, 'import numpy as np\n'), ((12989, 13023), 'numpy.log', 'np.log', (['(Dx_arr_clip * cm2s_convert)'], {}), '(Dx_arr_clip * cm2s_convert)\n', (12995, 13023), True, 'import numpy as np\n'), ((13239, 13273), 'numpy.log', 'np.log', (['(Dy_arr_clip * cm2s_convert)'], {}), '(Dy_arr_clip * cm2s_convert)\n', (13245, 13273), True, 'import numpy as np\n'), ((2352, 2371), 'numpy.ones', 'np.ones', (['x.shape[0]'], {}), '(x.shape[0])\n', (2359, 2371), True, 'import numpy as np\n'), ((2464, 2478), 'numpy.dot', 'np.dot', (['Xpr', 'X'], {}), '(Xpr, X)\n', (2470, 2478), True, 'import numpy as np\n'), ((2539, 2554), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (2545, 2554), True, 'import numpy as np\n'), ((2560, 2575), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (2566, 2575), True, 'import numpy as np\n'), ((4477, 4544), 'md_tools27.correct_jumps', 'md_tools.correct_jumps', (['pos_raw', 'pos_m1', 'pos_m1_raw', 'box[0]', 'box[1]'], {}), '(pos_raw, pos_m1, pos_m1_raw, box[0], box[1])\n', (4499, 4544), True, 'import md_tools27 as md_tools\n'), ((4717, 4740), 'numpy.mean', 'np.mean', (['(pos[:, 0] ** 2)'], {}), '(pos[:, 0] ** 2)\n', (4724, 4740), True, 'import numpy as np\n'), ((4775, 4798), 'numpy.mean', 'np.mean', (['(pos[:, 1] ** 2)'], {}), '(pos[:, 1] ** 2)\n', (4782, 4798), True, 'import numpy as np\n'), ((5102, 5120), 'numpy.ones', 'np.ones', (['useframes'], {}), '(useframes)\n', (5109, 5120), True, 'import numpy as np\n'), ((4607, 4627), 'numpy.mean', 'np.mean', (['pos'], {'axis': '(0)'}), '(pos, axis=0)\n', (4614, 4627), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
from conans.errors import ConanException
import os
import shutil
def sort_libs(correct_order, libs, lib_suffix='', reverse_result=False):
# Add suffix for correct string matching
correct_order[:] = [s.__add__(lib_suffix) for s in correct_order]
result = []
for expectedLib in correct_order:
for lib in libs:
if expectedLib == lib:
result.append(lib)
if reverse_result:
# Linking happens in reversed order
result.reverse()
return result
class CorradeConan(ConanFile):
name = "corrade"
version = "2019.10"
description = "Corrade is a multiplatform utility library written \
in C++11/C++14. It's used as a base for the Magnum \
graphics engine, among other things."
# topics can get used for searches, GitHub topics, Bintray tags etc. Add here keywords about the library
topics = ("conan", "corrad", "magnum", "filesystem", "console", "environment", "os")
url = "https://github.com/mosra/corrade"
homepage = "https://magnum.graphics/corrade"
author = "helmesjo <<EMAIL>>"
license = "MIT" # Indicates license type of the packaged library; please use SPDX Identifiers https://spdx.org/licenses/
exports = ["COPYING"]
exports_sources = ["CMakeLists.txt", "src/*", "package/conan/*", "modules/*"]
generators = "cmake"
short_paths = True # Some folders go out of the 260 chars path length scope (windows)
# Options may need to change depending on the packaged library.
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"build_deprecated": [True, False],
"with_interconnect": [True, False],
"with_pluginmanager": [True, False],
"with_rc": [True, False],
"with_testsuite": [True, False],
"with_utility": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"build_deprecated": True,
"with_interconnect": True,
"with_pluginmanager": True,
"with_rc": True,
"with_testsuite": True,
"with_utility": True,
}
_build_subfolder = "build_subfolder"
def config_options(self):
if self.settings.os == 'Windows':
del self.options.fPIC
def configure(self):
if self.settings.compiler == 'Visual Studio' and int(self.settings.compiler.version.value) < 14:
raise ConanException("{} requires Visual Studio version 14 or greater".format(self.name))
def source(self):
# Wrap the original CMake file to call conan_basic_setup
shutil.move("CMakeLists.txt", "CMakeListsOriginal.txt")
shutil.move(os.path.join("package", "conan", "CMakeLists.txt"), "CMakeLists.txt")
def _configure_cmake(self):
cmake = CMake(self)
def add_cmake_option(option, value):
var_name = "{}".format(option).upper()
value_str = "{}".format(value)
var_value = "ON" if value_str == 'True' else "OFF" if value_str == 'False' else value_str
cmake.definitions[var_name] = var_value
for option, value in self.options.items():
add_cmake_option(option, value)
# Corrade uses suffix on the resulting 'lib'-folder when running cmake.install()
# Set it explicitly to empty, else Corrade might set it implicitly (eg. to "64")
add_cmake_option("LIB_SUFFIX", "")
add_cmake_option("BUILD_STATIC", not self.options.shared)
if self.settings.compiler == 'Visual Studio':
add_cmake_option("MSVC2015_COMPATIBILITY", int(self.settings.compiler.version.value) == 14)
add_cmake_option("MSVC2017_COMPATIBILITY", int(self.settings.compiler.version.value) == 17)
cmake.configure(build_folder=self._build_subfolder)
return cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("COPYING", dst="licenses", src=".")
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
# See dependency order here: https://doc.magnum.graphics/magnum/custom-buildsystems.html
allLibs = [
#1
"CorradeUtility",
"CorradeContainers",
#2
"CorradeInterconnect",
"CorradePluginManager",
"CorradeTestSuite",
]
# Sort all built libs according to above, and reverse result for correct link order
suffix = '-d' if self.settings.build_type == "Debug" else ''
builtLibs = tools.collect_libs(self)
self.cpp_info.libs = sort_libs(correct_order=allLibs, libs=builtLibs, lib_suffix=suffix, reverse_result=True)
|
[
"conans.tools.collect_libs",
"conans.CMake",
"os.path.join",
"shutil.move"
] |
[((2766, 2821), 'shutil.move', 'shutil.move', (['"""CMakeLists.txt"""', '"""CMakeListsOriginal.txt"""'], {}), "('CMakeLists.txt', 'CMakeListsOriginal.txt')\n", (2777, 2821), False, 'import shutil\n'), ((2961, 2972), 'conans.CMake', 'CMake', (['self'], {}), '(self)\n', (2966, 2972), False, 'from conans import ConanFile, CMake, tools\n'), ((4758, 4782), 'conans.tools.collect_libs', 'tools.collect_libs', (['self'], {}), '(self)\n', (4776, 4782), False, 'from conans import ConanFile, CMake, tools\n'), ((2842, 2892), 'os.path.join', 'os.path.join', (['"""package"""', '"""conan"""', '"""CMakeLists.txt"""'], {}), "('package', 'conan', 'CMakeLists.txt')\n", (2854, 2892), False, 'import os\n')]
|
import pytest
from fastjsonschema import JsonSchemaException
exc = JsonSchemaException('data must be null', value='{data}', name='data', definition='{definition}', rule='type')
@pytest.mark.parametrize('value, expected', [
(0, exc),
(None, None),
(True, exc),
('abc', exc),
([], exc),
({}, exc),
])
def test_null(asserter, value, expected):
asserter({'type': 'null'}, value, expected)
|
[
"pytest.mark.parametrize",
"fastjsonschema.JsonSchemaException"
] |
[((70, 183), 'fastjsonschema.JsonSchemaException', 'JsonSchemaException', (['"""data must be null"""'], {'value': '"""{data}"""', 'name': '"""data"""', 'definition': '"""{definition}"""', 'rule': '"""type"""'}), "('data must be null', value='{data}', name='data',\n definition='{definition}', rule='type')\n", (89, 183), False, 'from fastjsonschema import JsonSchemaException\n'), ((181, 302), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value, expected"""', "[(0, exc), (None, None), (True, exc), ('abc', exc), ([], exc), ({}, exc)]"], {}), "('value, expected', [(0, exc), (None, None), (True,\n exc), ('abc', exc), ([], exc), ({}, exc)])\n", (204, 302), False, 'import pytest\n')]
|
import boto3
import json
def init(ACCESS_KEY, SECRET_KEY):
return boto3.client(service_name='comprehend', region_name="us-west-2", aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)
def get_entities(client, title):
return client.detect_entities(Text=title, LanguageCode='en').get('Entities')
def get_key_phrases(client, title):
return client.detect_key_phrases(Text=title, LanguageCode='en').get('KeyPhrases')
def get_sentiment(client, title):
sentiment = client.detect_sentiment(Text=title, LanguageCode='en')
return [sentiment.get('Sentiment').title(), sentiment.get('SentimentScore')]
|
[
"boto3.client"
] |
[((77, 209), 'boto3.client', 'boto3.client', ([], {'service_name': '"""comprehend"""', 'region_name': '"""us-west-2"""', 'aws_access_key_id': 'ACCESS_KEY', 'aws_secret_access_key': 'SECRET_KEY'}), "(service_name='comprehend', region_name='us-west-2',\n aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)\n", (89, 209), False, 'import boto3\n')]
|
import discord
from discord.ext import commands
from discord import Embed, Permissions
from Util import logger
import os
import database
# Import the config
try:
import config
except ImportError:
print("Couldn't import config.py! Exiting!")
exit()
# Import a monkey patch, if that exists
try:
import monkeyPatch
except ImportError:
print("DEBUG: No Monkey patch found!")
bot = commands.Bot(command_prefix=os.getenv('prefix'), description='Well boys, we did it. Baddies are no more.',
activity=discord.Game(name="with the banhammer"))
startup_extensions = ["essentials",
"moderation",
"info",
"listenerCog"]
# Function to update the database on startup
async def updateDatabase():
# Fetch bans from the banlistguild, and smack them into the db
banguild = bot.get_guild(int(os.getenv('banlistguild')))
ban_list = await banguild.bans()
for BanEntry in ban_list:
if BanEntry.reason is not None:
if "not global" in BanEntry.reason.lower():
continue
if not database.isBanned(BanEntry.user.id):
database.newBan(userid=BanEntry.user.id, discordtag=BanEntry.user.name + "#" + BanEntry.user.discriminator,
avatarurl=BanEntry.user.avatar_url)
# Make sure appeal guild is set up properly
async def checkAppealGuild():
appealguild = bot.get_guild(int(os.getenv('appealguild')))
appealchannel = None
for channel in appealguild.channels:
if channel.name == "appeal-here":
appealchannel = channel
break
if appealchannel is None:
await logger.log("No appealchannel found! Trying to create one!", bot, "INFO")
try:
overwrites = {
appealguild.default_role: discord.PermissionOverwrite(read_messages=True, send_messages=False),
appealguild.me: discord.PermissionOverwrite(read_messages=True, send_messages=True,
manage_messages=True, embed_links=True,
add_reactions=True)
}
appealchannel = await appealguild.create_text_channel("appeal-here", overwrites=overwrites)
except Exception as e:
await logger.log("Could not create an appeal channel! Returning! - " + str(e), bot, "ERROR")
return
history = await appealchannel.history(limit=5).flatten()
# check if no messages
if len(history) == 0: # no messages
# Sending the message
await logger.log("Sending the appeal channel message", bot, "INFO")
message = await appealchannel.send(content="Hello there! Welcome to the WatchDog Appeal Server!\n" +
"\nTo begin your appeal process, please click this reaction!")
# now we add a reaction to the message
await message.add_reaction("✅")
@bot.event
async def on_connect():
logger.logDebug("----------[LOGIN SUCESSFULL]----------", "INFO")
logger.logDebug(" Username: " + bot.user.name, "INFO")
logger.logDebug(" UserID: " + str(bot.user.id), "INFO")
logger.logDebug("--------------------------------------", "INFO")
print("\n")
logger.logDebug("Updating the database!", "INFO")
await updateDatabase()
logger.logDebug("Done updating the database!", "INFO")
print("\n")
# Ban appeal server setup
await checkAppealGuild()
# Bot done starting up
await logger.log("Bot startup done!", bot, "INFO", "Bot startup done.\n")
@bot.event
async def on_ready():
# Bot startup is now done...
logger.logDebug("WatchDog has (re)connected to Discord!")
@bot.event
async def on_command_error(ctx: commands.Context, error):
if isinstance(error, commands.NoPrivateMessage):
await ctx.send("This command cannot be used in private messages")
elif isinstance(error, commands.BotMissingPermissions):
await ctx.send(
embed=Embed(color=discord.Color.red(), description="I need the permission `Ban Members` to sync the bans!"))
elif isinstance(error, commands.MissingPermissions):
await ctx.send(
embed=Embed(color=discord.Color.red(), description="You are missing the permission `Ban Members`!"))
elif isinstance(error, commands.CheckFailure):
return
elif isinstance(error, commands.CommandOnCooldown):
return
elif isinstance(error, commands.MissingRequiredArgument):
return
elif isinstance(error, commands.BadArgument):
return
elif isinstance(error, commands.CommandNotFound):
return
else:
await ctx.send("Something went wrong while executing that command... Sorry!")
await logger.log("%s" % error, bot, "ERROR")
@bot.event
async def on_guild_join(guild):
await logger.log("Joined a new guild (`%s` - `%s`)" % (guild.name, guild.id), bot, "INFO")
# Check the bot's ban permission
if Permissions.ban_members in guild.get_member(bot.user.id).guild_permissions:
# Get bans from db
bans = database.getBans()
# make new list for userid in bans, if member is in guild
ban_members = [userid for userid in bans if guild.get_member(userid)]
logger.logDebug(str(ban_members))
# Ban the found users
for userid in ban_members:
await guild.ban(bot.get_user(int(userid)), reason="WatchDog - Global Ban")
logger.logDebug("Banned user in guild hahayes")
@bot.event
async def on_message(message: discord.Message):
if message.author.bot:
return
ctx: commands.Context = await bot.get_context(message)
if message.content.startswith(os.getenv('prefix')):
if ctx.command is not None:
if isinstance(message.channel, discord.DMChannel):
await logger.log("`%s` (%s) used the `%s` command in their DM's" % (
ctx.author.name, ctx.author.id, ctx.invoked_with), bot, "INFO")
else:
await logger.log("`%s` (%s) used the `%s` command in the guild `%s` (%s), in the channel `%s` (%s)" % (
ctx.author.name, ctx.author.id, ctx.invoked_with, ctx.guild.name, ctx.guild.id, ctx.channel.name,
ctx.channel.id), bot, "INFO")
await bot.invoke(ctx)
else:
return
if __name__ == '__main__':
logger.setup_logger()
# Load extensions
for extension in startup_extensions:
try:
bot.load_extension(f"cogs.{extension}")
except Exception as e:
logger.logDebug(f"Failed to load extension {extension}. - {e}", "ERROR")
bot.run(os.getenv('token'))
|
[
"database.getBans",
"Util.logger.logDebug",
"discord.PermissionOverwrite",
"os.getenv",
"discord.Game",
"database.newBan",
"Util.logger.setup_logger",
"Util.logger.log",
"database.isBanned",
"discord.Color.red"
] |
[((3055, 3120), 'Util.logger.logDebug', 'logger.logDebug', (['"""----------[LOGIN SUCESSFULL]----------"""', '"""INFO"""'], {}), "('----------[LOGIN SUCESSFULL]----------', 'INFO')\n", (3070, 3120), False, 'from Util import logger\n'), ((3125, 3183), 'Util.logger.logDebug', 'logger.logDebug', (["(' Username: ' + bot.user.name)", '"""INFO"""'], {}), "(' Username: ' + bot.user.name, 'INFO')\n", (3140, 3183), False, 'from Util import logger\n'), ((3254, 3319), 'Util.logger.logDebug', 'logger.logDebug', (['"""--------------------------------------"""', '"""INFO"""'], {}), "('--------------------------------------', 'INFO')\n", (3269, 3319), False, 'from Util import logger\n'), ((3341, 3390), 'Util.logger.logDebug', 'logger.logDebug', (['"""Updating the database!"""', '"""INFO"""'], {}), "('Updating the database!', 'INFO')\n", (3356, 3390), False, 'from Util import logger\n'), ((3422, 3476), 'Util.logger.logDebug', 'logger.logDebug', (['"""Done updating the database!"""', '"""INFO"""'], {}), "('Done updating the database!', 'INFO')\n", (3437, 3476), False, 'from Util import logger\n'), ((3730, 3787), 'Util.logger.logDebug', 'logger.logDebug', (['"""WatchDog has (re)connected to Discord!"""'], {}), "('WatchDog has (re)connected to Discord!')\n", (3745, 3787), False, 'from Util import logger\n'), ((6485, 6506), 'Util.logger.setup_logger', 'logger.setup_logger', ([], {}), '()\n', (6504, 6506), False, 'from Util import logger\n'), ((6761, 6779), 'os.getenv', 'os.getenv', (['"""token"""'], {}), "('token')\n", (6770, 6779), False, 'import os\n'), ((428, 447), 'os.getenv', 'os.getenv', (['"""prefix"""'], {}), "('prefix')\n", (437, 447), False, 'import os\n'), ((535, 574), 'discord.Game', 'discord.Game', ([], {'name': '"""with the banhammer"""'}), "(name='with the banhammer')\n", (547, 574), False, 'import discord\n'), ((3591, 3658), 'Util.logger.log', 'logger.log', (['"""Bot startup done!"""', 'bot', '"""INFO"""', '"""Bot startup done.\n"""'], {}), "('Bot startup done!', bot, 'INFO', 'Bot startup done.\\n')\n", (3601, 3658), False, 'from Util import logger\n'), ((4937, 5025), 'Util.logger.log', 'logger.log', (["('Joined a new guild (`%s` - `%s`)' % (guild.name, guild.id))", 'bot', '"""INFO"""'], {}), "('Joined a new guild (`%s` - `%s`)' % (guild.name, guild.id), bot,\n 'INFO')\n", (4947, 5025), False, 'from Util import logger\n'), ((5184, 5202), 'database.getBans', 'database.getBans', ([], {}), '()\n', (5200, 5202), False, 'import database\n'), ((5797, 5816), 'os.getenv', 'os.getenv', (['"""prefix"""'], {}), "('prefix')\n", (5806, 5816), False, 'import os\n'), ((891, 916), 'os.getenv', 'os.getenv', (['"""banlistguild"""'], {}), "('banlistguild')\n", (900, 916), False, 'import os\n'), ((1123, 1158), 'database.isBanned', 'database.isBanned', (['BanEntry.user.id'], {}), '(BanEntry.user.id)\n', (1140, 1158), False, 'import database\n'), ((1172, 1319), 'database.newBan', 'database.newBan', ([], {'userid': 'BanEntry.user.id', 'discordtag': "(BanEntry.user.name + '#' + BanEntry.user.discriminator)", 'avatarurl': 'BanEntry.user.avatar_url'}), "(userid=BanEntry.user.id, discordtag=BanEntry.user.name +\n '#' + BanEntry.user.discriminator, avatarurl=BanEntry.user.avatar_url)\n", (1187, 1319), False, 'import database\n'), ((1456, 1480), 'os.getenv', 'os.getenv', (['"""appealguild"""'], {}), "('appealguild')\n", (1465, 1480), False, 'import os\n'), ((1689, 1761), 'Util.logger.log', 'logger.log', (['"""No appealchannel found! Trying to create one!"""', 'bot', '"""INFO"""'], {}), "('No appealchannel found! Trying to create one!', bot, 'INFO')\n", (1699, 1761), False, 'from Util import logger\n'), ((2641, 2702), 'Util.logger.log', 'logger.log', (['"""Sending the appeal channel message"""', 'bot', '"""INFO"""'], {}), "('Sending the appeal channel message', bot, 'INFO')\n", (2651, 2702), False, 'from Util import logger\n'), ((5553, 5600), 'Util.logger.logDebug', 'logger.logDebug', (['"""Banned user in guild hahayes"""'], {}), "('Banned user in guild hahayes')\n", (5568, 5600), False, 'from Util import logger\n'), ((1844, 1912), 'discord.PermissionOverwrite', 'discord.PermissionOverwrite', ([], {'read_messages': '(True)', 'send_messages': '(False)'}), '(read_messages=True, send_messages=False)\n', (1871, 1912), False, 'import discord\n'), ((1946, 2077), 'discord.PermissionOverwrite', 'discord.PermissionOverwrite', ([], {'read_messages': '(True)', 'send_messages': '(True)', 'manage_messages': '(True)', 'embed_links': '(True)', 'add_reactions': '(True)'}), '(read_messages=True, send_messages=True,\n manage_messages=True, embed_links=True, add_reactions=True)\n', (1973, 2077), False, 'import discord\n'), ((6679, 6751), 'Util.logger.logDebug', 'logger.logDebug', (['f"""Failed to load extension {extension}. - {e}"""', '"""ERROR"""'], {}), "(f'Failed to load extension {extension}. - {e}', 'ERROR')\n", (6694, 6751), False, 'from Util import logger\n'), ((5940, 6070), 'Util.logger.log', 'logger.log', (['("`%s` (%s) used the `%s` command in their DM\'s" % (ctx.author.name, ctx.\n author.id, ctx.invoked_with))', 'bot', '"""INFO"""'], {}), '("`%s` (%s) used the `%s` command in their DM\'s" % (ctx.author.\n name, ctx.author.id, ctx.invoked_with), bot, \'INFO\')\n', (5950, 6070), False, 'from Util import logger\n'), ((6127, 6365), 'Util.logger.log', 'logger.log', (["('`%s` (%s) used the `%s` command in the guild `%s` (%s), in the channel `%s` (%s)'\n % (ctx.author.name, ctx.author.id, ctx.invoked_with, ctx.guild.name,\n ctx.guild.id, ctx.channel.name, ctx.channel.id))", 'bot', '"""INFO"""'], {}), "(\n '`%s` (%s) used the `%s` command in the guild `%s` (%s), in the channel `%s` (%s)'\n % (ctx.author.name, ctx.author.id, ctx.invoked_with, ctx.guild.name,\n ctx.guild.id, ctx.channel.name, ctx.channel.id), bot, 'INFO')\n", (6137, 6365), False, 'from Util import logger\n'), ((4100, 4119), 'discord.Color.red', 'discord.Color.red', ([], {}), '()\n', (4117, 4119), False, 'import discord\n'), ((4302, 4321), 'discord.Color.red', 'discord.Color.red', ([], {}), '()\n', (4319, 4321), False, 'import discord\n'), ((4843, 4881), 'Util.logger.log', 'logger.log', (["('%s' % error)", 'bot', '"""ERROR"""'], {}), "('%s' % error, bot, 'ERROR')\n", (4853, 4881), False, 'from Util import logger\n')]
|
from django.conf.urls import url
from django.conf.urls.i18n import i18n_patterns
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name='dummy.html')
app_name = 'account'
urlpatterns = i18n_patterns(
url(_(r'^register/$'), view, name='register'),
)
|
[
"django.views.generic.TemplateView.as_view",
"django.utils.translation.gettext_lazy"
] |
[((195, 243), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""dummy.html"""'}), "(template_name='dummy.html')\n", (215, 243), False, 'from django.views.generic import TemplateView\n'), ((307, 323), 'django.utils.translation.gettext_lazy', '_', (['"""^register/$"""'], {}), "('^register/$')\n", (308, 323), True, 'from django.utils.translation import gettext_lazy as _\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from moz_sql_parser import parse as parse_sql
import pyparsing
import re
from six.moves.urllib import parse
FROM_REGEX = re.compile(' from ("http.*?")', re.IGNORECASE)
def get_url(url, headers=0, gid=0, sheet=None):
parts = parse.urlparse(url)
if parts.path.endswith('/edit'):
path = parts.path[:-len('/edit')]
else:
path = parts.path
path = '/'.join((path.rstrip('/'), 'gviz/tq'))
qs = parse.parse_qs(parts.query)
if 'headers' in qs:
headers = int(qs['headers'][-1])
if 'gid' in qs:
gid = qs['gid'][-1]
if 'sheet' in qs:
sheet = qs['sheet'][-1]
if parts.fragment.startswith('gid='):
gid = parts.fragment[len('gid='):]
args = OrderedDict()
if headers > 0:
args['headers'] = headers
if sheet is not None:
args['sheet'] = sheet
else:
args['gid'] = gid
params = parse.urlencode(args)
return parse.urlunparse(
(parts.scheme, parts.netloc, path, None, params, None))
def extract_url(sql):
try:
url = parse_sql(sql)['from']
except pyparsing.ParseException:
# fallback to regex to extract from
match = FROM_REGEX.search(sql)
if match:
return match.group(1).strip('"')
return
while isinstance(url, dict):
url = url['value']['from']
return url
# Function to extract url from any sql statement
def url_from_sql(sql):
"""
Extract url from any sql statement.
:param sql:
:return:
"""
try:
parsed_sql = re.split('[( , " )]', str(sql))
for i, val in enumerate(parsed_sql):
if val.startswith('https:'):
sql_url = parsed_sql[i]
return sql_url
except Exception as e:
print("Error: {}".format(e))
|
[
"six.moves.urllib.parse.parse_qs",
"collections.OrderedDict",
"re.compile",
"six.moves.urllib.parse.urlparse",
"six.moves.urllib.parse.urlunparse",
"six.moves.urllib.parse.urlencode",
"moz_sql_parser.parse"
] |
[((310, 356), 're.compile', 're.compile', (['""" from ("http.*?")"""', 're.IGNORECASE'], {}), '(\' from ("http.*?")\', re.IGNORECASE)\n', (320, 356), False, 'import re\n'), ((420, 439), 'six.moves.urllib.parse.urlparse', 'parse.urlparse', (['url'], {}), '(url)\n', (434, 439), False, 'from six.moves.urllib import parse\n'), ((617, 644), 'six.moves.urllib.parse.parse_qs', 'parse.parse_qs', (['parts.query'], {}), '(parts.query)\n', (631, 644), False, 'from six.moves.urllib import parse\n'), ((913, 926), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (924, 926), False, 'from collections import OrderedDict\n'), ((1086, 1107), 'six.moves.urllib.parse.urlencode', 'parse.urlencode', (['args'], {}), '(args)\n', (1101, 1107), False, 'from six.moves.urllib import parse\n'), ((1120, 1192), 'six.moves.urllib.parse.urlunparse', 'parse.urlunparse', (['(parts.scheme, parts.netloc, path, None, params, None)'], {}), '((parts.scheme, parts.netloc, path, None, params, None))\n', (1136, 1192), False, 'from six.moves.urllib import parse\n'), ((1250, 1264), 'moz_sql_parser.parse', 'parse_sql', (['sql'], {}), '(sql)\n', (1259, 1264), True, 'from moz_sql_parser import parse as parse_sql\n')]
|
# -*- coding:utf-8 -*-
# Bot2Human
#
# Replaces messages from bots to humans
# typically used in channels that are connected with other IMs using bots
#
# For example, if a bot send messages from XMPP is like `[nick] content`,
# weechat would show `bot | [nick] content` which looks bad; this script
# make weecaht display `nick | content` so that the messages looks like
# normal IRC message
#
# Options
#
# plugins.var.python.bot2human.bot_nicks
# space seperated nicknames to forwarding bots
# example: teleboto toxsync tg2arch
#
# plugins.var.python.nick_content_re.X
# X is a 0-2 number. This options specifies regex to match nickname
# and content. Default regexes are r'\[(?P<nick>.+?)\] (?P<text>.*)',
# r'\((?P<nick>.+?)\) (?P<text>.*)', and r'<(?P<nick>.+?)> (?P<text>.*)'
#
# plugins.var.python.nick_re_count
# Number of rules defined
#
# Changelog:
# 0.3.0: Add relayed nicks into nicklist, enabling completion
# 0.2.2: Support ZNC timestamp
# 0.2.1: Color filtering only applies on nicknames
# More than 3 nick rules can be defined
# 0.2.0: Filter mIRC color and other control seq from message
# 0.1.1: Bug Fixes
# 0.1: Initial Release
#
import weechat as w
import re
SCRIPT_NAME = "bot2human"
SCRIPT_AUTHOR = "<NAME> & Hexchain & quietlynn"
SCRIPT_DESC = "Replace IRC message nicknames with regex match from chat text"
SCRIPT_VERSION = "0.3.0"
SCRIPT_LICENSE = "GPLv3"
DEFAULTS = {
'nick_re_count': '4',
'nick_content_re.0': r'\[(?:\x03[0-9,]+)?(?P<nick>[^:]+?)\x0f?\] (?P<text>.*)',
'nick_content_re.1': r'(?:\x03[0-9,]+)?\[(?P<nick>[^:]+?)\]\x0f? (?P<text>.*)',
'nick_content_re.2': r'\((?P<nick>[^:]+?)\) (?P<text>.*)',
'nick_content_re.3': r'<(?:\x03[0-9,]+)?(?P<nick>[^:]+?)\x0f?> (?P<text>.*)',
'bot_nicks': "",
'znc_ts_re': r'\[\d\d:\d\d:\d\d\]\s+',
}
CONFIG = {
'nick_re_count': -1,
'nick_content_res': [],
'bot_nicks': [],
'znc_ts_re': None,
}
def parse_config():
for option, default in DEFAULTS.items():
# print(option, w.config_get_plugin(option))
if not w.config_is_set_plugin(option):
w.config_set_plugin(option, default)
CONFIG['nick_re_count'] = int(w.config_get_plugin('nick_re_count'))
CONFIG['bot_nicks'] = w.config_get_plugin('bot_nicks').split(' ')
for i in range(CONFIG['nick_re_count']):
option = "nick_content_re.{}".format(i)
CONFIG['nick_content_res'].append(
re.compile(w.config_get_plugin(option))
)
CONFIG['znc_ts_re'] = re.compile(w.config_get_plugin('znc_ts_re'))
def config_cb(data, option, value):
parse_config()
return w.WEECHAT_RC_OK
def filter_color(msg):
# filter \x01 - \x19 control seq
# filter \x03{foreground}[,{background}] color string
return re.sub(r'\x03[\d,]+|[\x00-\x1f]', '', msg)
def msg_cb(data, modifier, modifier_data, string):
# w.prnt("blue", "test_msg_cb " + string)
parsed = w.info_get_hashtable("irc_message_parse", {'message': string})
# w.prnt("", "%s" % parsed)
matched = False
for bot in CONFIG['bot_nicks']:
# w.prnt("", "%s, %s" % (parsed["nick"], bot))
if parsed['nick'] == bot:
t = parsed.get(
'text',
parsed["arguments"][len(parsed["channel"])+2:]
)
# ZNC timestamp
ts = ""
mts = CONFIG['znc_ts_re'].match(t)
if mts:
ts = mts.group()
t = t[mts.end():]
for r in CONFIG['nick_content_res']:
# parsed['text'] only exists in weechat version >= 1.3
m = r.match(t)
if not m:
continue
nick, text = m.group('nick'), m.group('text')
nick = filter_color(nick)
nick = re.sub(r'\s', '_', nick)
parsed['host'] = parsed['host'].replace(bot, nick)
parsed['text'] = ts + text
matched = True
buffer = w.info_get("irc_buffer", "%s,%s" % (modifier_data, parsed['channel']))
add_nick(nick, buffer, "")
break
if matched:
break
else:
return string
return ":{host} {command} {channel} :{text}".format(**parsed)
def add_nick(name, buffer, group):
group = get_nick_group(buffer, 'bot2human')
if not w.nicklist_search_nick(buffer, group, name):
w.nicklist_add_nick(buffer, group, name, "weechat.color.nicklist_group", "~", "lightgreen", 1)
return w.WEECHAT_RC_OK
def get_nick_group(buffer, group_name):
group = w.nicklist_search_group(buffer, "", group_name)
if not group:
group = w.nicklist_add_group(buffer, "", group_name, "weechat.color.nicklist_group", 1)
return group
def nicklist_nick_added_cb(data, signal, buffer):
group = get_nick_group(buffer, 'bot2human')
return w.WEECHAT_RC_OK
if __name__ == '__main__':
w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, "", "")
parse_config()
w.hook_modifier("irc_in_privmsg", "msg_cb", "")
w.hook_config("plugins.var.python."+SCRIPT_NAME+".*", "config_cb", "")
# Glowing Bear will choke if a nick is added into a newly created group.
# As a workaround, we add the group as soon as possible BEFORE Glowing Bear loads groups,
# and we must do that AFTER EVERY nicklist reload. nicklist_nick_added satisfies both.
# TODO(quietlynn): Find better signals to hook instead.
w.hook_signal("nicklist_nick_added", "nicklist_nick_added_cb", "")
# vim: ts=4 sw=4 sts=4 expandtab
|
[
"weechat.config_get_plugin",
"weechat.hook_config",
"weechat.info_get",
"weechat.config_is_set_plugin",
"weechat.register",
"weechat.config_set_plugin",
"weechat.info_get_hashtable",
"weechat.hook_modifier",
"weechat.nicklist_search_nick",
"weechat.nicklist_add_nick",
"re.sub",
"weechat.nicklist_add_group",
"weechat.nicklist_search_group",
"weechat.hook_signal"
] |
[((2810, 2855), 're.sub', 're.sub', (['"""\\\\x03[\\\\d,]+|[\\\\x00-\\\\x1f]"""', '""""""', 'msg'], {}), "('\\\\x03[\\\\d,]+|[\\\\x00-\\\\x1f]', '', msg)\n", (2816, 2855), False, 'import re\n'), ((2964, 3026), 'weechat.info_get_hashtable', 'w.info_get_hashtable', (['"""irc_message_parse"""', "{'message': string}"], {}), "('irc_message_parse', {'message': string})\n", (2984, 3026), True, 'import weechat as w\n'), ((4646, 4693), 'weechat.nicklist_search_group', 'w.nicklist_search_group', (['buffer', '""""""', 'group_name'], {}), "(buffer, '', group_name)\n", (4669, 4693), True, 'import weechat as w\n'), ((4984, 5079), 'weechat.register', 'w.register', (['SCRIPT_NAME', 'SCRIPT_AUTHOR', 'SCRIPT_VERSION', 'SCRIPT_LICENSE', 'SCRIPT_DESC', '""""""', '""""""'], {}), "(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,\n SCRIPT_DESC, '', '')\n", (4994, 5079), True, 'import weechat as w\n'), ((5116, 5163), 'weechat.hook_modifier', 'w.hook_modifier', (['"""irc_in_privmsg"""', '"""msg_cb"""', '""""""'], {}), "('irc_in_privmsg', 'msg_cb', '')\n", (5131, 5163), True, 'import weechat as w\n'), ((5168, 5242), 'weechat.hook_config', 'w.hook_config', (["('plugins.var.python.' + SCRIPT_NAME + '.*')", '"""config_cb"""', '""""""'], {}), "('plugins.var.python.' + SCRIPT_NAME + '.*', 'config_cb', '')\n", (5181, 5242), True, 'import weechat as w\n'), ((5570, 5636), 'weechat.hook_signal', 'w.hook_signal', (['"""nicklist_nick_added"""', '"""nicklist_nick_added_cb"""', '""""""'], {}), "('nicklist_nick_added', 'nicklist_nick_added_cb', '')\n", (5583, 5636), True, 'import weechat as w\n'), ((2217, 2253), 'weechat.config_get_plugin', 'w.config_get_plugin', (['"""nick_re_count"""'], {}), "('nick_re_count')\n", (2236, 2253), True, 'import weechat as w\n'), ((2560, 2592), 'weechat.config_get_plugin', 'w.config_get_plugin', (['"""znc_ts_re"""'], {}), "('znc_ts_re')\n", (2579, 2592), True, 'import weechat as w\n'), ((4418, 4461), 'weechat.nicklist_search_nick', 'w.nicklist_search_nick', (['buffer', 'group', 'name'], {}), '(buffer, group, name)\n', (4440, 4461), True, 'import weechat as w\n'), ((4471, 4569), 'weechat.nicklist_add_nick', 'w.nicklist_add_nick', (['buffer', 'group', 'name', '"""weechat.color.nicklist_group"""', '"""~"""', '"""lightgreen"""', '(1)'], {}), "(buffer, group, name, 'weechat.color.nicklist_group',\n '~', 'lightgreen', 1)\n", (4490, 4569), True, 'import weechat as w\n'), ((4728, 4807), 'weechat.nicklist_add_group', 'w.nicklist_add_group', (['buffer', '""""""', 'group_name', '"""weechat.color.nicklist_group"""', '(1)'], {}), "(buffer, '', group_name, 'weechat.color.nicklist_group', 1)\n", (4748, 4807), True, 'import weechat as w\n'), ((2101, 2131), 'weechat.config_is_set_plugin', 'w.config_is_set_plugin', (['option'], {}), '(option)\n', (2123, 2131), True, 'import weechat as w\n'), ((2145, 2181), 'weechat.config_set_plugin', 'w.config_set_plugin', (['option', 'default'], {}), '(option, default)\n', (2164, 2181), True, 'import weechat as w\n'), ((2281, 2313), 'weechat.config_get_plugin', 'w.config_get_plugin', (['"""bot_nicks"""'], {}), "('bot_nicks')\n", (2300, 2313), True, 'import weechat as w\n'), ((2484, 2511), 'weechat.config_get_plugin', 'w.config_get_plugin', (['option'], {}), '(option)\n', (2503, 2511), True, 'import weechat as w\n'), ((3850, 3874), 're.sub', 're.sub', (['"""\\\\s"""', '"""_"""', 'nick'], {}), "('\\\\s', '_', nick)\n", (3856, 3874), False, 'import re\n'), ((4041, 4111), 'weechat.info_get', 'w.info_get', (['"""irc_buffer"""', "('%s,%s' % (modifier_data, parsed['channel']))"], {}), "('irc_buffer', '%s,%s' % (modifier_data, parsed['channel']))\n", (4051, 4111), True, 'import weechat as w\n')]
|
import xlrd
import pandas as pd
from openpyxl import load_workbook
from xlrd import open_workbook
import nltk
from nltk.tree import Tree
from nltk.parse.generate import generate
from nltk.tree import *
import os
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
import xml.etree.ElementTree as etree
import xlrd
import time
import sys
from nltk import induce_pcfg
from nltk.parse import pchart
from nltk import PCFG
from nltk.draw.util import CanvasFrame
import nltk
import re
import pandas
sys.setrecursionlimit(5000)
##start = time.time()
##PERIOD_OF_TIME = 15 # 5min
##while True :
sen = input("Enter your sentence: ")
sent = word_tokenize(sen)
#sen = "مهربانی وکړه بیاي ووايه . يوسف غلے شو . دیړ وخت وشو نہ خکاری"
##for i in sent_tokenize(sen):
## print(i)
##
##gram =("""
##S -> NP VP [1.0]
##NP -> ADJ [0.0041666667] | N [0.0041666667] | N N [0.3] | PN [0.0041666667] | ADJ N [0.0041666667] | AV N [0.0041666667] | N ADJ [0.1] | NU NU [0.5] | NU AP [0.0041666667] | ADJ AP [0.0041666667] | AV [0.0041666667] | ADJ AP [0.0041666667] | N PN [0.0041666667] | VP N [0.0041666667] | PN ADV [0.0041666667] | AV ADV [0.0041666667] | N VP [0.0041666667] | NU N [0.0041666667] | NU [0.0041666667] | V [0.0041666667] | AV AP [0.0041666667] | ADJ VP [0.0041666667] | N AP [0.0041666667] | ADJ AP [0.0041666667] | ADJ NP [0.0041666667] | N NP [0.0041666667]
##VP -> V AP [0.557] | ADJ V [0.05] | AP [0.00625] | NP [0.00625] | AV PN [0.056] | V ADV [0.00625] | V [0.00625] | AV AP [0.00625] | N ADV [0.00625] | N [0.00625] | NU N [0.1] | N V [0.0375] | ADJ AP [0.00625] | N AV [0.10] | V ADJ [0.00625] | ADJ NP [0.00625] | N AP [0.00625] | N NP [0.00625] | NP NP [0.00625] | AV VP [0.00625] | ADJ VP [0.00625] | N VP [0.00625]
##AP -> AV V [0.056] | V NP [0.166] | ADJ V [0.051] | NP VP [0.0142857143] | AV NP [0.0142857143] | PN NP [0.0142857143] | N V [0.037] | NU N [0.2] | AV N [0.2] | ADJ PN [0.066] | V VP [0.0142857143] | N ADV [0.0142857143] | PN AV [0.024] | ADJ VP [0.0142857143] | PN N [0.1] | AV ADV [0.0142857143]
##ADV -> ADV ADJ [0.4] | PN VP [0.025] | N AP [0.025] | AV AV [0.5] | V AP [0.025] | N V [0.025]
##""")
#0.0769231
gram = ("""
S -> NP NP RP VP RP NP PRP VP [0.0769230769]
NP -> N [0.0294118]
NP -> PRP N [0.0294118]
VP -> V [0.05]
NP -> N N [0.0294118]
VP -> V [0.05]
S -> NP RP POP NP NP PP ADJ VP [0.0769230769]
NP -> PRP N [0.0294118]
NP -> N [0.0294118]
NP -> PRP N [0.0294118]
PP -> NP POP [0.2]
NP -> PRP N [0.0294118]
VP -> V [0.05]
S -> ADVP INT CO PP ADV INT RP ADJ PP NP ADV VP [0.0769230769]
ADVP -> ADV NP [0.333333]
NP -> N [0.0294118]
PP -> NP POP [0.6]
NP -> N [0.0294118]
NP -> N [0.0294118]
NP -> PRN [0.0294118]
VP -> V [0.1]
S -> NP PP NP NP VP [0.0769230769]
NP -> N [0.0294118]
PP -> PRP NP [0.2]
NP -> PRP N [0.0294118]
NP -> PRP N [0.0294118]
NP -> PRP N N [0.0294118]
VP -> V [0.05]
S -> NP ADJP ADVP VP [0.0769230769]
NP -> NP CO NP [0.0294118]
NP -> PRP N [0.0294118]
NP -> PRP N [0.0294118]
ADJP -> ADJ ADJ NP [0.333333]
NP -> N [0.0294118]
ADVP -> ADV NP [0.333333]
NP -> N [0.0294118]
VP -> V [0.05]
S -> PP VP CO NP VP [0.0769230769]
NP -> N N [0.0294118]
VP -> V [0.05]
NP -> N [0.0294118]
VP -> V [0.05]
S -> NP NP NP VP VP [0.0769230769]
NP -> PRN [0.0294118]
NP -> PRP N N [0.0294118]
NP -> PRP N [0.0294118]
VP -> V [0.05]
VP -> V [0.1]
S -> NP NP VP [0.0769230769]
NP -> PRN [0.0294118]
NP -> N [0.0294118]
VP -> V [0.05]
S -> NP ADJP VP [0.0769230769]
NP -> PRN [0.0294118]
ADJP -> ADJ NP [0.333333]
NP -> N N [0.0294118]
VP -> V [0.05]
S -> NP ADJP VP VP [0.0769230769]
NP -> PRN [0.0294118]
ADJP -> ADJ NP [0.333333]
NP -> N [0.0294118]
VP -> V [0.05]
VP -> V [0.05]
S -> NP ADJ VP VP [0.0769230769]
NP -> PRN [0.0588235]
VP -> V [0.1]
S -> NP VP VP VP [0.0769230769]
VP -> V [0.05]
S -> NP ADVP VP [0.0769230769]
NP -> PRN [0.0294118]
ADVP -> PRP ADV RP [0.333333]
VP -> V [0.05]
""")
##gram =("""
##S -> NP VP [1.0]
##NP -> ADJ [0] | N [0] | N N [0.4] | PN [0] | ADJ N [0] | AV N [0] | N ADJ [0.1] | NU NU [0.5] | NU AP [0] | ADJ AP [0] | AV [0] | ADJ AP [0] | N PN [0] | VP N [0] | PN ADV [0] | AV ADV [0] | N VP [0] | NU N [0] | NU [0] | V [0] | AV AP [0] | ADJ VP [0] | N AP [0] | ADJ AP [0] | ADJ NP [0] | N NP [0]
##VP -> V AP [0.557] | ADJ V [0.05] | AP [0.00625] | NP [0.00625] | AV PN [0.056] | V ADV [0.00625] | V [0.00625] | AV AP [0.00625] | N ADV [0.00625] | N [0.00625] | NU N [0.1] | N V [0.0375] | ADJ AP [0.00625] | N AV [0.10] | V ADJ [0.00625] | ADJ NP [0.00625] | N AP [0.00625] | N NP [0.00625] | NP NP [0.00625] | AV VP [0.00625] | ADJ VP [0.00625] | N VP [0.00625]
##AP -> AV V [0.056] | V NP [0.166] | ADJ V [0.051] | NP VP [0.0142857143] | AV NP [0.0142857143] | PN NP [0.0142857143] | N V [0.037] | NU N [0.2] | AV N [0.2] | ADJ PN [0.066] | V VP [0.0142857143] | N ADV [0.0142857143] | PN AV [0.024] | ADJ VP [0.0142857143] | PN N [0.1] | AV ADV [0.0142857143]
##ADV -> ADV ADJ [0.4] | PN VP [0.025] | N AP [0.025] | AV AV [0.5] | V AP [0.025] | N V [0.025]
##""")
##
##د هغه ناوړه ملګري وویل
##
##gram = ("""
##S -> NP VP [1.0]
##NP -> AV [0.5] | ADJ AP [0.5]
##VP -> AP [1.0]
##AP -> PN NP [0.5] | N V [0.5]
##AV -> "د" [1.0]
##PN -> "هغه" [1.0]
##ADJ -> "ناوړه" [1.0]
##V -> "وویل" [1.0]
##N -> "ملګري" [1.0]
##""")
##یوه وفاداره میرمن جوړه شوه
##gram = ("""
##S -> NP VP
##NP -> NU | N N
##VP -> NP NP
##
##""")
#دویم تن وویل
##gram =("""
##S -> NP VP
##NP -> V
##VP -> N V
##""")
##dic = pandas.read_csv("dictionary.csv")
##doc = pandas.read_csv("corpus2.csv", quotechar='"', delimiter=',')
#book = open_workbook("Pastho dictionary2.xlsx")
##for sheet in book.sheets():
## for rowidx in range(sheet.nrows):
## row = sheet.row(rowidx)
## for i in sent:
## for colidx,cell in enumerate(row):
## if cell.value == i:#row value
## #print ("Found Row Element")
## #print(rowidx, colidx)
## #print(cell.value)
## print(row)
## print('\n')
##
##book = load_workbook("Pastho dictionary2.xlsx")
##worksheet = book.sheetnames
##sheet = book["Sheet1"]
##c=1
##for i in sheet:
## d = sheet.cell(row=c, column=2)
##
## if(d.value is None):
## print(" Try Again ")
##
##
## elif (d.value == " Noun "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "N ->" + "'" + cell.value + "'" + " " + "[0.0000851934]" + "\n"
##
##
## elif (d.value == "Noun "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "N ->" + "'" + cell.value + "'" + " " + "[0.0000851934]" + "\n"
##
##
## elif (d.value == " Verb "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "V ->" + "'" + cell.value + "'" + " " + "[0.0005530973]" + "\n"
##
##
## elif (d.value == "Verb "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "V ->" + "'" + cell.value + "'" + " " + "[0.0005530973]" + "\n"
##
##
## elif (d.value == " Adjective "):
##
## cell = sheet.cell(row=c, column=1)
## gram = gram + "ADJ ->" + "'" + cell.value + "'" + " " + "[0.000280112]" + "\n"
##
##
## elif (d.value == "Adjective "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "ADJ ->" + "'" + cell.value + "'" + " " + "[0.000280112]" + "\n"
##
##
## elif (d.value == " Participles "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "PP ->" + "'" + cell.value + "'" + " " + "[0.0588235294]" + "\n"
## #print("hi")
##
## elif (d.value == " Adverb "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "AV ->" + "'" + cell.value + "'" + " " + "[0.0025380711]" + "\n"
##
##
## elif (d.value == "Adverb "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "AV ->" + "'" + cell.value + "'" + " " + "[0.0025380711]" + "\n"
##
##
## elif (d.value == " numerical "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "NU ->" + "'" + cell.value + "'" + " " + "[0.0222222222]" + "\n"
##
##
## elif (d.value == "numerical "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "NU ->" + "'" + cell.value + "'" + " " + "[0.0222222222]" + "\n"
##
##
## elif (d.value == " proNoun "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "PN ->" + "'" + cell.value + "'" + " " + "[0.0125]" + "\n"
##
##
##
## elif (d.value == " ProNoun "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "PN ->" + "'" + cell.value + "'" + " " + "[0.0125]" + "\n"
##
##
##
## elif (d.value == "ProNoun "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "PN ->" + "'" + cell.value + "'" + " " + "[0.0125]" + "\n"
##
##
##
## elif (d.value == " suffix "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "SA ->" + "'" + cell.value + "'" + " " + "[0.0476190476]" + "\n"
##
##
##
## elif (d.value == " Suffix "):
## cell = sheet.cell(row=c, column=1)
## gram = gram + "SA ->" + "'" + cell.value + "'" + " " + "[0.0476190476]" + "\n"
## c=c+1
#print(gram)
grammar1 = nltk.PCFG.fromstring(gram)
sr_parser = nltk.ViterbiParser(grammar1)
#max=0
for tree in sr_parser.parse(sent):
print(tree)
##
## with open("prob.txt", "a", encoding='utf-8') as output:
## output.write(str(tree))
## output.write("\n")
##
## if (tree.prob() > max):
## max=tree.prob()
## max_tree=tree
##
##print(max)
##print(max_tree)
##sr_parser = nltk.parse.chart.ChartParser(grammar1)
#sr_parser = nltk.RecursiveDescentParser(grammar1)
#sr_parser = nltk.ShiftReduceParser(grammar1)
##for tree in sr_parser.parse(sent):
## #values = tree
##
## with open("test.txt", "a", encoding='utf-8') as output:
## output.write(str(tree))
## output.write("\n")
##
## print(tree)
## #break
##
|
[
"nltk.PCFG.fromstring",
"sys.setrecursionlimit",
"nltk.ViterbiParser",
"nltk.tokenize.word_tokenize"
] |
[((547, 574), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(5000)'], {}), '(5000)\n', (568, 574), False, 'import sys\n'), ((696, 714), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['sen'], {}), '(sen)\n', (709, 714), False, 'from nltk.tokenize import word_tokenize\n'), ((9894, 9920), 'nltk.PCFG.fromstring', 'nltk.PCFG.fromstring', (['gram'], {}), '(gram)\n', (9914, 9920), False, 'import nltk\n'), ((9934, 9962), 'nltk.ViterbiParser', 'nltk.ViterbiParser', (['grammar1'], {}), '(grammar1)\n', (9952, 9962), False, 'import nltk\n')]
|
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import string
import unittest
from uuid import uuid4
from unittest import mock
from random import random, randint
from datetime import datetime, timedelta
pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # noqa
sys.path.insert(0, pkg_root) # noqa
import dss
from dss import Replica
from dss.util.version import datetime_to_version_format
from dss.storage.identifiers import UUID_REGEX, TOMBSTONE_SUFFIX
from dss.storage.bundles import enumerate_available_bundles, get_tombstoned_bundles
from dss.logging import configure_test_logging
from tests.infra import testmode, MockStorageHandler
class MockCloudBlobstoreHandle:
bundle_uuid: str = None
tombstoned_bundles: list = None
untombstoned_bundles: list = None
tombstones: list = None
listing: list = None
@classmethod
def list(cls, bucket, pfx):
for fqid in cls.listing:
yield fqid
@classmethod
def gen_bundle_listing(cls,
number_of_versions: int,
versioned_tombstone_probability: float=0.0,
unversioned_tombstone_probability: float=0.0):
cls.bundle_uuid = str(uuid4())
untombstoned_bundles = list()
tombstoned_bundles = list()
tombstones = list()
for _ in range(number_of_versions):
random_date = datetime.utcnow() - timedelta(days=randint(0, 364),
hours=randint(0, 23),
minutes=randint(0, 59))
bundle_fqid = f"{cls.bundle_uuid}.{datetime_to_version_format(random_date)}"
bundle_key = f"bundles/{bundle_fqid}"
if random() <= versioned_tombstone_probability:
tombstones.append(f"{bundle_key}.{TOMBSTONE_SUFFIX}")
tombstoned_bundles.append(bundle_key)
else:
untombstoned_bundles.append(bundle_key)
cls.tombstoned_bundles = tombstoned_bundles
cls.untombstoned_bundles = untombstoned_bundles
cls.tombstones = tombstones
listing = untombstoned_bundles + tombstoned_bundles + tombstones
if random() <= unversioned_tombstone_probability:
listing.append(f"bundles/{cls.bundle_uuid}.{TOMBSTONE_SUFFIX}")
cls.listing = sorted(listing)
def setUpModule():
configure_test_logging()
@testmode.standalone
class TestRegexIdentifiers(unittest.TestCase):
def test_REGEX_MATCHING(self):
chars = string.ascii_lowercase + string.digits
for i, c in enumerate(chars):
uuid = f'{c*8}-{c*4}-{c*4}-{c*4}-{c*12}'
self.assertTrue(UUID_REGEX.match(uuid), uuid)
for i in range(100):
uuid = str(uuid4())
self.assertTrue(UUID_REGEX.match(uuid), uuid)
@testmode.standalone
class TestStorageBundles(unittest.TestCase):
@classmethod
def setUpClass(cls):
dss.Config.set_config(dss.BucketConfig.TEST)
@mock.patch("dss.Config.get_blobstore_handle")
def test_uuid_enumeration(self, mock_list_v2):
mock_list_v2.return_value = MockStorageHandler()
resp = enumerate_available_bundles(replica='aws')
for x in resp['bundles']:
self.assertNotIn('.'.join([x['uuid'], x['version']]), MockStorageHandler.dead_bundles)
self.assertNotIn('.'.join([x['uuid'], x['version']]), MockStorageHandler.dead_bundles_without_suffix)
@mock.patch("dss.Config.get_blobstore_handle")
def test_tombstone_pages(self, mock_list_v2):
mock_list_v2.return_value = MockStorageHandler()
for tests in MockStorageHandler.test_per_page:
test_size = tests['size']
last_good_bundle = tests['last_good_bundle']
resp = enumerate_available_bundles(replica='aws', per_page=test_size)
page_one = resp['bundles']
for x in resp['bundles']:
self.assertNotIn('.'.join([x['uuid'], x['version']]), MockStorageHandler.dead_bundles)
self.assertNotIn('.'.join([x['uuid'], x['version']]), MockStorageHandler.dead_bundles_without_suffix)
self.assertDictEqual(last_good_bundle, resp['bundles'][-1])
search_after = resp['search_after']
resp = enumerate_available_bundles(replica='aws', per_page=test_size,
search_after=search_after)
for x in resp['bundles']:
self.assertNotIn('.'.join([x['uuid'], x['version']]), MockStorageHandler.dead_bundles)
self.assertNotIn('.'.join([x['uuid'], x['version']]), MockStorageHandler.dead_bundles_without_suffix)
self.assertNotIn(x, page_one)
# TODO add test to enumerate list and ensure all bundles that should be present are there.
# TODO: Add test for dss.storage.bundles.get_bundle_manifest
# TODO: Add test for dss.storage.bundles.save_bundle_manifest
@mock.patch("dss.storage.bundles.Config.get_blobstore_handle", return_value=MockCloudBlobstoreHandle)
def test_get_tombstoned_bundles(self, _):
with self.subTest("Retrieve bundle fqid associated with versioned tombstone"):
mock_handle = MockCloudBlobstoreHandle
mock_handle.gen_bundle_listing(1, versioned_tombstone_probability=1.0)
for e in get_tombstoned_bundles(Replica.aws, mock_handle.tombstones[-1]):
self.assertEqual(mock_handle.tombstoned_bundles[0], e)
with self.subTest("Retrieve bundle fqids associated with unversioned tombstone"):
mock_handle.gen_bundle_listing(10,
versioned_tombstone_probability=0.5,
unversioned_tombstone_probability=1.0)
unversioned_tombstone_key = f"bundles/{mock_handle.bundle_uuid}.{TOMBSTONE_SUFFIX}"
listed_keys = {e for e in get_tombstoned_bundles(Replica.aws, unversioned_tombstone_key)}
expected_keys = {e for e in mock_handle.untombstoned_bundles}
unexpected_keys = {e for e in mock_handle.tombstoned_bundles}
self.assertEqual(listed_keys, expected_keys)
self.assertNotIn(unversioned_tombstone_key, listed_keys)
self.assertEqual(0, len(unexpected_keys.intersection(listed_keys)))
with self.subTest("Passing in non-tombstone key should raise"):
mock_handle.gen_bundle_listing(1, versioned_tombstone_probability=1.0)
with self.assertRaises(ValueError):
for e in get_tombstoned_bundles(Replica.aws, "asdf"):
pass
if __name__ == '__main__':
unittest.main()
|
[
"dss.util.version.datetime_to_version_format",
"sys.path.insert",
"dss.logging.configure_test_logging",
"datetime.datetime.utcnow",
"dss.storage.identifiers.UUID_REGEX.match",
"dss.storage.bundles.get_tombstoned_bundles",
"tests.infra.MockStorageHandler",
"uuid.uuid4",
"os.path.dirname",
"dss.Config.set_config",
"dss.storage.bundles.enumerate_available_bundles",
"unittest.main",
"random.random",
"unittest.mock.patch",
"random.randint"
] |
[((298, 326), 'sys.path.insert', 'sys.path.insert', (['(0)', 'pkg_root'], {}), '(0, pkg_root)\n', (313, 326), False, 'import sys\n'), ((2450, 2474), 'dss.logging.configure_test_logging', 'configure_test_logging', ([], {}), '()\n', (2472, 2474), False, 'from dss.logging import configure_test_logging\n'), ((3073, 3118), 'unittest.mock.patch', 'mock.patch', (['"""dss.Config.get_blobstore_handle"""'], {}), "('dss.Config.get_blobstore_handle')\n", (3083, 3118), False, 'from unittest import mock\n'), ((3538, 3583), 'unittest.mock.patch', 'mock.patch', (['"""dss.Config.get_blobstore_handle"""'], {}), "('dss.Config.get_blobstore_handle')\n", (3548, 3583), False, 'from unittest import mock\n'), ((5035, 5140), 'unittest.mock.patch', 'mock.patch', (['"""dss.storage.bundles.Config.get_blobstore_handle"""'], {'return_value': 'MockCloudBlobstoreHandle'}), "('dss.storage.bundles.Config.get_blobstore_handle', return_value=\n MockCloudBlobstoreHandle)\n", (5045, 5140), False, 'from unittest import mock\n'), ((6744, 6759), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6757, 6759), False, 'import unittest\n'), ((256, 281), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (271, 281), False, 'import os\n'), ((3022, 3066), 'dss.Config.set_config', 'dss.Config.set_config', (['dss.BucketConfig.TEST'], {}), '(dss.BucketConfig.TEST)\n', (3043, 3066), False, 'import dss\n'), ((3206, 3226), 'tests.infra.MockStorageHandler', 'MockStorageHandler', ([], {}), '()\n', (3224, 3226), False, 'from tests.infra import testmode, MockStorageHandler\n'), ((3242, 3284), 'dss.storage.bundles.enumerate_available_bundles', 'enumerate_available_bundles', ([], {'replica': '"""aws"""'}), "(replica='aws')\n", (3269, 3284), False, 'from dss.storage.bundles import enumerate_available_bundles, get_tombstoned_bundles\n'), ((3671, 3691), 'tests.infra.MockStorageHandler', 'MockStorageHandler', ([], {}), '()\n', (3689, 3691), False, 'from tests.infra import testmode, MockStorageHandler\n'), ((1248, 1255), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1253, 1255), False, 'from uuid import uuid4\n'), ((2264, 2272), 'random.random', 'random', ([], {}), '()\n', (2270, 2272), False, 'from random import random, randint\n'), ((3861, 3923), 'dss.storage.bundles.enumerate_available_bundles', 'enumerate_available_bundles', ([], {'replica': '"""aws"""', 'per_page': 'test_size'}), "(replica='aws', per_page=test_size)\n", (3888, 3923), False, 'from dss.storage.bundles import enumerate_available_bundles, get_tombstoned_bundles\n'), ((4361, 4455), 'dss.storage.bundles.enumerate_available_bundles', 'enumerate_available_bundles', ([], {'replica': '"""aws"""', 'per_page': 'test_size', 'search_after': 'search_after'}), "(replica='aws', per_page=test_size, search_after\n =search_after)\n", (4388, 4455), False, 'from dss.storage.bundles import enumerate_available_bundles, get_tombstoned_bundles\n'), ((5424, 5487), 'dss.storage.bundles.get_tombstoned_bundles', 'get_tombstoned_bundles', (['Replica.aws', 'mock_handle.tombstones[-1]'], {}), '(Replica.aws, mock_handle.tombstones[-1])\n', (5446, 5487), False, 'from dss.storage.bundles import enumerate_available_bundles, get_tombstoned_bundles\n'), ((1429, 1446), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1444, 1446), False, 'from datetime import datetime, timedelta\n'), ((1793, 1801), 'random.random', 'random', ([], {}), '()\n', (1799, 1801), False, 'from random import random, randint\n'), ((2754, 2776), 'dss.storage.identifiers.UUID_REGEX.match', 'UUID_REGEX.match', (['uuid'], {}), '(uuid)\n', (2770, 2776), False, 'from dss.storage.identifiers import UUID_REGEX, TOMBSTONE_SUFFIX\n'), ((2837, 2844), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2842, 2844), False, 'from uuid import uuid4\n'), ((2874, 2896), 'dss.storage.identifiers.UUID_REGEX.match', 'UUID_REGEX.match', (['uuid'], {}), '(uuid)\n', (2890, 2896), False, 'from dss.storage.identifiers import UUID_REGEX, TOMBSTONE_SUFFIX\n'), ((6641, 6684), 'dss.storage.bundles.get_tombstoned_bundles', 'get_tombstoned_bundles', (['Replica.aws', '"""asdf"""'], {}), "(Replica.aws, 'asdf')\n", (6663, 6684), False, 'from dss.storage.bundles import enumerate_available_bundles, get_tombstoned_bundles\n'), ((1686, 1725), 'dss.util.version.datetime_to_version_format', 'datetime_to_version_format', (['random_date'], {}), '(random_date)\n', (1712, 1725), False, 'from dss.util.version import datetime_to_version_format\n'), ((5994, 6056), 'dss.storage.bundles.get_tombstoned_bundles', 'get_tombstoned_bundles', (['Replica.aws', 'unversioned_tombstone_key'], {}), '(Replica.aws, unversioned_tombstone_key)\n', (6016, 6056), False, 'from dss.storage.bundles import enumerate_available_bundles, get_tombstoned_bundles\n'), ((1464, 1479), 'random.randint', 'randint', (['(0)', '(364)'], {}), '(0, 364)\n', (1471, 1479), False, 'from random import random, randint\n'), ((1543, 1557), 'random.randint', 'randint', (['(0)', '(23)'], {}), '(0, 23)\n', (1550, 1557), False, 'from random import random, randint\n'), ((1623, 1637), 'random.randint', 'randint', (['(0)', '(59)'], {}), '(0, 59)\n', (1630, 1637), False, 'from random import random, randint\n')]
|
#!/usr/bin/env python
# coding: utf-8
# BEGIN --- required only for testing, remove in real world code --- BEGIN
import os
import sys
THISDIR = os.path.dirname(os.path.abspath(__file__))
APPDIR = os.path.abspath(os.path.join(THISDIR, os.path.pardir, os.path.pardir))
sys.path.insert(0, APPDIR)
# END --- required only for testing, remove in real world code --- END
#
# See http://tools.cherrypy.org/wiki/ModWSGI
#
import cherrypy
from pyjsonrpc.cp import CherryPyJsonRpc, rpcmethod
class Root(CherryPyJsonRpc):
@rpcmethod
def add(self, a, b):
"""Test method"""
return a + b
index = CherryPyJsonRpc.request_handler
# WSGI-Application
application = cherrypy.Application(Root())
|
[
"os.path.abspath",
"sys.path.insert",
"os.path.join"
] |
[((268, 294), 'sys.path.insert', 'sys.path.insert', (['(0)', 'APPDIR'], {}), '(0, APPDIR)\n', (283, 294), False, 'import sys\n'), ((161, 186), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (176, 186), False, 'import os\n'), ((213, 266), 'os.path.join', 'os.path.join', (['THISDIR', 'os.path.pardir', 'os.path.pardir'], {}), '(THISDIR, os.path.pardir, os.path.pardir)\n', (225, 266), False, 'import os\n')]
|
# Copyright (c) 2021 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import logging
import argparse
import sys
import warnings
import sys
import time
import json
import cudf
from sklearn import metrics
import pandas as pd
import tritonclient.http as httpclient
import tritonclient.grpc as grpcclient
from tritonclient.utils import *
from google.cloud import pubsub_v1
from google.protobuf.json_format import MessageToJson
from google.pubsub_v1.types import Encoding
def publish_batch(project_id, topic_id, current_batch, pred_label):
# Initialize a Publisher client.
client = pubsub_v1.PublisherClient()
topic_path = client.topic_path(project_id, topic_id)
batch_size = len(pred_label)
df = current_batch.to_pandas()
for i in range(batch_size):
row = df.iloc[i]
frame = {
"input0": row[CONTINUOUS_COLUMNS].values.tolist(),
"input1": row[CATEGORICAL_COLUMNS].values.tolist(),
"trueval": row['label'],
"predval": response.as_numpy("OUTPUT0")[i].astype('float64')
}
payload = json.dumps(frame).encode('utf-8')
# When you publish a message, the client returns a future.
api_future = client.publish(topic_path, data=''.encode(), payload=payload)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-u',
'--triton_grpc_url',
type=str,
required=False,
default='localhost:8001',
help='URL to Triton gRPC Endpoint')
parser.add_argument('-m',
'--model_name',
type=str,
required=False,
default='dcn_ens',
help='Name of the model ensemble to load')
parser.add_argument('-d',
'--test_data',
type=str,
required=False,
default='/crit_int_pq/day_23.parquet',
help='Path to a test .parquet file. Default')
parser.add_argument('-b',
'--batch_size',
type=int,
required=False,
default=64,
help='Batch size. Max is 64 at the moment, but this max size could be specified when create the model and the ensemble.')
parser.add_argument('-n',
'--n_batches',
type=int,
required=False,
default=1,
help='Number of batches of data to send')
parser.add_argument('-v',
'--verbose',
type=bool,
required=False,
default=False,
help='Verbosity, True or False')
parser.add_argument("--project_id",
type=str,
required=True,
default="dl-tme",
help="Google Cloud project ID")
parser.add_argument("--topic_id",
type=str,
required=True,
default="pubsub",
help="Pub/Sub topic ID")
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO, datefmt='%d-%m-%y %H:%M:%S')
logging.info(f"Args: {args}")
# warnings can be disabled
if not sys.warnoptions:
warnings.simplefilter("ignore")
try:
triton_client = grpcclient.InferenceServerClient(url=args.triton_grpc_url, verbose=args.verbose)
logging.info("Triton client created.")
triton_client.is_model_ready(args.model_name)
logging.info(f"Model {args.model_name} is ready!")
except Exception as e:
logging.error(f"Channel creation failed: {str(e)}")
sys.exit()
# Load the dataset
CATEGORICAL_COLUMNS = ['C' + str(x) for x in range(1,27)]
CONTINUOUS_COLUMNS = ['I' + str(x) for x in range(1,14)]
LABEL_COLUMNS = ['label']
col_names = CATEGORICAL_COLUMNS + CONTINUOUS_COLUMNS
col_dtypes = [np.int32]*26 + [np.int64]*13
logging.info("Reading dataset..")
all_batches = cudf.read_parquet(args.test_data, num_rows=args.batch_size*args.n_batches)
results=[]
with grpcclient.InferenceServerClient(url=args.triton_grpc_url) as client:
for batch in range(args.n_batches):
logging.info(f"Requesting inference for batch {batch}..")
start_idx = batch*args.batch_size
end_idx = (batch+1)*(args.batch_size)
# Convert the batch to a triton inputs
current_batch = all_batches[start_idx:end_idx]
columns = [(col, current_batch[col]) for col in col_names]
inputs = []
for i, (name, col) in enumerate(columns):
d = col.values_host.astype(col_dtypes[i])
d = d.reshape(len(d), 1)
inputs.append(grpcclient.InferInput(name, d.shape, np_to_triton_dtype(col_dtypes[i])))
inputs[i].set_data_from_numpy(d)
outputs = []
outputs.append(grpcclient.InferRequestedOutput("OUTPUT0"))
response = client.infer(args.model_name, inputs, request_id=str(1), outputs=outputs)
results.extend(response.as_numpy("OUTPUT0"))
publish_batch(args.project_id, args.topic_id,
current_batch,
response.as_numpy("OUTPUT0"))
logging.info(f"ROC AUC Score: {metrics.roc_auc_score(all_batches[LABEL_COLUMNS].values.tolist(), results)}")
|
[
"logging.basicConfig",
"argparse.ArgumentParser",
"json.dumps",
"logging.info",
"google.cloud.pubsub_v1.PublisherClient",
"sys.exit",
"warnings.simplefilter",
"tritonclient.grpc.InferenceServerClient",
"cudf.read_parquet",
"tritonclient.grpc.InferRequestedOutput"
] |
[((1240, 1267), 'google.cloud.pubsub_v1.PublisherClient', 'pubsub_v1.PublisherClient', ([], {}), '()\n', (1265, 1267), False, 'from google.cloud import pubsub_v1\n'), ((1965, 1990), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1988, 1990), False, 'import argparse\n'), ((4047, 4155), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(message)s"""', 'level': 'logging.INFO', 'datefmt': '"""%d-%m-%y %H:%M:%S"""'}), "(format='%(asctime)s - %(message)s', level=logging.INFO,\n datefmt='%d-%m-%y %H:%M:%S')\n", (4066, 4155), False, 'import logging\n'), ((4156, 4185), 'logging.info', 'logging.info', (['f"""Args: {args}"""'], {}), "(f'Args: {args}')\n", (4168, 4185), False, 'import logging\n'), ((4958, 4991), 'logging.info', 'logging.info', (['"""Reading dataset.."""'], {}), "('Reading dataset..')\n", (4970, 4991), False, 'import logging\n'), ((5010, 5086), 'cudf.read_parquet', 'cudf.read_parquet', (['args.test_data'], {'num_rows': '(args.batch_size * args.n_batches)'}), '(args.test_data, num_rows=args.batch_size * args.n_batches)\n', (5027, 5086), False, 'import cudf\n'), ((4255, 4286), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (4276, 4286), False, 'import warnings\n'), ((4321, 4406), 'tritonclient.grpc.InferenceServerClient', 'grpcclient.InferenceServerClient', ([], {'url': 'args.triton_grpc_url', 'verbose': 'args.verbose'}), '(url=args.triton_grpc_url, verbose=args.verbose\n )\n', (4353, 4406), True, 'import tritonclient.grpc as grpcclient\n'), ((4410, 4448), 'logging.info', 'logging.info', (['"""Triton client created."""'], {}), "('Triton client created.')\n", (4422, 4448), False, 'import logging\n'), ((4512, 4562), 'logging.info', 'logging.info', (['f"""Model {args.model_name} is ready!"""'], {}), "(f'Model {args.model_name} is ready!')\n", (4524, 4562), False, 'import logging\n'), ((5111, 5169), 'tritonclient.grpc.InferenceServerClient', 'grpcclient.InferenceServerClient', ([], {'url': 'args.triton_grpc_url'}), '(url=args.triton_grpc_url)\n', (5143, 5169), True, 'import tritonclient.grpc as grpcclient\n'), ((4659, 4669), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4667, 4669), False, 'import sys\n'), ((5238, 5295), 'logging.info', 'logging.info', (['f"""Requesting inference for batch {batch}.."""'], {}), "(f'Requesting inference for batch {batch}..')\n", (5250, 5295), False, 'import logging\n'), ((1737, 1754), 'json.dumps', 'json.dumps', (['frame'], {}), '(frame)\n', (1747, 1754), False, 'import json\n'), ((5957, 5999), 'tritonclient.grpc.InferRequestedOutput', 'grpcclient.InferRequestedOutput', (['"""OUTPUT0"""'], {}), "('OUTPUT0')\n", (5988, 5999), True, 'import tritonclient.grpc as grpcclient\n')]
|
"""
throughput dialog
"""
import tkinter as tk
from tkinter import ttk
from typing import TYPE_CHECKING
from core.gui.dialogs.colorpicker import ColorPickerDialog
from core.gui.dialogs.dialog import Dialog
from core.gui.themes import FRAME_PAD, PADX, PADY
if TYPE_CHECKING:
from core.gui.app import Application
class ThroughputDialog(Dialog):
def __init__(self, master: "Application", app: "Application"):
super().__init__(master, app, "Throughput Config", modal=False)
self.app = app
self.canvas = app.canvas
self.show_throughput = tk.IntVar(value=1)
self.exponential_weight = tk.IntVar(value=1)
self.transmission = tk.IntVar(value=1)
self.reception = tk.IntVar(value=1)
self.threshold = tk.DoubleVar(value=self.canvas.throughput_threshold)
self.width = tk.IntVar(value=self.canvas.throughput_width)
self.color = self.canvas.throughput_color
self.color_button = None
self.top.columnconfigure(0, weight=1)
self.draw()
def draw(self):
button = ttk.Checkbutton(
self.top,
variable=self.show_throughput,
text="Show Throughput Level On Every Link",
)
button.grid(sticky="ew")
button = ttk.Checkbutton(
self.top,
variable=self.exponential_weight,
text="Use Exponential Weighted Moving Average",
)
button.grid(sticky="ew")
button = ttk.Checkbutton(
self.top, variable=self.transmission, text="Include Transmissions"
)
button.grid(sticky="ew")
button = ttk.Checkbutton(
self.top, variable=self.reception, text="Include Receptions"
)
button.grid(sticky="ew")
label_frame = ttk.LabelFrame(self.top, text="Link Highlight", padding=FRAME_PAD)
label_frame.columnconfigure(0, weight=1)
label_frame.grid(sticky="ew")
scale = ttk.Scale(
label_frame,
from_=0,
to=1000,
value=0,
orient=tk.HORIZONTAL,
variable=self.threshold,
)
scale.grid(sticky="ew", pady=PADY)
frame = ttk.Frame(label_frame)
frame.grid(sticky="ew")
frame.columnconfigure(1, weight=1)
label = ttk.Label(frame, text="Threshold Kbps (0 disabled)")
label.grid(row=0, column=0, sticky="ew", padx=PADX)
entry = ttk.Entry(frame, textvariable=self.threshold)
entry.grid(row=0, column=1, sticky="ew", pady=PADY)
label = ttk.Label(frame, text="Width")
label.grid(row=1, column=0, sticky="ew", padx=PADX)
entry = ttk.Entry(frame, textvariable=self.width)
entry.grid(row=1, column=1, sticky="ew", pady=PADY)
label = ttk.Label(frame, text="Color")
label.grid(row=2, column=0, sticky="ew", padx=PADX)
self.color_button = tk.Button(
frame,
text=self.color,
command=self.click_color,
bg=self.color,
highlightthickness=0,
)
self.color_button.grid(row=2, column=1, sticky="ew")
self.draw_spacer()
frame = ttk.Frame(self.top)
frame.grid(sticky="ew")
for i in range(2):
frame.columnconfigure(i, weight=1)
button = ttk.Button(frame, text="Save", command=self.click_save)
button.grid(row=0, column=0, sticky="ew", padx=PADX)
button = ttk.Button(frame, text="Cancel", command=self.destroy)
button.grid(row=0, column=1, sticky="ew")
def click_color(self):
color_picker = ColorPickerDialog(self, self.app, self.color)
self.color = color_picker.askcolor()
self.color_button.config(bg=self.color, text=self.color, bd=0)
def click_save(self):
self.canvas.throughput_threshold = self.threshold.get()
self.canvas.throughput_width = self.width.get()
self.canvas.throughput_color = self.color
self.destroy()
|
[
"tkinter.IntVar",
"tkinter.ttk.Checkbutton",
"tkinter.ttk.Button",
"tkinter.ttk.Scale",
"tkinter.ttk.Entry",
"tkinter.ttk.Frame",
"tkinter.ttk.Label",
"tkinter.Button",
"tkinter.ttk.LabelFrame",
"tkinter.DoubleVar",
"core.gui.dialogs.colorpicker.ColorPickerDialog"
] |
[((577, 595), 'tkinter.IntVar', 'tk.IntVar', ([], {'value': '(1)'}), '(value=1)\n', (586, 595), True, 'import tkinter as tk\n'), ((630, 648), 'tkinter.IntVar', 'tk.IntVar', ([], {'value': '(1)'}), '(value=1)\n', (639, 648), True, 'import tkinter as tk\n'), ((677, 695), 'tkinter.IntVar', 'tk.IntVar', ([], {'value': '(1)'}), '(value=1)\n', (686, 695), True, 'import tkinter as tk\n'), ((721, 739), 'tkinter.IntVar', 'tk.IntVar', ([], {'value': '(1)'}), '(value=1)\n', (730, 739), True, 'import tkinter as tk\n'), ((765, 817), 'tkinter.DoubleVar', 'tk.DoubleVar', ([], {'value': 'self.canvas.throughput_threshold'}), '(value=self.canvas.throughput_threshold)\n', (777, 817), True, 'import tkinter as tk\n'), ((839, 884), 'tkinter.IntVar', 'tk.IntVar', ([], {'value': 'self.canvas.throughput_width'}), '(value=self.canvas.throughput_width)\n', (848, 884), True, 'import tkinter as tk\n'), ((1072, 1177), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.top'], {'variable': 'self.show_throughput', 'text': '"""Show Throughput Level On Every Link"""'}), "(self.top, variable=self.show_throughput, text=\n 'Show Throughput Level On Every Link')\n", (1087, 1177), False, 'from tkinter import ttk\n'), ((1270, 1382), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.top'], {'variable': 'self.exponential_weight', 'text': '"""Use Exponential Weighted Moving Average"""'}), "(self.top, variable=self.exponential_weight, text=\n 'Use Exponential Weighted Moving Average')\n", (1285, 1382), False, 'from tkinter import ttk\n'), ((1475, 1563), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.top'], {'variable': 'self.transmission', 'text': '"""Include Transmissions"""'}), "(self.top, variable=self.transmission, text=\n 'Include Transmissions')\n", (1490, 1563), False, 'from tkinter import ttk\n'), ((1631, 1708), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.top'], {'variable': 'self.reception', 'text': '"""Include Receptions"""'}), "(self.top, variable=self.reception, text='Include Receptions')\n", (1646, 1708), False, 'from tkinter import ttk\n'), ((1787, 1853), 'tkinter.ttk.LabelFrame', 'ttk.LabelFrame', (['self.top'], {'text': '"""Link Highlight"""', 'padding': 'FRAME_PAD'}), "(self.top, text='Link Highlight', padding=FRAME_PAD)\n", (1801, 1853), False, 'from tkinter import ttk\n'), ((1958, 2058), 'tkinter.ttk.Scale', 'ttk.Scale', (['label_frame'], {'from_': '(0)', 'to': '(1000)', 'value': '(0)', 'orient': 'tk.HORIZONTAL', 'variable': 'self.threshold'}), '(label_frame, from_=0, to=1000, value=0, orient=tk.HORIZONTAL,\n variable=self.threshold)\n', (1967, 2058), False, 'from tkinter import ttk\n'), ((2198, 2220), 'tkinter.ttk.Frame', 'ttk.Frame', (['label_frame'], {}), '(label_frame)\n', (2207, 2220), False, 'from tkinter import ttk\n'), ((2312, 2364), 'tkinter.ttk.Label', 'ttk.Label', (['frame'], {'text': '"""Threshold Kbps (0 disabled)"""'}), "(frame, text='Threshold Kbps (0 disabled)')\n", (2321, 2364), False, 'from tkinter import ttk\n'), ((2441, 2486), 'tkinter.ttk.Entry', 'ttk.Entry', (['frame'], {'textvariable': 'self.threshold'}), '(frame, textvariable=self.threshold)\n', (2450, 2486), False, 'from tkinter import ttk\n'), ((2563, 2593), 'tkinter.ttk.Label', 'ttk.Label', (['frame'], {'text': '"""Width"""'}), "(frame, text='Width')\n", (2572, 2593), False, 'from tkinter import ttk\n'), ((2670, 2711), 'tkinter.ttk.Entry', 'ttk.Entry', (['frame'], {'textvariable': 'self.width'}), '(frame, textvariable=self.width)\n', (2679, 2711), False, 'from tkinter import ttk\n'), ((2788, 2818), 'tkinter.ttk.Label', 'ttk.Label', (['frame'], {'text': '"""Color"""'}), "(frame, text='Color')\n", (2797, 2818), False, 'from tkinter import ttk\n'), ((2907, 3007), 'tkinter.Button', 'tk.Button', (['frame'], {'text': 'self.color', 'command': 'self.click_color', 'bg': 'self.color', 'highlightthickness': '(0)'}), '(frame, text=self.color, command=self.click_color, bg=self.color,\n highlightthickness=0)\n', (2916, 3007), True, 'import tkinter as tk\n'), ((3181, 3200), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.top'], {}), '(self.top)\n', (3190, 3200), False, 'from tkinter import ttk\n'), ((3324, 3379), 'tkinter.ttk.Button', 'ttk.Button', (['frame'], {'text': '"""Save"""', 'command': 'self.click_save'}), "(frame, text='Save', command=self.click_save)\n", (3334, 3379), False, 'from tkinter import ttk\n'), ((3458, 3512), 'tkinter.ttk.Button', 'ttk.Button', (['frame'], {'text': '"""Cancel"""', 'command': 'self.destroy'}), "(frame, text='Cancel', command=self.destroy)\n", (3468, 3512), False, 'from tkinter import ttk\n'), ((3614, 3659), 'core.gui.dialogs.colorpicker.ColorPickerDialog', 'ColorPickerDialog', (['self', 'self.app', 'self.color'], {}), '(self, self.app, self.color)\n', (3631, 3659), False, 'from core.gui.dialogs.colorpicker import ColorPickerDialog\n')]
|
class Human():
def __init__(self, name="Human"):
self.name = name
def action(self, game):
safe_input = False
while not safe_input:
pos = input("choose a position: ")
if pos == "draw":
game.draw()
elif pos == "exit":
import sys
sys.exit()
elif pos == "movable":
print(game.movable)
elif len(pos) == 2:
clone = game.clone()
pos = tuple(map(int, tuple(pos)))
if clone.can_play(pos):
safe_input = True
else:
print("// Error: Can't put it down //")
else:
print("Error: Invaild input")
return game.play(pos)
def game_finished(self, game):
pass
def all_game_finished(self):
pass
|
[
"sys.exit"
] |
[((344, 354), 'sys.exit', 'sys.exit', ([], {}), '()\n', (352, 354), False, 'import sys\n')]
|
#!/usr/bin/env python3
import matplotlib
matplotlib.use('pgf')
import matplotlib.pyplot as plt
import numpy as np
from multi_isotope_calculator import Multi_isotope
import plotsettings as ps
plt.style.use('seaborn-darkgrid')
plt.rcParams.update(ps.tex_fonts())
def main():
plot()
#figure5()
def figure1():
"""Compare data to Sharp paper (tails U234 vs product U235)"""
data = np.genfromtxt("../data/sharp_fig1.csv", delimiter=",")
data = data[np.argsort(data[:,0])]
composition = {'234': 5.5e-3, '235': (0.72, 3, 0.2)}
calculator = Multi_isotope(composition, feed=1, process='diffusion',
downblend=False)
results = np.empty(shape=data.shape, dtype=float)
for i, xp in enumerate(data[:,0]):
calculator.set_product_enrichment(xp*100)
calculator.calculate_staging()
results[i,0] = calculator.xp[3]
results[i,1] = calculator.xt[2]
data *= 100
results *= 100
pulls = 100 * (data[:,1]-results[:,1]) / data[:,1]
ylims = (1e299, 0)
for values in (data, results):
ylims = (min(ylims[0], min(values[:,1])),
max(ylims[1], max(values[:,1])))
return data, results, pulls
def figure5():
"""Compare data to Sharp paper (tails qty vs product qty)"""
sharp = np.genfromtxt("../data/sharp_fig5.csv", delimiter=",")
sharp = sharp[np.argsort(sharp[:,0])]
calc = Multi_isotope({'235': (0.711, 5, 0.2)}, max_swu=15000,
process='diffusion', downblend=False)
results = np.empty(shape=sharp.shape, dtype=float)
for i, xp in enumerate(sharp[:,0]):
calc.set_product_enrichment(xp*100)
calc.calculate_staging()
results[i,0] = calc.xp[3] * 100
results[i,1] = calc.t
sharp[:,0] *= 100
pulls = 100 * (sharp[:,1]-results[:,1]) / sharp[:,1]
return sharp, results, pulls
def plot():
fig1 = figure1()
fig5 = figure5()
figsize = ps.set_size(subplots=(2,2))
fig, ax = plt.subplots(figsize=figsize, nrows=2, ncols=2)
plt.rcParams.update({'lines.markersize': 4})
for i, (data, result, pulls) in enumerate((fig1, fig5)):
ax[0,i].plot(result[:,0], result[:,1], color=ps.colors(0),
label="MARC algorithm", zorder=2, linewidth=1)
ax[0,i].scatter(data[::3,0], data[::3,1], marker="x",
color=ps.colors(1), label="Sharp 2013", zorder=3)
ax[1,i].scatter(data[:,0], pulls, s=1, zorder=2)
ax[0,i].legend()
ax[0,i].set_xlim(0, 100)
ax[1,i].set_xlim(0, 100)
ax[1,i].set_xlabel(r"$x_{235,P}$ [\%at]")
ax[1,i].axhline(0, color="C3", zorder=1, linewidth=1)
ax[0,1].ticklabel_format(axis="y", style="sci", scilimits=(-2,2))
ax[0,0].set_ylabel(r"$x_{234,T}$ [\%at]")
ax[1,0].set_ylabel(r"relative difference [%]")
ax[0,1].set_ylabel(r"$T$ [kg/yr]")
ax[1,1].set_ylabel(r"relative difference [%]")
plt.tight_layout()
plt.savefig("../plots/checks_marc_sharp1.pdf")
plt.close()
return
if __name__=='__main__':
main()
|
[
"plotsettings.set_size",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"multi_isotope_calculator.Multi_isotope",
"plotsettings.tex_fonts",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.close",
"matplotlib.pyplot.rcParams.update",
"numpy.argsort",
"numpy.empty",
"matplotlib.pyplot.tight_layout",
"plotsettings.colors",
"numpy.genfromtxt",
"matplotlib.pyplot.subplots"
] |
[((42, 63), 'matplotlib.use', 'matplotlib.use', (['"""pgf"""'], {}), "('pgf')\n", (56, 63), False, 'import matplotlib\n'), ((194, 227), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-darkgrid"""'], {}), "('seaborn-darkgrid')\n", (207, 227), True, 'import matplotlib.pyplot as plt\n'), ((248, 262), 'plotsettings.tex_fonts', 'ps.tex_fonts', ([], {}), '()\n', (260, 262), True, 'import plotsettings as ps\n'), ((398, 452), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../data/sharp_fig1.csv"""'], {'delimiter': '""","""'}), "('../data/sharp_fig1.csv', delimiter=',')\n", (411, 452), True, 'import numpy as np\n'), ((567, 639), 'multi_isotope_calculator.Multi_isotope', 'Multi_isotope', (['composition'], {'feed': '(1)', 'process': '"""diffusion"""', 'downblend': '(False)'}), "(composition, feed=1, process='diffusion', downblend=False)\n", (580, 639), False, 'from multi_isotope_calculator import Multi_isotope\n'), ((685, 724), 'numpy.empty', 'np.empty', ([], {'shape': 'data.shape', 'dtype': 'float'}), '(shape=data.shape, dtype=float)\n', (693, 724), True, 'import numpy as np\n'), ((1322, 1376), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../data/sharp_fig5.csv"""'], {'delimiter': '""","""'}), "('../data/sharp_fig5.csv', delimiter=',')\n", (1335, 1376), True, 'import numpy as np\n'), ((1435, 1531), 'multi_isotope_calculator.Multi_isotope', 'Multi_isotope', (["{'235': (0.711, 5, 0.2)}"], {'max_swu': '(15000)', 'process': '"""diffusion"""', 'downblend': '(False)'}), "({'235': (0.711, 5, 0.2)}, max_swu=15000, process='diffusion',\n downblend=False)\n", (1448, 1531), False, 'from multi_isotope_calculator import Multi_isotope\n'), ((1568, 1608), 'numpy.empty', 'np.empty', ([], {'shape': 'sharp.shape', 'dtype': 'float'}), '(shape=sharp.shape, dtype=float)\n', (1576, 1608), True, 'import numpy as np\n'), ((1993, 2021), 'plotsettings.set_size', 'ps.set_size', ([], {'subplots': '(2, 2)'}), '(subplots=(2, 2))\n', (2004, 2021), True, 'import plotsettings as ps\n'), ((2035, 2082), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize', 'nrows': '(2)', 'ncols': '(2)'}), '(figsize=figsize, nrows=2, ncols=2)\n', (2047, 2082), True, 'import matplotlib.pyplot as plt\n'), ((2092, 2136), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'lines.markersize': 4}"], {}), "({'lines.markersize': 4})\n", (2111, 2136), True, 'import matplotlib.pyplot as plt\n'), ((3008, 3026), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3024, 3026), True, 'import matplotlib.pyplot as plt\n'), ((3031, 3077), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots/checks_marc_sharp1.pdf"""'], {}), "('../plots/checks_marc_sharp1.pdf')\n", (3042, 3077), True, 'import matplotlib.pyplot as plt\n'), ((3082, 3093), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3091, 3093), True, 'import matplotlib.pyplot as plt\n'), ((469, 491), 'numpy.argsort', 'np.argsort', (['data[:, 0]'], {}), '(data[:, 0])\n', (479, 491), True, 'import numpy as np\n'), ((1395, 1418), 'numpy.argsort', 'np.argsort', (['sharp[:, 0]'], {}), '(sharp[:, 0])\n', (1405, 1418), True, 'import numpy as np\n'), ((2252, 2264), 'plotsettings.colors', 'ps.colors', (['(0)'], {}), '(0)\n', (2261, 2264), True, 'import plotsettings as ps\n'), ((2426, 2438), 'plotsettings.colors', 'ps.colors', (['(1)'], {}), '(1)\n', (2435, 2438), True, 'import plotsettings as ps\n')]
|
import boto3
client = boto3.client('rds')
def lambda_handler(event, context):
target_db_cluster_identifier=event['TargetDBClusterIdentifier']
payload = event.copy()
try:
response = client.describe_db_clusters(DBClusterIdentifier=target_db_cluster_identifier)
payload['status'] = response['DBClusters'][0]['Status']
return payload
except client.exceptions.DBClusterNotFoundFault as e:
print(e)
payload['status'] = 'not-found'
payload['message'] = 'There is no cluster to remove...'
return payload
|
[
"boto3.client"
] |
[((23, 42), 'boto3.client', 'boto3.client', (['"""rds"""'], {}), "('rds')\n", (35, 42), False, 'import boto3\n')]
|
import wx
from wx.lib.agw.floatspin import FloatSpin
from shs.input.fdf_options import ChoiceLine, MeasuredLine, NumberLine, ThreeNumberLine
try:
from geom import Geom
except ImportError:
from shs.geom import Geom
class Bravais(ChoiceLine):
label = 'Composition'
choices = ['BCC', 'FCC', 'SC']
optional = False
class LatticeConstant(MeasuredLine):
label = 'Lattice constant'
value = 1.
digits = 2
increment = 0.01
units = ['Bohr', 'Ang']
optional = False
class DistortionLevel(NumberLine):
label = 'Distortion level (in %)'
value = 0.
digits = 0
increment = 1.
range_val = (0., 100.)
optional = False
class SuperCell(ThreeNumberLine):
label = 'Supercell'
optional = False
class ACInitDialog(wx.Dialog):
def __init__(self, *args, **kwds):
self.types = kwds.pop('types')
wx.Dialog.__init__(self, *args, **kwds)
self.bravais = Bravais(self)
self.type_label = []
self.typefs = []
if len(self.types) == 0:
self.add_type_btn = wx.Button(self, -1, "Add type")
self.add_type_btn.Bind(wx.EVT_BUTTON, self.add_type)
else:
for t in self.types:
self.type_label.append(wx.StaticText(self, -1, t))
self.typefs.append(FloatSpin(self, -1, min_val=0, value=1., digits=0))
self.sc = SuperCell(self)
self.alat = LatticeConstant(self)
self.dist = DistortionLevel(self)
self.__set_properties()
self.__do_layout()
def __set_properties(self):
self.SetTitle("Initialize geometry")
def __do_layout(self):
comp_label = wx.StaticBox(self, -1, 'Composition')
comp_sizer = wx.StaticBoxSizer(comp_label, wx.HORIZONTAL)
self.comp_inside = wx.GridSizer(2, len(self.types), 2, 2)
for l in self.type_label:
self.comp_inside.Add(l, 0, wx.ALIGN_CENTER, 0)
for fs in self.typefs:
self.comp_inside.Add(fs, 0, wx.ALIGN_CENTER, 0)
comp_sizer.Add(self.comp_inside, 1, wx.ALL | wx.EXPAND, 5)
if len(self.types) == 0:
comp_sizer.Add(self.add_type_btn, 0, wx.ALL | wx.EXPAND, 5)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.bravais.sizer, 0, wx.EXPAND, 0)
sizer.Add(comp_sizer, 0, wx.ALL | wx.EXPAND, 5)
sizer.Add(self.alat.sizer, 0, wx.EXPAND, 0)
sizer.Add(self.sc.sizer, 0, wx.EXPAND, 0)
sizer.Add(self.dist.sizer, 0, wx.EXPAND, 0)
sizer.Add(self.CreateSeparatedButtonSizer(wx.OK|wx.CANCEL), 0, wx.ALL|wx.EXPAND, 5)
self.SetSizer(sizer)
self.Fit()
self.Layout()
def add_type(self, evt):
self.comp_inside.Clear()
self.comp_inside.SetCols(self.comp_inside.GetCols()+1)
self.type_label.append(wx.TextCtrl(self, -1))
self.typefs.append(FloatSpin(self, -1, min_val=0, value=1., digits=0))
for l in self.type_label:
self.comp_inside.Add(l, 0, wx.ALIGN_CENTER, 0)
for fs in self.typefs:
self.comp_inside.Add(fs, 0, wx.ALIGN_CENTER, 0)
self.Fit()
self.Layout()
def init_geom(self):
bravais = self.bravais.GetValue()
alat, unit = self.alat.GetValue()
sc = self.sc.GetValue()
dist = self.dist.GetValue()
if len(self.types) == 0:
comp = dict(zip([il.GetValue() for il in self.type_label], [ifs.GetValue() for ifs in self.typefs]))
else:
comp = dict(zip(self.types, [ifs.GetValue() for ifs in self.typefs]))
g = Geom()
g.initialize(bravais, comp, sc, alat, unit, dist_level=dist)
g.geom2opts()
return g.opts["AtomicCoordinatesAndAtomicSpecies"]
|
[
"wx.Button",
"wx.lib.agw.floatspin.FloatSpin",
"wx.Dialog.__init__",
"wx.BoxSizer",
"wx.StaticBoxSizer",
"wx.TextCtrl",
"wx.StaticText",
"shs.geom.Geom",
"wx.StaticBox"
] |
[((883, 922), 'wx.Dialog.__init__', 'wx.Dialog.__init__', (['self', '*args'], {}), '(self, *args, **kwds)\n', (901, 922), False, 'import wx\n'), ((1692, 1729), 'wx.StaticBox', 'wx.StaticBox', (['self', '(-1)', '"""Composition"""'], {}), "(self, -1, 'Composition')\n", (1704, 1729), False, 'import wx\n'), ((1751, 1795), 'wx.StaticBoxSizer', 'wx.StaticBoxSizer', (['comp_label', 'wx.HORIZONTAL'], {}), '(comp_label, wx.HORIZONTAL)\n', (1768, 1795), False, 'import wx\n'), ((2244, 2268), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (2255, 2268), False, 'import wx\n'), ((3621, 3627), 'shs.geom.Geom', 'Geom', ([], {}), '()\n', (3625, 3627), False, 'from shs.geom import Geom\n'), ((1079, 1110), 'wx.Button', 'wx.Button', (['self', '(-1)', '"""Add type"""'], {}), "(self, -1, 'Add type')\n", (1088, 1110), False, 'import wx\n'), ((2862, 2883), 'wx.TextCtrl', 'wx.TextCtrl', (['self', '(-1)'], {}), '(self, -1)\n', (2873, 2883), False, 'import wx\n'), ((2912, 2963), 'wx.lib.agw.floatspin.FloatSpin', 'FloatSpin', (['self', '(-1)'], {'min_val': '(0)', 'value': '(1.0)', 'digits': '(0)'}), '(self, -1, min_val=0, value=1.0, digits=0)\n', (2921, 2963), False, 'from wx.lib.agw.floatspin import FloatSpin\n'), ((1262, 1288), 'wx.StaticText', 'wx.StaticText', (['self', '(-1)', 't'], {}), '(self, -1, t)\n', (1275, 1288), False, 'import wx\n'), ((1325, 1376), 'wx.lib.agw.floatspin.FloatSpin', 'FloatSpin', (['self', '(-1)'], {'min_val': '(0)', 'value': '(1.0)', 'digits': '(0)'}), '(self, -1, min_val=0, value=1.0, digits=0)\n', (1334, 1376), False, 'from wx.lib.agw.floatspin import FloatSpin\n')]
|
#!/usr/bin/env python3
import datetime
import time
import os
import matplotlib.pyplot as plt
import matplotlib.dates as md
import numpy as np
class handle_data:
data_file = "./data/data.log"
data_list = []
def __init__(self):
pass
def insert_data(self, timestamp, temp, state_onoff, state_light, state_cooling, state_heating):
"""
Insert data to log file and add timestamp.
"""
if state_onoff == 'on':
state_onoff = 1
else:
state_onoff = 0
if state_light == 'on':
state_light = 1
else:
state_light = 0
if state_cooling == 'on':
state_cooling = 1
else:
state_cooling = 0
if state_heating == 'on':
state_heating = 1
else:
state_heating = 0
data_string = str(timestamp) + ";" + str(temp) + ";" + str(state_onoff) + ";" + str(state_light) + ";" + str(state_cooling) + ";" + str(state_heating) + "\n"
self.data_list.append(data_string)
#print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tInserted data: data_list.append len=", len(self.data_list))
return
def append_data_to_file(self):
"""
Append data to log file.
"""
try:
with open(self.data_file, "a") as outfile:
for entry in self.data_list:
outfile.write(str(entry))
except IOError:
print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tIOError opening data.log for appending data")
self.data_list.clear()
return
def clean_file(self):
"""
Clean log file in order to reset measurement.
"""
try:
with open(self.data_file, "w") as outfile:
outfile.write("Timestamp; Temp; State_onoff; State_light; State_cooling; State_heating\n")
except IOError:
print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tIOError opening data.log for writing")
return
def update_graph(self, path):
"""
Generate or update graph from data file.
"""
lines = sum(1 for _ in open(self.data_file))
if lines > 1:
data=np.genfromtxt(self.data_file, delimiter=';', skip_header=1, names=['Time', 'Temp', 'Onoff', 'Light', 'Cooling', 'Heating'], dtype=([('Time', '<U30'), ('Temp', '<f8'), ('Onoff', '<f8'), ('Light', '<f8'), ('Cooling', '<f8'), ('Heating', '<f8')]))
fig, ax1 = plt.subplots()
if data['Temp'].shape:
if data['Temp'].shape[0] > 120:
ax1.plot(data['Temp'][((data['Temp'].shape[0])-120):(data['Temp'].shape[0])], color = 'r', label = 'Temp.')
else:
ax1.plot(data['Temp'], color = 'r', label = 'Temp.')
else:
ax1.plot(data['Temp'], color = 'r', label = 'Temp.')
ax1.set_xlim([0,120])
ax1.set_xticks([0,30,60,90,120])
ax1.set_ylabel('Temp (°C)', color='r')
ax1.tick_params('y', colors='r')
yt=range(-1,41,1)
ax1.set_yticks(yt, minor=True)
ax1.set_xlabel('last two hours (scale:min.)')
"""
ax2 = ax1.twinx()
ax2.plot(data['Light'], color = 'g', label = 'Light', marker = 'o')
ax2.plot(data['Onoff'], color = 'y', label = 'Onoff', marker = '*')
ax2.plot(data['Heating'], color = 'r', label = 'Heating')
ax2.plot(data['Cooling'], color = 'b', label = 'Cooling')
ax2.set_ylabel('Light (on=1/off=0)', color='b')
ax2.tick_params('y', colors='b')
ax2.set_yticks([0,1], minor=False)
"""
fig.tight_layout()
#plt.legend(['Temp. inside'], loc='upper left')
plt.savefig(path, bbox_inches='tight')
plt.close(fig)
print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tGraph generated/updated.")
else:
#os.remove(path)
#os.mknod(path)
#os.chmod(path, 0o644)
try:
with open(path, "w") as outfile:
outfile.write("")
except IOError:
print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tIOError: Could not generate empty graph file.")
print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tNo data, graph is empty.")
return
# Test:
if __name__ == '__main__':
hd = handle_data()
#hd.clean_file()
hd.update_graph('./static/data_log.png')
|
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"datetime.datetime.now",
"numpy.genfromtxt",
"matplotlib.pyplot.subplots"
] |
[((1935, 2190), 'numpy.genfromtxt', 'np.genfromtxt', (['self.data_file'], {'delimiter': '""";"""', 'skip_header': '(1)', 'names': "['Time', 'Temp', 'Onoff', 'Light', 'Cooling', 'Heating']", 'dtype': "[('Time', '<U30'), ('Temp', '<f8'), ('Onoff', '<f8'), ('Light', '<f8'), (\n 'Cooling', '<f8'), ('Heating', '<f8')]"}), "(self.data_file, delimiter=';', skip_header=1, names=['Time',\n 'Temp', 'Onoff', 'Light', 'Cooling', 'Heating'], dtype=[('Time', '<U30'\n ), ('Temp', '<f8'), ('Onoff', '<f8'), ('Light', '<f8'), ('Cooling',\n '<f8'), ('Heating', '<f8')])\n", (1948, 2190), True, 'import numpy as np\n'), ((2194, 2208), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2206, 2208), True, 'import matplotlib.pyplot as plt\n'), ((3266, 3304), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'bbox_inches': '"""tight"""'}), "(path, bbox_inches='tight')\n", (3277, 3304), True, 'import matplotlib.pyplot as plt\n'), ((3308, 3322), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3317, 3322), True, 'import matplotlib.pyplot as plt\n'), ((3332, 3355), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3353, 3355), False, 'import datetime\n'), ((3715, 3738), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3736, 3738), False, 'import datetime\n'), ((1259, 1282), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1280, 1282), False, 'import datetime\n'), ((1664, 1687), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1685, 1687), False, 'import datetime\n'), ((3594, 3617), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3615, 3617), False, 'import datetime\n')]
|
# Created by <NAME> on 2021/6/6, 21:08
from turtle import Turtle
import turtle
def draw_rectangle(turtle: Turtle, llx, lly, width, height):
turtle.up()
turtle.goto(llx, lly)
turtle.begin_fill()
turtle.down()
turtle.goto(llx + width, lly)
turtle.goto(llx + width, lly + height)
turtle.goto(llx, lly + height)
turtle.end_fill()
if __name__ == '__main__':
tur = Turtle()
wn = turtle.Screen()
wn.title("Turtle Demo")
wn.setworldcoordinates(0, 0, 500, 500)
tur.speed(0)
draw_rectangle(tur, 0, 0, 500, 500)
a = input()
|
[
"turtle.begin_fill",
"turtle.down",
"turtle.Screen",
"turtle.goto",
"turtle.up",
"turtle.end_fill",
"turtle.Turtle"
] |
[((147, 158), 'turtle.up', 'turtle.up', ([], {}), '()\n', (156, 158), False, 'import turtle\n'), ((163, 184), 'turtle.goto', 'turtle.goto', (['llx', 'lly'], {}), '(llx, lly)\n', (174, 184), False, 'import turtle\n'), ((189, 208), 'turtle.begin_fill', 'turtle.begin_fill', ([], {}), '()\n', (206, 208), False, 'import turtle\n'), ((213, 226), 'turtle.down', 'turtle.down', ([], {}), '()\n', (224, 226), False, 'import turtle\n'), ((231, 260), 'turtle.goto', 'turtle.goto', (['(llx + width)', 'lly'], {}), '(llx + width, lly)\n', (242, 260), False, 'import turtle\n'), ((265, 303), 'turtle.goto', 'turtle.goto', (['(llx + width)', '(lly + height)'], {}), '(llx + width, lly + height)\n', (276, 303), False, 'import turtle\n'), ((308, 338), 'turtle.goto', 'turtle.goto', (['llx', '(lly + height)'], {}), '(llx, lly + height)\n', (319, 338), False, 'import turtle\n'), ((343, 360), 'turtle.end_fill', 'turtle.end_fill', ([], {}), '()\n', (358, 360), False, 'import turtle\n'), ((400, 408), 'turtle.Turtle', 'Turtle', ([], {}), '()\n', (406, 408), False, 'from turtle import Turtle\n'), ((418, 433), 'turtle.Screen', 'turtle.Screen', ([], {}), '()\n', (431, 433), False, 'import turtle\n')]
|
from pythonneat.neat.Species import Species
import pythonneat.neat.Speciation as Speciation
import pythonneat.neat.utils.Parameters as Parameters
current_genomes = []
def add_genome(genome):
"""Adds genome to the species list based on its
compatability distance to already existing species
Inputs:
genome: The genome to add. type: Genome
"""
for specie in current_genomes:
first = specie.get_champion()
if Speciation.compatibility_distance(genome, first) < Parameters.COMPATABILITY_THRESHOLD:
specie.add_genome(genome)
return
s = Species()
s.add_genome(genome)
current_genomes.append(s)
return
def remove_genome(genome):
for specie in current_genomes:
if genome in specie.genomes:
specie.remove_genome(genome)
def cleanup_species():
for specie in current_genomes:
if specie.get_average_fitness() - specie.prev_fitness >= Parameters.SPECIES_STAGNATE_MIN_IMPROVEMENT:
specie.consec_stagnate = 0
specie.prev_fitness = specie.get_average_fitness()
else:
# Stagnate
specie.consec_stagnate += 1
if specie.consec_stagnate >= Parameters.SPECIES_STAGNATE_GEN_COUNT:
specie.reproduce = False
def population_size():
pop = 0
for specie in current_genomes:
for _ in specie.genomes:
pop += 1
return pop
|
[
"pythonneat.neat.Speciation.compatibility_distance",
"pythonneat.neat.Species.Species"
] |
[((602, 611), 'pythonneat.neat.Species.Species', 'Species', ([], {}), '()\n', (609, 611), False, 'from pythonneat.neat.Species import Species\n'), ((450, 498), 'pythonneat.neat.Speciation.compatibility_distance', 'Speciation.compatibility_distance', (['genome', 'first'], {}), '(genome, first)\n', (483, 498), True, 'import pythonneat.neat.Speciation as Speciation\n')]
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import AgglomerativeClustering
import numpy as np
embedder = SentenceTransformer('paraphrase-MiniLM-L6-v2')
# Corpus with example sentences
corpus = ['A man is eating food.',
'A man is eating a piece of bread.',
'A man is eating pasta.',
'The girl is carrying a baby.',
'The baby is carried by the woman',
'A man is riding a horse.',
'A man is riding a white horse on an enclosed ground.',
'A monkey is playing drums.',
'Someone in a gorilla costume is playing a set of drums.',
'A cheetah is running behind its prey.',
'A cheetah chases prey on across a field.'
]
corpus_embeddings = embedder.encode(corpus)
# Normalize the embeddings to unit length
corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
# Perform kmean clustering
clustering_model = AgglomerativeClustering(n_clusters=None, distance_threshold=1.5) #, affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i+1)
print(cluster)
print("")
|
[
"sklearn.cluster.AgglomerativeClustering",
"sentence_transformers.SentenceTransformer",
"numpy.linalg.norm"
] |
[((318, 364), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""paraphrase-MiniLM-L6-v2"""'], {}), "('paraphrase-MiniLM-L6-v2')\n", (337, 364), False, 'from sentence_transformers import SentenceTransformer\n'), ((1165, 1229), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': 'None', 'distance_threshold': '(1.5)'}), '(n_clusters=None, distance_threshold=1.5)\n', (1188, 1229), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((1061, 1117), 'numpy.linalg.norm', 'np.linalg.norm', (['corpus_embeddings'], {'axis': '(1)', 'keepdims': '(True)'}), '(corpus_embeddings, axis=1, keepdims=True)\n', (1075, 1117), True, 'import numpy as np\n')]
|
import pandas as pd
wine = pd.read_csv('https://bit.ly/wine-date')
# wine = pd.read_csv('../data/wine.csv')
print(wine.head())
data = wine[['alcohol', 'sugar', 'pH']].to_numpy()
target = wine['class'].to_numpy()
from sklearn.model_selection import train_test_split
train_input, test_input, train_target, test_target = train_test_split(data, target, test_size=0.2, random_state=42)
print(train_input.shape, test_input.shape)
sub_input, val_input, sub_target, val_target = train_test_split(train_input, train_target, test_size=0.2, random_state=42)
print(sub_input.shape, val_input.shape)
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(random_state=42)
dt.fit(sub_input, sub_target)
print(dt.score(sub_input, sub_target))
print(dt.score(val_input, val_target))
from sklearn.model_selection import cross_validate
scores = cross_validate(dt, train_input, train_target)
print(scores)
import numpy as np
print(np.mean(scores['test_score']))
from sklearn.model_selection import StratifiedKFold
scores = cross_validate(dt, train_input, train_target, cv=StratifiedKFold())
print(np.mean(scores['test_score']))
splitter = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
scores = cross_validate(dt, train_input, train_target, cv=splitter)
print(np.mean(scores['test_score']))
from sklearn.model_selection import GridSearchCV
params = {'min_impurity_decrease': [0.0001, 0.0002, 0.0003, 0.0004, 0.0005]}
gs = GridSearchCV(DecisionTreeClassifier(random_state=42), params, n_jobs=1)
gs.fit(train_input, train_target)
dt = gs.best_estimator_
print(dt.score(train_input, train_target))
print(gs.best_params_)
print(gs.cv_results_['mean_test_score'])
best_index = np.argmax(gs.cv_results_['mean_test_score'])
print(gs.cv_results_['params'][best_index])
params = {'min_impurity_decrease': np.arange(0.0001, 0.001, 0.0001),
'max_depth': range(5, 20, 1),
'min_samples_split': range(2, 100, 10)
}
gs = GridSearchCV(DecisionTreeClassifier(random_state=42), params, n_jobs=-1)
gs.fit(train_input, train_target)
print(gs.best_params_)
print(np.max(gs.cv_results_['mean_test_score']))
from scipy.stats import uniform, randint
rgen = randint(0, 10)
print(rgen.rvs(10))
print(np.unique(rgen.rvs(1000), return_counts=True))
ugen = uniform(0, 1)
print(ugen.rvs(10))
params = {'min_impurity_decrease': uniform(0.0001, 0.001),
'max_depth': randint(20, 50),
'min_samples_split': randint(2, 25),
'min_samples_leaf': randint(1, 25)
}
from sklearn.model_selection import RandomizedSearchCV
gs = RandomizedSearchCV(DecisionTreeClassifier(random_state=42), params, n_iter=100, n_jobs=-1, random_state=42)
gs.fit(train_input, train_target)
print(gs.best_params_)
print(np.max(gs.cv_results_['mean_test_score']))
dt = gs.best_estimator_
print(dt.score(test_input, test_target))
# Exam
gs = RandomizedSearchCV(DecisionTreeClassifier(splitter='random', random_state=42), params, n_iter=100, n_jobs=-1, random_state=42)
gs.fit(train_input, train_target)
print(gs.best_params_)
print(np.max(gs.cv_results_['mean_test_score']))
dt = gs.best_estimator_
print(dt.score(test_input, test_target))
|
[
"scipy.stats.randint",
"numpy.mean",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.cross_validate",
"sklearn.tree.DecisionTreeClassifier",
"scipy.stats.uniform",
"numpy.argmax",
"numpy.max",
"sklearn.model_selection.StratifiedKFold",
"numpy.arange"
] |
[((28, 67), 'pandas.read_csv', 'pd.read_csv', (['"""https://bit.ly/wine-date"""'], {}), "('https://bit.ly/wine-date')\n", (39, 67), True, 'import pandas as pd\n'), ((322, 384), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'target'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(data, target, test_size=0.2, random_state=42)\n', (338, 384), False, 'from sklearn.model_selection import train_test_split\n'), ((476, 551), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_input', 'train_target'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(train_input, train_target, test_size=0.2, random_state=42)\n', (492, 551), False, 'from sklearn.model_selection import train_test_split\n'), ((647, 686), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (669, 686), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((857, 902), 'sklearn.model_selection.cross_validate', 'cross_validate', (['dt', 'train_input', 'train_target'], {}), '(dt, train_input, train_target)\n', (871, 902), False, 'from sklearn.model_selection import cross_validate\n'), ((1155, 1214), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)', 'shuffle': '(True)', 'random_state': '(42)'}), '(n_splits=10, shuffle=True, random_state=42)\n', (1170, 1214), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((1224, 1282), 'sklearn.model_selection.cross_validate', 'cross_validate', (['dt', 'train_input', 'train_target'], {'cv': 'splitter'}), '(dt, train_input, train_target, cv=splitter)\n', (1238, 1282), False, 'from sklearn.model_selection import cross_validate\n'), ((1706, 1750), 'numpy.argmax', 'np.argmax', (["gs.cv_results_['mean_test_score']"], {}), "(gs.cv_results_['mean_test_score'])\n", (1715, 1750), True, 'import numpy as np\n'), ((2203, 2217), 'scipy.stats.randint', 'randint', (['(0)', '(10)'], {}), '(0, 10)\n', (2210, 2217), False, 'from scipy.stats import uniform, randint\n'), ((2300, 2313), 'scipy.stats.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2307, 2313), False, 'from scipy.stats import uniform, randint\n'), ((944, 973), 'numpy.mean', 'np.mean', (["scores['test_score']"], {}), "(scores['test_score'])\n", (951, 973), True, 'import numpy as np\n'), ((1112, 1141), 'numpy.mean', 'np.mean', (["scores['test_score']"], {}), "(scores['test_score'])\n", (1119, 1141), True, 'import numpy as np\n'), ((1289, 1318), 'numpy.mean', 'np.mean', (["scores['test_score']"], {}), "(scores['test_score'])\n", (1296, 1318), True, 'import numpy as np\n'), ((1466, 1505), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (1488, 1505), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1831, 1863), 'numpy.arange', 'np.arange', (['(0.0001)', '(0.001)', '(0.0001)'], {}), '(0.0001, 0.001, 0.0001)\n', (1840, 1863), True, 'import numpy as np\n'), ((1985, 2024), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (2007, 2024), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2110, 2151), 'numpy.max', 'np.max', (["gs.cv_results_['mean_test_score']"], {}), "(gs.cv_results_['mean_test_score'])\n", (2116, 2151), True, 'import numpy as np\n'), ((2370, 2392), 'scipy.stats.uniform', 'uniform', (['(0.0001)', '(0.001)'], {}), '(0.0001, 0.001)\n', (2377, 2392), False, 'from scipy.stats import uniform, randint\n'), ((2417, 2432), 'scipy.stats.randint', 'randint', (['(20)', '(50)'], {}), '(20, 50)\n', (2424, 2432), False, 'from scipy.stats import uniform, randint\n'), ((2465, 2479), 'scipy.stats.randint', 'randint', (['(2)', '(25)'], {}), '(2, 25)\n', (2472, 2479), False, 'from scipy.stats import uniform, randint\n'), ((2511, 2525), 'scipy.stats.randint', 'randint', (['(1)', '(25)'], {}), '(1, 25)\n', (2518, 2525), False, 'from scipy.stats import uniform, randint\n'), ((2619, 2658), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (2641, 2658), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2773, 2814), 'numpy.max', 'np.max', (["gs.cv_results_['mean_test_score']"], {}), "(gs.cv_results_['mean_test_score'])\n", (2779, 2814), True, 'import numpy as np\n'), ((2915, 2973), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'splitter': '"""random"""', 'random_state': '(42)'}), "(splitter='random', random_state=42)\n", (2937, 2973), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((3088, 3129), 'numpy.max', 'np.max', (["gs.cv_results_['mean_test_score']"], {}), "(gs.cv_results_['mean_test_score'])\n", (3094, 3129), True, 'import numpy as np\n'), ((1087, 1104), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {}), '()\n', (1102, 1104), False, 'from sklearn.model_selection import StratifiedKFold\n')]
|
# Module to build a potential landscape
import numpy as np
def gauss(x,mean=0.0,stddev=0.02,peak=1.0):
'''
Input:
x : x-coordintes
Output:
f(x) where f is a Gaussian with the given mean, stddev and peak value
'''
stddev = 5*(x[1] - x[0])
return peak*np.exp(-(x-mean)**2/(2*stddev**2))
def init_ndot(x,n_dot):
'''
Input:
x : 1d grid for the dots
ndot : number of dots
Output:
y : cordinates of the potential grid with ndots
The potential barriers are modelled as gaussians
'''
# n dots imply n+1 barriers
bar_centers = x[0] + (x[-1] - x[0])*np.random.rand(n_dot+1)
bar_heights = np.random.rand(n_dot+1)
#bar_heights = 0.5*np.ones(n_dot+1)
N = len(x)
y = np.zeros(N)
# no need to optimize here really since the dot number is generally small, the calculation of the gauss function is already done in a vectorised manner
for j in range(n_dot+1):
y += gauss(x-bar_centers[j],peak=bar_heights[j])
return y
|
[
"numpy.exp",
"numpy.zeros",
"numpy.random.rand"
] |
[((659, 684), 'numpy.random.rand', 'np.random.rand', (['(n_dot + 1)'], {}), '(n_dot + 1)\n', (673, 684), True, 'import numpy as np\n'), ((747, 758), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (755, 758), True, 'import numpy as np\n'), ((283, 327), 'numpy.exp', 'np.exp', (['(-(x - mean) ** 2 / (2 * stddev ** 2))'], {}), '(-(x - mean) ** 2 / (2 * stddev ** 2))\n', (289, 327), True, 'import numpy as np\n'), ((616, 641), 'numpy.random.rand', 'np.random.rand', (['(n_dot + 1)'], {}), '(n_dot + 1)\n', (630, 641), True, 'import numpy as np\n')]
|
from sys import stdin
input = stdin.readline
from collections import deque
N, Q = map(int, input().split())
tree = [[] for _ in range(N + 1)]
level = [0] * (N + 1)
for _ in range(N - 1):
a, b = map(int, input().split())
tree[a].append(b)
tree[b].append(a)
visited = [False] * (N + 1)
def bfs(st):
global level
q = deque()
q.append([st, 0])
visited[st] = True
while q:
for _ in range(len(q)):
now, lvl = q.popleft()
for next in tree[now]:
if not visited[next]:
q.append([next, lvl + 1])
level[next] = lvl + 1
visited[next] = True
bfs(1)
def solve(a, b):
if abs(level[a] - level[b]) % 2 == 1:
return 'Road'
else:
return 'Town'
for _ in range(Q):
x, y = map(int, input().split())
print(solve(x, y))
|
[
"collections.deque"
] |
[((338, 345), 'collections.deque', 'deque', ([], {}), '()\n', (343, 345), False, 'from collections import deque\n')]
|
import json
def get_all_pets():
pets = read_from_file()
pets_in_store = []
for k, v in pets.items():
current_pet = {"id": k, **v}
pets_in_store.append(current_pet)
return pets
def remove_pet(id):
pets = read_from_file()
del pets[id]
write_to_file(pets)
def update_pet(id, pet):
pets = read_from_file()
ids = pets.keys()
pets[id] = {"name": pet.name, "breed": pet.breed, "price": pet.price}
write_to_file(pets)
def add_pet(pet):
pets = read_from_file()
ids = pets.keys()
new_id = int(ids[-1]) + 1
pets[new_id] = {"name": pet.name, "breed": pet.breed, "price": pet.price}
write_to_file(pets)
def get_pet(id):
pets = read_from_file()
pet = pets[id]
pet["id"] = id
return pet
def write_to_file(content):
with open("./pets.json", "w") as pets:
pets.write(json.dumps(content))
def read_from_file():
with open("./pets.json", "r") as pets:
return json.loads(pets.read())
|
[
"json.dumps"
] |
[((871, 890), 'json.dumps', 'json.dumps', (['content'], {}), '(content)\n', (881, 890), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2020 Northwestern University.
#
# Invenio-Drafts-Resources is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Invenio Drafts Resources module to create REST APIs."""
import marshmallow as ma
from flask import g
from flask_resources import JSONSerializer, ResponseHandler, \
resource_requestctx, response_handler, route, with_content_negotiation
from invenio_records_resources.resources import \
RecordResource as RecordResourceBase
from invenio_records_resources.resources.records.resource import \
request_data, request_headers, request_read_args, request_search_args, \
request_view_args
from invenio_records_resources.resources.records.utils import es_preference
from .errors import RedirectException
class RecordResource(RecordResourceBase):
"""Draft-aware RecordResource."""
def create_blueprint(self, **options):
"""Create the blueprint."""
# We avoid passing url_prefix to the blueprint because we need to
# install URLs under both /records and /user/records. Instead we
# add the prefix manually to each route (which is anyway what Flask
# does in the end)
options["url_prefix"] = ""
return super().create_blueprint(**options)
def create_url_rules(self):
"""Create the URL rules for the record resource."""
routes = self.config.routes
def p(route):
"""Prefix a route with the URL prefix."""
return f"{self.config.url_prefix}{route}"
def s(route):
"""Suffix a route with the URL prefix."""
return f"{route}{self.config.url_prefix}"
rules = [
route("GET", p(routes["list"]), self.search),
route("POST", p(routes["list"]), self.create),
route("GET", p(routes["item"]), self.read),
route("PUT", p(routes["item"]), self.update),
route("DELETE", p(routes["item"]), self.delete),
route("GET", p(routes["item-versions"]), self.search_versions),
route("POST", p(routes["item-versions"]), self.new_version),
route("GET", p(routes["item-latest"]), self.read_latest),
route("GET", p(routes["item-draft"]), self.read_draft),
route("POST", p(routes["item-draft"]), self.edit),
route("PUT", p(routes["item-draft"]), self.update_draft),
route("DELETE", p(routes["item-draft"]), self.delete_draft),
route("POST", p(routes["item-publish"]), self.publish),
route("GET", s(routes["user-prefix"]), self.search_user_records),
]
if self.service.draft_files:
rules.append(route(
"POST",
p(routes["item-files-import"]),
self.import_files,
apply_decorators=False
))
return rules
@request_search_args
@request_view_args
@response_handler(many=True)
def search_user_records(self):
"""Perform a search over the record's versions.
GET /user/records
"""
hits = self.service.search_drafts(
identity=g.identity,
params=resource_requestctx.args,
es_preference=es_preference(),
)
return hits.to_dict(), 200
@request_search_args
@request_view_args
@response_handler(many=True)
def search_versions(self):
"""Perform a search over the record's versions.
GET /records/:pid_value/versions
"""
hits = self.service.search_versions(
resource_requestctx.view_args["pid_value"],
identity=g.identity,
params=resource_requestctx.args,
es_preference=es_preference()
)
return hits.to_dict(), 200
@request_view_args
@response_handler()
def new_version(self):
"""Create a new version.
POST /records/:pid_value/versions
"""
item = self.service.new_version(
resource_requestctx.view_args["pid_value"],
g.identity,
)
return item.to_dict(), 201
@request_view_args
@response_handler()
def edit(self):
"""Edit a record.
POST /records/:pid_value/draft
"""
item = self.service.edit(
resource_requestctx.view_args["pid_value"],
g.identity,
)
return item.to_dict(), 201
@request_view_args
@response_handler()
def publish(self):
"""Publish the draft."""
item = self.service.publish(
resource_requestctx.view_args["pid_value"],
g.identity,
)
return item.to_dict(), 202
@request_view_args
@with_content_negotiation(
response_handlers={
'application/json': ResponseHandler(JSONSerializer())
},
default_accept_mimetype='application/json',
)
@response_handler(many=True)
def import_files(self):
"""Import files from previous record version."""
files = self.service.import_files(
resource_requestctx.view_args["pid_value"],
g.identity,
)
return files.to_dict(), 201
@request_view_args
def read_latest(self):
"""Redirect to latest record.
GET /records/:pid_value/versions/latest
"""
item = self.service.read_latest(
resource_requestctx.view_args["pid_value"],
g.identity,
)
raise RedirectException(item["links"]["self"])
@request_read_args
@request_view_args
@response_handler()
def read_draft(self):
"""Edit a draft.
GET /records/:pid_value/draft
"""
item = self.service.read_draft(
resource_requestctx.view_args["pid_value"],
g.identity,
)
return item.to_dict(), 200
@request_headers
@request_view_args
@request_data
@response_handler()
def update_draft(self):
"""Update a draft.
PUT /records/:pid_value/draft
"""
item = self.service.update_draft(
resource_requestctx.view_args["pid_value"],
g.identity,
resource_requestctx.data or {},
revision_id=resource_requestctx.headers.get("if_match"),
)
return item.to_dict(), 200
@request_headers
@request_view_args
def delete_draft(self):
"""Delete a draft.
DELETE /records/:pid_value/draft
"""
self.service.delete_draft(
resource_requestctx.view_args["pid_value"],
g.identity,
revision_id=resource_requestctx.headers.get("if_match"),
)
return "", 204
|
[
"flask_resources.response_handler",
"flask_resources.resource_requestctx.headers.get",
"invenio_records_resources.resources.records.utils.es_preference",
"flask_resources.JSONSerializer"
] |
[((3034, 3061), 'flask_resources.response_handler', 'response_handler', ([], {'many': '(True)'}), '(many=True)\n', (3050, 3061), False, 'from flask_resources import JSONSerializer, ResponseHandler, resource_requestctx, response_handler, route, with_content_negotiation\n'), ((3455, 3482), 'flask_resources.response_handler', 'response_handler', ([], {'many': '(True)'}), '(many=True)\n', (3471, 3482), False, 'from flask_resources import JSONSerializer, ResponseHandler, resource_requestctx, response_handler, route, with_content_negotiation\n'), ((3919, 3937), 'flask_resources.response_handler', 'response_handler', ([], {}), '()\n', (3935, 3937), False, 'from flask_resources import JSONSerializer, ResponseHandler, resource_requestctx, response_handler, route, with_content_negotiation\n'), ((4248, 4266), 'flask_resources.response_handler', 'response_handler', ([], {}), '()\n', (4264, 4266), False, 'from flask_resources import JSONSerializer, ResponseHandler, resource_requestctx, response_handler, route, with_content_negotiation\n'), ((4553, 4571), 'flask_resources.response_handler', 'response_handler', ([], {}), '()\n', (4569, 4571), False, 'from flask_resources import JSONSerializer, ResponseHandler, resource_requestctx, response_handler, route, with_content_negotiation\n'), ((5013, 5040), 'flask_resources.response_handler', 'response_handler', ([], {'many': '(True)'}), '(many=True)\n', (5029, 5040), False, 'from flask_resources import JSONSerializer, ResponseHandler, resource_requestctx, response_handler, route, with_content_negotiation\n'), ((5683, 5701), 'flask_resources.response_handler', 'response_handler', ([], {}), '()\n', (5699, 5701), False, 'from flask_resources import JSONSerializer, ResponseHandler, resource_requestctx, response_handler, route, with_content_negotiation\n'), ((6037, 6055), 'flask_resources.response_handler', 'response_handler', ([], {}), '()\n', (6053, 6055), False, 'from flask_resources import JSONSerializer, ResponseHandler, resource_requestctx, response_handler, route, with_content_negotiation\n'), ((3339, 3354), 'invenio_records_resources.resources.records.utils.es_preference', 'es_preference', ([], {}), '()\n', (3352, 3354), False, 'from invenio_records_resources.resources.records.utils import es_preference\n'), ((3829, 3844), 'invenio_records_resources.resources.records.utils.es_preference', 'es_preference', ([], {}), '()\n', (3842, 3844), False, 'from invenio_records_resources.resources.records.utils import es_preference\n'), ((6352, 6395), 'flask_resources.resource_requestctx.headers.get', 'resource_requestctx.headers.get', (['"""if_match"""'], {}), "('if_match')\n", (6383, 6395), False, 'from flask_resources import JSONSerializer, ResponseHandler, resource_requestctx, response_handler, route, with_content_negotiation\n'), ((6735, 6778), 'flask_resources.resource_requestctx.headers.get', 'resource_requestctx.headers.get', (['"""if_match"""'], {}), "('if_match')\n", (6766, 6778), False, 'from flask_resources import JSONSerializer, ResponseHandler, resource_requestctx, response_handler, route, with_content_negotiation\n'), ((4921, 4937), 'flask_resources.JSONSerializer', 'JSONSerializer', ([], {}), '()\n', (4935, 4937), False, 'from flask_resources import JSONSerializer, ResponseHandler, resource_requestctx, response_handler, route, with_content_negotiation\n')]
|
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script identifies and enumerates the possible protonation sites of SMILES
strings.
"""
from __future__ import print_function
import copy
import os
import argparse
import sys
try:
# Python2
from StringIO import StringIO
except ImportError:
# Python3
from io import StringIO
def print_header():
"""Prints out header information."""
# Always let the user know a help file is available.
print("\nFor help, use: python dimorphite_dl.py --help")
# And always report citation information.
print("\nIf you use Dimorphite-DL in your research, please cite:")
print("<NAME>, Kaminsky JC, <NAME>, Durrant JD (2019) Dimorphite-DL: An")
print(
"open-source program for enumerating the ionization states of drug-like small"
)
print("molecules. J Cheminform 11:14. doi:10.1186/s13321-019-0336-9.\n")
try:
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
# Disable the unnecessary RDKit warnings
from rdkit import RDLogger
RDLogger.DisableLog("rdApp.*")
except:
msg = "Dimorphite-DL requires RDKit. See https://www.rdkit.org/"
print(msg)
raise Exception(msg)
def main(params=None):
"""The main definition run when you call the script from the commandline.
:param params: The parameters to use. Entirely optional. If absent,
defaults to None, in which case argments will be taken from
those given at the command line.
:param params: dict, optional
:return: Returns a list of the SMILES strings return_as_list parameter is
True. Otherwise, returns None.
"""
parser = ArgParseFuncs.get_args()
args = vars(parser.parse_args())
if not args["silent"]:
print_header()
# Add in any parameters in params.
if params is not None:
for k, v in params.items():
args[k] = v
# If being run from the command line, print out all parameters.
if __name__ == "__main__":
if not args["silent"]:
print("\nPARAMETERS:\n")
for k in sorted(args.keys()):
print(k.rjust(13) + ": " + str(args[k]))
print("")
if args["test"]:
# Run tests.
TestFuncs.test()
else:
# Run protonation
if "output_file" in args and args["output_file"] is not None:
# An output file was specified, so write to that.
with open(args["output_file"], "w") as file:
for protonated_smi in Protonate(args):
file.write(protonated_smi + "\n")
elif "return_as_list" in args and args["return_as_list"] == True:
return list(Protonate(args))
else:
# No output file specified. Just print it to the screen.
for protonated_smi in Protonate(args):
print(protonated_smi)
class MyParser(argparse.ArgumentParser):
"""Overwrite default parse so it displays help file on error. See
https://stackoverflow.com/questions/4042452/display-help-message-with-python-argparse-when-script-is-called-without-any-argu"""
def error(self, message):
"""Overwrites the default error message.
:param message: The default error message.
"""
self.print_help()
msg = "ERROR: %s\n\n" % message
print(msg)
raise Exception(msg)
def print_help(self, file=None):
"""Overwrite the default print_help function
:param file: Output file, defaults to None
"""
print("")
if file is None:
file = sys.stdout
self._print_message(self.format_help(), file)
print(
"""
examples:
python dimorphite_dl.py --smiles_file sample_molecules.smi
python dimorphite_dl.py --smiles "CCC(=O)O" --min_ph -3.0 --max_ph -2.0
python dimorphite_dl.py --smiles "CCCN" --min_ph -3.0 --max_ph -2.0 --output_file output.smi
python dimorphite_dl.py --smiles_file sample_molecules.smi --pka_precision 2.0 --label_states
python dimorphite_dl.py --test"""
)
print("")
class ArgParseFuncs:
"""A namespace for storing functions that are useful for processing
command-line arguments. To keep things organized."""
@staticmethod
def get_args():
"""Gets the arguments from the command line.
:return: A parser object.
"""
parser = MyParser(
description="Dimorphite 1.2.4: Creates models of "
+ "appropriately protonated small moleucles. "
+ "Apache 2.0 License. Copyright 2020 <NAME>. "
+ "Durrant."
)
parser.add_argument(
"--min_ph",
metavar="MIN",
type=float,
default=6.4,
help="minimum pH to consider (default: 6.4)",
)
parser.add_argument(
"--max_ph",
metavar="MAX",
type=float,
default=8.4,
help="maximum pH to consider (default: 8.4)",
)
parser.add_argument(
"--pka_precision",
metavar="PRE",
type=float,
default=1.0,
help="pKa precision factor (number of standard devations, default: 1.0)",
)
parser.add_argument(
"--smiles", metavar="SMI", type=str, help="SMILES string to protonate"
)
parser.add_argument(
"--smiles_file",
metavar="FILE",
type=str,
help="file that contains SMILES strings to protonate",
)
parser.add_argument(
"--output_file",
metavar="FILE",
type=str,
help="output file to write protonated SMILES (optional)",
)
parser.add_argument(
"--max_variants",
metavar="MXV",
type=int,
default=128,
help="limit number of variants per input compound (default: 128)",
)
parser.add_argument(
"--label_states",
action="store_true",
help="label protonated SMILES with target state "
+ '(i.e., "DEPROTONATED", "PROTONATED", or "BOTH").',
)
parser.add_argument(
"--silent",
action="store_true",
help="do not print any messages to the screen",
)
parser.add_argument(
"--test", action="store_true", help="run unit tests (for debugging)"
)
return parser
@staticmethod
def clean_args(args):
"""Cleans and normalizes input parameters
:param args: A dictionary containing the arguments.
:type args: dict
:raises Exception: No SMILES in params.
"""
defaults = {
"min_ph": 6.4,
"max_ph": 8.4,
"pka_precision": 1.0,
"label_states": False,
"test": False,
"max_variants": 128,
}
for key in defaults:
if key not in args:
args[key] = defaults[key]
keys = list(args.keys())
for key in keys:
if args[key] is None:
del args[key]
if not "smiles" in args and not "smiles_file" in args:
msg = "Error: No SMILES in params. Use the -h parameter for help."
print(msg)
raise Exception(msg)
# If the user provides a smiles string, turn it into a file-like StringIO
# object.
if "smiles" in args:
if isinstance(args["smiles"], str):
args["smiles_file"] = StringIO(args["smiles"])
args["smiles_and_data"] = LoadSMIFile(args["smiles_file"], args)
return args
class UtilFuncs:
"""A namespace to store functions for manipulating mol objects. To keep
things organized."""
@staticmethod
def neutralize_mol(mol):
"""All molecules should be neuralized to the extent possible. The user
should not be allowed to specify the valence of the atoms in most cases.
:param rdkit.Chem.rdchem.Mol mol: The rdkit Mol objet to be neutralized.
:return: The neutralized Mol object.
"""
# Get the reaction data
rxn_data = [
[
"[Ov1-1:1]",
"[Ov2+0:1]-[H]",
], # To handle O- bonded to only one atom (add hydrogen).
[
"[#7v4+1:1]-[H]",
"[#7v3+0:1]",
], # To handle N+ bonded to a hydrogen (remove hydrogen).
[
"[Ov2-:1]",
"[Ov2+0:1]",
], # To handle O- bonded to two atoms. Should not be Negative.
[
"[#7v3+1:1]",
"[#7v3+0:1]",
], # To handle N+ bonded to three atoms. Should not be positive.
[
"[#7v2-1:1]",
"[#7+0:1]-[H]",
], # To handle N- Bonded to two atoms. Add hydrogen.
# ['[N:1]=[N+0:2]=[N:3]-[H]', '[N:1]=[N+1:2]=[N+0:3]-[H]'], # To handle bad azide. Must be
# protonated. (Now handled
# elsewhere, before SMILES
# converted to Mol object.)
[
"[H]-[N:1]-[N:2]#[N:3]",
"[N:1]=[N+1:2]=[N:3]-[H]",
] # To handle bad azide. R-N-N#N should
# be R-N=[N+]=N
]
# Add substructures and reactions (initially none)
for i, rxn_datum in enumerate(rxn_data):
rxn_data[i].append(Chem.MolFromSmarts(rxn_datum[0]))
rxn_data[i].append(None)
# Add hydrogens (respects valence, so incomplete).
mol.UpdatePropertyCache(strict=False)
mol = Chem.AddHs(mol)
while True: # Keep going until all these issues have been resolved.
current_rxn = None # The reaction to perform.
current_rxn_str = None
for i, rxn_datum in enumerate(rxn_data):
(
reactant_smarts,
product_smarts,
substruct_match_mol,
rxn_placeholder,
) = rxn_datum
if mol.HasSubstructMatch(substruct_match_mol):
if rxn_placeholder is None:
current_rxn_str = reactant_smarts + ">>" + product_smarts
current_rxn = AllChem.ReactionFromSmarts(current_rxn_str)
rxn_data[i][3] = current_rxn # Update the placeholder.
else:
current_rxn = rxn_data[i][3]
break
# Perform the reaction if necessary
if current_rxn is None: # No reaction left, so break out of while loop.
break
else:
mol = current_rxn.RunReactants((mol,))[0][0]
mol.UpdatePropertyCache(strict=False) # Update valences
# The mols have been altered from the reactions described above, we
# need to resanitize them. Make sure aromatic rings are shown as such
# This catches all RDKit Errors. without the catchError and
# sanitizeOps the Chem.SanitizeMol can crash the program.
sanitize_string = Chem.SanitizeMol(
mol,
sanitizeOps=rdkit.Chem.rdmolops.SanitizeFlags.SANITIZE_ALL,
catchErrors=True,
)
return mol if sanitize_string.name == "SANITIZE_NONE" else None
@staticmethod
def convert_smiles_str_to_mol(smiles_str):
"""Given a SMILES string, check that it is actually a string and not a
None. Then try to convert it to an RDKit Mol Object.
:param string smiles_str: The SMILES string.
:return: A rdkit.Chem.rdchem.Mol object, or None if it is the wrong type or
if it fails to convert to a Mol Obj
"""
# Check that there are no type errors, ie Nones or non-string A
# non-string type will cause RDKit to hard crash
if smiles_str is None or type(smiles_str) is not str:
return None
# Try to fix azides here. They are just tricky to deal with.
smiles_str = smiles_str.replace("N=N=N", "N=[N+]=N")
smiles_str = smiles_str.replace("NN#N", "N=[N+]=N")
# Now convert to a mol object. Note the trick that is necessary to
# capture RDKit error/warning messages. See
# https://stackoverflow.com/questions/24277488/in-python-how-to-capture-the-stdout-from-a-c-shared-library-to-a-variable
stderr_fileno = sys.stderr.fileno()
stderr_save = os.dup(stderr_fileno)
stderr_pipe = os.pipe()
os.dup2(stderr_pipe[1], stderr_fileno)
os.close(stderr_pipe[1])
mol = Chem.MolFromSmiles(smiles_str)
os.close(stderr_fileno)
os.close(stderr_pipe[0])
os.dup2(stderr_save, stderr_fileno)
os.close(stderr_save)
# Check that there are None type errors Chem.MolFromSmiles has
# sanitize on which means if there is even a small error in the SMILES
# (kekulize, nitrogen charge...) then mol=None. ie.
# Chem.MolFromSmiles("C[N]=[N]=[N]") = None this is an example of an
# nitrogen charge error. It is cased in a try statement to be overly
# cautious.
return None if mol is None else mol
@staticmethod
def eprint(*args, **kwargs):
"""Error messages should be printed to STDERR. See
https://stackoverflow.com/questions/5574702/how-to-print-to-stderr-in-python"""
print(*args, file=sys.stderr, **kwargs)
class LoadSMIFile(object):
"""A generator class for loading in the SMILES strings from a file, one at
a time."""
def __init__(self, filename, args):
"""Initializes this class.
:param filename: The filename or file object (i.e., StringIO).
:type filename: str or StringIO
"""
self.args = args
if type(filename) is str:
# It's a filename
self.f = open(filename, "r")
else:
# It's a file object (i.e., StringIO)
self.f = filename
def __iter__(self):
"""Returns this generator object.
:return: This generator object.
:rtype: LoadSMIFile
"""
return self
def __next__(self):
"""Ensure Python3 compatibility.
:return: A dict, where the "smiles" key contains the canonical SMILES
string and the "data" key contains the remaining information
(e.g., the molecule name).
:rtype: dict
"""
return self.next()
def next(self):
"""Get the data associated with the next line.
:raises StopIteration: If there are no more lines left iin the file.
:return: A dict, where the "smiles" key contains the canonical SMILES
string and the "data" key contains the remaining information
(e.g., the molecule name).
:rtype: dict
"""
line = self.f.readline()
if line == "":
# EOF
self.f.close()
raise StopIteration()
return
# Divide line into smi and data
splits = line.split()
if len(splits) != 0:
# Generate mol object
smiles_str = splits[0]
# Convert from SMILES string to RDKIT Mol. This series of tests is
# to make sure the SMILES string is properly formed and to get it
# into a canonical form. Filter if failed.
mol = UtilFuncs.convert_smiles_str_to_mol(smiles_str)
if mol is None:
if "silent" in self.args and not self.args["silent"]:
UtilFuncs.eprint(
"WARNING: Skipping poorly formed SMILES string: " + line
)
return self.next()
# Handle nuetralizing the molecules. Filter if failed.
mol = UtilFuncs.neutralize_mol(mol)
if mol is None:
if "silent" in self.args and not self.args["silent"]:
UtilFuncs.eprint(
"WARNING: Skipping poorly formed SMILES string: " + line
)
return self.next()
# Remove the hydrogens.
try:
mol = Chem.RemoveHs(mol)
except:
if "silent" in self.args and not self.args["silent"]:
UtilFuncs.eprint(
"WARNING: Skipping poorly formed SMILES string: " + line
)
return self.next()
if mol is None:
if "silent" in self.args and not self.args["silent"]:
UtilFuncs.eprint(
"WARNING: Skipping poorly formed SMILES string: " + line
)
return self.next()
# Regenerate the smiles string (to standardize).
new_mol_string = Chem.MolToSmiles(mol, isomericSmiles=True)
return {"smiles": new_mol_string, "data": splits[1:]}
else:
# Blank line? Go to next one.
return self.next()
class Protonate(object):
"""A generator class for protonating SMILES strings, one at a time."""
def __init__(self, args):
"""Initialize the generator.
:param args: A dictionary containing the arguments.
:type args: dict
"""
# Make the args an object variable variable.
self.args = args
# A list to store the protonated SMILES strings associated with a
# single input model.
self.cur_prot_SMI = []
# Clean and normalize the args
self.args = ArgParseFuncs.clean_args(args)
# Make sure functions in ProtSubstructFuncs have access to the args.
ProtSubstructFuncs.args = args
# Load the substructures that can be protonated.
self.subs = ProtSubstructFuncs.load_protonation_substructs_calc_state_for_ph(
self.args["min_ph"], self.args["max_ph"], self.args["pka_precision"]
)
def __iter__(self):
"""Returns this generator object.
:return: This generator object.
:rtype: Protonate
"""
return self
def __next__(self):
"""Ensure Python3 compatibility.
:return: A dict, where the "smiles" key contains the canonical SMILES
string and the "data" key contains the remaining information
(e.g., the molecule name).
:rtype: dict
"""
return self.next()
def next(self):
"""Return the next protonated SMILES string.
:raises StopIteration: If there are no more lines left iin the file.
:return: A dict, where the "smiles" key contains the canonical SMILES
string and the "data" key contains the remaining information
(e.g., the molecule name).
:rtype: dict
"""
# If there are any SMILES strings in self.cur_prot_SMI, just return
# the first one and update the list to include only the remaining.
if len(self.cur_prot_SMI) > 0:
first, self.cur_prot_SMI = self.cur_prot_SMI[0], self.cur_prot_SMI[1:]
return first
# self.cur_prot_SMI is empty, so try to add more to it.
# Get the next SMILES string from the input file.
try:
smile_and_datum = self.args["smiles_and_data"].next()
except StopIteration:
# There are no more input smiles strings...
raise StopIteration()
# Keep track of the original smiles string for reporting, starting the
# protonation process, etc.
orig_smi = smile_and_datum["smiles"]
# Dimorphite-DL may protonate some sites in ways that produce invalid
# SMILES. We need to keep track of all smiles so we can "rewind" to
# the last valid one, should things go south.
properly_formed_smi_found = [orig_smi]
# Everything on SMILES line but the SMILES string itself (e.g., the
# molecule name).
data = smile_and_datum["data"]
# Collect the data associated with this smiles (e.g., the molecule
# name).
tag = " ".join(data)
# sites is a list of (atom index, "PROTONATED|DEPROTONATED|BOTH",
# reaction name, mol). Note that the second entry indicates what state
# the site SHOULD be in (not the one it IS in per the SMILES string).
# It's calculated based on the probablistic distributions obtained
# during training.
(
sites,
mol_used_to_idx_sites,
) = ProtSubstructFuncs.get_prot_sites_and_target_states(orig_smi, self.subs)
new_mols = [mol_used_to_idx_sites]
if len(sites) > 0:
for site in sites:
# Make a new smiles with the correct protonation state. Note that
# new_smis is a growing list. This is how multiple protonation
# sites are handled.
new_mols = ProtSubstructFuncs.protonate_site(new_mols, site)
if len(new_mols) > self.args["max_variants"]:
new_mols = new_mols[: self.args["max_variants"]]
if "silent" in self.args and not self.args["silent"]:
UtilFuncs.eprint(
"WARNING: Limited number of variants to "
+ str(self.args["max_variants"])
+ ": "
+ orig_smi
)
# Go through each of these new molecules and add them to the
# properly_formed_smi_found, in case you generate a poorly
# formed SMILES in the future and have to "rewind."
properly_formed_smi_found += [Chem.MolToSmiles(m) for m in new_mols]
else:
# Deprotonate the mols (because protonate_site never called to do
# it).
mol_used_to_idx_sites = Chem.RemoveHs(mol_used_to_idx_sites)
new_mols = [mol_used_to_idx_sites]
# Go through each of these new molecules and add them to the
# properly_formed_smi_found, in case you generate a poorly formed
# SMILES in the future and have to "rewind."
properly_formed_smi_found.append(Chem.MolToSmiles(mol_used_to_idx_sites))
# In some cases, the script might generate redundant molecules.
# Phosphonates, when the pH is between the two pKa values and the
# stdev value is big enough, for example, will generate two identical
# BOTH states. Let's remove this redundancy.
new_smis = list(
set(
[
Chem.MolToSmiles(m, isomericSmiles=True, canonical=True)
for m in new_mols
]
)
)
# Sometimes Dimorphite-DL generates molecules that aren't actually
# possible. Simply convert these to mol objects to eliminate the bad
# ones (that are None).
new_smis = [
s for s in new_smis if UtilFuncs.convert_smiles_str_to_mol(s) is not None
]
# If there are no smi left, return the input one at the very least.
# All generated forms have apparently been judged
# inappropriate/malformed.
if len(new_smis) == 0:
properly_formed_smi_found.reverse()
for smi in properly_formed_smi_found:
if UtilFuncs.convert_smiles_str_to_mol(smi) is not None:
new_smis = [smi]
break
# If the user wants to see the target states, add those to the ends of
# each line.
if self.args["label_states"]:
states = "\t".join([x[1] for x in sites])
new_lines = [x + "\t" + tag + "\t" + states for x in new_smis]
else:
new_lines = [x + "\t" + tag for x in new_smis]
self.cur_prot_SMI = new_lines
return self.next()
class ProtSubstructFuncs:
"""A namespace to store functions for loading the substructures that can
be protonated. To keep things organized."""
args = {}
@staticmethod
def load_substructre_smarts_file():
"""Loads the substructure smarts file. Similar to just using readlines,
except it filters out comments (lines that start with "#").
:return: A list of the lines in the site_substructures.smarts file,
except blank lines and lines that start with "#"
"""
pwd = os.path.dirname(os.path.realpath(__file__))
site_structures_file = "{}/{}".format(pwd, "site_substructures.smarts")
lines = [
l
for l in open(site_structures_file, "r")
if l.strip() != "" and not l.startswith("#")
]
return lines
@staticmethod
def load_protonation_substructs_calc_state_for_ph(
min_ph=6.4, max_ph=8.4, pka_std_range=1
):
"""A pre-calculated list of R-groups with protonation sites, with their
likely pKa bins.
:param float min_ph: The lower bound on the pH range, defaults to 6.4.
:param float max_ph: The upper bound on the pH range, defaults to 8.4.
:param pka_std_range: Basically the precision (stdev from predicted pKa to
consider), defaults to 1.
:return: A dict of the protonation substructions for the specified pH
range.
"""
subs = []
for line in ProtSubstructFuncs.load_substructre_smarts_file():
line = line.strip()
sub = {}
if line is not "":
splits = line.split()
sub["name"] = splits[0]
sub["smart"] = splits[1]
sub["mol"] = Chem.MolFromSmarts(sub["smart"])
pka_ranges = [splits[i : i + 3] for i in range(2, len(splits) - 1, 3)]
prot = []
for pka_range in pka_ranges:
site = pka_range[0]
std = float(pka_range[2]) * pka_std_range
mean = float(pka_range[1])
protonation_state = ProtSubstructFuncs.define_protonation_state(
mean, std, min_ph, max_ph
)
prot.append([site, protonation_state])
sub["prot_states_for_pH"] = prot
subs.append(sub)
return subs
@staticmethod
def define_protonation_state(mean, std, min_ph, max_ph):
"""Updates the substructure definitions to include the protonation state
based on the user-given pH range. The size of the pKa range is also based
on the number of standard deviations to be considered by the user param.
:param float mean: The mean pKa.
:param float std: The precision (stdev).
:param float min_ph: The min pH of the range.
:param float max_ph: The max pH of the range.
:return: A string describing the protonation state.
"""
min_pka = mean - std
max_pka = mean + std
# This needs to be reassigned, and 'ERROR' should never make it past
# the next set of checks.
if min_pka <= max_ph and min_ph <= max_pka:
protonation_state = "BOTH"
elif mean > max_ph:
protonation_state = "PROTONATED"
else:
protonation_state = "DEPROTONATED"
return protonation_state
@staticmethod
def get_prot_sites_and_target_states(smi, subs):
"""For a single molecule, find all possible matches in the protonation
R-group list, subs. Items that are higher on the list will be matched
first, to the exclusion of later items.
:param string smi: A SMILES string.
:param list subs: Substructure information.
:return: A list of protonation sites (atom index), pKa bin.
('PROTONATED', 'BOTH', or 'DEPROTONATED'), and reaction name.
Also, the mol object that was used to generate the atom index.
"""
# Convert the Smiles string (smi) to an RDKit Mol Obj
mol_used_to_idx_sites = UtilFuncs.convert_smiles_str_to_mol(smi)
# Check Conversion worked
if mol_used_to_idx_sites is None:
UtilFuncs.eprint("ERROR: ", smi)
return []
# Try to Add hydrogens. if failed return []
try:
mol_used_to_idx_sites = Chem.AddHs(mol_used_to_idx_sites)
except:
UtilFuncs.eprint("ERROR: ", smi)
return []
# Check adding Hs worked
if mol_used_to_idx_sites is None:
UtilFuncs.eprint("ERROR: ", smi)
return []
ProtectUnprotectFuncs.unprotect_molecule(mol_used_to_idx_sites)
protonation_sites = []
for item in subs:
smart = item["mol"]
if mol_used_to_idx_sites.HasSubstructMatch(smart):
matches = ProtectUnprotectFuncs.get_unprotected_matches(
mol_used_to_idx_sites, smart
)
prot = item["prot_states_for_pH"]
for match in matches:
# We want to move the site from being relative to the
# substructure, to the index on the main molecule.
for site in prot:
proton = int(site[0])
category = site[1]
new_site = (match[proton], category, item["name"])
if not new_site in protonation_sites:
# Because sites must be unique.
protonation_sites.append(new_site)
ProtectUnprotectFuncs.protect_molecule(mol_used_to_idx_sites, match)
return protonation_sites, mol_used_to_idx_sites
@staticmethod
def protonate_site(mols, site):
"""Given a list of molecule objects, we protonate the site.
:param list mols: The list of molecule objects.
:param tuple site: Information about the protonation site.
(idx, target_prot_state, prot_site_name)
:return: A list of the appropriately protonated molecule objects.
"""
# Decouple the atom index and its target protonation state from the
# site tuple
idx, target_prot_state, prot_site_name = site
state_to_charge = {"DEPROTONATED": [-1], "PROTONATED": [0], "BOTH": [-1, 0]}
charges = state_to_charge[target_prot_state]
# Now make the actual smiles match the target protonation state.
output_mols = ProtSubstructFuncs.set_protonation_charge(
mols, idx, charges, prot_site_name
)
return output_mols
@staticmethod
def set_protonation_charge(mols, idx, charges, prot_site_name):
"""Sets the atomic charge on a particular site for a set of SMILES.
:param list mols: A list of the input molecule
objects.
:param int idx: The index of the atom to consider.
:param list charges: A list of the charges (ints) to
assign at this site.
:param string prot_site_name: The name of the protonation site.
:return: A list of the processed (protonated/deprotonated) molecule
objects.
"""
# Sets up the output list and the Nitrogen charge
output = []
for charge in charges:
# The charge for Nitrogens is 1 higher than others (i.e.,
# protonated state is positively charged).
nitrogen_charge = charge + 1
# But there are a few nitrogen moieties where the acidic group is
# the neutral one. Amides are a good example. I gave some thought
# re. how to best flag these. I decided that those
# nitrogen-containing moieties where the acidic group is neutral
# (rather than positively charged) will have "*" in the name.
if "*" in prot_site_name:
nitrogen_charge = nitrogen_charge - 1 # Undo what was done previously.
for mol in mols:
# Make a copy of the molecule.
mol_copy = copy.deepcopy(mol)
# Remove hydrogen atoms.
try:
mol_copy = Chem.RemoveHs(mol_copy)
except:
if "silent" in ProtSubstructFuncs.args and not ProtSubstructFuncs.args["silent"]:
UtilFuncs.eprint(
"WARNING: Skipping poorly formed SMILES string: "
+ Chem.MolToSmiles(mol_copy)
)
continue
atom = mol_copy.GetAtomWithIdx(idx)
explicit_bond_order_total = sum(
[b.GetBondTypeAsDouble() for b in atom.GetBonds()]
)
# Assign the protonation charge, with special care for
# nitrogens
element = atom.GetAtomicNum()
if element == 7:
atom.SetFormalCharge(nitrogen_charge)
# Need to figure out how many hydrogens to add.
if nitrogen_charge == 1 and explicit_bond_order_total == 1:
atom.SetNumExplicitHs(3)
elif nitrogen_charge == 1 and explicit_bond_order_total == 2:
atom.SetNumExplicitHs(2)
elif nitrogen_charge == 1 and explicit_bond_order_total == 3:
atom.SetNumExplicitHs(1)
elif nitrogen_charge == 0 and explicit_bond_order_total == 1:
atom.SetNumExplicitHs(2)
elif nitrogen_charge == 0 and explicit_bond_order_total == 2:
atom.SetNumExplicitHs(1)
elif nitrogen_charge == -1 and explicit_bond_order_total == 2:
atom.SetNumExplicitHs(0)
elif nitrogen_charge == -1 and explicit_bond_order_total == 1:
atom.SetNumExplicitHs(1)
#### JDD
else:
atom.SetFormalCharge(charge)
if element == 8 or element == 16: # O and S
if charge == 0 and explicit_bond_order_total == 1:
atom.SetNumExplicitHs(1)
elif charge == -1 and explicit_bond_order_total == 1:
atom.SetNumExplicitHs(0)
# Deprotonating protonated aromatic nitrogen gives [nH-]. Change this
# to [n-].
if "[nH-]" in Chem.MolToSmiles(mol_copy):
atom.SetNumExplicitHs(0)
mol_copy.UpdatePropertyCache(strict=False)
# prod.UpdatePropertyCache(strict=False)
output.append(mol_copy)
return output
class ProtectUnprotectFuncs:
"""A namespace for storing functions that are useful for protecting and
unprotecting molecules. To keep things organized. We need to identify and
mark groups that have been matched with a substructure."""
@staticmethod
def unprotect_molecule(mol):
"""Sets the protected property on all atoms to 0. This also creates the
property for new molecules.
:param rdkit.Chem.rdchem.Mol mol: The rdkit Mol object.
:type mol: The rdkit Mol object with atoms unprotected.
"""
for atom in mol.GetAtoms():
atom.SetProp("_protected", "0")
@staticmethod
def protect_molecule(mol, match):
"""Given a 'match', a list of molecules idx's, we set the protected status
of each atom to 1. This will prevent any matches using that atom in the
future.
:param rdkit.Chem.rdchem.Mol mol: The rdkit Mol object to protect.
:param list match: A list of molecule idx's.
"""
for idx in match:
atom = mol.GetAtomWithIdx(idx)
atom.SetProp("_protected", "1")
@staticmethod
def get_unprotected_matches(mol, substruct):
"""Finds substructure matches with atoms that have not been protected.
Returns list of matches, each match a list of atom idxs.
:param rdkit.Chem.rdchem.Mol mol: The Mol object to consider.
:param string substruct: The SMARTS string of the substructure ot match.
:return: A list of the matches. Each match is itself a list of atom idxs.
"""
matches = mol.GetSubstructMatches(substruct)
unprotected_matches = []
for match in matches:
if ProtectUnprotectFuncs.is_match_unprotected(mol, match):
unprotected_matches.append(match)
return unprotected_matches
@staticmethod
def is_match_unprotected(mol, match):
"""Checks a molecule to see if the substructure match contains any
protected atoms.
:param rdkit.Chem.rdchem.Mol mol: The Mol object to check.
:param list match: The match to check.
:return: A boolean, whether the match is present or not.
"""
for idx in match:
atom = mol.GetAtomWithIdx(idx)
protected = atom.GetProp("_protected")
if protected == "1":
return False
return True
class TestFuncs:
"""A namespace for storing functions that perform tests on the code. To
keep things organized."""
@staticmethod
def test():
"""Tests all the 38 groups."""
# fmt: off
smis = [
# input smiles, protonated, deprotonated, category
["C#CCO", "C#CCO", "C#CC[O-]", "Alcohol"],
["C(=O)N", "NC=O", "[NH-]C=O", "Amide"],
["CC(=O)NOC(C)=O", "CC(=O)NOC(C)=O", "CC(=O)[N-]OC(C)=O", "Amide_electronegative"],
["COC(=N)N", "COC(N)=[NH2+]", "COC(=N)N", "AmidineGuanidine2"],
["Brc1ccc(C2NCCS2)cc1", "Brc1ccc(C2[NH2+]CCS2)cc1", "Brc1ccc(C2NCCS2)cc1", "Amines_primary_secondary_tertiary"],
["CC(=O)[n+]1ccc(N)cc1", "CC(=O)[n+]1ccc([NH3+])cc1", "CC(=O)[n+]1ccc(N)cc1", "Anilines_primary"],
["CCNc1ccccc1", "CC[NH2+]c1ccccc1", "CCNc1ccccc1", "Anilines_secondary"],
["Cc1ccccc1N(C)C", "Cc1ccccc1[NH+](C)C", "Cc1ccccc1N(C)C", "Anilines_tertiary"],
["BrC1=CC2=C(C=C1)NC=C2", "Brc1ccc2[nH]ccc2c1", "Brc1ccc2[n-]ccc2c1", "Indole_pyrrole"],
["O=c1cc[nH]cc1", "O=c1cc[nH]cc1", "O=c1cc[n-]cc1", "Aromatic_nitrogen_protonated"],
["C-N=[N+]=[N@H]", "CN=[N+]=N", "CN=[N+]=[N-]", "Azide"],
["BrC(C(O)=O)CBr", "O=C(O)C(Br)CBr", "O=C([O-])C(Br)CBr", "Carboxyl"],
["NC(NN=O)=N", "NC(=[NH2+])NN=O", "N=C(N)NN=O", "AmidineGuanidine1"],
["C(F)(F)(F)C(=O)NC(=O)C", "CC(=O)NC(=O)C(F)(F)F", "CC(=O)[N-]C(=O)C(F)(F)F", "Imide"],
["O=C(C)NC(C)=O", "CC(=O)NC(C)=O", "CC(=O)[N-]C(C)=O", "Imide2"],
["CC(C)(C)C(N(C)O)=O", "CN(O)C(=O)C(C)(C)C", "CN([O-])C(=O)C(C)(C)C", "N-hydroxyamide"],
["C[N+](O)=O", "C[N+](=O)O", "C[N+](=O)[O-]", "Nitro"],
["O=C1C=C(O)CC1", "O=C1C=C(O)CC1", "O=C1C=C([O-])CC1", "O=C-C=C-OH"],
["C1CC1OO", "OOC1CC1", "[O-]OC1CC1", "Peroxide2"],
["C(=O)OO", "O=COO", "O=CO[O-]", "Peroxide1"],
["Brc1cc(O)cc(Br)c1", "Oc1cc(Br)cc(Br)c1", "[O-]c1cc(Br)cc(Br)c1", "Phenol"],
["CC(=O)c1ccc(S)cc1", "CC(=O)c1ccc(S)cc1", "CC(=O)c1ccc([S-])cc1", "Phenyl_Thiol"],
["C=CCOc1ccc(C(=O)O)cc1", "C=CCOc1ccc(C(=O)O)cc1", "C=CCOc1ccc(C(=O)[O-])cc1", "Phenyl_carboxyl"],
["COP(=O)(O)OC", "COP(=O)(O)OC", "COP(=O)([O-])OC", "Phosphate_diester"],
["CP(C)(=O)O", "CP(C)(=O)O", "CP(C)(=O)[O-]", "Phosphinic_acid"],
["CC(C)OP(C)(=O)O", "CC(C)OP(C)(=O)O", "CC(C)OP(C)(=O)[O-]", "Phosphonate_ester"],
["CC1(C)OC(=O)NC1=O", "CC1(C)OC(=O)NC1=O", "CC1(C)OC(=O)[N-]C1=O", "Ringed_imide1"],
["O=C(N1)C=CC1=O", "O=C1C=CC(=O)N1", "O=C1C=CC(=O)[N-]1", "Ringed_imide2"],
["O=S(OC)(O)=O", "COS(=O)(=O)O", "COS(=O)(=O)[O-]", "Sulfate"],
["COc1ccc(S(=O)O)cc1", "COc1ccc(S(=O)O)cc1", "COc1ccc(S(=O)[O-])cc1", "Sulfinic_acid"],
["CS(N)(=O)=O", "CS(N)(=O)=O", "CS([NH-])(=O)=O", "Sulfonamide"],
["CC(=O)CSCCS(O)(=O)=O", "CC(=O)CSCCS(=O)(=O)O", "CC(=O)CSCCS(=O)(=O)[O-]", "Sulfonate"],
["CC(=O)S", "CC(=O)S", "CC(=O)[S-]", "Thioic_acid"],
["C(C)(C)(C)(S)", "CC(C)(C)S", "CC(C)(C)[S-]", "Thiol"],
["Brc1cc[nH+]cc1", "Brc1cc[nH+]cc1", "Brc1ccncc1", "Aromatic_nitrogen_unprotonated"],
["C=C(O)c1c(C)cc(C)cc1C", "C=C(O)c1c(C)cc(C)cc1C", "C=C([O-])c1c(C)cc(C)cc1C", "Vinyl_alcohol"],
["CC(=O)ON", "CC(=O)O[NH3+]", "CC(=O)ON", "Primary_hydroxyl_amine"],
# Note testing Internal_phosphate_polyphos_chain and
# Initial_phosphate_like_in_ATP_ADP here because no way to
# generate monoprotic compounds to test them. See Other tests
# people...
]
smis_phos = [
# [input smiles, protonated, deprotonated1, deprotonated2, category]
["O=P(O)(O)OCCCC", "CCCCOP(=O)(O)O", "CCCCOP(=O)([O-])O", "CCCCOP(=O)([O-])[O-]", "Phosphate"],
["CC(P(O)(O)=O)C", "CC(C)P(=O)(O)O", "CC(C)P(=O)([O-])O", "CC(C)P(=O)([O-])[O-]", "Phosphonate"],
]
# fmt: on
cats_with_two_prot_sites = [inf[4] for inf in smis_phos]
# Load the average pKa values.
average_pkas = {
l.split()[0].replace("*", ""): float(l.split()[3])
for l in ProtSubstructFuncs.load_substructre_smarts_file()
if l.split()[0] not in cats_with_two_prot_sites
}
average_pkas_phos = {
l.split()[0].replace("*", ""): [float(l.split()[3]), float(l.split()[6])]
for l in ProtSubstructFuncs.load_substructre_smarts_file()
if l.split()[0] in cats_with_two_prot_sites
}
print("Running Tests")
print("=============")
print("")
print("Very Acidic (pH -10000000)")
print("--------------------------")
print("")
args = {
"min_ph": -10000000,
"max_ph": -10000000,
"pka_precision": 0.5,
"smiles": "",
"label_states": True,
"silent": True
}
for smi, protonated, deprotonated, category in smis:
args["smiles"] = smi
TestFuncs.test_check(args, [protonated], ["PROTONATED"])
# Test phosphates separately
for smi, protonated, mix, deprotonated, category in smis_phos:
args["smiles"] = smi
TestFuncs.test_check(args, [protonated], ["PROTONATED"])
args["min_ph"] = 10000000
args["max_ph"] = 10000000
print("")
print("Very Basic (pH 10000000)")
print("------------------------")
print("")
for smi, protonated, deprotonated, category in smis:
args["smiles"] = smi
TestFuncs.test_check(args, [deprotonated], ["DEPROTONATED"])
for smi, protonated, mix, deprotonated, category in smis_phos:
args["smiles"] = smi
TestFuncs.test_check(args, [deprotonated], ["DEPROTONATED"])
print("")
print("pH is Category pKa")
print("------------------")
print("")
for smi, protonated, deprotonated, category in smis:
avg_pka = average_pkas[category]
args["smiles"] = smi
args["min_ph"] = avg_pka
args["max_ph"] = avg_pka
TestFuncs.test_check(args, [protonated, deprotonated], ["BOTH"])
for smi, protonated, mix, deprotonated, category in smis_phos:
args["smiles"] = smi
avg_pka = average_pkas_phos[category][0]
args["min_ph"] = avg_pka
args["max_ph"] = avg_pka
TestFuncs.test_check(args, [mix, protonated], ["BOTH"])
avg_pka = average_pkas_phos[category][1]
args["min_ph"] = avg_pka
args["max_ph"] = avg_pka
TestFuncs.test_check(
args, [mix, deprotonated], ["DEPROTONATED", "DEPROTONATED"]
)
avg_pka = 0.5 * (
average_pkas_phos[category][0] + average_pkas_phos[category][1]
)
args["min_ph"] = avg_pka
args["max_ph"] = avg_pka
args["pka_precision"] = 5 # Should give all three
TestFuncs.test_check(
args, [mix, deprotonated, protonated], ["BOTH", "BOTH"]
)
print("")
print("Other Tests")
print("-----------")
print("")
# Make sure no carbanion (old bug).
smi = "Cc1nc2cc(-c3[nH]c4cc5ccccc5c5c4c3CCN(C(=O)O)[C@@H]5O)cc3c(=O)[nH][nH]c(n1)c23"
output = list(Protonate({"smiles": smi, "test": False, "silent": True}))
if "[C-]" in "".join(output).upper():
msg = "Processing " + smi + " produced a molecule with a carbanion!"
raise Exception(msg)
else:
print("(CORRECT) No carbanion: " + smi)
# Make sure max number of variants is limited (old bug).
smi = "CCCC[C@@H](C(=O)N)NC(=O)[C@@H](NC(=O)[C@@H](NC(=O)[C@@H](NC(=O)[C@H](C(C)C)NC(=O)[C@@H](NC(=O)[C@H](Cc1c[nH]c2c1cccc2)NC(=O)[C@@H](NC(=O)[C@@H](Cc1ccc(cc1)O)N)CCC(=O)N)C)C)Cc1nc[nH]c1)Cc1ccccc1"
output = list(Protonate({"smiles": smi, "test": False, "silent": True}))
if len(output) != 128:
msg = "Processing " + smi + " produced more than 128 variants!"
raise Exception(msg)
else:
print("(CORRECT) Produced 128 variants: " + smi)
# Make sure ATP and NAD work at different pHs (because can't test
# Internal_phosphate_polyphos_chain and
# Initial_phosphate_like_in_ATP_ADP with monoprotic examples.
specific_examples = [
[
"O=P(O)(OP(O)(OP(O)(OCC1OC(C(C1O)O)N2C=NC3=C2N=CN=C3N)=O)=O)O", # input, ATP
(
0.5,
"[NH3+]c1[nH+]c[nH+]c2c1[nH+]cn2C1OC(COP(=O)(O)OP(=O)(O)OP(=O)(O)O)C(O)C1O",
),
(
1.0,
"[NH3+]c1[nH+]c[nH+]c2c1[nH+]cn2C1OC(COP(=O)(O)OP(=O)([O-])OP(=O)(O)O)C(O)C1O",
),
(
2.6,
"[NH3+]c1[nH+]c[nH+]c2c1[nH+]cn2C1OC(COP(=O)([O-])OP(=O)([O-])OP(=O)([O-])O)C(O)C1O",
),
(
7.0,
"Nc1ncnc2c1ncn2C1OC(COP(=O)([O-])OP(=O)([O-])OP(=O)([O-])[O-])C(O)C1O",
),
],
[
"O=P(O)(OP(O)(OCC1C(O)C(O)C(N2C=NC3=C(N)N=CN=C32)O1)=O)OCC(O4)C(O)C(O)C4[N+]5=CC=CC(C(N)=O)=C5", # input, NAD
(
0.5,
"NC(=O)c1ccc[n+](C2OC(COP(=O)(O)OP(=O)(O)OCC3OC(n4cnc5c([NH3+])ncnc54)C(O)C3O)C(O)C2O)c1",
),
(
2.5,
"NC(=O)c1ccc[n+](C2OC(COP(=O)([O-])OP(=O)([O-])OCC3OC(n4cnc5c([NH3+])ncnc54)C(O)C3O)C(O)C2O)c1",
),
(
7.4,
"NC(=O)c1ccc[n+](C2OC(COP(=O)([O-])OP(=O)([O-])OCC3OC(n4cnc5c(N)ncnc54)C(O)C3O)C(O)C2O)c1",
),
],
]
for example in specific_examples:
smi = example[0]
for ph, expected_output in example[1:]:
output = list(
Protonate(
{
"smiles": smi,
"test": False,
"min_ph": ph,
"max_ph": ph,
"pka_precision": 0,
"silent": True
}
)
)
if output[0].strip() == expected_output:
print(
"(CORRECT) "
+ smi
+ " at pH "
+ str(ph)
+ " is "
+ output[0].strip()
)
else:
msg = (
smi
+ " at pH "
+ str(ph)
+ " should be "
+ expected_output
+ ", but it is "
+ output[0].strip()
)
raise Exception(msg)
@staticmethod
def test_check(args, expected_output, labels):
"""Tests most ionizable groups. The ones that can only loose or gain a single proton.
:param args: The arguments to pass to protonate()
:param expected_output: A list of the expected SMILES-strings output.
:param labels: The labels. A list containing combo of BOTH, PROTONATED,
DEPROTONATED.
:raises Exception: Wrong number of states produced.
:raises Exception: Unexpected output SMILES.
:raises Exception: Wrong labels.
"""
output = list(Protonate(args))
output = [o.split() for o in output]
num_states = len(expected_output)
if len(output) != num_states:
msg = (
args["smiles"]
+ " should have "
+ str(num_states)
+ " states at at pH "
+ str(args["min_ph"])
+ ": "
+ str(output)
)
UtilFuncs.eprint(msg)
raise Exception(msg)
if len(set([l[0] for l in output]) - set(expected_output)) != 0:
msg = (
args["smiles"]
+ " is not "
+ " AND ".join(expected_output)
+ " at pH "
+ str(args["min_ph"])
+ " - "
+ str(args["max_ph"])
+ "; it is "
+ " AND ".join([l[0] for l in output])
)
UtilFuncs.eprint(msg)
raise Exception(msg)
if len(set([l[1] for l in output]) - set(labels)) != 0:
msg = (
args["smiles"]
+ " not labeled as "
+ " AND ".join(labels)
+ "; it is "
+ " AND ".join([l[1] for l in output])
)
UtilFuncs.eprint(msg)
raise Exception(msg)
ph_range = sorted(list(set([args["min_ph"], args["max_ph"]])))
ph_range_str = "(" + " - ".join("{0:.2f}".format(n) for n in ph_range) + ")"
print(
"(CORRECT) "
+ ph_range_str.ljust(10)
+ " "
+ args["smiles"]
+ " => "
+ " AND ".join([l[0] for l in output])
)
def run(**kwargs):
"""A helpful, importable function for those who want to call Dimorphite-DL
from another Python script rather than the command line. Note that this
function accepts keyword arguments that match the command-line parameters
exactly. If you want to pass and return a list of RDKit Mol objects, import
run_with_mol_list() instead.
:param **kwargs: For a complete description, run dimorphite_dl.py from the
command line with the -h option.
:type kwargs: dict
"""
# Run the main function with the specified arguments.
main(kwargs)
def run_with_mol_list(mol_lst, **kwargs):
"""A helpful, importable function for those who want to call Dimorphite-DL
from another Python script rather than the command line. Note that this
function is for passing Dimorphite-DL a list of RDKit Mol objects, together
with command-line parameters. If you want to use only the same parameters
that you would use from the command line, import run() instead.
:param mol_lst: A list of rdkit.Chem.rdchem.Mol objects.
:type mol_lst: list
:raises Exception: If the **kwargs includes "smiles", "smiles_file",
"output_file", or "test" parameters.
:return: A list of properly protonated rdkit.Chem.rdchem.Mol objects.
:rtype: list
"""
# Do a quick check to make sure the user input makes sense.
for bad_arg in ["smiles", "smiles_file", "output_file", "test"]:
if bad_arg in kwargs:
msg = (
"You're using Dimorphite-DL's run_with_mol_list(mol_lst, "
+ '**kwargs) function, but you also passed the "'
+ bad_arg
+ '" argument. Did you mean to use the '
+ "run(**kwargs) function instead?"
)
UtilFuncs.eprint(msg)
raise Exception(msg)
# Set the return_as_list flag so main() will return the protonated smiles
# as a list.
kwargs["return_as_list"] = True
# Having reviewed the code, it will be very difficult to rewrite it so
# that a list of Mol objects can be used directly. Intead, convert this
# list of mols to smiles and pass that. Not efficient, but it will work.
protonated_smiles_and_props = []
for m in mol_lst:
props = m.GetPropsAsDict()
kwargs["smiles"] = Chem.MolToSmiles(m, isomericSmiles=True)
protonated_smiles_and_props.extend(
[(s.split("\t")[0], props) for s in main(kwargs)]
)
# Now convert the list of protonated smiles strings back to RDKit Mol
# objects. Also, add back in the properties from the original mol objects.
mols = []
for s, props in protonated_smiles_and_props:
m = Chem.MolFromSmiles(s)
if m:
for prop, val in props.items():
if type(val) is int:
m.SetIntProp(prop, val)
elif type(val) is float:
m.SetDoubleProp(prop, val)
elif type(val) is bool:
m.SetBoolProp(prop, val)
else:
m.SetProp(prop, str(val))
mols.append(m)
else:
UtilFuncs.eprint(
"WARNING: Could not process molecule with SMILES string "
+ s
+ " and properties "
+ str(props)
)
return mols
if __name__ == "__main__":
main()
|
[
"os.dup2",
"rdkit.Chem.AddHs",
"os.close",
"rdkit.Chem.MolFromSmiles",
"rdkit.Chem.MolToSmiles",
"rdkit.Chem.RemoveHs",
"os.dup",
"os.path.realpath",
"io.StringIO",
"rdkit.Chem.SanitizeMol",
"rdkit.Chem.AllChem.ReactionFromSmarts",
"copy.deepcopy",
"rdkit.Chem.MolFromSmarts",
"rdkit.RDLogger.DisableLog",
"os.pipe",
"sys.stderr.fileno"
] |
[((1597, 1627), 'rdkit.RDLogger.DisableLog', 'RDLogger.DisableLog', (['"""rdApp.*"""'], {}), "('rdApp.*')\n", (1616, 1627), False, 'from rdkit import RDLogger\n'), ((10293, 10308), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['mol'], {}), '(mol)\n', (10303, 10308), False, 'from rdkit import Chem\n'), ((11817, 11921), 'rdkit.Chem.SanitizeMol', 'Chem.SanitizeMol', (['mol'], {'sanitizeOps': 'rdkit.Chem.rdmolops.SanitizeFlags.SANITIZE_ALL', 'catchErrors': '(True)'}), '(mol, sanitizeOps=rdkit.Chem.rdmolops.SanitizeFlags.\n SANITIZE_ALL, catchErrors=True)\n', (11833, 11921), False, 'from rdkit import Chem\n'), ((13129, 13148), 'sys.stderr.fileno', 'sys.stderr.fileno', ([], {}), '()\n', (13146, 13148), False, 'import sys\n'), ((13171, 13192), 'os.dup', 'os.dup', (['stderr_fileno'], {}), '(stderr_fileno)\n', (13177, 13192), False, 'import os\n'), ((13215, 13224), 'os.pipe', 'os.pipe', ([], {}), '()\n', (13222, 13224), False, 'import os\n'), ((13233, 13271), 'os.dup2', 'os.dup2', (['stderr_pipe[1]', 'stderr_fileno'], {}), '(stderr_pipe[1], stderr_fileno)\n', (13240, 13271), False, 'import os\n'), ((13280, 13304), 'os.close', 'os.close', (['stderr_pipe[1]'], {}), '(stderr_pipe[1])\n', (13288, 13304), False, 'import os\n'), ((13320, 13350), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles_str'], {}), '(smiles_str)\n', (13338, 13350), False, 'from rdkit import Chem\n'), ((13360, 13383), 'os.close', 'os.close', (['stderr_fileno'], {}), '(stderr_fileno)\n', (13368, 13383), False, 'import os\n'), ((13392, 13416), 'os.close', 'os.close', (['stderr_pipe[0]'], {}), '(stderr_pipe[0])\n', (13400, 13416), False, 'import os\n'), ((13425, 13460), 'os.dup2', 'os.dup2', (['stderr_save', 'stderr_fileno'], {}), '(stderr_save, stderr_fileno)\n', (13432, 13460), False, 'import os\n'), ((13469, 13490), 'os.close', 'os.close', (['stderr_save'], {}), '(stderr_save)\n', (13477, 13490), False, 'import os\n'), ((55358, 55398), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['m'], {'isomericSmiles': '(True)'}), '(m, isomericSmiles=True)\n', (55374, 55398), False, 'from rdkit import Chem\n'), ((55744, 55765), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['s'], {}), '(s)\n', (55762, 55765), False, 'from rdkit import Chem\n'), ((17596, 17638), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {'isomericSmiles': '(True)'}), '(mol, isomericSmiles=True)\n', (17612, 17638), False, 'from rdkit import Chem\n'), ((22693, 22729), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['mol_used_to_idx_sites'], {}), '(mol_used_to_idx_sites)\n', (22706, 22729), False, 'from rdkit import Chem\n'), ((25273, 25299), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (25289, 25299), False, 'import os\n'), ((29206, 29239), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['mol_used_to_idx_sites'], {}), '(mol_used_to_idx_sites)\n', (29216, 29239), False, 'from rdkit import Chem\n'), ((8173, 8197), 'io.StringIO', 'StringIO', (["args['smiles']"], {}), "(args['smiles'])\n", (8181, 8197), False, 'from io import StringIO\n'), ((10102, 10134), 'rdkit.Chem.MolFromSmarts', 'Chem.MolFromSmarts', (['rxn_datum[0]'], {}), '(rxn_datum[0])\n', (10120, 10134), False, 'from rdkit import Chem\n'), ((16945, 16963), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['mol'], {}), '(mol)\n', (16958, 16963), False, 'from rdkit import Chem\n'), ((23031, 23070), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['mol_used_to_idx_sites'], {}), '(mol_used_to_idx_sites)\n', (23047, 23070), False, 'from rdkit import Chem\n'), ((26526, 26558), 'rdkit.Chem.MolFromSmarts', 'Chem.MolFromSmarts', (["sub['smart']"], {}), "(sub['smart'])\n", (26544, 26558), False, 'from rdkit import Chem\n'), ((33096, 33114), 'copy.deepcopy', 'copy.deepcopy', (['mol'], {}), '(mol)\n', (33109, 33114), False, 'import copy\n'), ((22507, 22526), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['m'], {}), '(m)\n', (22523, 22526), False, 'from rdkit import Chem\n'), ((23430, 23486), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['m'], {'isomericSmiles': '(True)', 'canonical': '(True)'}), '(m, isomericSmiles=True, canonical=True)\n', (23446, 23486), False, 'from rdkit import Chem\n'), ((33209, 33232), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['mol_copy'], {}), '(mol_copy)\n', (33222, 33232), False, 'from rdkit import Chem\n'), ((35574, 35600), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['mol_copy'], {}), '(mol_copy)\n', (35590, 35600), False, 'from rdkit import Chem\n'), ((10965, 11008), 'rdkit.Chem.AllChem.ReactionFromSmarts', 'AllChem.ReactionFromSmarts', (['current_rxn_str'], {}), '(current_rxn_str)\n', (10991, 11008), False, 'from rdkit.Chem import AllChem\n'), ((33509, 33535), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['mol_copy'], {}), '(mol_copy)\n', (33525, 33535), False, 'from rdkit import Chem\n')]
|
import threading
from queue import Queue
from blessed import Terminal
FPS = 60
class Game:
"""The top level class for the game"""
def __init__(self, manager_cls: type):
self.manager_cls = manager_cls
def run(self) -> None:
"""The run method for the game, handling the TUI"""
term = Terminal()
input_queue = Queue()
manager = self.manager_cls(input_queue, term)
manager_thread = threading.Thread(target=manager)
manager_thread.start()
with term.fullscreen(), term.raw(), term.hidden_cursor(), term.location():
while manager_thread.is_alive():
inp = term.inkey(1 / FPS)
if inp != '':
input_queue.put(inp)
print(term.normal + term.clear)
|
[
"threading.Thread",
"blessed.Terminal",
"queue.Queue"
] |
[((324, 334), 'blessed.Terminal', 'Terminal', ([], {}), '()\n', (332, 334), False, 'from blessed import Terminal\n'), ((357, 364), 'queue.Queue', 'Queue', ([], {}), '()\n', (362, 364), False, 'from queue import Queue\n'), ((445, 477), 'threading.Thread', 'threading.Thread', ([], {'target': 'manager'}), '(target=manager)\n', (461, 477), False, 'import threading\n')]
|
import logging
from main.fileextractors.compressedfile import get_compressed_file
from main.utilities.fileutils import dir_path
from main.utilities.subtitlesadjuster import ArchiveAdjuster
class FileExtractor:
def __init__(self, subname, movfile):
self.sn, self.mn = subname, movfile
self.subzip = get_compressed_file(self.sn)
self.log = logging.getLogger(__name__)
def run(self):
if self.subzip:
return self._extractfile() and self._adjust_subs()
return False
def _adjust_subs(self):
return ArchiveAdjuster(self.subzip, self.sn, self.mn).adjust()
def _extractfile(self):
self.log.info("Start extracting %s to: %s", self.sn, dir_path(self.mn))
extracted = self._extract_subtitles_to_movie_dir()
self.log.info("End extracting %s to: %s - with result %s", self.sn, dir_path(self.mn), repr(extracted))
return extracted
def _extract_subtitles_to_movie_dir(self):
extracted = False
try:
self.subzip.accessor.extractall(dir_path(self.mn))
extracted = True
except Exception as e:
self.log.exception("Failed to extract: %s", e)
return extracted
|
[
"logging.getLogger",
"main.fileextractors.compressedfile.get_compressed_file",
"main.utilities.subtitlesadjuster.ArchiveAdjuster",
"main.utilities.fileutils.dir_path"
] |
[((320, 348), 'main.fileextractors.compressedfile.get_compressed_file', 'get_compressed_file', (['self.sn'], {}), '(self.sn)\n', (339, 348), False, 'from main.fileextractors.compressedfile import get_compressed_file\n'), ((368, 395), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (385, 395), False, 'import logging\n'), ((718, 735), 'main.utilities.fileutils.dir_path', 'dir_path', (['self.mn'], {}), '(self.mn)\n', (726, 735), False, 'from main.utilities.fileutils import dir_path\n'), ((872, 889), 'main.utilities.fileutils.dir_path', 'dir_path', (['self.mn'], {}), '(self.mn)\n', (880, 889), False, 'from main.utilities.fileutils import dir_path\n'), ((572, 618), 'main.utilities.subtitlesadjuster.ArchiveAdjuster', 'ArchiveAdjuster', (['self.subzip', 'self.sn', 'self.mn'], {}), '(self.subzip, self.sn, self.mn)\n', (587, 618), False, 'from main.utilities.subtitlesadjuster import ArchiveAdjuster\n'), ((1064, 1081), 'main.utilities.fileutils.dir_path', 'dir_path', (['self.mn'], {}), '(self.mn)\n', (1072, 1081), False, 'from main.utilities.fileutils import dir_path\n')]
|
import curses
from get_json import get_json
def body(screen):
div = curses.newwin(curses.LINES - 2, curses.COLS, 1, 0)
div.box() # draw border around container window
# use a sub-window so we don't clobber the the container window's border.
txt = div.subwin(curses.LINES - 5, curses.COLS - 4, 2, 2)
# update internal window data structures
screen.noutrefresh()
div.noutrefresh()
# redraw the screen
curses.doupdate()
return div, txt
|
[
"curses.doupdate",
"curses.newwin"
] |
[((74, 124), 'curses.newwin', 'curses.newwin', (['(curses.LINES - 2)', 'curses.COLS', '(1)', '(0)'], {}), '(curses.LINES - 2, curses.COLS, 1, 0)\n', (87, 124), False, 'import curses\n'), ((438, 455), 'curses.doupdate', 'curses.doupdate', ([], {}), '()\n', (453, 455), False, 'import curses\n')]
|
import sys, os, seaborn as sns, rasterio, pandas as pd
import numpy as np
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config.definitions import ROOT_DIR, ancillary_path, city,year
attr_value ="totalpop"
gtP = ROOT_DIR + "/Evaluation/{0}_groundTruth/{2}_{0}_{1}.tif".format(city,attr_value,year)
srcGT= rasterio.open(gtP)
popGT = srcGT.read(1)
print(popGT.min(),popGT.max(), popGT.mean())
#prP = ROOT_DIR + "/Evaluation/{0}/apcatbr/div_{0}_dissever01WIESMN_500_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value)
def scatterplot(prP):
cp = "C:/Users/NM12LQ/OneDrive - Aalborg Universitet/PopNetV2_backup/data_prep/ams_ProjectData/temp_tif/ams_CLC_2012_2018Reclas3.tif"
srcC= rasterio.open(cp)
corine = srcC.read(1)
name = prP.split(".tif")[0].split("/")[-1]
print(name)
gtP = ROOT_DIR + "/Evaluation/{0}_groundTruth/{2}_{0}_{1}.tif".format(city,attr_value,year)
srcGT= rasterio.open(gtP)
popGT = srcGT.read(1)
print(popGT.min(),popGT.max(), popGT.mean())
srcPR= rasterio.open(prP)
popPR = srcPR.read(1)
popPR[(np.where(popPR <= -9999))] = 0
print(popPR.min(),popPR.max(), popPR.mean())
cr=corine.flatten()
x=popGT.flatten()
y=popPR.flatten()
df = pd.DataFrame(data={"gt": x, "predictions":y, "cr":cr})
plt.figure(figsize=(20,20))
g= sns.lmplot(data=df, x="gt", y="predictions", hue="cr", palette=["#0d2dc1","#ff9c1c","#71b951","#24f33d","#90308f", "#a8a8a8"],ci = None, order=2, scatter_kws={"s":0.5, "alpha": 0.5}, line_kws={"lw":2, "alpha": 0.5}, legend=False)
plt.legend(title= "Land Cover", labels= ['Water','Urban Fabric', 'Agriculture', 'Green Spaces','Industry','Transportation' ], loc='lower right', fontsize=5)
plt.title('{0}'.format( name), fontsize=11)
# Set x-axis label
plt.xlabel('Ground Truth (persons)', fontsize=11)
# Set y-axis label
plt.ylabel('Predictions (persons)', fontsize=11)
#total pop
#plt.xscale('log')
#plt.yscale('log')
#mobile Adults
#plt.xlim((0,200))
#plt.ylim((-100,500))pl
plt.axis('square')
plt.xlim((0,400))
plt.ylim((0,350))
plt.tight_layout()
#plt.show()
plt.savefig(ROOT_DIR + "/Evaluation/{0}/ScatterPlots/SP4_{2}.png".format(city,attr_value, name),format='png',dpi=300)
evalFiles = [#gtP,
#ROOT_DIR + "/Evaluation/{0}/aprf/dissever00/{0}_dissever00WIESMN_2018_ams_Dasy_aprf_p[1]_12AIL12_1IL_it10_{1}.tif".format(city,attr_value),
#ROOT_DIR + "/Evaluation/{0}/aprf/dissever01/{0}_dissever01WIESMN_100_2018_ams_DasyA_aprf_p[1]_12AIL12_13IL_it10_{1}.tif".format(city,attr_value),
#ROOT_DIR + "/Evaluation/{0}/apcatbr/{0}_dissever01WIESMN_100_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
#ROOT_DIR + "/Evaluation/{0}/apcatbr/{0}_dissever01WIESMN_250_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/{0}_dissever01WIESMN_500_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
]
evalFilesMAEbp = [ROOT_DIR + "/Evaluation/{0}/Pycno/mae_{0}_{2}_{0}_{1}_pycno.tif".format(city,attr_value,year),
ROOT_DIR + "/Evaluation/{0}/Dasy/mae_{0}_{2}_{0}_{1}_dasyWIESMN.tif".format(city,attr_value,year),
ROOT_DIR + "/Evaluation/{0}/aprf/dissever00/mae_{0}_dissever00WIESMN_2018_ams_Dasy_aprf_p[1]_12AIL12_1IL_it10_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/aprf/dissever01/mae_{0}_dissever01WIESMN_100_2018_ams_DasyA_aprf_p[1]_12AIL12_13IL_it10_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/mae_{0}_dissever01WIESMN_100_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/mae_{0}_dissever01WIESMN_250_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/mae_{0}_dissever01WIESMN_500_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/mae_{0}_dissever01WIESMN_250_2018_ams_DasyA_apcatbr_p[1]_3AIL5_12IL_it10_ag_{1}.tif".format(city,attr_value)]
evalFilesPEbp = [ROOT_DIR + "/Evaluation/{0}/Pycno/div_{0}_{2}_{0}_{1}_pycno.tif".format(city,attr_value,year),
ROOT_DIR + "/Evaluation/{0}/Dasy/div_{0}_{2}_{0}_{1}_dasyWIESMN.tif".format(city,attr_value,year),
ROOT_DIR + "/Evaluation/{0}/aprf/dissever00/div_{0}_dissever00WIESMN_2018_ams_Dasy_aprf_p[1]_12AIL12_1IL_it10_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/aprf/dissever01/div_{0}_dissever01WIESMN_100_2018_ams_DasyA_aprf_p[1]_12AIL12_13IL_it10_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/div_{0}_dissever01WIESMN_100_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/div_{0}_dissever01WIESMN_250_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/div_{0}_dissever01WIESMN_500_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value)]
for i in evalFiles:
scatterplot(i)
|
[
"seaborn.lmplot",
"matplotlib.pyplot.ylabel",
"numpy.where",
"rasterio.open",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"os.path.abspath",
"matplotlib.pyplot.legend"
] |
[((378, 396), 'rasterio.open', 'rasterio.open', (['gtP'], {}), '(gtP)\n', (391, 396), False, 'import sys, os, seaborn as sns, rasterio, pandas as pd\n'), ((789, 806), 'rasterio.open', 'rasterio.open', (['cp'], {}), '(cp)\n', (802, 806), False, 'import sys, os, seaborn as sns, rasterio, pandas as pd\n'), ((1003, 1021), 'rasterio.open', 'rasterio.open', (['gtP'], {}), '(gtP)\n', (1016, 1021), False, 'import sys, os, seaborn as sns, rasterio, pandas as pd\n'), ((1108, 1126), 'rasterio.open', 'rasterio.open', (['prP'], {}), '(prP)\n', (1121, 1126), False, 'import sys, os, seaborn as sns, rasterio, pandas as pd\n'), ((1322, 1378), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'gt': x, 'predictions': y, 'cr': cr}"}), "(data={'gt': x, 'predictions': y, 'cr': cr})\n", (1334, 1378), True, 'import sys, os, seaborn as sns, rasterio, pandas as pd\n'), ((1382, 1410), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (1392, 1410), True, 'import matplotlib.pyplot as plt\n'), ((1418, 1666), 'seaborn.lmplot', 'sns.lmplot', ([], {'data': 'df', 'x': '"""gt"""', 'y': '"""predictions"""', 'hue': '"""cr"""', 'palette': "['#0d2dc1', '#ff9c1c', '#71b951', '#24f33d', '#90308f', '#a8a8a8']", 'ci': 'None', 'order': '(2)', 'scatter_kws': "{'s': 0.5, 'alpha': 0.5}", 'line_kws': "{'lw': 2, 'alpha': 0.5}", 'legend': '(False)'}), "(data=df, x='gt', y='predictions', hue='cr', palette=['#0d2dc1',\n '#ff9c1c', '#71b951', '#24f33d', '#90308f', '#a8a8a8'], ci=None, order=\n 2, scatter_kws={'s': 0.5, 'alpha': 0.5}, line_kws={'lw': 2, 'alpha': \n 0.5}, legend=False)\n", (1428, 1666), True, 'import sys, os, seaborn as sns, rasterio, pandas as pd\n'), ((1653, 1818), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'title': '"""Land Cover"""', 'labels': "['Water', 'Urban Fabric', 'Agriculture', 'Green Spaces', 'Industry',\n 'Transportation']", 'loc': '"""lower right"""', 'fontsize': '(5)'}), "(title='Land Cover', labels=['Water', 'Urban Fabric',\n 'Agriculture', 'Green Spaces', 'Industry', 'Transportation'], loc=\n 'lower right', fontsize=5)\n", (1663, 1818), True, 'import matplotlib.pyplot as plt\n'), ((1885, 1934), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ground Truth (persons)"""'], {'fontsize': '(11)'}), "('Ground Truth (persons)', fontsize=11)\n", (1895, 1934), True, 'import matplotlib.pyplot as plt\n'), ((1962, 2010), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Predictions (persons)"""'], {'fontsize': '(11)'}), "('Predictions (persons)', fontsize=11)\n", (1972, 2010), True, 'import matplotlib.pyplot as plt\n'), ((2151, 2169), 'matplotlib.pyplot.axis', 'plt.axis', (['"""square"""'], {}), "('square')\n", (2159, 2169), True, 'import matplotlib.pyplot as plt\n'), ((2174, 2192), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, 400)'], {}), '((0, 400))\n', (2182, 2192), True, 'import matplotlib.pyplot as plt\n'), ((2196, 2214), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 350)'], {}), '((0, 350))\n', (2204, 2214), True, 'import matplotlib.pyplot as plt\n'), ((2218, 2236), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2234, 2236), True, 'import matplotlib.pyplot as plt\n'), ((1164, 1188), 'numpy.where', 'np.where', (['(popPR <= -9999)'], {}), '(popPR <= -9999)\n', (1172, 1188), True, 'import numpy as np\n'), ((154, 179), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (169, 179), False, 'import sys, os, seaborn as sns, rasterio, pandas as pd\n')]
|
import numpy as np
import pytest
import nengo
from nengo.builder import Builder
from nengo.builder.operator import Reset, Copy
from nengo.builder.signal import Signal
from nengo.dists import UniformHypersphere
from nengo.exceptions import ValidationError
from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja
from nengo.processes import WhiteSignal
from nengo.synapses import Alpha, Lowpass
def best_weights(weight_data):
return np.argmax(np.sum(np.var(weight_data, axis=0), axis=0))
def _test_pes(
Simulator,
nl,
plt,
seed,
allclose,
pre_neurons=False,
post_neurons=False,
weight_solver=False,
vin=np.array([0.5, -0.5]),
vout=None,
n=200,
function=None,
transform=np.array(1.0),
rate=1e-3,
):
vout = np.array(vin) if vout is None else vout
with nengo.Network(seed=seed) as model:
model.config[nengo.Ensemble].neuron_type = nl()
stim = nengo.Node(output=vin)
target = nengo.Node(output=vout)
pre = nengo.Ensemble(n, dimensions=stim.size_out)
post = nengo.Ensemble(n, dimensions=stim.size_out)
error = nengo.Ensemble(n, dimensions=target.size_out)
nengo.Connection(stim, pre)
postslice = post[: target.size_out] if target.size_out < stim.size_out else post
pre = pre.neurons if pre_neurons else pre
post = post.neurons if post_neurons else postslice
conn = nengo.Connection(
pre,
post,
function=function,
transform=transform,
learning_rule_type=PES(rate),
)
if weight_solver:
conn.solver = nengo.solvers.LstsqL2(weights=True)
nengo.Connection(target, error, transform=-1)
nengo.Connection(postslice, error)
nengo.Connection(error, conn.learning_rule)
post_p = nengo.Probe(postslice, synapse=0.03)
error_p = nengo.Probe(error, synapse=0.03)
weights_p = nengo.Probe(conn, "weights", sample_every=0.01)
with Simulator(model) as sim:
sim.run(0.5)
t = sim.trange()
weights = sim.data[weights_p]
plt.subplot(211)
plt.plot(t, sim.data[post_p])
plt.ylabel("Post decoded value")
plt.subplot(212)
plt.plot(t, sim.data[error_p])
plt.ylabel("Error decoded value")
plt.xlabel("Time (s)")
tend = t > 0.4
assert allclose(sim.data[post_p][tend], vout, atol=0.05)
assert allclose(sim.data[error_p][tend], 0, atol=0.05)
assert not allclose(weights[0], weights[-1], atol=1e-5, record_rmse=False)
def test_pes_ens_ens(Simulator, nl_nodirect, plt, seed, allclose):
function = lambda x: [x[1], x[0]]
_test_pes(Simulator, nl_nodirect, plt, seed, allclose, function=function)
def test_pes_weight_solver(Simulator, plt, seed, allclose):
function = lambda x: [x[1], x[0]]
_test_pes(
Simulator, nengo.LIF, plt, seed, allclose, function=function, weight_solver=True
)
def test_pes_ens_slice(Simulator, plt, seed, allclose):
vin = [0.5, -0.5]
vout = [vin[0] ** 2 + vin[1] ** 2]
function = lambda x: [x[0] - x[1]]
_test_pes(
Simulator, nengo.LIF, plt, seed, allclose, vin=vin, vout=vout, function=function
)
def test_pes_neuron_neuron(Simulator, plt, seed, rng, allclose):
n = 200
initial_weights = rng.uniform(high=4e-4, size=(n, n))
_test_pes(
Simulator,
nengo.LIF,
plt,
seed,
allclose,
pre_neurons=True,
post_neurons=True,
n=n,
transform=initial_weights,
rate=7e-4,
)
def test_pes_neuron_ens(Simulator, plt, seed, rng, allclose):
n = 200
initial_weights = rng.uniform(high=1e-4, size=(2, n))
_test_pes(
Simulator,
nengo.LIF,
plt,
seed,
allclose,
pre_neurons=True,
post_neurons=False,
n=n,
transform=initial_weights,
)
def test_pes_transform(Simulator, seed, allclose):
"""Test behaviour of PES when function and transform both defined."""
n = 200
# error must be with respect to transformed vector (conn.size_out)
T = np.asarray([[0.5], [-0.5]]) # transform to output
m = nengo.Network(seed=seed)
with m:
u = nengo.Node(output=[1])
a = nengo.Ensemble(n, dimensions=1)
b = nengo.Node(size_in=2)
e = nengo.Node(size_in=1)
nengo.Connection(u, a)
learned_conn = nengo.Connection(
a,
b,
function=lambda x: [0],
transform=T,
learning_rule_type=nengo.PES(learning_rate=1e-3),
)
assert T.shape[0] == learned_conn.size_out
assert T.shape[1] == learned_conn.size_mid
nengo.Connection(b[0], e, synapse=None)
nengo.Connection(nengo.Node(output=-1), e)
nengo.Connection(e, learned_conn.learning_rule, transform=T, synapse=None)
p_b = nengo.Probe(b, synapse=0.05)
with Simulator(m) as sim:
sim.run(1.0)
tend = sim.trange() > 0.7
assert allclose(sim.data[p_b][tend], [1, -1], atol=1e-2)
def test_pes_multidim_error(Simulator, seed):
"""Test that PES works on error connections mapping from N to 1 dims.
Note that the transform is applied before the learning rule, so the error
signal should be 1-dimensional.
"""
with nengo.Network(seed=seed) as net:
err = nengo.Node(output=[0])
ens1 = nengo.Ensemble(20, 3)
ens2 = nengo.Ensemble(10, 1)
# Case 1: ens -> ens, weights=False
conn = nengo.Connection(
ens1,
ens2,
transform=np.ones((1, 3)),
solver=nengo.solvers.LstsqL2(weights=False),
learning_rule_type={"pes": nengo.PES()},
)
nengo.Connection(err, conn.learning_rule["pes"])
# Case 2: ens -> ens, weights=True
conn = nengo.Connection(
ens1,
ens2,
transform=np.ones((1, 3)),
solver=nengo.solvers.LstsqL2(weights=True),
learning_rule_type={"pes": nengo.PES()},
)
nengo.Connection(err, conn.learning_rule["pes"])
# Case 3: neurons -> ens
conn = nengo.Connection(
ens1.neurons,
ens2,
transform=np.ones((1, ens1.n_neurons)),
learning_rule_type={"pes": nengo.PES()},
)
nengo.Connection(err, conn.learning_rule["pes"])
with Simulator(net) as sim:
sim.run(0.01)
@pytest.mark.parametrize("pre_synapse", [0, Lowpass(tau=0.05), Alpha(tau=0.005)])
def test_pes_synapse(Simulator, seed, pre_synapse, allclose):
rule = PES(pre_synapse=pre_synapse)
with nengo.Network(seed=seed) as model:
stim = nengo.Node(output=WhiteSignal(0.5, high=10))
x = nengo.Ensemble(100, 1)
nengo.Connection(stim, x, synapse=None)
conn = nengo.Connection(x, x, learning_rule_type=rule)
p_neurons = nengo.Probe(x.neurons, synapse=pre_synapse)
p_pes = nengo.Probe(conn.learning_rule, "activities")
with Simulator(model) as sim:
sim.run(0.5)
assert allclose(sim.data[p_neurons][1:, :], sim.data[p_pes][:-1, :])
@pytest.mark.parametrize("weights", [False, True])
def test_pes_recurrent_slice(Simulator, seed, weights, allclose):
"""Test that PES works on recurrent connections from N to 1 dims."""
with nengo.Network(seed=seed) as net:
err = nengo.Node(output=[-1])
stim = nengo.Node(output=[0, 0])
post = nengo.Ensemble(50, 2, radius=2)
nengo.Connection(stim, post)
conn = nengo.Connection(
post,
post[1],
function=lambda x: 0.0,
solver=nengo.solvers.LstsqL2(weights=weights),
learning_rule_type=nengo.PES(learning_rate=5e-4),
)
nengo.Connection(err, conn.learning_rule)
p = nengo.Probe(post, synapse=0.025)
with Simulator(net) as sim:
sim.run(0.2)
# Learning rule should drive second dimension high, but not first
assert allclose(sim.data[p][-10:, 0], 0, atol=0.2)
assert np.all(sim.data[p][-10:, 1] > 0.8)
def test_pes_cycle(Simulator):
"""Test that PES works when connection output feeds back into error."""
with nengo.Network() as net:
a = nengo.Ensemble(10, 1)
b = nengo.Node(size_in=1)
c = nengo.Connection(a, b, synapse=None, learning_rule_type=nengo.PES())
nengo.Connection(b, c.learning_rule, synapse=None)
with Simulator(net):
# just checking that this builds without error
pass
@pytest.mark.parametrize(
"rule_type, solver",
[
(BCM(learning_rate=1e-8), False),
(Oja(learning_rate=1e-5), False),
([Oja(learning_rate=1e-5), BCM(learning_rate=1e-8)], False),
([Oja(learning_rate=1e-5), BCM(learning_rate=1e-8)], True),
],
)
def test_unsupervised(Simulator, rule_type, solver, seed, rng, plt, allclose):
n = 200
m = nengo.Network(seed=seed)
with m:
u = nengo.Node(WhiteSignal(0.5, high=10), size_out=2)
a = nengo.Ensemble(n, dimensions=2)
b = nengo.Ensemble(n + 1, dimensions=2)
nengo.Connection(u, a)
if solver:
conn = nengo.Connection(a, b, solver=nengo.solvers.LstsqL2(weights=True))
else:
initial_weights = rng.uniform(high=1e-3, size=(b.n_neurons, a.n_neurons))
conn = nengo.Connection(a.neurons, b.neurons, transform=initial_weights)
conn.learning_rule_type = rule_type
inp_p = nengo.Probe(u)
weights_p = nengo.Probe(conn, "weights", sample_every=0.01)
ap = nengo.Probe(a, synapse=0.03)
up = nengo.Probe(b, synapse=0.03)
with Simulator(m, seed=seed + 1) as sim:
sim.run(0.5)
t = sim.trange()
plt.subplot(2, 1, 1)
plt.plot(t, sim.data[inp_p], label="Input")
plt.plot(t, sim.data[ap], label="Pre")
plt.plot(t, sim.data[up], label="Post")
plt.legend(loc="best", fontsize="x-small")
plt.subplot(2, 1, 2)
best_ix = best_weights(sim.data[weights_p])
plt.plot(sim.trange(sample_every=0.01), sim.data[weights_p][..., best_ix])
plt.xlabel("Time (s)")
plt.ylabel("Weights")
assert not allclose(
sim.data[weights_p][0], sim.data[weights_p][-1], record_rmse=False
)
def learning_net(learning_rule=nengo.PES, net=None, rng=np.random):
net = nengo.Network() if net is None else net
with net:
if learning_rule is nengo.PES:
learning_rule_type = learning_rule(learning_rate=1e-5)
else:
learning_rule_type = learning_rule()
u = nengo.Node(output=1.0)
pre = nengo.Ensemble(10, dimensions=1)
post = nengo.Ensemble(10, dimensions=1)
initial_weights = rng.uniform(high=1e-3, size=(pre.n_neurons, post.n_neurons))
conn = nengo.Connection(
pre.neurons,
post.neurons,
transform=initial_weights,
learning_rule_type=learning_rule_type,
)
if learning_rule is nengo.PES:
err = nengo.Ensemble(10, dimensions=1)
nengo.Connection(u, err)
nengo.Connection(err, conn.learning_rule)
net.activity_p = nengo.Probe(pre.neurons, synapse=0.01)
net.weights_p = nengo.Probe(conn, "weights", synapse=None, sample_every=0.01)
return net
@pytest.mark.parametrize("learning_rule", [nengo.PES, nengo.BCM, nengo.Oja])
def test_dt_dependence(Simulator, plt, learning_rule, seed, rng, allclose):
"""Learning rules should work the same regardless of dt."""
m = learning_net(learning_rule, nengo.Network(seed=seed), rng)
trans_data = []
# Using dts greater near tau_ref (0.002 by default) causes learning to
# differ due to lowered presynaptic firing rate
dts = (0.0001, 0.001)
colors = ("b", "g", "r")
ax1 = plt.subplot(2, 1, 1)
ax2 = plt.subplot(2, 1, 2)
for c, dt in zip(colors, dts):
with Simulator(m, dt=dt) as sim:
sim.run(0.1)
trans_data.append(sim.data[m.weights_p])
best_ix = best_weights(sim.data[m.weights_p])
ax1.plot(
sim.trange(sample_every=0.01), sim.data[m.weights_p][..., best_ix], c=c
)
ax2.plot(sim.trange(), sim.data[m.activity_p], c=c)
ax1.set_xlim(right=sim.trange()[-1])
ax1.set_ylabel("Connection weight")
ax2.set_xlim(right=sim.trange()[-1])
ax2.set_ylabel("Presynaptic activity")
assert allclose(trans_data[0], trans_data[1], atol=3e-3)
assert not allclose(
sim.data[m.weights_p][0], sim.data[m.weights_p][-1], record_rmse=False
)
@pytest.mark.parametrize("learning_rule", [nengo.PES, nengo.BCM, nengo.Oja])
def test_reset(Simulator, learning_rule, plt, seed, rng, allclose):
"""Make sure resetting learning rules resets all state."""
m = learning_net(learning_rule, nengo.Network(seed=seed), rng)
with Simulator(m) as sim:
sim.run(0.1)
sim.run(0.2)
first_t = sim.trange()
first_t_trans = sim.trange(sample_every=0.01)
first_activity_p = np.array(sim.data[m.activity_p], copy=True)
first_weights_p = np.array(sim.data[m.weights_p], copy=True)
sim.reset()
sim.run(0.3)
plt.subplot(2, 1, 1)
plt.ylabel("Neural activity")
plt.plot(first_t, first_activity_p, c="b")
plt.plot(sim.trange(), sim.data[m.activity_p], c="g")
plt.subplot(2, 1, 2)
plt.ylabel("Connection weight")
best_ix = best_weights(first_weights_p)
plt.plot(first_t_trans, first_weights_p[..., best_ix], c="b")
plt.plot(sim.trange(sample_every=0.01), sim.data[m.weights_p][..., best_ix], c="g")
assert allclose(sim.trange(), first_t)
assert allclose(sim.trange(sample_every=0.01), first_t_trans)
assert allclose(sim.data[m.activity_p], first_activity_p)
assert allclose(sim.data[m.weights_p], first_weights_p)
def test_learningruletypeparam():
"""LearningRuleTypeParam must be one or many learning rules."""
class Test:
lrp = LearningRuleTypeParam("lrp", default=None)
inst = Test()
assert inst.lrp is None
inst.lrp = Oja()
assert isinstance(inst.lrp, Oja)
inst.lrp = [Oja(), Oja()]
for lr in inst.lrp:
assert isinstance(lr, Oja)
# Non-LR no good
with pytest.raises(ValueError):
inst.lrp = "a"
# All elements in list must be LR
with pytest.raises(ValueError):
inst.lrp = [Oja(), "a", Oja()]
def test_learningrule_attr(seed):
"""Test learning_rule attribute on Connection"""
def check_rule(rule, conn, rule_type):
assert rule.connection is conn and rule.learning_rule_type is rule_type
with nengo.Network(seed=seed):
a, b, e = [nengo.Ensemble(10, 2) for i in range(3)]
T = np.ones((10, 10))
r1 = PES()
c1 = nengo.Connection(a.neurons, b.neurons, learning_rule_type=r1)
check_rule(c1.learning_rule, c1, r1)
r2 = [PES(), BCM()]
c2 = nengo.Connection(a.neurons, b.neurons, learning_rule_type=r2, transform=T)
assert isinstance(c2.learning_rule, list)
for rule, rule_type in zip(c2.learning_rule, r2):
check_rule(rule, c2, rule_type)
r3 = dict(oja=Oja(), bcm=BCM())
c3 = nengo.Connection(a.neurons, b.neurons, learning_rule_type=r3, transform=T)
assert isinstance(c3.learning_rule, dict)
assert set(c3.learning_rule) == set(r3) # assert same keys
for key in r3:
check_rule(c3.learning_rule[key], c3, r3[key])
def test_voja_encoders(Simulator, nl_nodirect, rng, seed, allclose):
"""Tests that voja changes active encoders to the input."""
n = 200
learned_vector = np.asarray([0.3, -0.4, 0.6])
learned_vector /= np.linalg.norm(learned_vector)
n_change = n // 2 # modify first half of the encoders
# Set the first half to always fire with random encoders, and the
# remainder to never fire due to their encoder's dot product with the input
intercepts = np.asarray([-1] * n_change + [0.99] * (n - n_change))
rand_encoders = UniformHypersphere(surface=True).sample(
n_change, len(learned_vector), rng=rng
)
encoders = np.append(rand_encoders, [-learned_vector] * (n - n_change), axis=0)
m = nengo.Network(seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = nl_nodirect()
u = nengo.Node(output=learned_vector)
x = nengo.Ensemble(
n,
dimensions=len(learned_vector),
intercepts=intercepts,
encoders=encoders,
max_rates=nengo.dists.Uniform(300.0, 400.0),
radius=2.0,
) # to test encoder scaling
conn = nengo.Connection(
u, x, synapse=None, learning_rule_type=Voja(learning_rate=1e-1)
)
p_enc = nengo.Probe(conn.learning_rule, "scaled_encoders")
p_enc_ens = nengo.Probe(x, "scaled_encoders")
with Simulator(m) as sim:
sim.run(1.0)
t = sim.trange()
tend = t > 0.5
# Voja's rule relies on knowing exactly how the encoders were scaled
# during the build process, because it modifies the scaled_encoders signal
# proportional to this factor. Therefore, we should check that its
# assumption actually holds.
encoder_scale = (sim.data[x].gain / x.radius)[:, np.newaxis]
assert allclose(sim.data[x].encoders, sim.data[x].scaled_encoders / encoder_scale)
# Check that the last half kept the same encoders throughout the simulation
assert allclose(sim.data[p_enc][0, n_change:], sim.data[p_enc][:, n_change:])
# and that they are also equal to their originally assigned value
assert allclose(
sim.data[p_enc][0, n_change:] / encoder_scale[n_change:], -learned_vector
)
# Check that the first half converged to the input
assert allclose(
sim.data[p_enc][tend, :n_change] / encoder_scale[:n_change],
learned_vector,
atol=0.01,
)
# Check that encoders probed from ensemble equal encoders probed from Voja
assert allclose(sim.data[p_enc], sim.data[p_enc_ens])
def test_voja_modulate(Simulator, nl_nodirect, seed, allclose):
"""Tests that voja's rule can be modulated on/off."""
n = 200
learned_vector = np.asarray([0.5])
def control_signal(t):
"""Modulates the learning on/off."""
return 0 if t < 0.5 else -1
m = nengo.Network(seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = nl_nodirect()
control = nengo.Node(output=control_signal)
u = nengo.Node(output=learned_vector)
x = nengo.Ensemble(n, dimensions=len(learned_vector))
conn = nengo.Connection(
u, x, synapse=None, learning_rule_type=Voja(post_synapse=None)
)
nengo.Connection(control, conn.learning_rule, synapse=None)
p_enc = nengo.Probe(conn.learning_rule, "scaled_encoders")
with Simulator(m) as sim:
sim.run(1.0)
tend = sim.trange() > 0.5
# Check that encoders stop changing after 0.5s
assert allclose(sim.data[p_enc][tend], sim.data[p_enc][-1])
# Check that encoders changed during first 0.5s
i = np.where(tend)[0][0] # first time point after changeover
assert not allclose(sim.data[p_enc][0], sim.data[p_enc][i], record_rmse=False)
def test_frozen():
"""Test attributes inherited from FrozenObject"""
a = PES(learning_rate=2e-3, pre_synapse=4e-3)
b = PES(learning_rate=2e-3, pre_synapse=4e-3)
c = PES(learning_rate=2e-3, pre_synapse=5e-3)
assert hash(a) == hash(a)
assert hash(b) == hash(b)
assert hash(c) == hash(c)
assert a == b
assert hash(a) == hash(b)
assert a != c
assert hash(a) != hash(c) # not guaranteed, but highly likely
assert b != c
assert hash(b) != hash(c) # not guaranteed, but highly likely
with pytest.raises((ValueError, RuntimeError)):
a.learning_rate = 1e-1
def test_pes_direct_errors():
"""Test that applying a learning rule to a direct ensemble errors."""
with nengo.Network():
pre = nengo.Ensemble(10, 1, neuron_type=nengo.Direct())
post = nengo.Ensemble(10, 1)
conn = nengo.Connection(pre, post)
with pytest.raises(ValidationError):
conn.learning_rule_type = nengo.PES()
def test_custom_type(Simulator, allclose):
"""Test with custom learning rule type.
A custom learning type may have ``size_in`` not equal to 0, 1, or None.
"""
class TestRule(nengo.learning_rules.LearningRuleType):
modifies = "decoders"
def __init__(self):
super().__init__(1.0, size_in=3)
@Builder.register(TestRule)
def build_test_rule(model, test_rule, rule):
error = Signal(np.zeros(rule.connection.size_in))
model.add_op(Reset(error))
model.sig[rule]["in"] = error[: rule.size_in]
model.add_op(Copy(error, model.sig[rule]["delta"]))
with nengo.Network() as net:
a = nengo.Ensemble(10, 1)
b = nengo.Ensemble(10, 1)
conn = nengo.Connection(
a.neurons, b, transform=np.zeros((1, 10)), learning_rule_type=TestRule()
)
err = nengo.Node([1, 2, 3])
nengo.Connection(err, conn.learning_rule, synapse=None)
p = nengo.Probe(conn, "weights")
with Simulator(net) as sim:
sim.run(sim.dt * 5)
assert allclose(sim.data[p][:, 0, :3], np.outer(np.arange(1, 6), np.arange(1, 4)))
assert allclose(sim.data[p][:, :, 3:], 0)
@pytest.mark.parametrize("LearningRule", (nengo.PES, nengo.BCM, nengo.Voja, nengo.Oja))
def test_tau_deprecation(LearningRule):
params = [
("pre_tau", "pre_synapse"),
("post_tau", "post_synapse"),
("theta_tau", "theta_synapse"),
]
kwargs = {}
for i, (p0, p1) in enumerate(params):
if hasattr(LearningRule, p0):
kwargs[p0] = i
with pytest.warns(DeprecationWarning):
l_rule = LearningRule(learning_rate=0, **kwargs)
for i, (p0, p1) in enumerate(params):
if hasattr(LearningRule, p0):
assert getattr(l_rule, p0) == i
assert getattr(l_rule, p1) == Lowpass(i)
def test_slicing(Simulator, seed, allclose):
with nengo.Network(seed=seed) as model:
a = nengo.Ensemble(50, 1)
b = nengo.Ensemble(30, 2)
conn = nengo.Connection(
a, b, learning_rule_type=PES(), function=lambda x: (0, 0)
)
nengo.Connection(nengo.Node(1.0), a)
err1 = nengo.Node(lambda t, x: x - 0.75, size_in=1)
nengo.Connection(b[0], err1)
nengo.Connection(err1, conn.learning_rule[0])
err2 = nengo.Node(lambda t, x: x + 0.5, size_in=1)
nengo.Connection(b[1], err2)
nengo.Connection(err2, conn.learning_rule[1])
p = nengo.Probe(b, synapse=0.03)
with Simulator(model) as sim:
sim.run(1.0)
t = sim.trange() > 0.8
assert allclose(sim.data[p][t, 0], 0.75, atol=0.15)
assert allclose(sim.data[p][t, 1], -0.5, atol=0.15)
|
[
"nengo.learning_rules.BCM",
"nengo.dists.Uniform",
"nengo.processes.WhiteSignal",
"nengo.Node",
"numpy.array",
"numpy.var",
"numpy.linalg.norm",
"numpy.arange",
"nengo.builder.operator.Copy",
"nengo.builder.operator.Reset",
"nengo.Ensemble",
"numpy.where",
"numpy.asarray",
"nengo.learning_rules.Voja",
"numpy.ones",
"nengo.dists.UniformHypersphere",
"nengo.synapses.Alpha",
"pytest.raises",
"nengo.PES",
"nengo.synapses.Lowpass",
"nengo.learning_rules.LearningRuleTypeParam",
"nengo.Network",
"nengo.Probe",
"nengo.solvers.LstsqL2",
"nengo.learning_rules.Oja",
"numpy.append",
"pytest.mark.parametrize",
"nengo.learning_rules.PES",
"numpy.zeros",
"nengo.builder.Builder.register",
"nengo.Connection",
"nengo.Direct",
"numpy.all",
"pytest.warns"
] |
[((7196, 7245), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weights"""', '[False, True]'], {}), "('weights', [False, True])\n", (7219, 7245), False, 'import pytest\n'), ((11392, 11467), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""learning_rule"""', '[nengo.PES, nengo.BCM, nengo.Oja]'], {}), "('learning_rule', [nengo.PES, nengo.BCM, nengo.Oja])\n", (11415, 11467), False, 'import pytest\n'), ((12657, 12732), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""learning_rule"""', '[nengo.PES, nengo.BCM, nengo.Oja]'], {}), "('learning_rule', [nengo.PES, nengo.BCM, nengo.Oja])\n", (12680, 12732), False, 'import pytest\n'), ((21534, 21624), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""LearningRule"""', '(nengo.PES, nengo.BCM, nengo.Voja, nengo.Oja)'], {}), "('LearningRule', (nengo.PES, nengo.BCM, nengo.Voja,\n nengo.Oja))\n", (21557, 21624), False, 'import pytest\n'), ((666, 687), 'numpy.array', 'np.array', (['[0.5, -0.5]'], {}), '([0.5, -0.5])\n', (674, 687), True, 'import numpy as np\n'), ((748, 761), 'numpy.array', 'np.array', (['(1.0)'], {}), '(1.0)\n', (756, 761), True, 'import numpy as np\n'), ((4152, 4179), 'numpy.asarray', 'np.asarray', (['[[0.5], [-0.5]]'], {}), '([[0.5], [-0.5]])\n', (4162, 4179), True, 'import numpy as np\n'), ((4212, 4236), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (4225, 4236), False, 'import nengo\n'), ((6655, 6683), 'nengo.learning_rules.PES', 'PES', ([], {'pre_synapse': 'pre_synapse'}), '(pre_synapse=pre_synapse)\n', (6658, 6683), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((8118, 8152), 'numpy.all', 'np.all', (['(sim.data[p][-10:, 1] > 0.8)'], {}), '(sim.data[p][-10:, 1] > 0.8)\n', (8124, 8152), True, 'import numpy as np\n'), ((8987, 9011), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (9000, 9011), False, 'import nengo\n'), ((14168, 14173), 'nengo.learning_rules.Oja', 'Oja', ([], {}), '()\n', (14171, 14173), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((15738, 15766), 'numpy.asarray', 'np.asarray', (['[0.3, -0.4, 0.6]'], {}), '([0.3, -0.4, 0.6])\n', (15748, 15766), True, 'import numpy as np\n'), ((15789, 15819), 'numpy.linalg.norm', 'np.linalg.norm', (['learned_vector'], {}), '(learned_vector)\n', (15803, 15819), True, 'import numpy as np\n'), ((16047, 16100), 'numpy.asarray', 'np.asarray', (['([-1] * n_change + [0.99] * (n - n_change))'], {}), '([-1] * n_change + [0.99] * (n - n_change))\n', (16057, 16100), True, 'import numpy as np\n'), ((16230, 16298), 'numpy.append', 'np.append', (['rand_encoders', '([-learned_vector] * (n - n_change))'], {'axis': '(0)'}), '(rand_encoders, [-learned_vector] * (n - n_change), axis=0)\n', (16239, 16298), True, 'import numpy as np\n'), ((16308, 16332), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (16321, 16332), False, 'import nengo\n'), ((18296, 18313), 'numpy.asarray', 'np.asarray', (['[0.5]'], {}), '([0.5])\n', (18306, 18313), True, 'import numpy as np\n'), ((18432, 18456), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (18445, 18456), False, 'import nengo\n'), ((19428, 19471), 'nengo.learning_rules.PES', 'PES', ([], {'learning_rate': '(0.002)', 'pre_synapse': '(0.004)'}), '(learning_rate=0.002, pre_synapse=0.004)\n', (19431, 19471), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((19478, 19521), 'nengo.learning_rules.PES', 'PES', ([], {'learning_rate': '(0.002)', 'pre_synapse': '(0.004)'}), '(learning_rate=0.002, pre_synapse=0.004)\n', (19481, 19521), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((19528, 19571), 'nengo.learning_rules.PES', 'PES', ([], {'learning_rate': '(0.002)', 'pre_synapse': '(0.005)'}), '(learning_rate=0.002, pre_synapse=0.005)\n', (19531, 19571), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((20679, 20705), 'nengo.builder.Builder.register', 'Builder.register', (['TestRule'], {}), '(TestRule)\n', (20695, 20705), False, 'from nengo.builder import Builder\n'), ((792, 805), 'numpy.array', 'np.array', (['vin'], {}), '(vin)\n', (800, 805), True, 'import numpy as np\n'), ((842, 866), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (855, 866), False, 'import nengo\n'), ((949, 971), 'nengo.Node', 'nengo.Node', ([], {'output': 'vin'}), '(output=vin)\n', (959, 971), False, 'import nengo\n'), ((989, 1012), 'nengo.Node', 'nengo.Node', ([], {'output': 'vout'}), '(output=vout)\n', (999, 1012), False, 'import nengo\n'), ((1027, 1070), 'nengo.Ensemble', 'nengo.Ensemble', (['n'], {'dimensions': 'stim.size_out'}), '(n, dimensions=stim.size_out)\n', (1041, 1070), False, 'import nengo\n'), ((1086, 1129), 'nengo.Ensemble', 'nengo.Ensemble', (['n'], {'dimensions': 'stim.size_out'}), '(n, dimensions=stim.size_out)\n', (1100, 1129), False, 'import nengo\n'), ((1146, 1191), 'nengo.Ensemble', 'nengo.Ensemble', (['n'], {'dimensions': 'target.size_out'}), '(n, dimensions=target.size_out)\n', (1160, 1191), False, 'import nengo\n'), ((1201, 1228), 'nengo.Connection', 'nengo.Connection', (['stim', 'pre'], {}), '(stim, pre)\n', (1217, 1228), False, 'import nengo\n'), ((1710, 1755), 'nengo.Connection', 'nengo.Connection', (['target', 'error'], {'transform': '(-1)'}), '(target, error, transform=-1)\n', (1726, 1755), False, 'import nengo\n'), ((1764, 1798), 'nengo.Connection', 'nengo.Connection', (['postslice', 'error'], {}), '(postslice, error)\n', (1780, 1798), False, 'import nengo\n'), ((1807, 1850), 'nengo.Connection', 'nengo.Connection', (['error', 'conn.learning_rule'], {}), '(error, conn.learning_rule)\n', (1823, 1850), False, 'import nengo\n'), ((1869, 1905), 'nengo.Probe', 'nengo.Probe', (['postslice'], {'synapse': '(0.03)'}), '(postslice, synapse=0.03)\n', (1880, 1905), False, 'import nengo\n'), ((1924, 1956), 'nengo.Probe', 'nengo.Probe', (['error'], {'synapse': '(0.03)'}), '(error, synapse=0.03)\n', (1935, 1956), False, 'import nengo\n'), ((1978, 2025), 'nengo.Probe', 'nengo.Probe', (['conn', '"""weights"""'], {'sample_every': '(0.01)'}), "(conn, 'weights', sample_every=0.01)\n", (1989, 2025), False, 'import nengo\n'), ((4261, 4283), 'nengo.Node', 'nengo.Node', ([], {'output': '[1]'}), '(output=[1])\n', (4271, 4283), False, 'import nengo\n'), ((4296, 4327), 'nengo.Ensemble', 'nengo.Ensemble', (['n'], {'dimensions': '(1)'}), '(n, dimensions=1)\n', (4310, 4327), False, 'import nengo\n'), ((4340, 4361), 'nengo.Node', 'nengo.Node', ([], {'size_in': '(2)'}), '(size_in=2)\n', (4350, 4361), False, 'import nengo\n'), ((4374, 4395), 'nengo.Node', 'nengo.Node', ([], {'size_in': '(1)'}), '(size_in=1)\n', (4384, 4395), False, 'import nengo\n'), ((4405, 4427), 'nengo.Connection', 'nengo.Connection', (['u', 'a'], {}), '(u, a)\n', (4421, 4427), False, 'import nengo\n'), ((4743, 4782), 'nengo.Connection', 'nengo.Connection', (['b[0]', 'e'], {'synapse': 'None'}), '(b[0], e, synapse=None)\n', (4759, 4782), False, 'import nengo\n'), ((4842, 4916), 'nengo.Connection', 'nengo.Connection', (['e', 'learned_conn.learning_rule'], {'transform': 'T', 'synapse': 'None'}), '(e, learned_conn.learning_rule, transform=T, synapse=None)\n', (4858, 4916), False, 'import nengo\n'), ((4932, 4960), 'nengo.Probe', 'nengo.Probe', (['b'], {'synapse': '(0.05)'}), '(b, synapse=0.05)\n', (4943, 4960), False, 'import nengo\n'), ((5360, 5384), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (5373, 5384), False, 'import nengo\n'), ((5407, 5429), 'nengo.Node', 'nengo.Node', ([], {'output': '[0]'}), '(output=[0])\n', (5417, 5429), False, 'import nengo\n'), ((5445, 5466), 'nengo.Ensemble', 'nengo.Ensemble', (['(20)', '(3)'], {}), '(20, 3)\n', (5459, 5466), False, 'import nengo\n'), ((5482, 5503), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)', '(1)'], {}), '(10, 1)\n', (5496, 5503), False, 'import nengo\n'), ((5785, 5833), 'nengo.Connection', 'nengo.Connection', (['err', "conn.learning_rule['pes']"], {}), "(err, conn.learning_rule['pes'])\n", (5801, 5833), False, 'import nengo\n'), ((6112, 6160), 'nengo.Connection', 'nengo.Connection', (['err', "conn.learning_rule['pes']"], {}), "(err, conn.learning_rule['pes'])\n", (6128, 6160), False, 'import nengo\n'), ((6394, 6442), 'nengo.Connection', 'nengo.Connection', (['err', "conn.learning_rule['pes']"], {}), "(err, conn.learning_rule['pes'])\n", (6410, 6442), False, 'import nengo\n'), ((6694, 6718), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (6707, 6718), False, 'import nengo\n'), ((6801, 6823), 'nengo.Ensemble', 'nengo.Ensemble', (['(100)', '(1)'], {}), '(100, 1)\n', (6815, 6823), False, 'import nengo\n'), ((6833, 6872), 'nengo.Connection', 'nengo.Connection', (['stim', 'x'], {'synapse': 'None'}), '(stim, x, synapse=None)\n', (6849, 6872), False, 'import nengo\n'), ((6888, 6935), 'nengo.Connection', 'nengo.Connection', (['x', 'x'], {'learning_rule_type': 'rule'}), '(x, x, learning_rule_type=rule)\n', (6904, 6935), False, 'import nengo\n'), ((6957, 7000), 'nengo.Probe', 'nengo.Probe', (['x.neurons'], {'synapse': 'pre_synapse'}), '(x.neurons, synapse=pre_synapse)\n', (6968, 7000), False, 'import nengo\n'), ((7017, 7062), 'nengo.Probe', 'nengo.Probe', (['conn.learning_rule', '"""activities"""'], {}), "(conn.learning_rule, 'activities')\n", (7028, 7062), False, 'import nengo\n'), ((6544, 6561), 'nengo.synapses.Lowpass', 'Lowpass', ([], {'tau': '(0.05)'}), '(tau=0.05)\n', (6551, 6561), False, 'from nengo.synapses import Alpha, Lowpass\n'), ((6563, 6579), 'nengo.synapses.Alpha', 'Alpha', ([], {'tau': '(0.005)'}), '(tau=0.005)\n', (6568, 6579), False, 'from nengo.synapses import Alpha, Lowpass\n'), ((7395, 7419), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (7408, 7419), False, 'import nengo\n'), ((7442, 7465), 'nengo.Node', 'nengo.Node', ([], {'output': '[-1]'}), '(output=[-1])\n', (7452, 7465), False, 'import nengo\n'), ((7481, 7506), 'nengo.Node', 'nengo.Node', ([], {'output': '[0, 0]'}), '(output=[0, 0])\n', (7491, 7506), False, 'import nengo\n'), ((7522, 7553), 'nengo.Ensemble', 'nengo.Ensemble', (['(50)', '(2)'], {'radius': '(2)'}), '(50, 2, radius=2)\n', (7536, 7553), False, 'import nengo\n'), ((7562, 7590), 'nengo.Connection', 'nengo.Connection', (['stim', 'post'], {}), '(stim, post)\n', (7578, 7590), False, 'import nengo\n'), ((7840, 7881), 'nengo.Connection', 'nengo.Connection', (['err', 'conn.learning_rule'], {}), '(err, conn.learning_rule)\n', (7856, 7881), False, 'import nengo\n'), ((7894, 7926), 'nengo.Probe', 'nengo.Probe', (['post'], {'synapse': '(0.025)'}), '(post, synapse=0.025)\n', (7905, 7926), False, 'import nengo\n'), ((8272, 8287), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (8285, 8287), False, 'import nengo\n'), ((8308, 8329), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)', '(1)'], {}), '(10, 1)\n', (8322, 8329), False, 'import nengo\n'), ((8342, 8363), 'nengo.Node', 'nengo.Node', ([], {'size_in': '(1)'}), '(size_in=1)\n', (8352, 8363), False, 'import nengo\n'), ((8453, 8503), 'nengo.Connection', 'nengo.Connection', (['b', 'c.learning_rule'], {'synapse': 'None'}), '(b, c.learning_rule, synapse=None)\n', (8469, 8503), False, 'import nengo\n'), ((9098, 9129), 'nengo.Ensemble', 'nengo.Ensemble', (['n'], {'dimensions': '(2)'}), '(n, dimensions=2)\n', (9112, 9129), False, 'import nengo\n'), ((9142, 9177), 'nengo.Ensemble', 'nengo.Ensemble', (['(n + 1)'], {'dimensions': '(2)'}), '(n + 1, dimensions=2)\n', (9156, 9177), False, 'import nengo\n'), ((9186, 9208), 'nengo.Connection', 'nengo.Connection', (['u', 'a'], {}), '(u, a)\n', (9202, 9208), False, 'import nengo\n'), ((9561, 9575), 'nengo.Probe', 'nengo.Probe', (['u'], {}), '(u)\n', (9572, 9575), False, 'import nengo\n'), ((9596, 9643), 'nengo.Probe', 'nengo.Probe', (['conn', '"""weights"""'], {'sample_every': '(0.01)'}), "(conn, 'weights', sample_every=0.01)\n", (9607, 9643), False, 'import nengo\n'), ((9658, 9686), 'nengo.Probe', 'nengo.Probe', (['a'], {'synapse': '(0.03)'}), '(a, synapse=0.03)\n', (9669, 9686), False, 'import nengo\n'), ((9700, 9728), 'nengo.Probe', 'nengo.Probe', (['b'], {'synapse': '(0.03)'}), '(b, synapse=0.03)\n', (9711, 9728), False, 'import nengo\n'), ((10417, 10432), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (10430, 10432), False, 'import nengo\n'), ((10653, 10675), 'nengo.Node', 'nengo.Node', ([], {'output': '(1.0)'}), '(output=1.0)\n', (10663, 10675), False, 'import nengo\n'), ((10690, 10722), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)'], {'dimensions': '(1)'}), '(10, dimensions=1)\n', (10704, 10722), False, 'import nengo\n'), ((10738, 10770), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)'], {'dimensions': '(1)'}), '(10, dimensions=1)\n', (10752, 10770), False, 'import nengo\n'), ((10873, 10986), 'nengo.Connection', 'nengo.Connection', (['pre.neurons', 'post.neurons'], {'transform': 'initial_weights', 'learning_rule_type': 'learning_rule_type'}), '(pre.neurons, post.neurons, transform=initial_weights,\n learning_rule_type=learning_rule_type)\n', (10889, 10986), False, 'import nengo\n'), ((11249, 11287), 'nengo.Probe', 'nengo.Probe', (['pre.neurons'], {'synapse': '(0.01)'}), '(pre.neurons, synapse=0.01)\n', (11260, 11287), False, 'import nengo\n'), ((11312, 11373), 'nengo.Probe', 'nengo.Probe', (['conn', '"""weights"""'], {'synapse': 'None', 'sample_every': '(0.01)'}), "(conn, 'weights', synapse=None, sample_every=0.01)\n", (11323, 11373), False, 'import nengo\n'), ((11644, 11668), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (11657, 11668), False, 'import nengo\n'), ((12900, 12924), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (12913, 12924), False, 'import nengo\n'), ((13117, 13160), 'numpy.array', 'np.array', (['sim.data[m.activity_p]'], {'copy': '(True)'}), '(sim.data[m.activity_p], copy=True)\n', (13125, 13160), True, 'import numpy as np\n'), ((13187, 13229), 'numpy.array', 'np.array', (['sim.data[m.weights_p]'], {'copy': '(True)'}), '(sim.data[m.weights_p], copy=True)\n', (13195, 13229), True, 'import numpy as np\n'), ((14063, 14105), 'nengo.learning_rules.LearningRuleTypeParam', 'LearningRuleTypeParam', (['"""lrp"""'], {'default': 'None'}), "('lrp', default=None)\n", (14084, 14105), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((14227, 14232), 'nengo.learning_rules.Oja', 'Oja', ([], {}), '()\n', (14230, 14232), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((14234, 14239), 'nengo.learning_rules.Oja', 'Oja', ([], {}), '()\n', (14237, 14239), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((14330, 14355), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (14343, 14355), False, 'import pytest\n'), ((14427, 14452), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (14440, 14452), False, 'import pytest\n'), ((14716, 14740), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (14729, 14740), False, 'import nengo\n'), ((14814, 14831), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (14821, 14831), True, 'import numpy as np\n'), ((14846, 14851), 'nengo.learning_rules.PES', 'PES', ([], {}), '()\n', (14849, 14851), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((14865, 14926), 'nengo.Connection', 'nengo.Connection', (['a.neurons', 'b.neurons'], {'learning_rule_type': 'r1'}), '(a.neurons, b.neurons, learning_rule_type=r1)\n', (14881, 14926), False, 'import nengo\n'), ((15014, 15088), 'nengo.Connection', 'nengo.Connection', (['a.neurons', 'b.neurons'], {'learning_rule_type': 'r2', 'transform': 'T'}), '(a.neurons, b.neurons, learning_rule_type=r2, transform=T)\n', (15030, 15088), False, 'import nengo\n'), ((15295, 15369), 'nengo.Connection', 'nengo.Connection', (['a.neurons', 'b.neurons'], {'learning_rule_type': 'r3', 'transform': 'T'}), '(a.neurons, b.neurons, learning_rule_type=r3, transform=T)\n', (15311, 15369), False, 'import nengo\n'), ((16418, 16451), 'nengo.Node', 'nengo.Node', ([], {'output': 'learned_vector'}), '(output=learned_vector)\n', (16428, 16451), False, 'import nengo\n'), ((16859, 16909), 'nengo.Probe', 'nengo.Probe', (['conn.learning_rule', '"""scaled_encoders"""'], {}), "(conn.learning_rule, 'scaled_encoders')\n", (16870, 16909), False, 'import nengo\n'), ((16930, 16963), 'nengo.Probe', 'nengo.Probe', (['x', '"""scaled_encoders"""'], {}), "(x, 'scaled_encoders')\n", (16941, 16963), False, 'import nengo\n'), ((18548, 18581), 'nengo.Node', 'nengo.Node', ([], {'output': 'control_signal'}), '(output=control_signal)\n', (18558, 18581), False, 'import nengo\n'), ((18594, 18627), 'nengo.Node', 'nengo.Node', ([], {'output': 'learned_vector'}), '(output=learned_vector)\n', (18604, 18627), False, 'import nengo\n'), ((18817, 18876), 'nengo.Connection', 'nengo.Connection', (['control', 'conn.learning_rule'], {'synapse': 'None'}), '(control, conn.learning_rule, synapse=None)\n', (18833, 18876), False, 'import nengo\n'), ((18894, 18944), 'nengo.Probe', 'nengo.Probe', (['conn.learning_rule', '"""scaled_encoders"""'], {}), "(conn.learning_rule, 'scaled_encoders')\n", (18905, 18944), False, 'import nengo\n'), ((19890, 19931), 'pytest.raises', 'pytest.raises', (['(ValueError, RuntimeError)'], {}), '((ValueError, RuntimeError))\n', (19903, 19931), False, 'import pytest\n'), ((20079, 20094), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (20092, 20094), False, 'import nengo\n'), ((20175, 20196), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)', '(1)'], {}), '(10, 1)\n', (20189, 20196), False, 'import nengo\n'), ((20212, 20239), 'nengo.Connection', 'nengo.Connection', (['pre', 'post'], {}), '(pre, post)\n', (20228, 20239), False, 'import nengo\n'), ((20973, 20988), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (20986, 20988), False, 'import nengo\n'), ((21009, 21030), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)', '(1)'], {}), '(10, 1)\n', (21023, 21030), False, 'import nengo\n'), ((21043, 21064), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)', '(1)'], {}), '(10, 1)\n', (21057, 21064), False, 'import nengo\n'), ((21208, 21229), 'nengo.Node', 'nengo.Node', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (21218, 21229), False, 'import nengo\n'), ((21238, 21293), 'nengo.Connection', 'nengo.Connection', (['err', 'conn.learning_rule'], {'synapse': 'None'}), '(err, conn.learning_rule, synapse=None)\n', (21254, 21293), False, 'import nengo\n'), ((21307, 21335), 'nengo.Probe', 'nengo.Probe', (['conn', '"""weights"""'], {}), "(conn, 'weights')\n", (21318, 21335), False, 'import nengo\n'), ((21929, 21961), 'pytest.warns', 'pytest.warns', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', (21941, 21961), False, 'import pytest\n'), ((22254, 22278), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (22267, 22278), False, 'import nengo\n'), ((22301, 22322), 'nengo.Ensemble', 'nengo.Ensemble', (['(50)', '(1)'], {}), '(50, 1)\n', (22315, 22322), False, 'import nengo\n'), ((22335, 22356), 'nengo.Ensemble', 'nengo.Ensemble', (['(30)', '(2)'], {}), '(30, 2)\n', (22349, 22356), False, 'import nengo\n'), ((22531, 22575), 'nengo.Node', 'nengo.Node', (['(lambda t, x: x - 0.75)'], {'size_in': '(1)'}), '(lambda t, x: x - 0.75, size_in=1)\n', (22541, 22575), False, 'import nengo\n'), ((22584, 22612), 'nengo.Connection', 'nengo.Connection', (['b[0]', 'err1'], {}), '(b[0], err1)\n', (22600, 22612), False, 'import nengo\n'), ((22621, 22666), 'nengo.Connection', 'nengo.Connection', (['err1', 'conn.learning_rule[0]'], {}), '(err1, conn.learning_rule[0])\n', (22637, 22666), False, 'import nengo\n'), ((22683, 22726), 'nengo.Node', 'nengo.Node', (['(lambda t, x: x + 0.5)'], {'size_in': '(1)'}), '(lambda t, x: x + 0.5, size_in=1)\n', (22693, 22726), False, 'import nengo\n'), ((22735, 22763), 'nengo.Connection', 'nengo.Connection', (['b[1]', 'err2'], {}), '(b[1], err2)\n', (22751, 22763), False, 'import nengo\n'), ((22772, 22817), 'nengo.Connection', 'nengo.Connection', (['err2', 'conn.learning_rule[1]'], {}), '(err2, conn.learning_rule[1])\n', (22788, 22817), False, 'import nengo\n'), ((22831, 22859), 'nengo.Probe', 'nengo.Probe', (['b'], {'synapse': '(0.03)'}), '(b, synapse=0.03)\n', (22842, 22859), False, 'import nengo\n'), ((475, 502), 'numpy.var', 'np.var', (['weight_data'], {'axis': '(0)'}), '(weight_data, axis=0)\n', (481, 502), True, 'import numpy as np\n'), ((1665, 1700), 'nengo.solvers.LstsqL2', 'nengo.solvers.LstsqL2', ([], {'weights': '(True)'}), '(weights=True)\n', (1686, 1700), False, 'import nengo\n'), ((4808, 4829), 'nengo.Node', 'nengo.Node', ([], {'output': '(-1)'}), '(output=-1)\n', (4818, 4829), False, 'import nengo\n'), ((9047, 9072), 'nengo.processes.WhiteSignal', 'WhiteSignal', (['(0.5)'], {'high': '(10)'}), '(0.5, high=10)\n', (9058, 9072), False, 'from nengo.processes import WhiteSignal\n'), ((9434, 9499), 'nengo.Connection', 'nengo.Connection', (['a.neurons', 'b.neurons'], {'transform': 'initial_weights'}), '(a.neurons, b.neurons, transform=initial_weights)\n', (9450, 9499), False, 'import nengo\n'), ((8666, 8690), 'nengo.learning_rules.BCM', 'BCM', ([], {'learning_rate': '(1e-08)'}), '(learning_rate=1e-08)\n', (8669, 8690), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((8708, 8732), 'nengo.learning_rules.Oja', 'Oja', ([], {'learning_rate': '(1e-05)'}), '(learning_rate=1e-05)\n', (8711, 8732), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((11099, 11131), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)'], {'dimensions': '(1)'}), '(10, dimensions=1)\n', (11113, 11131), False, 'import nengo\n'), ((11144, 11168), 'nengo.Connection', 'nengo.Connection', (['u', 'err'], {}), '(u, err)\n', (11160, 11168), False, 'import nengo\n'), ((11181, 11222), 'nengo.Connection', 'nengo.Connection', (['err', 'conn.learning_rule'], {}), '(err, conn.learning_rule)\n', (11197, 11222), False, 'import nengo\n'), ((14474, 14479), 'nengo.learning_rules.Oja', 'Oja', ([], {}), '()\n', (14477, 14479), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((14486, 14491), 'nengo.learning_rules.Oja', 'Oja', ([], {}), '()\n', (14489, 14491), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((14761, 14782), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)', '(2)'], {}), '(10, 2)\n', (14775, 14782), False, 'import nengo\n'), ((14987, 14992), 'nengo.learning_rules.PES', 'PES', ([], {}), '()\n', (14990, 14992), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((14994, 14999), 'nengo.learning_rules.BCM', 'BCM', ([], {}), '()\n', (14997, 14999), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((16121, 16153), 'nengo.dists.UniformHypersphere', 'UniformHypersphere', ([], {'surface': '(True)'}), '(surface=True)\n', (16139, 16153), False, 'from nengo.dists import UniformHypersphere\n'), ((19204, 19218), 'numpy.where', 'np.where', (['tend'], {}), '(tend)\n', (19212, 19218), True, 'import numpy as np\n'), ((20253, 20283), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (20266, 20283), False, 'import pytest\n'), ((20323, 20334), 'nengo.PES', 'nengo.PES', ([], {}), '()\n', (20332, 20334), False, 'import nengo\n'), ((20778, 20811), 'numpy.zeros', 'np.zeros', (['rule.connection.size_in'], {}), '(rule.connection.size_in)\n', (20786, 20811), True, 'import numpy as np\n'), ((20834, 20846), 'nengo.builder.operator.Reset', 'Reset', (['error'], {}), '(error)\n', (20839, 20846), False, 'from nengo.builder.operator import Reset, Copy\n'), ((20924, 20961), 'nengo.builder.operator.Copy', 'Copy', (['error', "model.sig[rule]['delta']"], {}), "(error, model.sig[rule]['delta'])\n", (20928, 20961), False, 'from nengo.builder.operator import Reset, Copy\n'), ((21450, 21465), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (21459, 21465), True, 'import numpy as np\n'), ((21467, 21482), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (21476, 21482), True, 'import numpy as np\n'), ((22495, 22510), 'nengo.Node', 'nengo.Node', (['(1.0)'], {}), '(1.0)\n', (22505, 22510), False, 'import nengo\n'), ((1592, 1601), 'nengo.learning_rules.PES', 'PES', (['rate'], {}), '(rate)\n', (1595, 1601), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((4591, 4621), 'nengo.PES', 'nengo.PES', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (4600, 4621), False, 'import nengo\n'), ((5640, 5655), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (5647, 5655), True, 'import numpy as np\n'), ((5676, 5712), 'nengo.solvers.LstsqL2', 'nengo.solvers.LstsqL2', ([], {'weights': '(False)'}), '(weights=False)\n', (5697, 5712), False, 'import nengo\n'), ((5968, 5983), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (5975, 5983), True, 'import numpy as np\n'), ((6004, 6039), 'nengo.solvers.LstsqL2', 'nengo.solvers.LstsqL2', ([], {'weights': '(True)'}), '(weights=True)\n', (6025, 6039), False, 'import nengo\n'), ((6293, 6321), 'numpy.ones', 'np.ones', (['(1, ens1.n_neurons)'], {}), '((1, ens1.n_neurons))\n', (6300, 6321), True, 'import numpy as np\n'), ((6762, 6787), 'nengo.processes.WhiteSignal', 'WhiteSignal', (['(0.5)'], {'high': '(10)'}), '(0.5, high=10)\n', (6773, 6787), False, 'from nengo.processes import WhiteSignal\n'), ((7719, 7757), 'nengo.solvers.LstsqL2', 'nengo.solvers.LstsqL2', ([], {'weights': 'weights'}), '(weights=weights)\n', (7740, 7757), False, 'import nengo\n'), ((7790, 7821), 'nengo.PES', 'nengo.PES', ([], {'learning_rate': '(0.0005)'}), '(learning_rate=0.0005)\n', (7799, 7821), False, 'import nengo\n'), ((8432, 8443), 'nengo.PES', 'nengo.PES', ([], {}), '()\n', (8441, 8443), False, 'import nengo\n'), ((8751, 8775), 'nengo.learning_rules.Oja', 'Oja', ([], {'learning_rate': '(1e-05)'}), '(learning_rate=1e-05)\n', (8754, 8775), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((8776, 8800), 'nengo.learning_rules.BCM', 'BCM', ([], {'learning_rate': '(1e-08)'}), '(learning_rate=1e-08)\n', (8779, 8800), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((8820, 8844), 'nengo.learning_rules.Oja', 'Oja', ([], {'learning_rate': '(1e-05)'}), '(learning_rate=1e-05)\n', (8823, 8844), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((8845, 8869), 'nengo.learning_rules.BCM', 'BCM', ([], {'learning_rate': '(1e-08)'}), '(learning_rate=1e-08)\n', (8848, 8869), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((15264, 15269), 'nengo.learning_rules.Oja', 'Oja', ([], {}), '()\n', (15267, 15269), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((15275, 15280), 'nengo.learning_rules.BCM', 'BCM', ([], {}), '()\n', (15278, 15280), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((16627, 16660), 'nengo.dists.Uniform', 'nengo.dists.Uniform', (['(300.0)', '(400.0)'], {}), '(300.0, 400.0)\n', (16646, 16660), False, 'import nengo\n'), ((16808, 16831), 'nengo.learning_rules.Voja', 'Voja', ([], {'learning_rate': '(0.1)'}), '(learning_rate=0.1)\n', (16812, 16831), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((18775, 18798), 'nengo.learning_rules.Voja', 'Voja', ([], {'post_synapse': 'None'}), '(post_synapse=None)\n', (18779, 18798), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((20144, 20158), 'nengo.Direct', 'nengo.Direct', ([], {}), '()\n', (20156, 20158), False, 'import nengo\n'), ((21134, 21151), 'numpy.zeros', 'np.zeros', (['(1, 10)'], {}), '((1, 10))\n', (21142, 21151), True, 'import numpy as np\n'), ((22187, 22197), 'nengo.synapses.Lowpass', 'Lowpass', (['i'], {}), '(i)\n', (22194, 22197), False, 'from nengo.synapses import Alpha, Lowpass\n'), ((22427, 22432), 'nengo.learning_rules.PES', 'PES', ([], {}), '()\n', (22430, 22432), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((5753, 5764), 'nengo.PES', 'nengo.PES', ([], {}), '()\n', (5762, 5764), False, 'import nengo\n'), ((6080, 6091), 'nengo.PES', 'nengo.PES', ([], {}), '()\n', (6089, 6091), False, 'import nengo\n'), ((6362, 6373), 'nengo.PES', 'nengo.PES', ([], {}), '()\n', (6371, 6373), False, 'import nengo\n'), ((9278, 9313), 'nengo.solvers.LstsqL2', 'nengo.solvers.LstsqL2', ([], {'weights': '(True)'}), '(weights=True)\n', (9299, 9313), False, 'import nengo\n')]
|
# coding: utf-8
"""
joplin-web
"""
from django.conf import settings
from django.http.response import JsonResponse
from django.urls import reverse
from joplin_api import JoplinApiSync
from joplin_web.utils import nb_notes_by_tag, nb_notes_by_folder
import logging
from rich import console
console = console.Console()
logger = logging.getLogger("joplin_web.app")
joplin = JoplinApiSync(token=settings.JOPLIN_WEBCLIPPER_TOKEN)
def get_folders(request):
"""
all the folders
:param request
:return: json
"""
res = joplin.get_folders()
json_data = sorted(res.json(), key=lambda k: k['title'])
data = nb_notes_by_folder(json_data)
logger.debug(data)
return JsonResponse(data, safe=False)
def get_tags(request):
res = joplin.get_tags()
json_data = sorted(res.json(), key=lambda k: k['title'])
data = nb_notes_by_tag(json_data)
return JsonResponse(data, safe=False)
|
[
"logging.getLogger",
"joplin_api.JoplinApiSync",
"joplin_web.utils.nb_notes_by_tag",
"rich.console.Console",
"joplin_web.utils.nb_notes_by_folder",
"django.http.response.JsonResponse"
] |
[((301, 318), 'rich.console.Console', 'console.Console', ([], {}), '()\n', (316, 318), False, 'from rich import console\n'), ((329, 364), 'logging.getLogger', 'logging.getLogger', (['"""joplin_web.app"""'], {}), "('joplin_web.app')\n", (346, 364), False, 'import logging\n'), ((375, 428), 'joplin_api.JoplinApiSync', 'JoplinApiSync', ([], {'token': 'settings.JOPLIN_WEBCLIPPER_TOKEN'}), '(token=settings.JOPLIN_WEBCLIPPER_TOKEN)\n', (388, 428), False, 'from joplin_api import JoplinApiSync\n'), ((633, 662), 'joplin_web.utils.nb_notes_by_folder', 'nb_notes_by_folder', (['json_data'], {}), '(json_data)\n', (651, 662), False, 'from joplin_web.utils import nb_notes_by_tag, nb_notes_by_folder\n'), ((697, 727), 'django.http.response.JsonResponse', 'JsonResponse', (['data'], {'safe': '(False)'}), '(data, safe=False)\n', (709, 727), False, 'from django.http.response import JsonResponse\n'), ((853, 879), 'joplin_web.utils.nb_notes_by_tag', 'nb_notes_by_tag', (['json_data'], {}), '(json_data)\n', (868, 879), False, 'from joplin_web.utils import nb_notes_by_tag, nb_notes_by_folder\n'), ((891, 921), 'django.http.response.JsonResponse', 'JsonResponse', (['data'], {'safe': '(False)'}), '(data, safe=False)\n', (903, 921), False, 'from django.http.response import JsonResponse\n')]
|
import numpy as np
from django.core.management.base import BaseCommand
from oscar.core.loading import get_classes
StatsSpe, StatsItem, Test, Speciality, Item, Conference = get_classes(
'confs.models',
(
"StatsSpe", "StatsItem", "Test", "Speciality", "Item", "Conference"
)
)
class Command(BaseCommand):
help = 'Evaluate new stats for all specialies and items'
def handle(self, *args, **options):
for spe in Speciality.objects.all():
stats = StatsSpe.objects.get_or_create(speciality=spe)[0]
l = [
test.score for test
in Test.objects.filter(conf__specialities__in=[spe], finished=True).all()
]
l = l if l != [] else [0]
stats.average = np.mean(l)
stats.median = np.median(l)
stats.std_dev = np.std(l)
stats.save()
for item in Item.objects.all():
stats = StatsItem.objects.get_or_create(item=item)[0]
l = [
test.score for test
in Test.objects.filter(conf__items__in=[item], finished=True).all()
]
l = l if l != [] else [0]
stats.average = np.mean(l)
stats.median = np.median(l)
stats.std_dev = np.std(l)
stats.save()
for conf in Conference.objects.filter(tests__isnull=False, for_sale=True).distinct():
conf.update_stats()
|
[
"numpy.mean",
"numpy.median",
"oscar.core.loading.get_classes",
"numpy.std"
] |
[((175, 277), 'oscar.core.loading.get_classes', 'get_classes', (['"""confs.models"""', "('StatsSpe', 'StatsItem', 'Test', 'Speciality', 'Item', 'Conference')"], {}), "('confs.models', ('StatsSpe', 'StatsItem', 'Test', 'Speciality',\n 'Item', 'Conference'))\n", (186, 277), False, 'from oscar.core.loading import get_classes\n'), ((769, 779), 'numpy.mean', 'np.mean', (['l'], {}), '(l)\n', (776, 779), True, 'import numpy as np\n'), ((807, 819), 'numpy.median', 'np.median', (['l'], {}), '(l)\n', (816, 819), True, 'import numpy as np\n'), ((848, 857), 'numpy.std', 'np.std', (['l'], {}), '(l)\n', (854, 857), True, 'import numpy as np\n'), ((1208, 1218), 'numpy.mean', 'np.mean', (['l'], {}), '(l)\n', (1215, 1218), True, 'import numpy as np\n'), ((1246, 1258), 'numpy.median', 'np.median', (['l'], {}), '(l)\n', (1255, 1258), True, 'import numpy as np\n'), ((1287, 1296), 'numpy.std', 'np.std', (['l'], {}), '(l)\n', (1293, 1296), True, 'import numpy as np\n')]
|
from aiohttp import web
from aiohttp import WSMsgType
from Settings import log
class WebSocket(web.View):
async def get(self):
ws = web.WebSocketResponse()
await ws.prepare(self.request)
self.request.app['websockets'].append(ws)
async for msg in ws:
if msg.type == WSMsgType.text:
if msg.data == 'close':
await ws.close()
elif msg == WSMsgType.error:
log.debug('ws connection closed with exception %s' % ws.exception())
self.request.app['websockets'].remove(ws)
|
[
"aiohttp.web.WebSocketResponse"
] |
[((147, 170), 'aiohttp.web.WebSocketResponse', 'web.WebSocketResponse', ([], {}), '()\n', (168, 170), False, 'from aiohttp import web\n')]
|
import json
import os
def qald(in_folder, out_folder):
train = json.load(open(os.path.join(in_folder, "qald-7-train-en-wikidata.json")))
test = json.load(open(os.path.join(in_folder, "qald-7-test-en-wikidata-withoutanswers.json")))
train_q = []
test_q = []
for qs in train["questions"]:
for q in qs["question"]:
train_q.append(q["string"])
split_idx = int(len(train_q)*0.75)
dev_q = train_q[split_idx:]
train_q = train_q[:split_idx]
for qs in test["questions"]:
for q in qs["question"]:
test_q.append(q["string"])
for qs, split in zip([train_q, dev_q, test_q], ["train", "dev", "test"]):
os.makedirs(os.path.join(out_folder, split), exist_ok=True)
with open(os.path.join(out_folder, split, "qald-7.txt"), "w", encoding="utf-8") as f:
for q in qs:
f.write(q+"\n")
def websqp(in_folder, out_folder):
train = json.load(open(os.path.join(in_folder, "WebQSP.train.json"), encoding="utf-8"))
test = json.load(open(os.path.join(in_folder, "WebQSP.test.json"), encoding="utf-8"))
train_q = []
test_q = []
for q in train["Questions"]:
train_q.append(q["RawQuestion"])
split_idx = int(len(train_q)*0.75)
dev_q = train_q[split_idx:]
train_q = train_q[:split_idx]
for q in test["Questions"]:
test_q.append(q["RawQuestion"])
for qs, split in zip([train_q, dev_q, test_q], ["train", "dev", "test"]):
os.makedirs(os.path.join(out_folder, split), exist_ok=True)
with open(os.path.join(out_folder, split, "webqsp.txt"), "w", encoding="utf-8") as f:
for q in qs:
f.write(q+"\n")
if __name__ == "__main__":
qald(r"C:\Users\Gregor\Documents\Programming\square-skill-selector\data\kbqa\qald", r"C:\Users\Gregor\Documents\Programming\square-skill-selector\data\kbqa")
websqp(r"C:\Users\Gregor\Documents\Programming\square-skill-selector\data\kbqa\WebQSP\data", r"C:\Users\Gregor\Documents\Programming\square-skill-selector\data\kbqa")
|
[
"os.path.join"
] |
[((83, 139), 'os.path.join', 'os.path.join', (['in_folder', '"""qald-7-train-en-wikidata.json"""'], {}), "(in_folder, 'qald-7-train-en-wikidata.json')\n", (95, 139), False, 'import os\n'), ((168, 238), 'os.path.join', 'os.path.join', (['in_folder', '"""qald-7-test-en-wikidata-withoutanswers.json"""'], {}), "(in_folder, 'qald-7-test-en-wikidata-withoutanswers.json')\n", (180, 238), False, 'import os\n'), ((693, 724), 'os.path.join', 'os.path.join', (['out_folder', 'split'], {}), '(out_folder, split)\n', (705, 724), False, 'import os\n'), ((955, 999), 'os.path.join', 'os.path.join', (['in_folder', '"""WebQSP.train.json"""'], {}), "(in_folder, 'WebQSP.train.json')\n", (967, 999), False, 'import os\n'), ((1046, 1089), 'os.path.join', 'os.path.join', (['in_folder', '"""WebQSP.test.json"""'], {}), "(in_folder, 'WebQSP.test.json')\n", (1058, 1089), False, 'import os\n'), ((1496, 1527), 'os.path.join', 'os.path.join', (['out_folder', 'split'], {}), '(out_folder, split)\n', (1508, 1527), False, 'import os\n'), ((759, 804), 'os.path.join', 'os.path.join', (['out_folder', 'split', '"""qald-7.txt"""'], {}), "(out_folder, split, 'qald-7.txt')\n", (771, 804), False, 'import os\n'), ((1562, 1607), 'os.path.join', 'os.path.join', (['out_folder', 'split', '"""webqsp.txt"""'], {}), "(out_folder, split, 'webqsp.txt')\n", (1574, 1607), False, 'import os\n')]
|
""" Test utility functionality."""
import datetime
import decimal
import json
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from mock import patch
from ..utils import JSONSerializable, DatetimeDecimalEncoder
class TestJSONSerializable(unittest.TestCase):
""" Test JSONSerializable functionality."""
def setUp(self):
class A(JSONSerializable):
@property
def json(self):
pass
self._class = A
def test_abstract_class(self):
with self.assertRaises(TypeError):
JSONSerializable()
self._class()
def test_definse_serialize_deserialize(self):
""" Test classmethods of inherited class."""
self.assertEqual(self._class.serialize({}), "{}")
self.assertEqual(self._class.deserialize("{}"), {})
def test_from_json(self):
self.assertTrue(isinstance(self._class.from_json('{}'), self._class))
def test_from_json_incorrect(self):
with self.assertRaises(ValueError):
self._class.from_json('[]')
class TestDatetimeDecimalEncoder(unittest.TestCase):
""" Test DatetimeDecimalEncoder functionality."""
def test_date_encoder(self):
obj = datetime.date.today()
with self.assertRaises(TypeError):
json.dumps(obj)
self.assertEqual(
json.dumps(obj, cls=DatetimeDecimalEncoder),
'"{0}"'.format(obj.isoformat()),
)
def test_datetime_encoder(self):
obj = datetime.datetime.now()
with self.assertRaises(TypeError):
json.dumps(obj)
self.assertEqual(
json.dumps(obj, cls=DatetimeDecimalEncoder),
'"{0}"'.format(obj.isoformat()),
)
def test_decimal_encoder(self):
obj = decimal.Decimal('0.1')
with self.assertRaises(TypeError):
json.dumps(obj)
result = json.dumps(obj, cls=DatetimeDecimalEncoder)
self.assertTrue(isinstance(result, str))
self.assertEqual(float(result), float(0.1))
def test_default(self):
encoder = DatetimeDecimalEncoder()
with patch.object(json.JSONEncoder, 'default') as json_default:
encoder.default("")
self.assertEqual(json_default.call_count, 1)
|
[
"json.dumps",
"mock.patch.object",
"datetime.datetime.now",
"datetime.date.today",
"decimal.Decimal"
] |
[((1262, 1283), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1281, 1283), False, 'import datetime\n'), ((1547, 1570), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1568, 1570), False, 'import datetime\n'), ((1833, 1855), 'decimal.Decimal', 'decimal.Decimal', (['"""0.1"""'], {}), "('0.1')\n", (1848, 1855), False, 'import decimal\n'), ((1946, 1989), 'json.dumps', 'json.dumps', (['obj'], {'cls': 'DatetimeDecimalEncoder'}), '(obj, cls=DatetimeDecimalEncoder)\n', (1956, 1989), False, 'import json\n'), ((1340, 1355), 'json.dumps', 'json.dumps', (['obj'], {}), '(obj)\n', (1350, 1355), False, 'import json\n'), ((1395, 1438), 'json.dumps', 'json.dumps', (['obj'], {'cls': 'DatetimeDecimalEncoder'}), '(obj, cls=DatetimeDecimalEncoder)\n', (1405, 1438), False, 'import json\n'), ((1627, 1642), 'json.dumps', 'json.dumps', (['obj'], {}), '(obj)\n', (1637, 1642), False, 'import json\n'), ((1682, 1725), 'json.dumps', 'json.dumps', (['obj'], {'cls': 'DatetimeDecimalEncoder'}), '(obj, cls=DatetimeDecimalEncoder)\n', (1692, 1725), False, 'import json\n'), ((1912, 1927), 'json.dumps', 'json.dumps', (['obj'], {}), '(obj)\n', (1922, 1927), False, 'import json\n'), ((2176, 2217), 'mock.patch.object', 'patch.object', (['json.JSONEncoder', '"""default"""'], {}), "(json.JSONEncoder, 'default')\n", (2188, 2217), False, 'from mock import patch\n')]
|
from __future__ import annotations
from coredis.response.callbacks import ResponseCallback
from coredis.response.types import LibraryDefinition
from coredis.response.utils import flat_pairs_to_dict
from coredis.typing import Any, AnyStr, Mapping, Union
from coredis.utils import EncodingInsensitiveDict
class FunctionListCallback(ResponseCallback):
def transform(
self, response: Any, **options: Any
) -> Mapping[str, LibraryDefinition]:
libraries = [
EncodingInsensitiveDict(flat_pairs_to_dict(library)) for library in response
]
transformed = EncodingInsensitiveDict()
for library in libraries:
lib_name = library["library_name"]
functions = EncodingInsensitiveDict({})
for function in library.get("functions", []):
function_definition = EncodingInsensitiveDict(
flat_pairs_to_dict(function)
)
functions[function_definition["name"]] = function_definition
functions[function_definition["name"]]["flags"] = set(
function_definition["flags"]
)
library["functions"] = functions
transformed[lib_name] = EncodingInsensitiveDict( # type: ignore
LibraryDefinition(
name=library["name"],
engine=library["engine"],
description=library["description"],
functions=library["functions"],
library_code=library["library_code"],
)
)
return transformed
class FunctionStatsCallback(ResponseCallback):
def transform(
self, response: Any, **options: Any
) -> Mapping[AnyStr, Union[AnyStr, Mapping]]:
transformed = flat_pairs_to_dict(response)
key = b"engines" if b"engines" in transformed else "engines"
engines = flat_pairs_to_dict(transformed.pop(key))
for engine, stats in engines.items():
transformed.setdefault(key, {})[engine] = flat_pairs_to_dict(stats)
return transformed
|
[
"coredis.utils.EncodingInsensitiveDict",
"coredis.response.types.LibraryDefinition",
"coredis.response.utils.flat_pairs_to_dict"
] |
[((600, 625), 'coredis.utils.EncodingInsensitiveDict', 'EncodingInsensitiveDict', ([], {}), '()\n', (623, 625), False, 'from coredis.utils import EncodingInsensitiveDict\n'), ((1816, 1844), 'coredis.response.utils.flat_pairs_to_dict', 'flat_pairs_to_dict', (['response'], {}), '(response)\n', (1834, 1844), False, 'from coredis.response.utils import flat_pairs_to_dict\n'), ((731, 758), 'coredis.utils.EncodingInsensitiveDict', 'EncodingInsensitiveDict', (['{}'], {}), '({})\n', (754, 758), False, 'from coredis.utils import EncodingInsensitiveDict\n'), ((2073, 2098), 'coredis.response.utils.flat_pairs_to_dict', 'flat_pairs_to_dict', (['stats'], {}), '(stats)\n', (2091, 2098), False, 'from coredis.response.utils import flat_pairs_to_dict\n'), ((515, 542), 'coredis.response.utils.flat_pairs_to_dict', 'flat_pairs_to_dict', (['library'], {}), '(library)\n', (533, 542), False, 'from coredis.response.utils import flat_pairs_to_dict\n'), ((1300, 1479), 'coredis.response.types.LibraryDefinition', 'LibraryDefinition', ([], {'name': "library['name']", 'engine': "library['engine']", 'description': "library['description']", 'functions': "library['functions']", 'library_code': "library['library_code']"}), "(name=library['name'], engine=library['engine'],\n description=library['description'], functions=library['functions'],\n library_code=library['library_code'])\n", (1317, 1479), False, 'from coredis.response.types import LibraryDefinition\n'), ((900, 928), 'coredis.response.utils.flat_pairs_to_dict', 'flat_pairs_to_dict', (['function'], {}), '(function)\n', (918, 928), False, 'from coredis.response.utils import flat_pairs_to_dict\n')]
|
from models.tilemap import TileMap
class EditorController:
def __init__(self, view):
self.view = view
self.tilemap = TileMap()
def place_tile(self, coord, ttype):
self.tilemap.add_tile(coord, ttype)
self.view.board.update_tiles({coord: ttype})
def place_spawn(self, coord):
self.tilemap.add_spawn(coord)
self.view.board.update_spawns({coord: 'None'})
def get_tiles(self):
layers = self.tilemap.layers
tiles = layers['ground'].copy()
tiles.update(layers['util'])
tiles.update(layers['powerup'])
tiles.update(layers['wall'])
return tiles
def save(self):
self.tilemap.save()
def load(self, map_path):
self.tilemap.load(map_path)
self.view.board.update_tiles(self.get_tiles())
self.view.board.update_spawns(self.tilemap.spawns)
|
[
"models.tilemap.TileMap"
] |
[((141, 150), 'models.tilemap.TileMap', 'TileMap', ([], {}), '()\n', (148, 150), False, 'from models.tilemap import TileMap\n')]
|
# -*- coding: utf-8 -*-
import pandas as pd
import pytest
from bio_hansel.qc import QC
from bio_hansel.subtype import Subtype
from bio_hansel.subtype_stats import SubtypeCounts
from bio_hansel.subtyper import absent_downstream_subtypes, sorted_subtype_ints, empty_results, \
get_missing_internal_subtypes
from bio_hansel.utils import find_inconsistent_subtypes, expand_degenerate_bases
def test_absent_downstream_subtypes():
assert absent_downstream_subtypes(subtype='1',
subtypes=pd.Series(['1.1', '1.2', '1.3', '1']),
scheme_subtypes=['1.1', '1.2', '1', '1.3']) is None
assert absent_downstream_subtypes(subtype='1',
subtypes=pd.Series(['1.1', '1.2', '1']),
scheme_subtypes=['1.1', '1.2', '1', '1.3']) == ['1.3']
assert absent_downstream_subtypes(subtype='1',
subtypes=pd.Series(['1']),
scheme_subtypes=['1.1', '1.2', '1', '1.3']) == ['1.1', '1.2', '1.3']
def test_sorted_subtype_ints():
assert sorted_subtype_ints(pd.Series([], dtype=object)) == []
exp_subtype_ints = [
[1],
[1, 1],
[1, 1, 1],
[1, 1, 1, 99]
]
assert sorted_subtype_ints(pd.Series(['1', '1.1', '1.1.1', '1.1.1.99'])) == exp_subtype_ints
series = pd.Series(['1', '1.1', '1.1.1', '1.1.1.99', '1.1', '1.1.1'])
assert sorted_subtype_ints(series) == exp_subtype_ints
def test_empty_results():
st = Subtype(sample='test',
file_path='tests/data/Retro1000data/10-1358.fastq',
scheme='enteritidis',
scheme_version='1.0.5',
subtype=None,
non_present_subtypes=None,
all_subtypes=None,
qc_status=QC.FAIL,
qc_message=QC.NO_TARGETS_FOUND)
df_empty = empty_results(st)
df_expected_empty = pd.DataFrame(
{
0: dict(
sample='test',
file_path='tests/data/Retro1000data/10-1358.fastq',
subtype=None,
refposition=None,
is_pos_kmer=None,
scheme='enteritidis',
scheme_version='1.0.5',
qc_status=QC.FAIL,
qc_message=QC.NO_TARGETS_FOUND)}).transpose()
assert ((df_empty == df_expected_empty) | (df_empty.isnull() == df_expected_empty.isnull())).values.all(), \
f'Empty result DataFrame should equal df_expected_empty: {df_expected_empty}'
def test_find_inconsistent_subtypes():
subtype_list = ['1',
'1.1',
'1.1.1',
'1.1.1.1', ]
consistent_subtypes = sorted_subtype_ints(pd.Series(subtype_list))
assert find_inconsistent_subtypes(consistent_subtypes) == [], \
'Expecting all subtypes to be consistent with each other'
subtype_list = ['1',
'1.1',
'1.1.1',
'1.1.1.1',
'1.1.1.2',
'1.1.1.3', ]
inconsistent_subtypes = sorted_subtype_ints(pd.Series(subtype_list))
exp_incon_subtypes = ['1.1.1.1',
'1.1.1.2',
'1.1.1.3', ]
assert find_inconsistent_subtypes(inconsistent_subtypes) == exp_incon_subtypes, \
f'Expecting subtypes {exp_incon_subtypes} to be inconsistent with each other'
subtypes_list = ['1',
'1.1',
'1.1.1',
'1.1.1.1',
'1.1.1.2',
'1.1.1.3',
'1.1.2',
'2', ]
inconsistent_subtypes = sorted_subtype_ints(pd.Series(subtypes_list))
assert set(find_inconsistent_subtypes(inconsistent_subtypes)) == set(subtypes_list), \
f'All subtypes should be inconsistent with each other in {subtypes_list}'
def test_subtype_regex():
good_values = ['1.1.1.1', '10', '192.168.3.11', '172.16.58.3.1.12.4', ]
for good_value in good_values:
assert SubtypeCounts._check_subtype(None, None, good_value) == good_value
bad_values = [
'1..',
'1..1',
'1.1..1.1',
'1....',
'100.',
'',
' ',
'a1.1.1',
'1.11.1a',
'a',
'not.a.valid.subtype',
'B.1.1.7'
]
for bad_value in bad_values:
with pytest.raises(ValueError):
assert SubtypeCounts._check_subtype(None, None, bad_value) == ''
def test_get_missing_internal_subtypes():
st_vals = ['1', '1', '1', '1']
pos_subtypes_set = {
'1',
'1.1',
'1.1.1',
'1.1.1.1'
}
exp_missing_internal_subtypes = set()
assert get_missing_internal_subtypes(st_vals, pos_subtypes_set) == exp_missing_internal_subtypes
st_vals = ['2', '22', '222', '2222', '22222']
pos_subtypes_set = {'2', '2.22.222.2222.22222'}
exp_missing_internal_subtypes = {
'2.22',
'2.22.222',
'2.22.222.2222'
}
assert get_missing_internal_subtypes(st_vals, pos_subtypes_set) == exp_missing_internal_subtypes
def test_expand_degenerate_bases():
assert len(expand_degenerate_bases('NNNNN')) == 1024
with open('tests/data/expand_degenerate_bases_DARTHVADR.txt') as f:
assert expand_degenerate_bases('DARTHVADR') == f.read().split('\n')
|
[
"pandas.Series",
"bio_hansel.subtyper.empty_results",
"bio_hansel.utils.expand_degenerate_bases",
"bio_hansel.subtyper.sorted_subtype_ints",
"pytest.raises",
"bio_hansel.subtype_stats.SubtypeCounts._check_subtype",
"bio_hansel.subtype.Subtype",
"bio_hansel.utils.find_inconsistent_subtypes",
"bio_hansel.subtyper.get_missing_internal_subtypes"
] |
[((1417, 1477), 'pandas.Series', 'pd.Series', (["['1', '1.1', '1.1.1', '1.1.1.99', '1.1', '1.1.1']"], {}), "(['1', '1.1', '1.1.1', '1.1.1.99', '1.1', '1.1.1'])\n", (1426, 1477), True, 'import pandas as pd\n'), ((1574, 1817), 'bio_hansel.subtype.Subtype', 'Subtype', ([], {'sample': '"""test"""', 'file_path': '"""tests/data/Retro1000data/10-1358.fastq"""', 'scheme': '"""enteritidis"""', 'scheme_version': '"""1.0.5"""', 'subtype': 'None', 'non_present_subtypes': 'None', 'all_subtypes': 'None', 'qc_status': 'QC.FAIL', 'qc_message': 'QC.NO_TARGETS_FOUND'}), "(sample='test', file_path='tests/data/Retro1000data/10-1358.fastq',\n scheme='enteritidis', scheme_version='1.0.5', subtype=None,\n non_present_subtypes=None, all_subtypes=None, qc_status=QC.FAIL,\n qc_message=QC.NO_TARGETS_FOUND)\n", (1581, 1817), False, 'from bio_hansel.subtype import Subtype\n'), ((1957, 1974), 'bio_hansel.subtyper.empty_results', 'empty_results', (['st'], {}), '(st)\n', (1970, 1974), False, 'from bio_hansel.subtyper import absent_downstream_subtypes, sorted_subtype_ints, empty_results, get_missing_internal_subtypes\n'), ((1489, 1516), 'bio_hansel.subtyper.sorted_subtype_ints', 'sorted_subtype_ints', (['series'], {}), '(series)\n', (1508, 1516), False, 'from bio_hansel.subtyper import absent_downstream_subtypes, sorted_subtype_ints, empty_results, get_missing_internal_subtypes\n'), ((2816, 2839), 'pandas.Series', 'pd.Series', (['subtype_list'], {}), '(subtype_list)\n', (2825, 2839), True, 'import pandas as pd\n'), ((2853, 2900), 'bio_hansel.utils.find_inconsistent_subtypes', 'find_inconsistent_subtypes', (['consistent_subtypes'], {}), '(consistent_subtypes)\n', (2879, 2900), False, 'from bio_hansel.utils import find_inconsistent_subtypes, expand_degenerate_bases\n'), ((3201, 3224), 'pandas.Series', 'pd.Series', (['subtype_list'], {}), '(subtype_list)\n', (3210, 3224), True, 'import pandas as pd\n'), ((3351, 3400), 'bio_hansel.utils.find_inconsistent_subtypes', 'find_inconsistent_subtypes', (['inconsistent_subtypes'], {}), '(inconsistent_subtypes)\n', (3377, 3400), False, 'from bio_hansel.utils import find_inconsistent_subtypes, expand_degenerate_bases\n'), ((3800, 3824), 'pandas.Series', 'pd.Series', (['subtypes_list'], {}), '(subtypes_list)\n', (3809, 3824), True, 'import pandas as pd\n'), ((4830, 4886), 'bio_hansel.subtyper.get_missing_internal_subtypes', 'get_missing_internal_subtypes', (['st_vals', 'pos_subtypes_set'], {}), '(st_vals, pos_subtypes_set)\n', (4859, 4886), False, 'from bio_hansel.subtyper import absent_downstream_subtypes, sorted_subtype_ints, empty_results, get_missing_internal_subtypes\n'), ((5137, 5193), 'bio_hansel.subtyper.get_missing_internal_subtypes', 'get_missing_internal_subtypes', (['st_vals', 'pos_subtypes_set'], {}), '(st_vals, pos_subtypes_set)\n', (5166, 5193), False, 'from bio_hansel.subtyper import absent_downstream_subtypes, sorted_subtype_ints, empty_results, get_missing_internal_subtypes\n'), ((1171, 1198), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': 'object'}), '([], dtype=object)\n', (1180, 1198), True, 'import pandas as pd\n'), ((1338, 1382), 'pandas.Series', 'pd.Series', (["['1', '1.1', '1.1.1', '1.1.1.99']"], {}), "(['1', '1.1', '1.1.1', '1.1.1.99'])\n", (1347, 1382), True, 'import pandas as pd\n'), ((3841, 3890), 'bio_hansel.utils.find_inconsistent_subtypes', 'find_inconsistent_subtypes', (['inconsistent_subtypes'], {}), '(inconsistent_subtypes)\n', (3867, 3890), False, 'from bio_hansel.utils import find_inconsistent_subtypes, expand_degenerate_bases\n'), ((4153, 4205), 'bio_hansel.subtype_stats.SubtypeCounts._check_subtype', 'SubtypeCounts._check_subtype', (['None', 'None', 'good_value'], {}), '(None, None, good_value)\n', (4181, 4205), False, 'from bio_hansel.subtype_stats import SubtypeCounts\n'), ((4500, 4525), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4513, 4525), False, 'import pytest\n'), ((5280, 5312), 'bio_hansel.utils.expand_degenerate_bases', 'expand_degenerate_bases', (['"""NNNNN"""'], {}), "('NNNNN')\n", (5303, 5312), False, 'from bio_hansel.utils import find_inconsistent_subtypes, expand_degenerate_bases\n'), ((5409, 5445), 'bio_hansel.utils.expand_degenerate_bases', 'expand_degenerate_bases', (['"""DARTHVADR"""'], {}), "('DARTHVADR')\n", (5432, 5445), False, 'from bio_hansel.utils import find_inconsistent_subtypes, expand_degenerate_bases\n'), ((531, 568), 'pandas.Series', 'pd.Series', (["['1.1', '1.2', '1.3', '1']"], {}), "(['1.1', '1.2', '1.3', '1'])\n", (540, 568), True, 'import pandas as pd\n'), ((758, 788), 'pandas.Series', 'pd.Series', (["['1.1', '1.2', '1']"], {}), "(['1.1', '1.2', '1'])\n", (767, 788), True, 'import pandas as pd\n'), ((981, 997), 'pandas.Series', 'pd.Series', (["['1']"], {}), "(['1'])\n", (990, 997), True, 'import pandas as pd\n'), ((4546, 4597), 'bio_hansel.subtype_stats.SubtypeCounts._check_subtype', 'SubtypeCounts._check_subtype', (['None', 'None', 'bad_value'], {}), '(None, None, bad_value)\n', (4574, 4597), False, 'from bio_hansel.subtype_stats import SubtypeCounts\n')]
|
import gzip
import json
import pickle
from collections import defaultdict
from pathlib import Path
from zipfile import ZipFile
from tqdm import tqdm
from capreolus import ConfigOption, Dependency, constants
from capreolus.utils.common import download_file, remove_newline
from capreolus.utils.loginit import get_logger
from capreolus.utils.trec import topic_to_trectxt
from . import Benchmark
logger = get_logger(__name__)
PACKAGE_PATH = constants["PACKAGE_PATH"]
@Benchmark.register
class CodeSearchNetCorpus(Benchmark):
"""CodeSearchNet Corpus. [1]
[1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. 2019. CodeSearchNet Challenge: Evaluating the State of Semantic Code Search. arXiv 2019.
"""
module_name = "codesearchnet_corpus"
dependencies = [Dependency(key="collection", module="collection", name="codesearchnet")]
url = "https://s3.amazonaws.com/code-search-net/CodeSearchNet/v2"
query_type = "title"
file_fn = PACKAGE_PATH / "data" / "csn_corpus"
qrel_dir = file_fn / "qrels"
topic_dir = file_fn / "topics"
fold_dir = file_fn / "folds"
qidmap_dir = file_fn / "qidmap"
docidmap_dir = file_fn / "docidmap"
config_spec = [ConfigOption("lang", "ruby", "CSN language dataset to use")]
def build(self):
lang = self.config["lang"]
self.qid_map_file = self.qidmap_dir / f"{lang}.json"
self.docid_map_file = self.docidmap_dir / f"{lang}.json"
self.qrel_file = self.qrel_dir / f"{lang}.txt"
self.topic_file = self.topic_dir / f"{lang}.txt"
self.fold_file = self.fold_dir / f"{lang}.json"
for file in [var for var in vars(self) if var.endswith("file")]:
getattr(self, file).parent.mkdir(exist_ok=True, parents=True)
self.download_if_missing()
@property
def qid_map(self):
if not hasattr(self, "_qid_map"):
if not self.qid_map_file.exists():
self.download_if_missing()
self._qid_map = json.load(open(self.qid_map_file, "r"))
return self._qid_map
@property
def docid_map(self):
if not hasattr(self, "_docid_map"):
if not self.docid_map_file.exists():
self.download_if_missing()
self._docid_map = json.load(open(self.docid_map_file, "r"))
return self._docid_map
def download_if_missing(self):
files = [self.qid_map_file, self.docid_map_file, self.qrel_file, self.topic_file, self.fold_file]
if all([f.exists() for f in files]):
return
lang = self.config["lang"]
tmp_dir = Path("/tmp")
zip_fn = tmp_dir / f"{lang}.zip"
if not zip_fn.exists():
download_file(f"{self.url}/{lang}.zip", zip_fn)
with ZipFile(zip_fn, "r") as zipobj:
zipobj.extractall(tmp_dir)
# prepare docid-url mapping from dedup.pkl
pkl_fn = tmp_dir / f"{lang}_dedupe_definitions_v2.pkl"
doc_objs = pickle.load(open(pkl_fn, "rb"))
self._docid_map = self._prep_docid_map(doc_objs)
assert self._get_n_docid() == len(doc_objs)
# prepare folds, qrels, topics, docstring2qid # TODO: shall we add negative samples?
qrels, self._qid_map = defaultdict(dict), {}
qids = {s: [] for s in ["train", "valid", "test"]}
topic_file = open(self.topic_file, "w", encoding="utf-8")
qrel_file = open(self.qrel_file, "w", encoding="utf-8")
def gen_doc_from_gzdir(dir):
""" generate parsed dict-format doc from all jsonl.gz files under given directory """
for fn in sorted(dir.glob("*.jsonl.gz")):
f = gzip.open(fn, "rb")
for doc in f:
yield json.loads(doc)
for set_name in qids:
set_path = tmp_dir / lang / "final" / "jsonl" / set_name
for doc in gen_doc_from_gzdir(set_path):
code = remove_newline(" ".join(doc["code_tokens"]))
docstring = remove_newline(" ".join(doc["docstring_tokens"]))
n_words_in_docstring = len(docstring.split())
if n_words_in_docstring >= 1024:
logger.warning(
f"chunk query to first 1000 words otherwise TooManyClause would be triggered "
f"at lucene at search stage, "
)
docstring = " ".join(docstring.split()[:1020]) # for TooManyClause
docid = self.get_docid(doc["url"], code)
qid = self._qid_map.get(docstring, str(len(self._qid_map)))
qrel_file.write(f"{qid} Q0 {docid} 1\n")
if docstring not in self._qid_map:
self._qid_map[docstring] = qid
qids[set_name].append(qid)
topic_file.write(topic_to_trectxt(qid, docstring))
topic_file.close()
qrel_file.close()
# write to qid_map.json, docid_map, fold.json
json.dump(self._qid_map, open(self.qid_map_file, "w"))
json.dump(self._docid_map, open(self.docid_map_file, "w"))
json.dump(
{"s1": {"train_qids": qids["train"], "predict": {"dev": qids["valid"], "test": qids["test"]}}},
open(self.fold_file, "w"),
)
def _prep_docid_map(self, doc_objs):
"""
construct a nested dict to map each doc into a unique docid
which follows the structure: {url: {" ".join(code_tokens): docid, ...}}
For all the lanugage datasets the url uniquely maps to a code_tokens yet it's not the case for but js and php
which requires a second-level mapping from raw_doc to docid
:param doc_objs: a list of dict having keys ["nwo", "url", "sha", "identifier", "arguments"
"function", "function_tokens", "docstring", "doctring_tokens",],
:return:
"""
# TODO: any way to avoid the twice traversal of all url and make the return dict structure consistent
lang = self.config["lang"]
url2docid = defaultdict(dict)
for i, doc in tqdm(enumerate(doc_objs), desc=f"Preparing the {lang} docid_map"):
url, code_tokens = doc["url"], remove_newline(" ".join(doc["function_tokens"]))
url2docid[url][code_tokens] = f"{lang}-FUNCTION-{i}"
# remove the code_tokens for the unique url-docid mapping
for url, docids in tqdm(url2docid.items(), desc=f"Compressing the {lang} docid_map"):
url2docid[url] = list(docids.values()) if len(docids) == 1 else docids # {code_tokens: docid} -> [docid]
return url2docid
def _get_n_docid(self):
""" calculate the number of document ids contained in the nested docid map """
lens = [len(docs) for url, docs in self._docid_map.items()]
return sum(lens)
def get_docid(self, url, code_tokens):
""" retrieve the doc id according to the doc dict """
docids = self.docid_map[url]
return docids[0] if len(docids) == 1 else docids[code_tokens]
@Benchmark.register
class CodeSearchNetChallenge(Benchmark):
"""CodeSearchNet Challenge. [1]
This benchmark can only be used for training (and challenge submissions) because no qrels are provided.
[1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. 2019. CodeSearchNet Challenge: Evaluating the State of Semantic Code Search. arXiv 2019.
"""
module_name = "codesearchnet_challenge"
dependencies = [Dependency(key="collection", module="collection", name="codesearchnet")]
config_spec = [ConfigOption("lang", "ruby", "CSN language dataset to use")]
url = "https://raw.githubusercontent.com/github/CodeSearchNet/master/resources/queries.csv"
query_type = "title"
file_fn = PACKAGE_PATH / "data" / "csn_challenge"
topic_file = file_fn / "topics.txt"
qid_map_file = file_fn / "qidmap.json"
def download_if_missing(self):
""" download query.csv and prepare queryid - query mapping file """
if self.topic_file.exists() and self.qid_map_file.exists():
return
tmp_dir = Path("/tmp")
tmp_dir.mkdir(exist_ok=True, parents=True)
self.file_fn.mkdir(exist_ok=True, parents=True)
query_fn = tmp_dir / f"query.csv"
if not query_fn.exists():
download_file(self.url, query_fn)
# prepare qid - query
qid_map = {}
topic_file = open(self.topic_file, "w", encoding="utf-8")
query_file = open(query_fn)
for qid, line in enumerate(query_file):
if qid != 0: # ignore the first line "query"
topic_file.write(topic_to_trectxt(qid, line.strip()))
qid_map[qid] = line
topic_file.close()
json.dump(qid_map, open(self.qid_map_file, "w"))
|
[
"json.loads",
"capreolus.Dependency",
"zipfile.ZipFile",
"pathlib.Path",
"gzip.open",
"capreolus.utils.loginit.get_logger",
"capreolus.ConfigOption",
"capreolus.utils.trec.topic_to_trectxt",
"collections.defaultdict",
"capreolus.utils.common.download_file"
] |
[((406, 426), 'capreolus.utils.loginit.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (416, 426), False, 'from capreolus.utils.loginit import get_logger\n'), ((773, 844), 'capreolus.Dependency', 'Dependency', ([], {'key': '"""collection"""', 'module': '"""collection"""', 'name': '"""codesearchnet"""'}), "(key='collection', module='collection', name='codesearchnet')\n", (783, 844), False, 'from capreolus import ConfigOption, Dependency, constants\n'), ((1192, 1251), 'capreolus.ConfigOption', 'ConfigOption', (['"""lang"""', '"""ruby"""', '"""CSN language dataset to use"""'], {}), "('lang', 'ruby', 'CSN language dataset to use')\n", (1204, 1251), False, 'from capreolus import ConfigOption, Dependency, constants\n'), ((2599, 2611), 'pathlib.Path', 'Path', (['"""/tmp"""'], {}), "('/tmp')\n", (2603, 2611), False, 'from pathlib import Path\n'), ((6047, 6064), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (6058, 6064), False, 'from collections import defaultdict\n'), ((7459, 7530), 'capreolus.Dependency', 'Dependency', ([], {'key': '"""collection"""', 'module': '"""collection"""', 'name': '"""codesearchnet"""'}), "(key='collection', module='collection', name='codesearchnet')\n", (7469, 7530), False, 'from capreolus import ConfigOption, Dependency, constants\n'), ((7551, 7610), 'capreolus.ConfigOption', 'ConfigOption', (['"""lang"""', '"""ruby"""', '"""CSN language dataset to use"""'], {}), "('lang', 'ruby', 'CSN language dataset to use')\n", (7563, 7610), False, 'from capreolus import ConfigOption, Dependency, constants\n'), ((8090, 8102), 'pathlib.Path', 'Path', (['"""/tmp"""'], {}), "('/tmp')\n", (8094, 8102), False, 'from pathlib import Path\n'), ((2697, 2744), 'capreolus.utils.common.download_file', 'download_file', (['f"""{self.url}/{lang}.zip"""', 'zip_fn'], {}), "(f'{self.url}/{lang}.zip', zip_fn)\n", (2710, 2744), False, 'from capreolus.utils.common import download_file, remove_newline\n'), ((2759, 2779), 'zipfile.ZipFile', 'ZipFile', (['zip_fn', '"""r"""'], {}), "(zip_fn, 'r')\n", (2766, 2779), False, 'from zipfile import ZipFile\n'), ((3231, 3248), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3242, 3248), False, 'from collections import defaultdict\n'), ((8299, 8332), 'capreolus.utils.common.download_file', 'download_file', (['self.url', 'query_fn'], {}), '(self.url, query_fn)\n', (8312, 8332), False, 'from capreolus.utils.common import download_file, remove_newline\n'), ((3653, 3672), 'gzip.open', 'gzip.open', (['fn', '"""rb"""'], {}), "(fn, 'rb')\n", (3662, 3672), False, 'import gzip\n'), ((3729, 3744), 'json.loads', 'json.loads', (['doc'], {}), '(doc)\n', (3739, 3744), False, 'import json\n'), ((4837, 4869), 'capreolus.utils.trec.topic_to_trectxt', 'topic_to_trectxt', (['qid', 'docstring'], {}), '(qid, docstring)\n', (4853, 4869), False, 'from capreolus.utils.trec import topic_to_trectxt\n')]
|
from click.testing import CliRunner
from honeybee_radiance_folder.cli import filter_json_file
import json
import os
def test_filter_file():
runner = CliRunner()
input_file = './tests/assets/project_folder/grid_info.json'
output_file = './tests/assets/temp/grid_filtered_0.json'
result = runner.invoke(
filter_json_file, [
input_file, 'group:daylight_grids', '--output-file', output_file
]
)
assert result.exit_code == 0
# check the file is created
with open(output_file) as inf:
data = json.load(inf)
assert len(data) == 1
os.unlink(output_file)
def test_filter_file_remove():
runner = CliRunner()
input_file = './tests/assets/project_folder/grid_info.json'
output_file = './tests/assets/project_folder/grid_filtered_1.json'
result = runner.invoke(
filter_json_file, [
input_file, 'group:daylight_grids', '--output-file', output_file, '--remove'
]
)
assert result.exit_code == 0
# check the file is created
with open(output_file) as inf:
data = json.load(inf)
assert len(data) == 8
os.unlink(output_file)
|
[
"json.load",
"os.unlink",
"click.testing.CliRunner"
] |
[((155, 166), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (164, 166), False, 'from click.testing import CliRunner\n'), ((601, 623), 'os.unlink', 'os.unlink', (['output_file'], {}), '(output_file)\n', (610, 623), False, 'import os\n'), ((670, 681), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (679, 681), False, 'from click.testing import CliRunner\n'), ((1138, 1160), 'os.unlink', 'os.unlink', (['output_file'], {}), '(output_file)\n', (1147, 1160), False, 'import os\n'), ((556, 570), 'json.load', 'json.load', (['inf'], {}), '(inf)\n', (565, 570), False, 'import json\n'), ((1093, 1107), 'json.load', 'json.load', (['inf'], {}), '(inf)\n', (1102, 1107), False, 'import json\n')]
|
import os, time
import numpy as np
import logging
import fire
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader
from model import *
from dataset import *
def train(dataloader, model, optimizer, criterion, device):
epoch_loss = 0.0
total_num = 0
for data, target in dataloader:
data, target = data.to(device), target.to(device).squeeze()
total_num += len(data)
optimizer.zero_grad()
# out = model(data)
out = model(data, target)
loss = criterion(out, target)
epoch_loss += loss.item()
loss.backward()
optimizer.step()
return epoch_loss / total_num
def evaluate(dataloader, model, device):
c = 0
total_num = 0
with torch.no_grad():
for data, target in dataloader:
data, target = data.to(device), target.to(device).squeeze()
total_num += len(data)
out = model(data)
predicted = torch.max(out, 1)[1]
c += (predicted == target).sum().item()
return c * 100.0 / total_num
def main(**kwargs):
data_dir = kwargs.get('data_dir', '../../dataset_docknet/data')
model_dir = kwargs.get('model_dir', 'models')
log_file = kwargs.get('log_file', 'LOG')
epoch = kwargs.get('epoch', 10)
batch_size = kwargs.get('batch_size', 32)
lr = kwargs.get('lr', 1e-2)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
formatter = logging.Formatter(
"[ %(levelname)s: %(asctime)s ] - %(message)s"
)
logging.basicConfig(level=logging.DEBUG,
format="[ %(levelname)s: %(asctime)s ] - %(message)s")
logger = logging.getLogger("Pytorch")
fh = logging.FileHandler(log_file)
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info(kwargs)
train_dataset = DockDataset(featdir=os.path.join(data_dir, 'train'), is_train=True)
cv_dataset = DockDataset(featdir=os.path.join(data_dir, 'valid'), is_train=False, shuffle=False)
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
num_workers=4,
shuffle=True,
drop_last=True,
)
cv_loader = DataLoader(
cv_dataset,
batch_size=batch_size,
num_workers=4,
shuffle=False,
drop_last=True,
)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# model = resnet18(pretrained=True, progress=True).to(device)
model = resnet18_lsoftmax(pretrained=True, progress=True, device=device).to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
logger.info(model)
best_acc = 0.0
for e in range(epoch):
model.train()
train_loss = train(train_loader, model, optimizer, criterion, device)
model.eval()
cv_acc = evaluate(cv_loader, model, device)
message = { f"[*] Epoch: [{e+1:3d}/{epoch:3d}] - "
f"Training Loss: {train_loss:.5f}, "
f"CV Acc: {cv_acc:.2f}%" }
logger.info(message)
torch.save(model.state_dict(), os.path.join(model_dir, f"checkpoint_{e+1}.pth"))
if cv_acc >= best_acc:
torch.save(model.state_dict(), os.path.join(model_dir, f"model_best.pth"))
best_acc = cv_acc
def score(**kwargs):
data_dir = kwargs.get('data_dir', '../../dataset_docknet/data')
model_dir = kwargs.get('model_dir', 'models')
batch_size = kwargs.get('batch_size', 32)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
eval_dataset = DockDataset(featdir=os.path.join(data_dir, 'test'), is_train=False, shuffle=False)
eval_loader = DataLoader(
eval_dataset,
batch_size=batch_size,
shuffle=False,
drop_last=False,
)
# model = resnet18()
model = resnet18_lsoftmax(device=device)
model.load_state_dict(torch.load(os.path.join(model_dir, "model_best.pth")))
model.to(device)
model.eval()
eval_acc = evaluate(eval_loader, model, device)
print(f"Test Accuracy is: {eval_acc:.2f}%")
if __name__ == '__main__':
fire.Fire({
'train': main,
'test': score,
})
|
[
"logging.basicConfig",
"logging.getLogger",
"os.path.exists",
"torch.nn.CrossEntropyLoss",
"os.makedirs",
"fire.Fire",
"logging.Formatter",
"torch.max",
"os.path.join",
"torch.cuda.is_available",
"logging.FileHandler",
"torch.utils.data.DataLoader",
"torch.no_grad"
] |
[((1536, 1601), 'logging.Formatter', 'logging.Formatter', (['"""[ %(levelname)s: %(asctime)s ] - %(message)s"""'], {}), "('[ %(levelname)s: %(asctime)s ] - %(message)s')\n", (1553, 1601), False, 'import logging\n'), ((1623, 1723), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""[ %(levelname)s: %(asctime)s ] - %(message)s"""'}), "(level=logging.DEBUG, format=\n '[ %(levelname)s: %(asctime)s ] - %(message)s')\n", (1642, 1723), False, 'import logging\n'), ((1758, 1786), 'logging.getLogger', 'logging.getLogger', (['"""Pytorch"""'], {}), "('Pytorch')\n", (1775, 1786), False, 'import logging\n'), ((1797, 1826), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {}), '(log_file)\n', (1816, 1826), False, 'import logging\n'), ((2128, 2226), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'num_workers': '(4)', 'shuffle': '(True)', 'drop_last': '(True)'}), '(train_dataset, batch_size=batch_size, num_workers=4, shuffle=\n True, drop_last=True)\n', (2138, 2226), False, 'from torch.utils.data import DataLoader\n'), ((2294, 2389), 'torch.utils.data.DataLoader', 'DataLoader', (['cv_dataset'], {'batch_size': 'batch_size', 'num_workers': '(4)', 'shuffle': '(False)', 'drop_last': '(True)'}), '(cv_dataset, batch_size=batch_size, num_workers=4, shuffle=False,\n drop_last=True)\n', (2304, 2389), False, 'from torch.utils.data import DataLoader\n'), ((3890, 3969), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'drop_last': '(False)'}), '(eval_dataset, batch_size=batch_size, shuffle=False, drop_last=False)\n', (3900, 3969), False, 'from torch.utils.data import DataLoader\n'), ((4353, 4394), 'fire.Fire', 'fire.Fire', (["{'train': main, 'test': score}"], {}), "({'train': main, 'test': score})\n", (4362, 4394), False, 'import fire\n'), ((805, 820), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (818, 820), False, 'import torch\n'), ((1458, 1483), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (1472, 1483), False, 'import os, time\n'), ((1494, 1516), 'os.makedirs', 'os.makedirs', (['model_dir'], {}), '(model_dir)\n', (1505, 1516), False, 'import os, time\n'), ((1956, 1987), 'os.path.join', 'os.path.join', (['data_dir', '"""train"""'], {}), "(data_dir, 'train')\n", (1968, 1987), False, 'import os, time\n'), ((2042, 2073), 'os.path.join', 'os.path.join', (['data_dir', '"""valid"""'], {}), "(data_dir, 'valid')\n", (2054, 2073), False, 'import os, time\n'), ((2480, 2505), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2503, 2505), False, 'import torch\n'), ((2691, 2712), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2710, 2712), True, 'import torch.nn as nn\n'), ((3289, 3339), 'os.path.join', 'os.path.join', (['model_dir', 'f"""checkpoint_{e + 1}.pth"""'], {}), "(model_dir, f'checkpoint_{e + 1}.pth')\n", (3301, 3339), False, 'import os, time\n'), ((3726, 3751), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3749, 3751), False, 'import torch\n'), ((3806, 3836), 'os.path.join', 'os.path.join', (['data_dir', '"""test"""'], {}), "(data_dir, 'test')\n", (3818, 3836), False, 'import os, time\n'), ((4130, 4171), 'os.path.join', 'os.path.join', (['model_dir', '"""model_best.pth"""'], {}), "(model_dir, 'model_best.pth')\n", (4142, 4171), False, 'import os, time\n'), ((1028, 1045), 'torch.max', 'torch.max', (['out', '(1)'], {}), '(out, 1)\n', (1037, 1045), False, 'import torch\n'), ((3417, 3459), 'os.path.join', 'os.path.join', (['model_dir', 'f"""model_best.pth"""'], {}), "(model_dir, f'model_best.pth')\n", (3429, 3459), False, 'import os, time\n')]
|
# Generated by Django 2.0.2 on 2018-06-13 22:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("ddcz", "0009_auto_20180610_2246"),
]
operations = [
migrations.CreateModel(
name="CreativePage",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=30)),
("slug", models.SlugField(max_length=30)),
("model_class", models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name="CreativePageConcept",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("text", models.TextField()),
(
"page",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
to="ddcz.CreativePage",
),
),
],
),
migrations.CreateModel(
name="CreativePageSection",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=30)),
("slug", models.SlugField(max_length=30)),
],
),
]
|
[
"django.db.models.OneToOneField",
"django.db.models.TextField",
"django.db.models.SlugField",
"django.db.models.AutoField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] |
[((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((507, 600), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (523, 600), False, 'from django.db import migrations, models\n'), ((761, 792), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (777, 792), False, 'from django.db import migrations, models\n'), ((820, 851), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (836, 851), False, 'from django.db import migrations, models\n'), ((886, 917), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (902, 917), False, 'from django.db import migrations, models\n'), ((1103, 1196), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1119, 1196), False, 'from django.db import migrations, models\n'), ((1357, 1375), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1373, 1375), False, 'from django.db import migrations, models\n'), ((1444, 1538), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""ddcz.CreativePage"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'ddcz.CreativePage')\n", (1464, 1538), False, 'from django.db import migrations, models\n'), ((1808, 1901), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1824, 1901), False, 'from django.db import migrations, models\n'), ((2062, 2093), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (2078, 2093), False, 'from django.db import migrations, models\n'), ((2121, 2152), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (2137, 2152), False, 'from django.db import migrations, models\n')]
|
# coding=utf-8
# Copyright 2020 The Learning-to-Prompt Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific Learning-to-Prompt governing permissions and
# limitations under the License.
# ==============================================================================
"""Input preprocesses."""
from typing import Any, Callable, Dict, Optional
import ml_collections
from augment import augment_utils
import tensorflow as tf
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
CIFAR10_MEAN = (0.4914, 0.4822, 0.4465)
CIFAR10_STD = (0.2471, 0.2435, 0.2616)
CIFAR100_MEAN = (0.5071, 0.4867, 0.4408)
CIFAR100_STD = (0.2675, 0.2565, 0.2761)
# Constants for configuring config.<name>
RANDOM_ERASING = "randerasing"
AUGMENT = "augment"
MIX = "mix"
COLORJITTER = "colorjitter"
create_mix_augment = augment_utils.create_mix_augment
def resize_small(image: tf.Tensor,
size: int,
*,
antialias: bool = False) -> tf.Tensor:
"""Resizes the smaller side to `size` keeping aspect ratio.
Args:
image: Single image as a float32 tensor.
size: an integer, that represents a new size of the smaller side of an input
image.
antialias: Whether to use an anti-aliasing filter when downsampling an
image.
Returns:
A function, that resizes an image and preserves its aspect ratio.
"""
h, w = tf.shape(image)[0], tf.shape(image)[1]
# Figure out the necessary h/w.
ratio = (tf.cast(size, tf.float32) / tf.cast(tf.minimum(h, w), tf.float32))
h = tf.cast(tf.round(tf.cast(h, tf.float32) * ratio), tf.int32)
w = tf.cast(tf.round(tf.cast(w, tf.float32) * ratio), tf.int32)
image = tf.image.resize(image, [h, w], antialias=antialias)
return image
def central_crop(image: tf.Tensor, size: int) -> tf.Tensor:
"""Makes central crop of a given size."""
h, w = size, size
top = (tf.shape(image)[0] - h) // 2
left = (tf.shape(image)[1] - w) // 2
image = tf.image.crop_to_bounding_box(image, top, left, h, w)
return image
def decode_and_random_resized_crop(image: tf.Tensor, rng,
resize_size: int) -> tf.Tensor:
"""Decodes the images and extracts a random crop."""
shape = tf.io.extract_jpeg_shape(image)
begin, size, _ = tf.image.stateless_sample_distorted_bounding_box(
shape,
tf.zeros([0, 0, 4], tf.float32),
seed=rng,
area_range=(0.05, 1.0),
min_object_covered=0, # Don't enforce a minimum area.
use_image_if_no_bounding_boxes=True)
top, left, _ = tf.unstack(begin)
h, w, _ = tf.unstack(size)
image = tf.image.decode_and_crop_jpeg(image, [top, left, h, w], channels=3)
image = tf.cast(image, tf.float32) / 255.0
image = tf.image.resize(image, (resize_size, resize_size))
return image
def train_preprocess(features: Dict[str, tf.Tensor],
crop_size: int = 224) -> Dict[str, tf.Tensor]:
"""Processes a single example for training."""
image = features["image"]
# This PRNGKey is unique to this example. We can use it with the stateless
# random ops in TF.
rng = features.pop("rng")
rng, rng_crop, rng_flip = tf.unstack(
tf.random.experimental.stateless_split(rng, 3))
image = decode_and_random_resized_crop(image, rng_crop, resize_size=crop_size)
image = tf.image.stateless_random_flip_left_right(image, rng_flip)
return {"image": image, "label": features["label"]}
def train_cifar_preprocess(features: Dict[str, tf.Tensor]):
"""Augmentation function for cifar dataset."""
image = tf.io.decode_jpeg(features["image"])
image = tf.image.resize_with_crop_or_pad(image, 32 + 4, 32 + 4)
rng = features.pop("rng")
rng, rng_crop, rng_flip = tf.unstack(
tf.random.experimental.stateless_split(rng, 3))
# Randomly crop a [HEIGHT, WIDTH] section of the image.
image = tf.image.stateless_random_crop(image, [32, 32, 3], rng_crop)
# Randomly flip the image horizontally
image = tf.image.stateless_random_flip_left_right(image, rng_flip)
image = tf.cast(image, tf.float32) / 255.0
return {"image": image, "label": features["label"]}
def _check_valid_mean_std(mean, std):
expected_shape = (1, 1, 3)
message = "%s shape invalid."
assert all([a == b for a, b in zip(expected_shape, mean.shape)
]), message % "mean"
assert all([a == b for a, b in zip(expected_shape, std.shape)
]), message % "std"
def get_augment_preprocess(
augment_params: ml_collections.ConfigDict,
*,
colorjitter_params: Optional[ml_collections.ConfigDict] = None,
randerasing_params: Optional[ml_collections.ConfigDict] = None,
mean: Optional[tf.Tensor] = None,
std: Optional[tf.Tensor] = None,
basic_process: Callable[[Dict[str, tf.Tensor]],
Dict[str, tf.Tensor]] = train_preprocess,
) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]:
"""Creates a custom augmented image preprocess."""
augmentor = None
# If augment_params.type is noop/default, we skip.
if augment_params and augment_params.get(
"type") and augment_params.type not in ("default", "noop"):
augmentor = augment_utils.create_augmenter(**augment_params.to_dict())
jitter = None
if colorjitter_params and colorjitter_params.type not in ("default", "noop"):
jitter = augment_utils.create_augmenter(**colorjitter_params.to_dict())
def train_custom_augment_preprocess(features):
rng = features.pop("rng")
rng, rng_aa, rng_re, rng_jt = tf.unstack(
tf.random.experimental.stateless_split(rng, 4))
features["rng"] = rng
outputs = basic_process(features)
image = outputs["image"]
# image after basic_process has been normalized to [0,1]
image = tf.saturate_cast(image * 255.0, tf.uint8)
if augmentor is not None:
image = augmentor(rng_aa, image)["image"]
if jitter is not None:
image = jitter(rng_jt, image)["image"]
image = tf.cast(image, tf.float32) / 255.0
if mean is not None:
_check_valid_mean_std(mean, std)
image = (image - mean) / std
if randerasing_params:
assert mean is not None, "Random erasing requires normalized images"
# Perform random erasing after mean/std normalization
image = augment_utils.create_random_erasing(
**randerasing_params.to_dict())(rng_re, image)
outputs["image"] = image
return outputs
return train_custom_augment_preprocess
def eval_preprocess(features: Dict[str, tf.Tensor],
mean: Optional[tf.Tensor] = None,
std: Optional[tf.Tensor] = None,
input_size: int = 256,
crop_size: int = 224) -> Dict[str, tf.Tensor]:
"""Process a single example for evaluation."""
image = features["image"]
assert image.dtype == tf.uint8
image = tf.cast(image, tf.float32) / 255.0
# image = resize_small(image, size=int(256 / 224 * input_size))
# image = central_crop(image, size=input_size)
image = resize_small(image, size=input_size) # e.g. 256, 448
image = central_crop(image, size=crop_size) # e.g. 224, 384
if mean is not None:
_check_valid_mean_std(mean, std)
image = (image - mean) / std
return {"image": image, "label": features["label"]}
def cifar_eval_preprocess(
features: Dict[str, tf.Tensor],
mean: Optional[tf.Tensor] = None,
std: Optional[tf.Tensor] = None) -> Dict[str, tf.Tensor]:
"""Processes a single example for evaluation for cifar."""
image = features["image"]
assert image.dtype == tf.uint8
image = tf.cast(image, tf.float32) / 255.0
if mean is not None:
_check_valid_mean_std(mean, std)
image = (image - mean) / std
return {"image": image, "label": features["label"]}
|
[
"tensorflow.unstack",
"tensorflow.image.resize_with_crop_or_pad",
"tensorflow.random.experimental.stateless_split",
"tensorflow.shape",
"tensorflow.image.crop_to_bounding_box",
"tensorflow.image.resize",
"tensorflow.image.stateless_random_flip_left_right",
"tensorflow.image.decode_and_crop_jpeg",
"tensorflow.io.extract_jpeg_shape",
"tensorflow.io.decode_jpeg",
"tensorflow.saturate_cast",
"tensorflow.image.stateless_random_crop",
"tensorflow.cast",
"tensorflow.minimum",
"tensorflow.zeros"
] |
[((2128, 2179), 'tensorflow.image.resize', 'tf.image.resize', (['image', '[h, w]'], {'antialias': 'antialias'}), '(image, [h, w], antialias=antialias)\n', (2143, 2179), True, 'import tensorflow as tf\n'), ((2408, 2461), 'tensorflow.image.crop_to_bounding_box', 'tf.image.crop_to_bounding_box', (['image', 'top', 'left', 'h', 'w'], {}), '(image, top, left, h, w)\n', (2437, 2461), True, 'import tensorflow as tf\n'), ((2669, 2700), 'tensorflow.io.extract_jpeg_shape', 'tf.io.extract_jpeg_shape', (['image'], {}), '(image)\n', (2693, 2700), True, 'import tensorflow as tf\n'), ((2989, 3006), 'tensorflow.unstack', 'tf.unstack', (['begin'], {}), '(begin)\n', (2999, 3006), True, 'import tensorflow as tf\n'), ((3019, 3035), 'tensorflow.unstack', 'tf.unstack', (['size'], {}), '(size)\n', (3029, 3035), True, 'import tensorflow as tf\n'), ((3046, 3113), 'tensorflow.image.decode_and_crop_jpeg', 'tf.image.decode_and_crop_jpeg', (['image', '[top, left, h, w]'], {'channels': '(3)'}), '(image, [top, left, h, w], channels=3)\n', (3075, 3113), True, 'import tensorflow as tf\n'), ((3169, 3219), 'tensorflow.image.resize', 'tf.image.resize', (['image', '(resize_size, resize_size)'], {}), '(image, (resize_size, resize_size))\n', (3184, 3219), True, 'import tensorflow as tf\n'), ((3747, 3805), 'tensorflow.image.stateless_random_flip_left_right', 'tf.image.stateless_random_flip_left_right', (['image', 'rng_flip'], {}), '(image, rng_flip)\n', (3788, 3805), True, 'import tensorflow as tf\n'), ((3981, 4017), 'tensorflow.io.decode_jpeg', 'tf.io.decode_jpeg', (["features['image']"], {}), "(features['image'])\n", (3998, 4017), True, 'import tensorflow as tf\n'), ((4028, 4083), 'tensorflow.image.resize_with_crop_or_pad', 'tf.image.resize_with_crop_or_pad', (['image', '(32 + 4)', '(32 + 4)'], {}), '(image, 32 + 4, 32 + 4)\n', (4060, 4083), True, 'import tensorflow as tf\n'), ((4274, 4334), 'tensorflow.image.stateless_random_crop', 'tf.image.stateless_random_crop', (['image', '[32, 32, 3]', 'rng_crop'], {}), '(image, [32, 32, 3], rng_crop)\n', (4304, 4334), True, 'import tensorflow as tf\n'), ((4386, 4444), 'tensorflow.image.stateless_random_flip_left_right', 'tf.image.stateless_random_flip_left_right', (['image', 'rng_flip'], {}), '(image, rng_flip)\n', (4427, 4444), True, 'import tensorflow as tf\n'), ((1919, 1944), 'tensorflow.cast', 'tf.cast', (['size', 'tf.float32'], {}), '(size, tf.float32)\n', (1926, 1944), True, 'import tensorflow as tf\n'), ((2789, 2820), 'tensorflow.zeros', 'tf.zeros', (['[0, 0, 4]', 'tf.float32'], {}), '([0, 0, 4], tf.float32)\n', (2797, 2820), True, 'import tensorflow as tf\n'), ((3124, 3150), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (3131, 3150), True, 'import tensorflow as tf\n'), ((3608, 3654), 'tensorflow.random.experimental.stateless_split', 'tf.random.experimental.stateless_split', (['rng', '(3)'], {}), '(rng, 3)\n', (3646, 3654), True, 'import tensorflow as tf\n'), ((4158, 4204), 'tensorflow.random.experimental.stateless_split', 'tf.random.experimental.stateless_split', (['rng', '(3)'], {}), '(rng, 3)\n', (4196, 4204), True, 'import tensorflow as tf\n'), ((4455, 4481), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (4462, 4481), True, 'import tensorflow as tf\n'), ((6151, 6192), 'tensorflow.saturate_cast', 'tf.saturate_cast', (['(image * 255.0)', 'tf.uint8'], {}), '(image * 255.0, tf.uint8)\n', (6167, 6192), True, 'import tensorflow as tf\n'), ((7240, 7266), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (7247, 7266), True, 'import tensorflow as tf\n'), ((7961, 7987), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (7968, 7987), True, 'import tensorflow as tf\n'), ((1834, 1849), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (1842, 1849), True, 'import tensorflow as tf\n'), ((1854, 1869), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (1862, 1869), True, 'import tensorflow as tf\n'), ((1955, 1971), 'tensorflow.minimum', 'tf.minimum', (['h', 'w'], {}), '(h, w)\n', (1965, 1971), True, 'import tensorflow as tf\n'), ((5937, 5983), 'tensorflow.random.experimental.stateless_split', 'tf.random.experimental.stateless_split', (['rng', '(4)'], {}), '(rng, 4)\n', (5975, 5983), True, 'import tensorflow as tf\n'), ((6355, 6381), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (6362, 6381), True, 'import tensorflow as tf\n'), ((2009, 2031), 'tensorflow.cast', 'tf.cast', (['h', 'tf.float32'], {}), '(h, tf.float32)\n', (2016, 2031), True, 'import tensorflow as tf\n'), ((2075, 2097), 'tensorflow.cast', 'tf.cast', (['w', 'tf.float32'], {}), '(w, tf.float32)\n', (2082, 2097), True, 'import tensorflow as tf\n'), ((2330, 2345), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (2338, 2345), True, 'import tensorflow as tf\n'), ((2369, 2384), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (2377, 2384), True, 'import tensorflow as tf\n')]
|
from __future__ import absolute_import, division, print_function
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT
from glue.config import viewer_tool
from glue.viewers.common.tool import CheckableTool, Tool
__all__ = ['MatplotlibTool', 'MatplotlibCheckableTool', 'HomeTool', 'SaveTool',
'PanTool', 'ZoomTool']
def _ensure_mpl_nav(viewer):
# Set up virtual Matplotlib navigation toolbar (don't show it)
if not hasattr(viewer, '_mpl_nav'):
viewer._mpl_nav = NavigationToolbar2QT(viewer.central_widget.canvas, viewer)
viewer._mpl_nav.hide()
def _cleanup_mpl_nav(viewer):
if getattr(viewer, '_mpl_nav', None) is not None:
viewer._mpl_nav.setParent(None)
viewer._mpl_nav.parent = None
class MatplotlibTool(Tool):
def __init__(self, viewer=None):
super(MatplotlibTool, self).__init__(viewer=viewer)
_ensure_mpl_nav(viewer)
def close(self):
_cleanup_mpl_nav(self.viewer)
super(MatplotlibTool, self).close()
class MatplotlibCheckableTool(CheckableTool):
def __init__(self, viewer=None):
super(MatplotlibCheckableTool, self).__init__(viewer=viewer)
_ensure_mpl_nav(viewer)
def close(self):
_cleanup_mpl_nav(self.viewer)
super(MatplotlibCheckableTool, self).close()
@viewer_tool
class HomeTool(MatplotlibTool):
tool_id = 'mpl:home'
icon = 'glue_home'
action_text = 'Home'
tool_tip = 'Reset original zoom'
shortcut = 'H'
def activate(self):
if hasattr(self.viewer, 'state') and hasattr(self.viewer.state, 'reset_limits'):
self.viewer.state.reset_limits()
else:
self.viewer._mpl_nav.home()
@viewer_tool
class SaveTool(MatplotlibTool):
tool_id = 'mpl:save'
icon = 'glue_filesave'
action_text = 'Save plot to file'
tool_tip = 'Save the figure'
def activate(self):
self.viewer._mpl_nav.save_figure()
@viewer_tool
class PanTool(MatplotlibCheckableTool):
tool_id = 'mpl:pan'
icon = 'glue_move'
action_text = 'Pan'
tool_tip = 'Pan axes with left mouse, zoom with right'
shortcut = 'M'
def activate(self):
self.viewer._mpl_nav.pan()
def deactivate(self):
if hasattr(self.viewer, '_mpl_nav'):
self.viewer._mpl_nav.pan()
@viewer_tool
class ZoomTool(MatplotlibCheckableTool):
tool_id = 'mpl:zoom'
icon = 'glue_zoom_to_rect'
action_text = 'Zoom'
tool_tip = 'Zoom to rectangle'
shortcut = 'Z'
def activate(self):
self.viewer._mpl_nav.zoom()
def deactivate(self):
if hasattr(self.viewer, '_mpl_nav'):
self.viewer._mpl_nav.zoom()
|
[
"matplotlib.backends.backend_qt5.NavigationToolbar2QT"
] |
[((505, 563), 'matplotlib.backends.backend_qt5.NavigationToolbar2QT', 'NavigationToolbar2QT', (['viewer.central_widget.canvas', 'viewer'], {}), '(viewer.central_widget.canvas, viewer)\n', (525, 563), False, 'from matplotlib.backends.backend_qt5 import NavigationToolbar2QT\n')]
|
import matplotlib.pyplot as plt
import numpy as np
from ipywidgets import interactive, interactive_output, fixed, HBox, VBox
import ipywidgets as widgets
def true_function_old(x):
x_copy = -1 * x
f = 2 * x_copy * np.sin(0.8*x_copy) + 0.5 * x_copy**2 - 5
return f
def sigmoid(x, L=10, k=2, x_0=20):
return L / (1 + np.exp(-k * (x - x_0)))
def true_function(x):
const = 17
lin = -0.25 * x
quad = 0.2*(x-20)**2
sig = sigmoid(x, L=-20, k=0.6, x_0=30)
# quad_sig = - sigmoid(xx, L=1, k=0.6, x_0=30) * (0.1 * (x-40)**2)
sig2 = sigmoid(x, L=-50, k=0.8, x_0=37)
f = const + lin + quad + sig + sig2
return f
def generate_data(n_samples=20, random_state=None):
rng = np.random.RandomState(random_state)
# Beobachtungen
x_sample = 40 * rng.rand(n_samples)
# Kennzeichnungen/Labels
f_sample = true_function(x_sample)
noise = 7 * rng.randn(n_samples)
y_sample = f_sample + noise
return x_sample[:, np.newaxis], y_sample
|
[
"numpy.exp",
"numpy.sin",
"numpy.random.RandomState"
] |
[((717, 752), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (738, 752), True, 'import numpy as np\n'), ((334, 356), 'numpy.exp', 'np.exp', (['(-k * (x - x_0))'], {}), '(-k * (x - x_0))\n', (340, 356), True, 'import numpy as np\n'), ((223, 243), 'numpy.sin', 'np.sin', (['(0.8 * x_copy)'], {}), '(0.8 * x_copy)\n', (229, 243), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import os
import sys
import jieba
import numpy as np
jieba.setLogLevel(60) # quiet
fname = sys.argv[1]
with open(fname) as f:
text = f.read()
tokenizer = jieba.Tokenizer()
tokens = list(tokenizer.cut(text))
occurences = np.array([tokenizer.FREQ[w] for w in tokens if w in tokenizer.FREQ])
difficulties = 1 / (occurences + 1)
max_occurence = np.max(list(tokenizer.FREQ.values()))
min_score = 1 / (max_occurence + 1)
max_score = 1
perc = 75
mean = np.mean(difficulties)
median = np.percentile(difficulties, perc)
def norm(x):
return (x - min_score) / (max_score - min_score)
normalized_mean = norm(mean)
normalized_median = norm(median)
print(
f"{os.path.basename(fname)}: "
f"mean: {normalized_mean:.6f}, {perc}th percentile: {normalized_median:.6f} "
f"in [0: trivial, 1: hardest]"
)
import matplotlib.pyplot as plt
clipped = difficulties[(difficulties <= 0.01) & (difficulties >= 0.0001)]
plt.hist(clipped, bins=20, density=True)
ax = plt.gca()
ax.set_title(fname)
plt.show()
|
[
"numpy.mean",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.gca",
"numpy.array",
"os.path.basename",
"numpy.percentile",
"jieba.setLogLevel",
"jieba.Tokenizer",
"matplotlib.pyplot.show"
] |
[((77, 98), 'jieba.setLogLevel', 'jieba.setLogLevel', (['(60)'], {}), '(60)\n', (94, 98), False, 'import jieba\n'), ((186, 203), 'jieba.Tokenizer', 'jieba.Tokenizer', ([], {}), '()\n', (201, 203), False, 'import jieba\n'), ((252, 320), 'numpy.array', 'np.array', (['[tokenizer.FREQ[w] for w in tokens if w in tokenizer.FREQ]'], {}), '([tokenizer.FREQ[w] for w in tokens if w in tokenizer.FREQ])\n', (260, 320), True, 'import numpy as np\n'), ((481, 502), 'numpy.mean', 'np.mean', (['difficulties'], {}), '(difficulties)\n', (488, 502), True, 'import numpy as np\n'), ((512, 545), 'numpy.percentile', 'np.percentile', (['difficulties', 'perc'], {}), '(difficulties, perc)\n', (525, 545), True, 'import numpy as np\n'), ((949, 989), 'matplotlib.pyplot.hist', 'plt.hist', (['clipped'], {'bins': '(20)', 'density': '(True)'}), '(clipped, bins=20, density=True)\n', (957, 989), True, 'import matplotlib.pyplot as plt\n'), ((995, 1004), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1002, 1004), True, 'import matplotlib.pyplot as plt\n'), ((1025, 1035), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1033, 1035), True, 'import matplotlib.pyplot as plt\n'), ((693, 716), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (709, 716), False, 'import os\n')]
|
from itertools import tee
from typing import Dict, Iterator, List, Sequence, Tuple
from brown_clustering.defaultvaluedict import DefaultValueDict
Corpus = Sequence[Sequence[str]]
class BigramCorpus:
def __init__(
self,
corpus: Corpus,
alpha: float = 1,
start_symbol: str = '<s>',
end_symbol: str = '</s>',
min_count: int = 0
):
self.vocabulary: Dict[str, int] = DefaultValueDict(0)
self.gather_vocab(corpus, min_count)
word_count = len(self.vocabulary) + 2
self.alpha = alpha
self.n = alpha * word_count * word_count
self.unigrams: Dict[str, float] = DefaultValueDict(alpha * word_count)
self.bigrams: Dict[Tuple[str, str], float] = DefaultValueDict(alpha)
self.gather_statistics(corpus, start_symbol, end_symbol)
def gather_vocab(self, corpus: Corpus, min_count: int):
for sentence in corpus:
for word in sentence:
self.vocabulary[word] += 1
self.vocabulary = dict(filter(
lambda x: x[1] >= min_count,
self.vocabulary.items()
))
def gather_statistics(
self,
corpus: Corpus,
start_symbol: str = '<s>',
end_symbol: str = '</s>',
):
for sentence in corpus:
act_sentence = [start_symbol] + [
w for w in sentence if w in self.vocabulary
] + [end_symbol]
for word in act_sentence:
self.unigrams[word] += 1
grams = two_grams(act_sentence)
for w1, w2 in grams:
self.n += 1
self.bigrams[(w1, w2)] += 1
def bigram_propa(
self,
cluster1: Sequence[str],
cluster2: Sequence[str]
) -> float:
return sum(
self.bigrams[(w1, w2)]
for w1 in cluster1
for w2 in cluster2
) / self.n
def unigram_propa(self, cluster: Sequence[str]) -> float:
return sum(
self.unigrams[w]
for w in cluster
) / self.n
def ranks(self) -> List[Tuple[str, int]]:
return sorted(self.vocabulary.items(), key=lambda x: (-x[1], x[0]))
def print_stats(self):
extended_vocab = len(self.vocabulary) + 2
alpha_bonus = self.alpha * extended_vocab * extended_vocab
print(f"Vocab count: {len(self.vocabulary)}")
print(f"Token count: {sum(self.vocabulary.values())}")
print(f"unique 2gram count: {len(self.bigrams)}")
print(f"2gram count: {self.n - alpha_bonus}")
print(f"Laplace smoothing: {self.alpha}")
def two_grams(sequence: Sequence) -> Iterator[Tuple]:
iterables = tee(sequence, 2)
next(iterables[1], None)
return zip(*iterables)
|
[
"brown_clustering.defaultvaluedict.DefaultValueDict",
"itertools.tee"
] |
[((2763, 2779), 'itertools.tee', 'tee', (['sequence', '(2)'], {}), '(sequence, 2)\n', (2766, 2779), False, 'from itertools import tee\n'), ((454, 473), 'brown_clustering.defaultvaluedict.DefaultValueDict', 'DefaultValueDict', (['(0)'], {}), '(0)\n', (470, 473), False, 'from brown_clustering.defaultvaluedict import DefaultValueDict\n'), ((685, 721), 'brown_clustering.defaultvaluedict.DefaultValueDict', 'DefaultValueDict', (['(alpha * word_count)'], {}), '(alpha * word_count)\n', (701, 721), False, 'from brown_clustering.defaultvaluedict import DefaultValueDict\n'), ((775, 798), 'brown_clustering.defaultvaluedict.DefaultValueDict', 'DefaultValueDict', (['alpha'], {}), '(alpha)\n', (791, 798), False, 'from brown_clustering.defaultvaluedict import DefaultValueDict\n')]
|
import sys
# for development
sys.path.append('../../src')
from screencastscript import ScreencastScript # noqa: E402
screencast = ScreencastScript()
screencast.sleep(1)
screencast.i3wm_focus_left()
screencast.sleep(1)
screencast.i3wm_zoom_in()
screencast.sleep(1)
screencast.i3wm_zoom_out()
screencast.sleep(1)
screencast.i3wm_focus_right()
screencast.sleep(1)
screencast.i3wm_focus_up()
screencast.sleep(1)
screencast.i3wm_focus_down()
screencast.sleep(1)
screencast.i3wm_toggle_fullscreen()
screencast.sleep(1)
screencast.i3wm_ws_2()
screencast.sleep(1)
screencast.i3wm_ws_1()
screencast.sleep(1)
|
[
"sys.path.append",
"screencastscript.ScreencastScript"
] |
[((29, 57), 'sys.path.append', 'sys.path.append', (['"""../../src"""'], {}), "('../../src')\n", (44, 57), False, 'import sys\n'), ((133, 151), 'screencastscript.ScreencastScript', 'ScreencastScript', ([], {}), '()\n', (149, 151), False, 'from screencastscript import ScreencastScript\n')]
|
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy, reverse
from django.shortcuts import redirect
from .models import StockEntry, StockEntryLine
from .forms import StockEntryForm, StockEntryLineForm, StockEntryLineIF
from main.views import BaseView
class StockEntryList(BaseView, ListView):
model = StockEntry
template_name = 'stock/list.html'
paginate_by = 8
permission_required = 'stockentry.view_stockentry'
class StockEntryDetail(BaseView, DetailView):
model = StockEntry
form_class = StockEntryForm
template_name = 'stock/detail.html'
fields = "__all__"
pk_url_kwarg = 'pk'
permission_required = 'stockentry.view_stockentry'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
lines = StockEntryLine.objects.all().filter(parent=self.kwargs['pk'])
new_line = StockEntryLineForm(initial={'parent':self.object})
context['new_line'] = new_line
context['lines'] = lines
return context
class StockEntryCreate(BaseView, CreateView):
model = StockEntry
form_class = StockEntryForm
template_name = 'stock/create.html'
permission_required = 'stockentry.add_stockentry'
def get_success_url(self):
return reverse('stock:detail', kwargs={'pk':self.object.id})
class StockEntryUpdate(BaseView, UpdateView):
model = StockEntry
form_class = StockEntryForm
formset_class = StockEntryLineIF
template_name = 'stock/detail.html'
pk_url_kwarg = 'pk'
success_url = reverse_lazy('stock:detail')
permission_required = 'stockentry.change_stockentry'
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# lines = StockEntryLine.objects.all().filter(parent=self.kwargs['pk'])
# new_line = StockEntryLineForm(initial={'parent':self.object})
# context['new_line'] = new_line
# context['lines'] = lines
# return context
# def get_success_url(self):
# pk = self.kwargs['pk']
# return reverse('stock:detail', kwargs={'pk':pk})
def post(self, request, *args, **kwargs):
obj = self.get_object()
if kwargs.get('process') == 'submit':
obj.submit_stock_entry(obj.id)
if kwargs.get('process') == 'cancel':
obj.cancel_stock_entry(obj.id)
return redirect('stock:detail', pk=obj.id)
class StockEntryLineCreate(BaseView, CreateView):
model = StockEntryLine
form_class = StockEntryLineForm
template_name = 'stock/add_line.html'
pk_url_kwarg = 'pk'
permission_required = 'stockentryline.add_stockentryline'
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# context['parent'] = self.kwargs['pk']
# return context
def get_success_url(self):
# pk = self.kwargs['pk']
# parent = StockEntry.objects.get(pk=self.kwargs['pk'])
parent_id = self.request.POST['parent']
return reverse('stock:detail', kwargs={'pk':parent_id})
class StockEntryLineEdit(BaseView, UpdateView):
model = StockEntryLine
form_class = StockEntryLineForm
template_name = 'stock/edit_line.html'
pk_url_kwarg = 'pk'
permission_required = 'stockentryline.change_stockentryline'
def get_success_url(self):
line = StockEntryLine.objects.get(pk=self.kwargs['pk'])
return reverse('stock:detail', kwargs={'pk':line.parent.id})
class StockEntryLineDelete(BaseView, DeleteView):
model = StockEntryLine
template_name = 'stock/delete_line.html'
pk_url_kwarg = 'pk'
permission_required = 'stockentryline.delete_stockentryline'
def get_success_url(self):
return reverse('stock:detail', kwargs={'pk':self.object.parent.id})
|
[
"django.urls.reverse",
"django.shortcuts.redirect",
"django.urls.reverse_lazy"
] |
[((1654, 1682), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""stock:detail"""'], {}), "('stock:detail')\n", (1666, 1682), False, 'from django.urls import reverse_lazy, reverse\n'), ((1378, 1432), 'django.urls.reverse', 'reverse', (['"""stock:detail"""'], {'kwargs': "{'pk': self.object.id}"}), "('stock:detail', kwargs={'pk': self.object.id})\n", (1385, 1432), False, 'from django.urls import reverse_lazy, reverse\n'), ((2501, 2536), 'django.shortcuts.redirect', 'redirect', (['"""stock:detail"""'], {'pk': 'obj.id'}), "('stock:detail', pk=obj.id)\n", (2509, 2536), False, 'from django.shortcuts import redirect\n'), ((3144, 3193), 'django.urls.reverse', 'reverse', (['"""stock:detail"""'], {'kwargs': "{'pk': parent_id}"}), "('stock:detail', kwargs={'pk': parent_id})\n", (3151, 3193), False, 'from django.urls import reverse_lazy, reverse\n'), ((3549, 3603), 'django.urls.reverse', 'reverse', (['"""stock:detail"""'], {'kwargs': "{'pk': line.parent.id}"}), "('stock:detail', kwargs={'pk': line.parent.id})\n", (3556, 3603), False, 'from django.urls import reverse_lazy, reverse\n'), ((3863, 3924), 'django.urls.reverse', 'reverse', (['"""stock:detail"""'], {'kwargs': "{'pk': self.object.parent.id}"}), "('stock:detail', kwargs={'pk': self.object.parent.id})\n", (3870, 3924), False, 'from django.urls import reverse_lazy, reverse\n')]
|
import setuptools
setuptools.setup(
name="synmetric",
version="0.2.dev1",
license='MIT',
author="<NAME>",
author_email="<EMAIL>",
description="Metric to evaluate data quality for synthetic data.",
url="https://github.com/harsh020/synthetic_metric",
download_url = 'https://github.com/harsh020/synthetic_metric/archive/v_02dev1.tar.gz',
project_urls={
"Bug Tracker": "https://github.com/harsh020/synthetic_metric/issues",
},
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=setuptools.find_packages(),
python_requires=">=3.6",
install_requires = [
'numpy',
'pandas',
'scikit-learn',
'scipy'
]
)
|
[
"setuptools.find_packages"
] |
[((698, 724), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (722, 724), False, 'import setuptools\n')]
|
from django.urls import path, include
from . import views
from rest_framework import routers
router = routers.SimpleRouter()
router.register(r'players', views.PlayerView, basename='players')
router.register(r'teams', views.TeamView, basename='teams')
urlpatterns = [
path('', views.APIWelcomeView),
path('', include((router.urls))),
]
|
[
"rest_framework.routers.SimpleRouter",
"django.urls.path",
"django.urls.include"
] |
[((103, 125), 'rest_framework.routers.SimpleRouter', 'routers.SimpleRouter', ([], {}), '()\n', (123, 125), False, 'from rest_framework import routers\n'), ((275, 305), 'django.urls.path', 'path', (['""""""', 'views.APIWelcomeView'], {}), "('', views.APIWelcomeView)\n", (279, 305), False, 'from django.urls import path, include\n'), ((320, 340), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (327, 340), False, 'from django.urls import path, include\n')]
|
import requests
from sanic import Sanic
from sanic.response import json
from sanic_limiter import Limiter, get_remote_address
from bs4 import BeautifulSoup
async def ratelimit_handler(request, exception):
return json({"error": f"Ratelimit exceeded {exception}."}, status=429)
app = Sanic()
app.error_handler.add(Exception, ratelimit_handler)
limiter = Limiter(app, global_limits=["1 per 3 seconds", "50 per hour"], key_func=get_remote_address)
@app.route("/")
async def main(request):
if not (bot := request.args.get("bot")):
return json({"error": "Bot query is required. Example: ?bot=atlas"})
soup = BeautifulSoup((response := requests.get(f"https://top.gg/bot/{bot}")).content, "html.parser")
if (status := response.status_code) not in [200, 204]:
return json({"status": status, "error": f"Failed to get info on \"{bot}\"."})
try:
votes = int(soup.find(id="points").string.strip())
except:
return json({"status": status, "error": "Was unable to parse bot votes."})
return json({"status": status, "name": soup.find("span", {"class": "bot-name"}).string.strip() if soup.find("span", {"class": "bot-name"}) else bot, "votes": votes})
if __name__ == "__main__":
app.run(host="0.0.0.0", port=9500)
|
[
"sanic_limiter.Limiter",
"sanic.Sanic",
"sanic.response.json",
"requests.get"
] |
[((290, 297), 'sanic.Sanic', 'Sanic', ([], {}), '()\n', (295, 297), False, 'from sanic import Sanic\n'), ((361, 457), 'sanic_limiter.Limiter', 'Limiter', (['app'], {'global_limits': "['1 per 3 seconds', '50 per hour']", 'key_func': 'get_remote_address'}), "(app, global_limits=['1 per 3 seconds', '50 per hour'], key_func=\n get_remote_address)\n", (368, 457), False, 'from sanic_limiter import Limiter, get_remote_address\n'), ((219, 282), 'sanic.response.json', 'json', (["{'error': f'Ratelimit exceeded {exception}.'}"], {'status': '(429)'}), "({'error': f'Ratelimit exceeded {exception}.'}, status=429)\n", (223, 282), False, 'from sanic.response import json\n'), ((555, 616), 'sanic.response.json', 'json', (["{'error': 'Bot query is required. Example: ?bot=atlas'}"], {}), "({'error': 'Bot query is required. Example: ?bot=atlas'})\n", (559, 616), False, 'from sanic.response import json\n'), ((798, 866), 'sanic.response.json', 'json', (['{\'status\': status, \'error\': f\'Failed to get info on "{bot}".\'}'], {}), '({\'status\': status, \'error\': f\'Failed to get info on "{bot}".\'})\n', (802, 866), False, 'from sanic.response import json\n'), ((965, 1032), 'sanic.response.json', 'json', (["{'status': status, 'error': 'Was unable to parse bot votes.'}"], {}), "({'status': status, 'error': 'Was unable to parse bot votes.'})\n", (969, 1032), False, 'from sanic.response import json\n'), ((656, 697), 'requests.get', 'requests.get', (['f"""https://top.gg/bot/{bot}"""'], {}), "(f'https://top.gg/bot/{bot}')\n", (668, 697), False, 'import requests\n')]
|
#! python3
# coding: utf-8
from vpc.nos import NetworkElement,NetworkElementEvent,event_t,EventChain
class OVSEvent(NetworkElementEvent):
def __init__(self,ne_id,type):
super().__init__(ne_id,type)
class OVS(NetworkElement):
def __init__(self,channel,datapath):
super().__init__()
self.chn = channel
self.ofp = self.chn.ofp
self._datapath = datapath
self.ne_online()
@property
def datapath(self):
return self._datapath
def ne_online(self):
e = OVSEvent(self.id,event_t.NE_ONLINE)
EventChain().feed(e)
if __name__ == "__main__":
pass
|
[
"vpc.nos.EventChain"
] |
[((577, 589), 'vpc.nos.EventChain', 'EventChain', ([], {}), '()\n', (587, 589), False, 'from vpc.nos import NetworkElement, NetworkElementEvent, event_t, EventChain\n')]
|
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# Func1: change density map into count map
# density map: batch size * 1 * w * h
def get_local_count(density_map,psize,pstride):
IF_gpu = torch.cuda.is_available() # if gpu, return gpu
IF_ret_gpu = (density_map.device.type == 'cuda')
psize,pstride = int(psize),int(pstride)
density_map = density_map.cpu().type(torch.float32)
conv_kernel = torch.ones(1,1,psize,psize,dtype = torch.float32)
if IF_gpu:
density_map,conv_kernel = density_map.cuda(),conv_kernel.cuda()
count_map = F.conv2d(density_map,conv_kernel,stride=pstride)
if not IF_ret_gpu:
count_map = count_map.cpu()
return count_map
# Func2: convert count to class (0->c-1)
def Count2Class(count_map,label_indice):
if isinstance(label_indice,np.ndarray):
label_indice = torch.from_numpy(label_indice)
IF_gpu = torch.cuda.is_available()
IF_ret_gpu = (count_map.device.type == 'cuda')
label_indice = label_indice.cpu().type(torch.float32)
cls_num = len(label_indice)+1
cls_map = torch.zeros(count_map.size()).type(torch.LongTensor)
if IF_gpu:
count_map,label_indice,cls_map = count_map.cuda(),label_indice.cuda(),cls_map.cuda()
for i in range(cls_num-1):
if IF_gpu:
cls_map = cls_map + (count_map >= label_indice[i]).cpu().type(torch.LongTensor).cuda()
else:
cls_map = cls_map + (count_map >= label_indice[i]).cpu().type(torch.LongTensor)
if not IF_ret_gpu:
cls_map = cls_map.cpu()
return cls_map
# Func3: convert class (0->c-1) to count number
def Class2Count(pre_cls,label_indice):
'''
# --Input:
# 1.pre_cls is class label range in [0,1,2,...,C-1]
# 2.label_indice not include 0 but the other points
# --Output:
# 1.count value, the same size as pre_cls
'''
if isinstance(label_indice,np.ndarray):
label_indice = torch.from_numpy(label_indice)
label_indice = label_indice.squeeze()
IF_gpu = torch.cuda.is_available()
IF_ret_gpu = (pre_cls.device.type == 'cuda')
# tranform interval to count value map
label2count = [0.0]
for (i,item) in enumerate(label_indice):
if i<label_indice.size()[0]-1:
tmp_count = (label_indice[i]+label_indice[i+1])/2
else:
tmp_count = label_indice[i]
label2count.append(tmp_count)
label2count = torch.tensor(label2count)
label2count = label2count.type(torch.FloatTensor)
#outputs = outputs.max(dim=1)[1].cpu().data
ORI_SIZE = pre_cls.size()
pre_cls = pre_cls.reshape(-1).cpu()
pre_counts = torch.index_select(label2count,0,pre_cls.cpu().type(torch.LongTensor))
pre_counts = pre_counts.reshape(ORI_SIZE)
if IF_ret_gpu:
pre_counts = pre_counts.cuda()
return pre_counts
if __name__ == '__main__':
pre_cls = torch.Tensor([[0,1,2],[3,4,4]])
label_indice =torch.Tensor([0.5,1,1.5,2])
pre_counts = Class2Count(pre_cls,label_indice)
print(pre_cls)
print(label_indice)
print(pre_counts)
pre_cls = Count2Class(pre_counts,label_indice)
print(pre_cls)
|
[
"torch.nn.functional.conv2d",
"torch.Tensor",
"torch.from_numpy",
"torch.tensor",
"torch.cuda.is_available",
"torch.ones"
] |
[((264, 289), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (287, 289), False, 'import torch\n'), ((486, 537), 'torch.ones', 'torch.ones', (['(1)', '(1)', 'psize', 'psize'], {'dtype': 'torch.float32'}), '(1, 1, psize, psize, dtype=torch.float32)\n', (496, 537), False, 'import torch\n'), ((648, 698), 'torch.nn.functional.conv2d', 'F.conv2d', (['density_map', 'conv_kernel'], {'stride': 'pstride'}), '(density_map, conv_kernel, stride=pstride)\n', (656, 698), True, 'import torch.nn.functional as F\n'), ((989, 1014), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1012, 1014), False, 'import torch\n'), ((2159, 2184), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2182, 2184), False, 'import torch\n'), ((2575, 2600), 'torch.tensor', 'torch.tensor', (['label2count'], {}), '(label2count)\n', (2587, 2600), False, 'import torch\n'), ((3049, 3085), 'torch.Tensor', 'torch.Tensor', (['[[0, 1, 2], [3, 4, 4]]'], {}), '([[0, 1, 2], [3, 4, 4]])\n', (3061, 3085), False, 'import torch\n'), ((3100, 3130), 'torch.Tensor', 'torch.Tensor', (['[0.5, 1, 1.5, 2]'], {}), '([0.5, 1, 1.5, 2])\n', (3112, 3130), False, 'import torch\n'), ((943, 973), 'torch.from_numpy', 'torch.from_numpy', (['label_indice'], {}), '(label_indice)\n', (959, 973), False, 'import torch\n'), ((2071, 2101), 'torch.from_numpy', 'torch.from_numpy', (['label_indice'], {}), '(label_indice)\n', (2087, 2101), False, 'import torch\n')]
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import uuid
import testtools
import shade
from shade import _utils
from shade import exc
from shade.tests import fakes
from shade.tests.unit import base
RANGE_DATA = [
dict(id=1, key1=1, key2=5),
dict(id=2, key1=1, key2=20),
dict(id=3, key1=2, key2=10),
dict(id=4, key1=2, key2=30),
dict(id=5, key1=3, key2=40),
dict(id=6, key1=3, key2=40),
]
class TestShade(base.RequestsMockTestCase):
def setUp(self):
# This set of tests are not testing neutron, they're testing
# rebuilding servers, but we do several network calls in service
# of a NORMAL rebuild to find the default_network. Putting
# in all of the neutron mocks for that will make the tests harder
# to read. SO - we're going mock neutron into the off position
# and then turn it back on in the few tests that specifically do.
# Maybe we should reorg these into two classes - one with neutron
# mocked out - and one with it not mocked out
super(TestShade, self).setUp()
self.has_neutron = False
def fake_has_service(*args, **kwargs):
return self.has_neutron
self.cloud.has_service = fake_has_service
def test_openstack_cloud(self):
self.assertIsInstance(self.cloud, shade.OpenStackCloud)
@mock.patch.object(shade.OpenStackCloud, 'search_images')
def test_get_images(self, mock_search):
image1 = dict(id='123', name='mickey')
mock_search.return_value = [image1]
r = self.cloud.get_image('mickey')
self.assertIsNotNone(r)
self.assertDictEqual(image1, r)
@mock.patch.object(shade.OpenStackCloud, 'search_images')
def test_get_image_not_found(self, mock_search):
mock_search.return_value = []
r = self.cloud.get_image('doesNotExist')
self.assertIsNone(r)
def test_get_server(self):
server1 = fakes.make_fake_server('123', 'mickey')
server2 = fakes.make_fake_server('345', 'mouse')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
json={'servers': [server1, server2]}),
])
r = self.cloud.get_server('mickey')
self.assertIsNotNone(r)
self.assertEqual(server1['name'], r['name'])
self.assert_calls()
def test_get_server_not_found(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
json={'servers': []}),
])
r = self.cloud.get_server('doesNotExist')
self.assertIsNone(r)
self.assert_calls()
def test_list_servers_exception(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
status_code=400)
])
self.assertRaises(exc.OpenStackCloudException,
self.cloud.list_servers)
self.assert_calls()
def test__neutron_exceptions_resource_not_found(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks.json']),
status_code=404)
])
self.assertRaises(exc.OpenStackCloudResourceNotFound,
self.cloud.list_networks)
self.assert_calls()
def test__neutron_exceptions_url_not_found(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks.json']),
status_code=404)
])
self.assertRaises(exc.OpenStackCloudURINotFound,
self.cloud.list_networks)
self.assert_calls()
def test_list_servers(self):
server_id = str(uuid.uuid4())
server_name = self.getUniqueString('name')
fake_server = fakes.make_fake_server(server_id, server_name)
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
json={'servers': [fake_server]}),
])
r = self.cloud.list_servers()
self.assertEqual(1, len(r))
self.assertEqual(server_name, r[0]['name'])
self.assert_calls()
def test_list_servers_all_projects(self):
'''This test verifies that when list_servers is called with
`all_projects=True` that it passes `all_tenants=True` to nova.'''
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail'],
qs_elements=['all_tenants=True']),
complete_qs=True,
json={'servers': []}),
])
self.cloud.list_servers(all_projects=True)
self.assert_calls()
def test_iterate_timeout_bad_wait(self):
with testtools.ExpectedException(
exc.OpenStackCloudException,
"Wait value must be an int or float value."):
for count in _utils._iterate_timeout(
1, "test_iterate_timeout_bad_wait", wait="timeishard"):
pass
@mock.patch('time.sleep')
def test_iterate_timeout_str_wait(self, mock_sleep):
iter = _utils._iterate_timeout(
10, "test_iterate_timeout_str_wait", wait="1.6")
next(iter)
next(iter)
mock_sleep.assert_called_with(1.6)
@mock.patch('time.sleep')
def test_iterate_timeout_int_wait(self, mock_sleep):
iter = _utils._iterate_timeout(
10, "test_iterate_timeout_int_wait", wait=1)
next(iter)
next(iter)
mock_sleep.assert_called_with(1.0)
@mock.patch('time.sleep')
def test_iterate_timeout_timeout(self, mock_sleep):
message = "timeout test"
with testtools.ExpectedException(
exc.OpenStackCloudTimeout,
message):
for count in _utils._iterate_timeout(0.1, message, wait=1):
pass
mock_sleep.assert_called_with(1.0)
def test__nova_extensions(self):
body = [
{
"updated": "2014-12-03T00:00:00Z",
"name": "Multinic",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "NMN",
"description": "Multiple network support."
},
{
"updated": "2014-12-03T00:00:00Z",
"name": "DiskConfig",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "OS-DCF",
"description": "Disk Management Extension."
},
]
self.register_uris([
dict(method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json=dict(extensions=body))
])
extensions = self.cloud._nova_extensions()
self.assertEqual(set(['NMN', 'OS-DCF']), extensions)
self.assert_calls()
def test__nova_extensions_fails(self):
self.register_uris([
dict(method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
status_code=404),
])
with testtools.ExpectedException(
exc.OpenStackCloudURINotFound,
"Error fetching extension list for nova"
):
self.cloud._nova_extensions()
self.assert_calls()
def test__has_nova_extension(self):
body = [
{
"updated": "2014-12-03T00:00:00Z",
"name": "Multinic",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "NMN",
"description": "Multiple network support."
},
{
"updated": "2014-12-03T00:00:00Z",
"name": "DiskConfig",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "OS-DCF",
"description": "Disk Management Extension."
},
]
self.register_uris([
dict(method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json=dict(extensions=body))
])
self.assertTrue(self.cloud._has_nova_extension('NMN'))
self.assert_calls()
def test__has_nova_extension_missing(self):
body = [
{
"updated": "2014-12-03T00:00:00Z",
"name": "Multinic",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "NMN",
"description": "Multiple network support."
},
{
"updated": "2014-12-03T00:00:00Z",
"name": "DiskConfig",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "OS-DCF",
"description": "Disk Management Extension."
},
]
self.register_uris([
dict(method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json=dict(extensions=body))
])
self.assertFalse(self.cloud._has_nova_extension('invalid'))
self.assert_calls()
def test_range_search(self):
filters = {"key1": "min", "key2": "20"}
retval = self.cloud.range_search(RANGE_DATA, filters)
self.assertIsInstance(retval, list)
self.assertEqual(1, len(retval))
self.assertEqual([RANGE_DATA[1]], retval)
def test_range_search_2(self):
filters = {"key1": "<=2", "key2": ">10"}
retval = self.cloud.range_search(RANGE_DATA, filters)
self.assertIsInstance(retval, list)
self.assertEqual(2, len(retval))
self.assertEqual([RANGE_DATA[1], RANGE_DATA[3]], retval)
def test_range_search_3(self):
filters = {"key1": "2", "key2": "min"}
retval = self.cloud.range_search(RANGE_DATA, filters)
self.assertIsInstance(retval, list)
self.assertEqual(0, len(retval))
def test_range_search_4(self):
filters = {"key1": "max", "key2": "min"}
retval = self.cloud.range_search(RANGE_DATA, filters)
self.assertIsInstance(retval, list)
self.assertEqual(0, len(retval))
def test_range_search_5(self):
filters = {"key1": "min", "key2": "min"}
retval = self.cloud.range_search(RANGE_DATA, filters)
self.assertIsInstance(retval, list)
self.assertEqual(1, len(retval))
self.assertEqual([RANGE_DATA[0]], retval)
|
[
"shade._utils._iterate_timeout",
"mock.patch",
"testtools.ExpectedException",
"shade.tests.fakes.make_fake_server",
"uuid.uuid4",
"mock.patch.object"
] |
[((1867, 1923), 'mock.patch.object', 'mock.patch.object', (['shade.OpenStackCloud', '"""search_images"""'], {}), "(shade.OpenStackCloud, 'search_images')\n", (1884, 1923), False, 'import mock\n'), ((2180, 2236), 'mock.patch.object', 'mock.patch.object', (['shade.OpenStackCloud', '"""search_images"""'], {}), "(shade.OpenStackCloud, 'search_images')\n", (2197, 2236), False, 'import mock\n'), ((6080, 6104), 'mock.patch', 'mock.patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (6090, 6104), False, 'import mock\n'), ((6350, 6374), 'mock.patch', 'mock.patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (6360, 6374), False, 'import mock\n'), ((6616, 6640), 'mock.patch', 'mock.patch', (['"""time.sleep"""'], {}), "('time.sleep')\n", (6626, 6640), False, 'import mock\n'), ((2456, 2495), 'shade.tests.fakes.make_fake_server', 'fakes.make_fake_server', (['"""123"""', '"""mickey"""'], {}), "('123', 'mickey')\n", (2478, 2495), False, 'from shade.tests import fakes\n'), ((2514, 2552), 'shade.tests.fakes.make_fake_server', 'fakes.make_fake_server', (['"""345"""', '"""mouse"""'], {}), "('345', 'mouse')\n", (2536, 2552), False, 'from shade.tests import fakes\n'), ((4709, 4755), 'shade.tests.fakes.make_fake_server', 'fakes.make_fake_server', (['server_id', 'server_name'], {}), '(server_id, server_name)\n', (4731, 4755), False, 'from shade.tests import fakes\n'), ((6177, 6249), 'shade._utils._iterate_timeout', '_utils._iterate_timeout', (['(10)', '"""test_iterate_timeout_str_wait"""'], {'wait': '"""1.6"""'}), "(10, 'test_iterate_timeout_str_wait', wait='1.6')\n", (6200, 6249), False, 'from shade import _utils\n'), ((6447, 6515), 'shade._utils._iterate_timeout', '_utils._iterate_timeout', (['(10)', '"""test_iterate_timeout_int_wait"""'], {'wait': '(1)'}), "(10, 'test_iterate_timeout_int_wait', wait=1)\n", (6470, 6515), False, 'from shade import _utils\n'), ((4622, 4634), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4632, 4634), False, 'import uuid\n'), ((5791, 5896), 'testtools.ExpectedException', 'testtools.ExpectedException', (['exc.OpenStackCloudException', '"""Wait value must be an int or float value."""'], {}), "(exc.OpenStackCloudException,\n 'Wait value must be an int or float value.')\n", (5818, 5896), False, 'import testtools\n'), ((5952, 6030), 'shade._utils._iterate_timeout', '_utils._iterate_timeout', (['(1)', '"""test_iterate_timeout_bad_wait"""'], {'wait': '"""timeishard"""'}), "(1, 'test_iterate_timeout_bad_wait', wait='timeishard')\n", (5975, 6030), False, 'from shade import _utils\n'), ((6743, 6806), 'testtools.ExpectedException', 'testtools.ExpectedException', (['exc.OpenStackCloudTimeout', 'message'], {}), '(exc.OpenStackCloudTimeout, message)\n', (6770, 6806), False, 'import testtools\n'), ((6866, 6911), 'shade._utils._iterate_timeout', '_utils._iterate_timeout', (['(0.1)', 'message'], {'wait': '(1)'}), '(0.1, message, wait=1)\n', (6889, 6911), False, 'from shade import _utils\n'), ((8304, 8408), 'testtools.ExpectedException', 'testtools.ExpectedException', (['exc.OpenStackCloudURINotFound', '"""Error fetching extension list for nova"""'], {}), "(exc.OpenStackCloudURINotFound,\n 'Error fetching extension list for nova')\n", (8331, 8408), False, 'import testtools\n')]
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from warnings import catch_warnings, simplefilter
from pandas import Panel
from pandas.util.testing import assert_panel_equal
from .test_generic import Generic
class TestPanel(Generic):
_typ = Panel
_comparator = lambda self, x, y: assert_panel_equal(x, y, by_blocks=True)
# run all the tests, but wrap each in a warning catcher
for t in ['test_rename', 'test_get_numeric_data',
'test_get_default', 'test_nonzero',
'test_downcast', 'test_constructor_compound_dtypes',
'test_head_tail',
'test_size_compat', 'test_split_compat',
'test_unexpected_keyword',
'test_stat_unexpected_keyword', 'test_api_compat',
'test_stat_non_defaults_args',
'test_truncate_out_of_bounds',
'test_metadata_propagation', 'test_copy_and_deepcopy',
'test_pct_change', 'test_sample']:
def f():
def tester(self):
f = getattr(super(TestPanel, self), t)
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
f()
return tester
setattr(TestPanel, t, f())
|
[
"warnings.simplefilter",
"pandas.util.testing.assert_panel_equal",
"warnings.catch_warnings"
] |
[((303, 343), 'pandas.util.testing.assert_panel_equal', 'assert_panel_equal', (['x', 'y'], {'by_blocks': '(True)'}), '(x, y, by_blocks=True)\n', (321, 343), False, 'from pandas.util.testing import assert_panel_equal\n'), ((1038, 1065), 'warnings.catch_warnings', 'catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (1052, 1065), False, 'from warnings import catch_warnings, simplefilter\n'), ((1083, 1120), 'warnings.simplefilter', 'simplefilter', (['"""ignore"""', 'FutureWarning'], {}), "('ignore', FutureWarning)\n", (1095, 1120), False, 'from warnings import catch_warnings, simplefilter\n')]
|
# coding: utf-8
"""
paginate.py
```````````
: 分页api
"""
from flask import url_for
def pagination(lit, page, perpage,endpoint):
"""
返回当前分页的列表对象,
next、last链接
{current: next_lit}
"""
_yu = len(lit) % perpage
_chu = len(lit) // perpage
if _yu == 0:
last = _chu
else:
last = _chu + 1
current = lit[perpage*(page-1): perpage*page]
next_page = ""
if page < last:
next_page = url_for(endpoint, page=page+1)
elif page == last:
next_page = ""
last_page = url_for(endpoint, page=last)
return [current, (next_page, last_page)]
|
[
"flask.url_for"
] |
[((547, 575), 'flask.url_for', 'url_for', (['endpoint'], {'page': 'last'}), '(endpoint, page=last)\n', (554, 575), False, 'from flask import url_for\n'), ((454, 486), 'flask.url_for', 'url_for', (['endpoint'], {'page': '(page + 1)'}), '(endpoint, page=page + 1)\n', (461, 486), False, 'from flask import url_for\n')]
|
import a as b
import b.c as e
b.foo(1)
e.baz(1)
|
[
"a.foo",
"b.c.baz"
] |
[((31, 39), 'a.foo', 'b.foo', (['(1)'], {}), '(1)\n', (36, 39), True, 'import a as b\n'), ((40, 48), 'b.c.baz', 'e.baz', (['(1)'], {}), '(1)\n', (45, 48), True, 'import b.c as e\n')]
|
'''
:class:`eulxml.xmlmap.XmlObject` classes for working with ABBYY
FineReadux OCR XML.
Currently supports **FineReader6-schema-v1** and
**FineReader8-schema-v2**.
----
'''
from eulxml import xmlmap
class Base(xmlmap.XmlObject):
'''Base :class:`eulxml.xmlmap.XmlObject` for ABBYY OCR XML with
common namespace declarations.
'''
ROOT_NAMESPACES = {
'fr6v1': 'http://www.abbyy.com/FineReader_xml/FineReader6-schema-v1.xml',
'fr8v2': 'http://www.abbyy.com/FineReader_xml/FineReader8-schema-v2.xml'
}
'namespaces for supported versions of FineReader xml'
id = xmlmap.StringField('@xml:id')
def frns(xpath):
'''Utility function to convert a simple xpath to match any of the
configured versions of ABBYY FineReader XML namespaces. Example
conversions:
* ``page`` becomes ``f1:page|f2:page``
* ``text/par`` becomes ``f1:page/f1:text|f2:page/f2:text``
Uses all declared namespace prefixes from
:attr:`Base.ROOT_NAMESPACES`
'''
namespaces = Base.ROOT_NAMESPACES.keys()
return '|'.join('/'.join('%s:%s' % (ns, el) for el in xpath.split('/'))
for ns in namespaces)
class Formatting(Base):
'''A group of characters in a single :class:`Line` with uniform
formatting.'''
ROOT_NAME = 'formatting'
language = xmlmap.StringField('@lang')
'language of this formatted section'
text = xmlmap.StringField('text()')
'text value'
# char params ?
# boolean attributes for: ff, fs, bold, italic, subscript, superscript,
# smallcaps, underline, strikeout, color, scaling, spacing
class Line(Base):
'''A single line of text in a :class:`Paragraph`.'''
ROOT_NAME = 'line'
baseline = xmlmap.IntegerField('@baseline')
'integer baseline'
left = xmlmap.IntegerField('@l')
'integer left'
top = xmlmap.IntegerField('@t')
'integer top'
right = xmlmap.IntegerField('@r')
'integer right'
bottom = xmlmap.IntegerField('@b')
'integer bottom'
formatted_text = xmlmap.NodeListField(frns('formatting'),
Formatting)
'list of :class:`Formatting` elements'
class Paragraph(Base):
'''A single paragraph of text somewhere in a :class:`Document`.'''
ROOT_NAME = 'par'
align = xmlmap.StringField('@align') # default is Left; Center, Right, Justified
'text alignment (Left, Center, Right, Justified)'
left_indent = xmlmap.IntegerField('@leftIndent')
'integer left indent'
right_indent = xmlmap.IntegerField('@rightIndent')
'integer right indent'
start_indent = xmlmap.IntegerField('@startIndent')
'integer start indent'
line_spacing = xmlmap.IntegerField('@lineSpacing')
'integer line spacing'
# dropChars stuff ?
lines = xmlmap.NodeListField(frns('line'), Line)
'list of :class:`Line` elements'
class Block(Base):
ROOT_NAME = 'page'
'''A single block of content on a :class:`Page`.'''
type = xmlmap.StringField('@blockType') # Text, Table, Picture, Barcode
'type of block (Text, Table, Picture, Barcode)'
left = xmlmap.IntegerField('@l')
'integer left'
top = xmlmap.IntegerField('@t')
'integer top'
right = xmlmap.IntegerField('@r')
'integer right'
bottom = xmlmap.IntegerField('@b')
'integer bottom'
# must have one & only one region;
# region/rect dimensions appears to be redundant...
paragraphs = xmlmap.NodeListField(frns('text/par'), Paragraph)
'list of :class:`Paragraph` elements'
class Page(Base):
'''A single page of a :class:`Document`.'''
ROOT_NAME = 'page'
width = xmlmap.IntegerField('@width')
'integer width'
height = xmlmap.IntegerField('@height')
'integer height'
resolution = xmlmap.IntegerField('@resolution')
'integer resolution'
blocks = xmlmap.NodeListField(frns('block'), Block)
'list of :class:`Block` elements in this page'
text_blocks = xmlmap.NodeListField(frns('block[@blockType="Text"]'),
Block)
'text :class:`Block` elements (where type is "Text")'
picture_blocks = xmlmap.NodeListField(frns('block[@blockType="Picture"]'),
Block)
'picture :class:`Block` elements (where type is "Picture")'
# block position info possibly redundant? map paragraphs directly
paragraphs = xmlmap.NodeListField(frns('block/text/par'),
Paragraph)
'list of :class:`Paragraph` elements in any of the blocks on this page'
class Document(Base):
''':class:`~eulxml.xmlmap.XmlObject` class for an ABBYY
OCR XML Document.
.. Note::
Currently there is no support for tabular formatting elements.
'''
ROOT_NAME ='document'
pages = xmlmap.NodeListField(frns('page'), Page)
'pages as :class:`Page`'
page_count = xmlmap.IntegerField('@pagesCount')
'integer page_count (document ``@pagesCount``)'
language = xmlmap.StringField('@mainLanguage')
'main language of the document'
languages = xmlmap.StringField('@languages')
'all languages included in the document'
|
[
"eulxml.xmlmap.IntegerField",
"eulxml.xmlmap.StringField"
] |
[((604, 633), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""@xml:id"""'], {}), "('@xml:id')\n", (622, 633), False, 'from eulxml import xmlmap\n'), ((1320, 1347), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""@lang"""'], {}), "('@lang')\n", (1338, 1347), False, 'from eulxml import xmlmap\n'), ((1400, 1428), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""text()"""'], {}), "('text()')\n", (1418, 1428), False, 'from eulxml import xmlmap\n'), ((1719, 1751), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@baseline"""'], {}), "('@baseline')\n", (1738, 1751), False, 'from eulxml import xmlmap\n'), ((1786, 1811), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@l"""'], {}), "('@l')\n", (1805, 1811), False, 'from eulxml import xmlmap\n'), ((1841, 1866), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@t"""'], {}), "('@t')\n", (1860, 1866), False, 'from eulxml import xmlmap\n'), ((1897, 1922), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@r"""'], {}), "('@r')\n", (1916, 1922), False, 'from eulxml import xmlmap\n'), ((1956, 1981), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@b"""'], {}), "('@b')\n", (1975, 1981), False, 'from eulxml import xmlmap\n'), ((2291, 2319), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""@align"""'], {}), "('@align')\n", (2309, 2319), False, 'from eulxml import xmlmap\n'), ((2436, 2470), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@leftIndent"""'], {}), "('@leftIndent')\n", (2455, 2470), False, 'from eulxml import xmlmap\n'), ((2516, 2551), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@rightIndent"""'], {}), "('@rightIndent')\n", (2535, 2551), False, 'from eulxml import xmlmap\n'), ((2598, 2633), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@startIndent"""'], {}), "('@startIndent')\n", (2617, 2633), False, 'from eulxml import xmlmap\n'), ((2680, 2715), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@lineSpacing"""'], {}), "('@lineSpacing')\n", (2699, 2715), False, 'from eulxml import xmlmap\n'), ((2967, 2999), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""@blockType"""'], {}), "('@blockType')\n", (2985, 2999), False, 'from eulxml import xmlmap\n'), ((3095, 3120), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@l"""'], {}), "('@l')\n", (3114, 3120), False, 'from eulxml import xmlmap\n'), ((3150, 3175), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@t"""'], {}), "('@t')\n", (3169, 3175), False, 'from eulxml import xmlmap\n'), ((3206, 3231), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@r"""'], {}), "('@r')\n", (3225, 3231), False, 'from eulxml import xmlmap\n'), ((3265, 3290), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@b"""'], {}), "('@b')\n", (3284, 3290), False, 'from eulxml import xmlmap\n'), ((3618, 3647), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@width"""'], {}), "('@width')\n", (3637, 3647), False, 'from eulxml import xmlmap\n'), ((3681, 3711), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@height"""'], {}), "('@height')\n", (3700, 3711), False, 'from eulxml import xmlmap\n'), ((3750, 3784), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@resolution"""'], {}), "('@resolution')\n", (3769, 3784), False, 'from eulxml import xmlmap\n'), ((4865, 4899), 'eulxml.xmlmap.IntegerField', 'xmlmap.IntegerField', (['"""@pagesCount"""'], {}), "('@pagesCount')\n", (4884, 4899), False, 'from eulxml import xmlmap\n'), ((4967, 5002), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""@mainLanguage"""'], {}), "('@mainLanguage')\n", (4985, 5002), False, 'from eulxml import xmlmap\n'), ((5055, 5087), 'eulxml.xmlmap.StringField', 'xmlmap.StringField', (['"""@languages"""'], {}), "('@languages')\n", (5073, 5087), False, 'from eulxml import xmlmap\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.