blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dca35377643639e9b92acceb9f5f5e1edf8a6e29 | bb08d3f19acf138da831694482e30286fa6d7735 | /0x01-python-if_else_loops_functions/5-print_comb2.py | 95db922209b068bbad35734f13627ad3c8ebc4b6 | [] | no_license | marianellamonroyortizhb/holbertonschool-higher_level_programming | 954f1473953bbda61e8a246fccd84087f381fbb0 | a9829e231bdfed426e70289d78b271b6c3ca5405 | refs/heads/main | 2023-07-17T18:52:38.329313 | 2021-08-23T03:08:01 | 2021-08-23T03:08:01 | 291,706,052 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | #!/usr/bin/python3
# Emit "00, 01, ..., 98, 99" followed by a single newline: the numbers
# 0..98 are zero-padded and comma-separated, with a plain "99" at the end.
print(", ".join("{:02d}".format(n) for n in range(99)) + ", 99")
| [
"1880@holbertonschool.com"
] | 1880@holbertonschool.com |
2d146d6143cc226689e3331b859cee83e50b585e | fcbd988fe9e902a6d41bee33c55a1861d7f9c4ac | /newswala/urls.py | aa97d07935aa748209a495db3c478191a6d46a97 | [] | no_license | viratvasu/news-paper-automation-system | 50711787033a6ebe7f3d8a6c005f0d5c254f56a3 | e26be7739614650e59301af420ed2afa1af6075c | refs/heads/master | 2023-06-07T18:12:04.763377 | 2021-06-25T06:21:32 | 2021-06-25T06:21:32 | 352,373,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,641 | py | """newswala URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include,re_path
from django.conf import settings
from django.conf.urls.static import static
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
# from .views import renderHomepage
from django.views.generic import TemplateView
# URL routing table: Django admin, JWT token endpoint, and one included
# URLconf per role-specific app (customer, paperboy, manager, ...).
urlpatterns = [
    # path('',renderHomepage),
    path('admin/', admin.site.urls),
    # POST credentials here to obtain a JWT access/refresh token pair.
    path('api/token/', TokenObtainPairView.as_view()),
    path('accounts/',include('accounts.urls')),
    path('customer/',include('customer.urls')),
    path('paperboy/',include('paperboy.urls')),
    path('manager/',include('manager.urls')),
    path('accountant/',include('accountant.urls')),
    path('adminapp/',include('adminapp.urls'))
]
# Serve collected static files and uploaded media (development-style serving).
urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
# Catch-all (empty pattern matches everything left): render the SPA entry
# template so client-side routing can take over.
urlpatterns += [
    re_path(r'',TemplateView.as_view(template_name = 'index.html')),
]
| [
"vasuvirat492@gmail.com"
] | vasuvirat492@gmail.com |
439ef1f5c8760b07e9d17c9232ee3b364806fc6a | e28bf3e233da1ff4a9b8556699bcfaa026a11211 | /samples/socialAggregator/node_modules/mongoose/node_modules/bson/node_modules/bson-ext/build/config.gypi | 3201cd72a22fdef91e8338031caaf5941accd2ca | [
"MIT",
"Apache-2.0"
] | permissive | GauravDelphinus/delphinus | af9c5002e0a274b1425045fa64a0461d8f0e8cf4 | e311ca7643d3fcf127289406dc89007d11b4e429 | refs/heads/master | 2023-03-08T09:28:05.892467 | 2021-01-01T14:03:23 | 2021-01-01T14:03:23 | 83,102,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,424 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/Users/jon/.node-gyp/0.10.26",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/Users/jon/Documents/Pluralsight/Courses/OAuth/module4/code/socialAggregator/node_modules/mongoose/node_modules/bson/node_modules/bson-ext/build/Release/bson.node",
"module_name": "bson",
"module_path": "/Users/jon/Documents/Pluralsight/Courses/OAuth/module4/code/socialAggregator/node_modules/mongoose/node_modules/bson/node_modules/bson-ext/build/Release",
"save_dev": "",
"viewer": "man",
"browser": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/Users/jon/npm/etc/npmignore",
"shell": "/bin/bash",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"init_license": "ISC",
"email": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "null",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"npat": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/Users/jon/npm/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"heading": "npm",
"proprietary_attribs": "true",
"fetch_retry_mintimeout": "10000",
"json": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/jon/.npmrc",
"init_module": "/Users/jon/.npm-init.js",
"user": "501",
"node_version": "v0.10.26",
"editor": "vi",
"save": "true",
"tag": "latest",
"global": "",
"username": "",
"optional": "true",
"force": "",
"bin_links": "true",
"searchopts": "",
"depth": "null",
"searchsort": "name",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"ca": "",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"cache_lock_stale": "60000",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/jon/.npm",
"color": "true",
"save_optional": "",
"user_agent": "node/v0.10.26 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "18",
"init_author_name": "",
"git": "git",
"unsafe_perm": "true",
"tmp": "/var/folders/pq/rx_9877x50v5zlmwnj5plvr40000gn/T/",
"onload_script": "",
"link": "",
"prefix": "/Users/jon/npm"
}
}
| [
"gauravjain.nsit@gmail.com"
] | gauravjain.nsit@gmail.com |
cf37aac9d227dfbd4c7430df7abe6facb7d78387 | 9bb01fa882e713aa59345051fec07f4e3d3478b0 | /tests/cysparse_/sparse/memory/test_copy_CSCSparseMatrix_INT32_t_COMPLEX64_t.py | 647b1079524c4d905c0e53d370b23d6cd9d3eca0 | [] | no_license | syarra/cysparse | f1169c496b54d61761fdecbde716328fd0fb131b | 7654f7267ab139d0564d3aa3b21c75b364bcfe72 | refs/heads/master | 2020-05-25T16:15:38.160443 | 2017-03-14T21:17:39 | 2017-03-14T21:17:39 | 84,944,993 | 0 | 0 | null | 2017-03-14T12:11:48 | 2017-03-14T12:11:48 | null | UTF-8 | Python | false | false | 4,646 | py | #!/usr/bin/env python
"""
This file tests ``copy()`` for all sparse-likes objects.
"""
import unittest
from cysparse.sparse.ll_mat import *
from cysparse.common_types.cysparse_types import *
########################################################################################################################
# Tests
########################################################################################################################
#######################################################################
# Case: store_symmetry == False, Store_zero==False
#######################################################################
class CySparseCopyNoSymmetryNoZero_CSCSparseMatrix_INT32_t_COMPLEX64_t_TestCase(unittest.TestCase):
def setUp(self):
self.nrow = 10
self.ncol = 14
self.A = LinearFillLLSparseMatrix(nrow=self.nrow, ncol=self.ncol, dtype=COMPLEX64_T, itype=INT32_T)
self.C = self.A.to_csc()
def test_copy_not_same_reference(self):
"""
Test we have a real deep copy for matrices and views and proxies are singletons.
Warning:
If the matrix element type is real, proxies may not be returned.
"""
self.assertTrue(id(self.C) != id(self.C.copy()))
def test_copy_element_by_element(self):
C_copy = self.C.copy()
for i in range(self.nrow):
for j in range(self.ncol):
self.assertTrue(self.C[i, j] == C_copy[i, j])
#######################################################################
# Case: store_symmetry == True, Store_zero==False
#######################################################################
class CySparseCopyWithSymmetryNoZero_CSCSparseMatrix_INT32_t_COMPLEX64_t_TestCase(unittest.TestCase):
def setUp(self):
self.size = 10
self.A = LinearFillLLSparseMatrix(size=self.size, dtype=COMPLEX64_T, itype=INT32_T, store_symmetry=True)
self.C = self.A.to_csc()
def test_copy_not_same_reference(self):
"""
Test we have a real deep copy for matrices and views and proxies are singletons.
Warning:
If the matrix element type is real, proxies may not be returned.
"""
self.assertTrue(id(self.C) != id(self.C.copy()))
def test_copy_element_by_element(self):
C_copy = self.C.copy()
for i in range(self.size):
for j in range(self.size):
self.assertTrue(self.C[i, j] == C_copy[i, j])
#######################################################################
# Case: store_symmetry == False, Store_zero==True
#######################################################################
class CySparseCopyNoSymmetrySWithZero_CSCSparseMatrix_INT32_t_COMPLEX64_t_TestCase(unittest.TestCase):
def setUp(self):
self.nrow = 10
self.ncol = 14
self.A = LinearFillLLSparseMatrix(nrow=self.nrow, ncol=self.ncol, dtype=COMPLEX64_T, itype=INT32_T, store_zero=True)
self.C = self.A.to_csc()
def test_copy_not_same_reference(self):
"""
Test we have a real deep copy for matrices and views and proxies are singletons.
Warning:
If the matrix element type is real, proxies may not be returned.
"""
self.assertTrue(id(self.C) != id(self.C.copy()))
def test_copy_element_by_element(self):
C_copy = self.C.copy()
for i in range(self.nrow):
for j in range(self.ncol):
self.assertTrue(self.C[i, j] == C_copy[i, j])
#######################################################################
# Case: store_symmetry == True, Store_zero==True
#######################################################################
class CySparseCopyWithSymmetrySWithZero_CSCSparseMatrix_INT32_t_COMPLEX64_t_TestCase(unittest.TestCase):
def setUp(self):
self.size = 10
self.A = LinearFillLLSparseMatrix(size=self.size, dtype=COMPLEX64_T, itype=INT32_T, store_symmetry=True, store_zero=True)
self.C = self.A.to_csc()
def test_copy_not_same_reference(self):
"""
Test we have a real deep copy for matrices and views and proxies are singletons.
Warning:
If the matrix element type is real, proxies may not be returned.
"""
self.assertTrue(id(self.C) != id(self.C.copy()))
def test_copy_element_by_element(self):
C_copy = self.C.copy()
for i in range(self.size):
for j in range(self.size):
self.assertTrue(self.C[i, j] == C_copy[i, j])
if __name__ == '__main__':
unittest.main()
| [
"sylvain.arreckx@gmail.com"
] | sylvain.arreckx@gmail.com |
86dce18c7b5d76d01f32df22306412f7ca2feb73 | d7d19d6918029de88bcf060cea23d5b4a1f7efb1 | /xiab/apps/subjects/models.py | 85c54cc05e21150cfe80e2ddb9d412d7c622452e | [] | no_license | petercollingridge/xiab | 8abe2b2b7124eeb0cfa06d2f21ce858a4ffbd975 | ae84d3d228f3fe9392d0fd894652e290b219b1d2 | refs/heads/master | 2020-03-26T04:25:28.163381 | 2019-09-29T16:20:25 | 2019-09-29T16:20:25 | 144,503,055 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import RichTextField
from wagtail.admin.edit_handlers import FieldPanel
class SubjectPage(Page):
summary = RichTextField(blank=True)
content_panels = Page.content_panels + [
FieldPanel('summary'),
]
def get_context(self, request):
# Update context to include only published posts
context = super().get_context(request)
context['children'] = self.get_children().live()
return context
| [
"peter.collingridge@gmail.com"
] | peter.collingridge@gmail.com |
79853311cf8854f9270c9533d95f1037ad709899 | b5d57d0793056ca112547358466ca8934542e686 | /code/trash/roadm.py | 53830df821cbfe9ef886408a81cfcfb847d2d375 | [] | no_license | sourav295/research_2016 | 021ba6b6e057326fe02074741def9f8bbc8d7245 | ecee1fc6ca41e3630a4228a36376df7b536f68fa | refs/heads/master | 2020-05-21T19:14:22.118585 | 2016-10-28T04:07:11 | 2016-10-28T04:07:11 | 62,491,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,726 | py | import graph
from configure import GlobalConfiguration
import simpy
class Optical_Link(object):
    """Setup-time link between two ROADMs.

    Used by the Spring XML wiring to build the architecture; it plays no
    role in the network simulation itself, only in the initial set up.
    """
    def __init__(self, frm, to):
        self.frm, self.to = frm, to

    def connect(self):
        # Delegate the actual wiring to the source ROADM.
        self.frm.connect(self.to)
class Optical_Signal(object):
    """A packet riding a particular wavelength over the optical fiber."""
    def __init__(self, pck, lambd):
        self.pck, self.lambd = pck, lambd

    def get_lambda(self):
        """Wavelength this signal is modulated on."""
        return self.lambd

    def get_pck(self):
        """Payload packet carried by this signal."""
        return self.pck
class Lambda(object):
    """A wavelength (channel colour), identified solely by its category.

    Instances are used as dictionary keys (e.g. Optical_Fibre's
    lambda_to_channel_map), so equality and hashing must agree: two
    Lambda objects with the same category compare equal and hash alike.
    The original defined __eq__ only, leaving the default identity hash
    (and, under Python 3, making the class unhashable).
    """
    def __init__(self, category):
        self.category = category

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and self.category == other.category)

    def __ne__(self, other):
        # Python 2 does not derive != from ==; keep the pair consistent.
        return not self.__eq__(other)

    def __hash__(self):
        # Must match __eq__: equal categories -> equal hashes, so equal
        # lambdas land in the same dict bucket.
        return hash(self.category)

    def __repr__(self):
        return "Lambd {}".format(self.category)
class Lambda_Factory(object):
    """Pre-provisions one Lambda per configured channel and hands out free ones."""
    N_lambdas = GlobalConfiguration.N_Channels
    # The complete pool of wavelengths the system can ever assign.
    all_lambdas = [Lambda(category) for category in range(N_lambdas)]

    @staticmethod
    def generate_lambda(unavailable_lambdas):
        """Return the first pool wavelength not in *unavailable_lambdas*.

        Raises:
            ValueError: when every provisioned wavelength is already taken.
        """
        for candidate in Lambda_Factory.all_lambdas:
            if candidate not in unavailable_lambdas:
                return candidate
        raise ValueError('Not enough lambdas provisioned by the system')
class Optical_Fibre(object):
    """A fiber strand with one simulated Channel per provisioned wavelength."""

    def __init__(self):
        self.out = None
        self.delay = 0  # propagation delay; hard-coded to zero for now
        # One Channel per wavelength in the global pool, all sharing the
        # simpy environment and this fiber's delay.
        self.lambda_to_channel_map = {
            wavelength: Channel(GlobalConfiguration.simpyEnv, wavelength, self.delay)
            for wavelength in Lambda_Factory.all_lambdas
        }

    def put(self, value):
        """Launch *value* (an Optical_Signal) onto its wavelength's channel."""
        channel = self.lambda_to_channel_map[value.get_lambda()]
        # Re-point the channel at this fiber's far end before propagating.
        channel.out = self.out
        channel.put(value)
class Channel(object):
    """This class represents the propagation on a particular lambda"""
    def __init__(self, env, lambd, delay):
        self.env = env          # simpy environment driving the simulation clock
        self.delay = delay      # sim-time units to wait before delivery
        self.store = simpy.Store(env)   # NOTE(review): not referenced in this class — verify it is needed
        self.lambd = lambd      # wavelength this channel carries
        self.out = None         # downstream sink; re-pointed by the fiber before each put
    def latency(self, value):
        # simpy process body: wait `delay` units, log the measured delay
        # on the packet, then hand the signal to the downstream sink.
        temp_time = self.env.now
        yield self.env.timeout(self.delay)
        propagation_delay = self.env.now - temp_time
        value.get_pck().log("<<Progation Delay over optical channel: {} lambda category: {}>>".format(propagation_delay, value.get_lambda().category))
        self.out.put(value)
    def put(self, value):
        # Schedule latency() as a concurrent simpy process; put() itself
        # returns immediately without blocking the caller.
        self.env.process(self.latency(value))
class Roadm(object):
    """Reconfigurable optical add/drop multiplexer (network node) with N degrees.

    Border ROADMs sit at the optical network edge; their shared LFIB maps a
    FEC (destination border ROADM) to an (out_port, lambda) pair.
    """
    def __init__(self,hostname, n_of_degrees, is_border = False):
        self.n_of_degrees = int(n_of_degrees)
        self.degrees = []
        self.LFIB = {}  # FEC (destination ROADM) -> (out_port, lambda); shared with every Degree
        self.id = hostname
        self.is_border = is_border
        #init all degrees on this roadm
        for id in range(self.n_of_degrees):
            new_degree_id = "{}_{}".format(str(self.id), str(id))
            new_degree = Degree(new_degree_id, is_border = self.is_border, LFIB = self.LFIB)
            #wire to other degrees
            for other_degree in self.degrees:
                new_degree.connect(other_degree)
            self.degrees.append(new_degree)
    '''SETUP'''
    def connect(self, other_roadm):
        """Wire this ROADM to *other_roadm* with a fiber pair (one per direction)."""
        if other_roadm == self:
            return
        #roadm degrees not yet connected to anything
        avble_degree_on_self = self.get_an_unconnected_degree()
        avble_degree_on_other = other_roadm.get_an_unconnected_degree()
        if ( not avble_degree_on_self ) or ( not avble_degree_on_other ):
            raise ValueError('Not enough degrees on RoadM on {} or {}'.format(self, other_roadm))
        else:
            of1 = Optical_Fibre()
            of2 = Optical_Fibre()
            #1 : connect self (out) to fiber and then fiber to other roadm port (in)
            avble_degree_on_self.out_port.out = of1
            of1.out = avble_degree_on_other.in_port
            #2 : connect other roadm port (out) to fiber and then fiber self roadm port (in)
            avble_degree_on_other.out_port.out = of2
            of2.out = avble_degree_on_self.in_port
    def get_an_unconnected_degree(self):
        """Return a degree whose out-port is unwired, or None if all are taken."""
        #while connecting roadms, we want a degree which has not been connected yet
        available_degress = [ d for d in self.degrees if d.out_port.out == None]
        return available_degress[0] if available_degress else None
    def find_degree_to_reach_next_Roadm(self, other_roadm):
        """Return the local degree whose fiber lands on *other_roadm*.

        Raises:
            ValueError: if no directly-connected degree reaches it.
        """
        #given another roadm find a plausable degree on my self to reach the target
        for self_degree in self.degrees:
            try:
                self_out_port = self_degree.out_port
                connected_fiber = self_out_port.out
                nxt_port = connected_fiber.out # port on other roadm
                other_degrees = [other_degree for other_degree in other_roadm.degrees if nxt_port == other_degree.in_port]
                if len(other_degrees) > 0:
                    #plausdible to reach the other_road via this self_degree / connected_fiber
                    return self_degree #self degree signifies out port, other degree signifies the in port
            except AttributeError:
                # Unwired degree: connected_fiber is None, so .out raises.
                print "out doesnt exist"
        raise ValueError('No direct link to the next roadm')
    '''Distribute switching info'''
    def register_FEC_to_LFIB(self, fec, out_port, lambd):
        """Record the (out_port, lambda) to use when forwarding toward *fec*."""
        #FEC = destination roadM
        self.LFIB[fec] = (out_port, lambd)
    def distribute_labels(self, fec, path):
        """Pick a wavelength free along *path* and program WSSs/ports/LFIB for it.

        *path* is the ordered list of ROADMs from this node to *fec*.
        """
        out_degrees_in_path = []
        for i in range(len(path)-1):#exclude the last path element (roadm)
            this_roadm = path[i]
            next_roadm = path[i+1]
            out_degrees_in_path.append( this_roadm.find_degree_to_reach_next_Roadm( next_roadm ) )
        in_degrees_in_path = []
        for roadm in path:#exclude last roadm
            in_degrees_in_path.extend( [in_d for in_d in roadm.degrees if in_d not in out_degrees_in_path] )
        #Find available resource
        unavailable_resources_on_path = []#[uavail_res for uavail_res in degree.out_port.resources_reserved for degree in out_degrees_in_path]
        for degree in out_degrees_in_path:
            unavailable_resources_on_path.extend( degree.out_port.resources_reserved )
        for degree in in_degrees_in_path:
            unavailable_resources_on_path.extend( degree.in_port.resources_reserved )
        available_resource = Lambda_Factory.generate_lambda(unavailable_resources_on_path)
        #Reserve resource on each out_port on this path and create entry in wss
        for degree in out_degrees_in_path:
            degree.wss.set_lambda_to_select(available_resource)
            degree.out_port.reserve_resource(available_resource)
        for degree in in_degrees_in_path:
            degree.in_port.reserve_resource(available_resource)
        #Register to LFIB on this roadm
        target_out_port_on_self = out_degrees_in_path[0].out_port #out port on roadm this signal is supposed to go through
        self.register_FEC_to_LFIB(fec, target_out_port_on_self, available_resource)
    @staticmethod
    def get_border_roadms(all_roadms):
        """Return the border ROADMs in *all_roadms*, or None if there are none."""
        border_roadms = [roadm for roadm in all_roadms if roadm.is_border == True]
        return border_roadms if border_roadms else None
    @staticmethod
    def expel_inner_roadms_from_path(explicit_path):
        """Drop non-border ROADMs from *explicit_path*; keep everything else."""
        #if first condition is false, then only it evals the second condition assuming it is a roadm (short circuiting)
        return [net_comp for net_comp in explicit_path if not isinstance(net_comp,Roadm) or net_comp.is_border == True]
    def __repr__(self):
        return "{}".format(self.id)
#construct map {incoming port, incoming lambda} -> {outgoing port, outport lambda}
class Degree(object):
    """One direction of a ROADM: in-port -> splitter -> (peer WSSs / add-drop) -> WSS -> out-port."""

    def __init__(self, id, is_border = False, LFIB = None):
        self.id = id
        self.LFIB = LFIB
        # Ingress side: the incoming port feeds a broadcast splitter.
        self.in_port = Roadm_Port("{}_in".format(self.id), LFIB = self.LFIB)
        self.splitter = Splitter()
        self.in_port.out = self.splitter
        # Egress side: the wavelength-selective switch feeds the outgoing port.
        self.out_port = Roadm_Port("{}_out".format(self.id))
        self.wss = WSS()
        self.wss.out = self.out_port
        # Local add/drop endpoint sits on the splitter's broadcast list.
        self.add_drop_module = Add_Drop_Module()
        self.splitter.out.append(self.add_drop_module)

    def connect(self, other_degree):
        """Cross-wire two degrees of the same ROADM (each splitter feeds the peer WSS)."""
        if other_degree == self:
            return
        if other_degree.wss not in self.splitter.out:
            self.splitter.out.append(other_degree.wss)
        if self.wss not in other_degree.splitter.out:
            other_degree.splitter.out.append(self.wss)

    def mark_as_interfacing_outside_network(self):
        """Turn this degree into an optical-network edge: label add/strip, WSS bypass."""
        self.in_port.has_to_add_label = True
        self.out_port.has_to_remove_label = True
        self.wss.disabled = True

    def __repr__(self):
        return "(Degree){}".format(self.id)
class Add_Drop_Module(object):
    """Placeholder local add/drop endpoint hanging off a degree's splitter.

    Bug fixed: the original __init__ bound a local variable ``out`` instead
    of the instance attribute, so ``self.out`` never existed.
    """
    def __init__(self):
        self.out = None  # downstream sink, to be wired when add/drop is implemented

    def put(self, value):
        # TODO: add/drop behaviour not implemented yet; signals are discarded.
        pass
class Roadm_Port(object):
    """A ROADM degree port.  At the network edge it adds/strips the lambda
    label (via the shared LFIB); inside the network it just forwards."""
    def __init__(self, id, LFIB = None):
        #self.env = env
        self.id = id
        self.out = None                  # downstream element (fiber, splitter, ...)
        self.LFIB = LFIB #LFIB[fec] --to--> (out_port, lambd) MAP
        self.resources_reserved = []     # wavelengths reserved on this port
        #if it is a border roadm
        self.has_to_add_label = False    # ingress edge port: wrap packet in Optical_Signal
        self.has_to_remove_label = False # egress edge port: unwrap back to packet
    def put(self, value, destination_roadm = None):
        """Forward *value*, adding or stripping the lambda label at edge ports."""
        #destination_roadm (fec) compulsary if label is to be added
        if self.has_to_add_label:#convert to optical signal, ingress port
            pck = value
            fec = pck.next_hop()
            pck.log("--~~--")
            pck.log("Packet arrived at Optical Network Ingress: {}".format(self))
            out_port, lambd = self.LFIB[fec]#query LFIB
            pck.log("Lambda Category Assigned: {} For FEC: {}".format(lambd.category, fec))
            pck.log("--~~--")
            pck.increment_explicit_path_pointer()
            self.out.put( Optical_Signal(pck, lambd) )
        elif self.has_to_remove_label:#convert from optical to something else, egress port
            sig = value
            pck = sig.get_pck()
            pck.increment_explicit_path_pointer()
            pck.log("--~~--")
            pck.log("Packet arrived at Optical Network Egress: {}, Lambda removed".format(self))
            pck.log("--~~--")
            self.out.put(pck)
        else:#intermediatary roadms
            self.out.put(value)
    def reserve_resource(self, resource):
        """Track *resource* as reserved; True if newly added, False if already held."""
        #just to keep track
        if not resource in self.resources_reserved:
            self.resources_reserved.append(resource)
            return True
        else:
            return False
    def get_reserved_resources(self):
        return self.resources_reserved
    def __repr__(self):
        return "(Roadm_port){}".format(self.id)
class WSS(object):
    """Wavelength-selective switch: forwards only whitelisted lambdas."""

    def __init__(self):
        self.out = None
        self.lambda_to_select = []   # wavelengths this switch lets through
        self.disabled = False        # a disabled WSS passes everything

    def put(self, value):
        """Forward *value* if its wavelength is selected (or the WSS is bypassed)."""
        if self.disabled:
            self.out.put(value)
            return
        if value.get_lambda() in self.lambda_to_select:
            self.out.put(value)
        # otherwise the signal is dropped silently

    def set_lambda_to_select(self, resource):
        """Whitelist *resource*; True if newly added, False if already present."""
        if resource in self.lambda_to_select:
            return False
        self.lambda_to_select.append(resource)
        return True

    def get_lambda_to_select(self):
        return self.lambda_to_select
class Splitter(object):
    """Broadcast element: copies every incoming signal to all attached sinks."""

    def __init__(self):
        #self.env = env
        self.out = []  # downstream sinks (peer-degree WSSs and the add/drop module)

    def put(self, value):
        for sink in self.out:
            sink.put(value)
| [
"sourav295@gmail.com"
] | sourav295@gmail.com |
b463de8542116cdad5bb3450d74020633c97ac4e | 2f7c215e59345d222bdf62fc4b46a3634b010331 | /server.py | 2a9bb778290f23c27fd63eba2cd865a944231c16 | [] | no_license | cccpast/TerminalChat | dc173cec1653c69a4c2147db2c1d213c416b9b58 | 5019f930a310a9edbb9dbc4271b821fe3cea1fe5 | refs/heads/master | 2021-05-16T11:24:50.745304 | 2017-09-27T10:13:53 | 2017-09-27T10:13:53 | 105,003,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | # -*- coding:utf-8 -*-
import socket
def main():
    """Interactive TCP chat server on localhost:3001.

    Accepts a single client, then alternates: read a line from the
    operator, send it, print the client's reply.  A client reply of "q"
    ends the session (the server side cannot initiate the disconnect).
    """
    # Context managers guarantee both sockets are closed on every exit
    # path; the original leaked the listening socket (and the client
    # socket on exceptions).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        # Allow quick restarts without "Address already in use".
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(("localhost", 3001))
        s.listen(1)
        sock, addr = s.accept()
        with sock:
            print("connected by" + str(addr))
            while True:
                send_data = input("server > ")
                sock.send(bytes(send_data, 'utf-8'))
                receive_data = sock.recv(1024).decode('utf-8')
                print("client > ", receive_data)
                # Only the client may end the session, by sending "q".
                if receive_data == "q":
                    break
if __name__ == "__main__":
main()
| [
"coltpex13fix@gmail.com"
] | coltpex13fix@gmail.com |
fbbc336f4814604c71fdc425ac88f6046747b23c | 348a2e0d3170a8a7ce27a4a15275dfc26eaefc6c | /AddTwoNumbers.py | bb6eefeb56f56d6e1ba14b8477a6fbd82eafddcd | [
"MIT"
] | permissive | zcipod/leetcode | ae5e043374661a2e45ff4612e6b1f6cd8d92fffe | cea4f51c7e7616b4e0c56e2af0e7169c1b768787 | refs/heads/master | 2022-11-25T12:08:56.451249 | 2020-08-09T09:53:32 | 2020-08-09T09:53:32 | 266,452,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | # You are given two non-empty linked lists representing two non-negative integer
# s. The digits are stored in reverse order and each of their nodes contain a sing
# le digit. Add the two numbers and return it as a linked list.
#
# You may assume the two numbers do not contain any leading zero, except the nu
# mber 0 itself.
#
# Example:
#
#
# Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
# Output: 7 -> 0 -> 8
# Explanation: 342 + 465 = 807.
#
# Related Topics Linked List Math
# 👍 8458 👎 2145
from typing import List
import decorator_time
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for singly-linked list.
class ListNode:
    """Singly-linked list node (LeetCode-standard definition)."""
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two non-negative numbers stored as reversed-digit linked lists.

        Example: (2 -> 4 -> 3) + (5 -> 6 -> 4) == (7 -> 0 -> 8), i.e.
        342 + 465 = 807.

        Builds a fresh result list in a single pass, carrying overflow
        between digits.  Fix over the original three-phase version: the
        input lists are never mutated (the old carry propagation rewrote
        node values inside l1/l2), and the tail/carry handling collapses
        into one uniform loop.
        """
        head = ListNode(0)   # dummy node; the result starts at head.next
        tail = head
        carry = 0
        # Keep going while either list has digits or a carry remains.
        while l1 or l2 or carry:
            digit = carry
            if l1:
                digit += l1.val
                l1 = l1.next
            if l2:
                digit += l2.val
                l2 = l2.next
            carry, digit = divmod(digit, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return head.next
# leetcode submit region end(Prohibit modification and deletion)
| [
"zcipod@163.com"
] | zcipod@163.com |
6c135183da24b761c602e57c6225f91f588637ad | 883c90d0f0a96093a89a74aa868c4c569e6f9eb5 | /newfile.py | 3407c4d6c464c3f9cf113294d10cad37e219bdf6 | [] | no_license | xrarayin27/python_wrkpls | 121bee7c55ffd5bdc52f6887ad5e872dba71bd83 | 8aae38e9d5273043813fcf3ad37d6b3a5f7385ff | refs/heads/master | 2022-12-10T18:13:11.661810 | 2020-09-04T17:37:20 | 2020-09-04T17:37:20 | 292,908,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | print("Robitaet?")
danet=input()
if (danet=="da"):
print("Otlichno!")
input()
if (danet=="net"):
print("AAAA BL!")
input() | [
"noreply@github.com"
] | xrarayin27.noreply@github.com |
4e554d1fb9a88ed2d04b9397feb311493507f223 | 289da5146b8991942ba22eefe948289ee024d3ff | /sheng/tutorial/L3函数/8 global.py | 380ea400f5deb82c17c96c689facbc7d471efff3 | [] | no_license | a1424186319/tutorial | 263585961ab40e7a9a55405263d80057a88298d4 | 909bfc9f850118af7892a7ba4b0f7e3d0798db8a | refs/heads/master | 2022-12-09T01:05:36.063099 | 2019-02-18T12:12:52 | 2019-02-18T12:12:52 | 166,967,437 | 0 | 0 | null | 2021-06-01T23:20:20 | 2019-01-22T09:38:51 | Python | UTF-8 | Python | false | false | 500 | py | #
#(老写法 a是全局变量) 从1 加到 100的和
# a = 0
# for i in range(1,101):
# a = a + i
# print(a)
## global(全局) 显示声明变量为全局变量
# total = 0
# def add1(n):
# global total
# total = total + 1
# add1()
# add1()
# add1()
# print(total)
## nonlocal(局部的)https://www.cnblogs.com/saintdingspage/p/7788958.html
def outer():
num = 10
def inner():
nonlocal num
num = 100
print(num)
inner()
print(num)
outer() | [
"1424186319@qq.com"
] | 1424186319@qq.com |
162bfe0339e4d87b47dccec2ef3d7d4ba3519a52 | 640a5ce57b169a2f56fbbc307208ac9749871329 | /node_modules/mongoose/node_modules/mongodb/node_modules/bson/build/config.gypi | 74315c88faa1b40f9834631fd1e9c5f82f617a8c | [
"MIT",
"Apache-2.0"
] | permissive | kennychong89/codereview_prototype_one | 9ba9957b0362d43e3d3f2e521ce90d26c8c097ec | a175c3c123dabbf65142c1169298b1612fa4feae | refs/heads/master | 2016-08-04T14:51:44.920514 | 2014-12-01T21:06:35 | 2014-12-01T21:06:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,163 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 48,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "/usr",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/home/kenny2/.node-gyp/0.10.32",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/1.4.28 node/v0.10.32 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"user": "",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "Infinity",
"userconfig": "/home/kenny2/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/kenny2/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "18",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "0.10.32",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/kenny2/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"email": "",
"json": "",
"spin": "true"
}
}
| [
"kennychong89@gmail.com"
] | kennychong89@gmail.com |
ad272e6634a3e14b8363b384efb4abde77a7c68c | 13ebcbfcc2e57240298bc5f35773b1ec7996007c | /bin/cortex | 84f30ff454b0c27fc87e58418edc26c3ec005915 | [
"MIT"
] | permissive | miguelmota/dotfiles | ebec8dedcfc1d26397fdb110d3f1b9c4641f8258 | 000ce1ecfb1ede44b44c36a1c8923fa3f5497ac7 | refs/heads/master | 2023-01-13T02:19:36.444703 | 2022-12-27T05:20:09 | 2022-12-27T05:20:09 | 18,570,788 | 22 | 6 | null | null | null | null | UTF-8 | Python | false | false | 27,681 | #!/usr/bin/env python3
import os
import sys
import curses
import threading
import time
import webbrowser
from configparser import ConfigParser
from urllib.request import Request, urlopen
from urllib.parse import quote
from threading import Timer
try:
from subprocess import call, DEVNULL
except ImportError:
DEVNULL = open(os.devnull, "wb")
# Get a json library
try:
import json
except ImportError:
# Fall back to simplejson if no stdlib json is available
import simplejson as json
# Get a pickle library
try:
import cPickle as pickle
except ImportError:
import pickle
# {{{ Constants
APPLICATION_NAME = "Cortex"
APPLICATION_VERSION = "0.5.1"
APPLICATION_URI = "http://cortex.glacicle.org"
# {{{ Version/Help text
VERSIONTEXT = """%(name)s v%(version)s <%(uri)s>
See cortex(1) for information about using and configuring cortex.
Copyright (C) 2010 Lucas de Vries <lucas@glacicle.org>
License WTFPL: <http://sam.zoy.org/wtfpl>""" % dict(
name=APPLICATION_NAME,
version=APPLICATION_VERSION,
uri=APPLICATION_URI
)
HELPTEXT = """
Usage: {{CMD}} [-v|--version] [-h|--help] [SUBREDDIT]
-v, --version Show the version
-h, --help This message
See cortex(1) for information about using and configuring cortex.
""".replace("{{CMD}}", sys.argv[0], 1)
# }}}
CONF_DIR = os.getenv("HOME") + "/.cortex"
SEEN_LOCATION = CONF_DIR + "/seen.cache"
CONF_LOCATION = CONF_DIR + "/config"
GETCH_TIMEOUT = 400
BASE_URI = "http://reddit.com/"
INBOX_URI = BASE_URI + "message/inbox"
SUB_URI = BASE_URI + "r/%s/"
SEARCH_URI = BASE_URI + "search.json?q=%s&sort=new"
COLOR_NAME = {
"black": 0,
"red": 1,
"green": 2,
"yellow": 3,
"blue": 4,
"magenta": 5,
"cyan": 6,
"white": 7,
"default": -1,
}
# }}}
# {{{ Configuration
# Default config
Config = {
"browser-command": "",
"update-interval": 10,
"user-agent": "",
"seen-size": 500,
"sort": "-read",
"frontpage": BASE_URI+".json",
"inbox": "",
}
if hasattr(str, "format"):
Config.update({
"title-format": " Cortex -- Reddit/{title}: {total} articles, {new} new",
"entry-format-minimal": " {title} %> {subreddit:<13} | {num_comments:<4}",
"entry-format-full": " {title} %n [{score:4}] {read} {nsfw} %> {domain:30} {subreddit:<13} {num_comments:4} comments",
})
else:
Config.update({
"title-format": " Cortex -- Reddit/%(title)s: %(total)s articles, %(new)s new",
"entry-format-minimal": " %(title)s {>} %(subreddit)-13s | %(num_comments)-4s",
"entry-format-full": " %(title)s {n} [%(score)4s] %(read)s %(nsfw)s {>} %(domain)30s %(subreddit)-13s %(num_comments)4s comments",
})
Colors = {
"title": ("brightyellow", "blue"),
"normal": ("white", "black"),
"entry": ("white", "black"),
"entry-data": ("yellow", "black"),
"entry-selected": ("brightyellow", "magenta"),
"entry-data-selected": ("brightyellow", "magenta"),
"entry-bottom": ("green", "black"),
"entry-bottom-selected": ("brightyellow", "magenta"),
"entry-bottom-data": ("yellow", "black"),
"entry-bottom-data-selected": ("brightyellow", "magenta"),
"messages": ("brightgreen", "blue"),
}
Binds = {
ord("q"): "quit",
ord("r"): "update",
ord("m"): "toggle-minimal",
ord("x"): "toggle-hideold",
ord("k"): "prev-line",
ord("j"): "next-line",
ord("0"): "first-line",
ord("g"): "first-line",
ord("$"): "last-line",
ord("G"): "last-line",
ord("o"): "open",
10: "open",
ord("z"): "mark-read",
ord("t"): "open-both",
ord("c"): "open-comments",
ord("i"): "open-inbox",
ord("l"): "mark-all-read",
ord("h"): "frontpage",
ord("s"): "subreddit",
ord("/"): "search",
ord("f"): "subreddit-go",
ord(""): "prev-page",
ord(""): "next-page",
ord(""): "redraw",
curses.KEY_HOME: "first-line",
curses.KEY_END: "last-line",
curses.KEY_UP: "prev-line",
curses.KEY_DOWN: "next-line",
curses.KEY_PPAGE: "prev-page",
curses.KEY_NPAGE: "next-page",
}
Minimal = False             # one-line (minimal) entry rendering when True
HideOld = False             # hide entries already marked as read
BackgroundBrowser = True    # launch the browser detached from the UI
UseHTTPS = False            # use the HTTPS reddit gateway
# Load user config
LocalConfig = ConfigParser(interpolation=None)
LocalConfig.read([CONF_LOCATION,])
if LocalConfig.has_section("config"):
    for opt in LocalConfig.options("config"):
        # Every option is copied verbatim into Config; a few of them also
        # toggle the module-level boolean flags above.
        Config[opt] = LocalConfig.get("config", opt)
        if opt == "minimal":
            Minimal = bool(int(Config[opt]))
        elif opt == "hideold":
            HideOld = bool(int(Config[opt]))
        elif opt == "browser-background":
            BackgroundBrowser = bool(int(Config[opt]))
        elif opt == "https":
            UseHTTPS = bool(int(Config[opt]))
if LocalConfig.has_section("color"):
    for cname in LocalConfig.options("color"):
        # Color values are "fg,bg"; a single value keeps the normal background.
        col = LocalConfig.get("color", cname).split(",")
        if len(col) == 2:
            Colors[cname] = col
        else:
            Colors[cname] = col[0], Colors["normal"][1]
if UseHTTPS:
    # Rebuild the endpoint constants on top of the HTTPS gateway.
    BASE_URI = "https://pay.reddit.com/"
    INBOX_URI = BASE_URI + "message/inbox"
    SUB_URI = BASE_URI + "r/%s/"
    SEARCH_URI = BASE_URI + "search.json?q=%s&sort=new"
# Load seen cache
Seen = []
if not os.path.exists(CONF_DIR):
    os.mkdir(CONF_DIR)
if os.path.exists(SEEN_LOCATION):
    try:
        Seen = pickle.load(open(SEEN_LOCATION, 'rb'))
    except:
        # Best-effort: a corrupt or unreadable cache simply starts empty.
        pass
# }}}
# {{{ Utility functions
commands = {}
# {{{ command
def command(name=None, cmds=None):
    """Decorator that registers a function in a command registry.

    The registry key is *name* when given, otherwise the function's own
    name. The registry is *cmds* when given, otherwise the module-level
    ``commands`` dict. The decorated function is returned unchanged.
    """
    def register(func):
        if cmds is None:
            global commands
            registry = commands
        else:
            registry = cmds
        registry[name or func.__name__] = func
        return func
    return register
# }}}
# {{{ getcommand
def getcommand(name, cmds=None):
    """Get a command by exact name or unique prefix.

    Returns the registered function on an exact or unique-prefix match,
    a list of candidate names when the prefix is ambiguous, and None when
    nothing matches. *cmds* defaults to the module-level registry.
    """
    # Get command dict
    if cmds == None:
        global commands
        cmd = commands
    else:
        cmd = cmds
    # Exact match
    if name in cmd:
        return cmd[name]
    # Prefix match. Materialise as a list: in Python 3, filter() returns a
    # lazy iterator, so the original len(candidate) raised TypeError and
    # prefix lookup never worked.
    candidate = [key for key in cmd.keys() if key.startswith(name)]
    if len(candidate) == 1:
        return cmd[candidate[0]]
    elif candidate:
        return candidate
    else:
        return None
# }}}
# {{{ browseropen
def browseropen(uri):
    """Open *uri* (a single link, or a list of links) in a browser.

    If Config["browser-command"] is set it is used as a template: "{0}"
    or "%s" is substituted with the link, otherwise the link is appended.
    Without a configured command the stdlib webbrowser module is used.
    With BackgroundBrowser the command is detached ("&") and its output
    silenced; otherwise the curses cursor is shown for the duration.
    """
    if not BackgroundBrowser:
        # A foreground browser takes over the terminal; show the cursor.
        curses.curs_set(1)
    if type(uri) in (str, bytes):
        if Config["browser-command"]:
            if "{0}" in Config["browser-command"]:
                cmd = Config["browser-command"].format(uri)
            elif "%s" in Config["browser-command"]:
                cmd = (Config["browser-command"] % uri )
            else:
                cmd = Config["browser-command"]+" "+uri
            if BackgroundBrowser:
                cmd += " &"
                call(cmd, shell=True, stdout=DEVNULL, stderr=DEVNULL)
            else:
                call(cmd, shell=True)
        else:
            # Taken from webbrowser source since stderr is displayed using regular browser
            browser = webbrowser.get()
            if hasattr(browser, 'name'):
                cmdline = browser.name + " " + uri
                call( cmdline, shell=True, stdout=DEVNULL, stderr=DEVNULL)
            else:
                webbrowser.open(uri)
    else:
        # List of links: build one compound shell command when a custom
        # browser command is configured, otherwise open them one by one.
        if Config["browser-command"]:
            if "{0}" in Config["browser-command"]:
                cmd = "("+" ; ".join([Config["browser-command"].format(u) for u in uri])+")"
            elif "%s" in Config["browser-command"]:
                cmd = "("+" ; ".join([Config["browser-command"]%u for u in uri])+")"
            else:
                cmd = "("+" ; ".join([Config["browser-command"]+" "+u for u in uri])+")"
            if BackgroundBrowser:
                cmd += " &"
                call(cmd, shell=True, stdout=DEVNULL, stderr=DEVNULL)
            else:
                call(cmd, shell=True)
        else:
            for u in uri:
                # Taken from webbrowser source since stderr is displayed using regular browser
                cmdline = webbrowser.get().name + " " + u
                call( cmdline, shell=True, stdout=DEVNULL, stderr=DEVNULL)
    if not BackgroundBrowser:
        curses.curs_set(0)
# }}}
# {{{ seen_write
def seen_write():
    """Persist the seen-article-ID cache to disk.

    Trims the in-memory ``Seen`` list to the configured "seen-size" first,
    so the cache file cannot grow without bound.
    """
    global Seen
    limit = int(Config["seen-size"])
    if len(Seen) > limit:
        Seen = Seen[-limit:]
    # Context manager closes the file even if pickling fails (the original
    # left the handle from open() unclosed).
    with open(SEEN_LOCATION, 'wb') as cache_file:
        pickle.dump(Seen, cache_file, pickle.HIGHEST_PROTOCOL)
# }}}
# {{{ check_inbox
def check_inbox(interface):
    """Poll the configured reddit inbox URI and update the unread count.

    Best-effort: any network or parse failure leaves ``interface.messages``
    untouched. Intended to run on a background thread.
    """
    try:
        req = Request(Config["inbox"])
        # Config always contains "user-agent" (default ""), so dict.get with
        # a default never fired; use `or` so an empty value actually falls
        # back to an identifying User-Agent string.
        ua = Config.get("user-agent") or "{} - {}".format(
            APPLICATION_NAME, APPLICATION_VERSION)
        req.add_header('User-agent', ua)
        resp = urlopen(req).read().decode("utf-8")
        data = json.loads(resp)["data"]["children"]
        interface.messages = len(data)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not swallowed; inbox checking stays best-effort otherwise.
        pass
# }}}
# }}}
# {{{ Data
class Data(object):
    """Manage communication with the external website."""
    def __init__(self, uri):
        self.uri = uri              # reddit JSON listing URI to poll
        self.loading = False        # a fetch is currently in progress
        self.need_update = False    # UI should refresh on the next loop turn
        self.entries = []           # Entry objects, ordered per Config["sort"]
        self.total = 0              # number of entries
        self.new = 0                # number of unread entries
        self.error = ""             # last fetch error message, if any
    def pprint(self):
        # NOTE(review): Data has no ``data`` attribute, so this debug helper
        # would raise AttributeError -- presumably meant self.entries.
        from pprint import pprint
        pprint(self.data)
    def _load(self, use_thread=True):
        """Fetch the JSON listing and merge it into self.entries.

        On failure, records a message in self.error and returns; otherwise
        updates existing entries' counters, appends new ones (recording
        their ids in the Seen cache) and re-sorts per Config["sort"].
        """
        try:
            req = Request(self.uri)
            # NOTE(review): Config always contains "user-agent" (default ""),
            # so the fallback default below is never used -- verify intent.
            ua = Config.get("user-agent", "{} - {}".format(
                APPLICATION_NAME, APPLICATION_VERSION))
            req.add_header('User-agent', ua)
            resp = urlopen(req).read().decode("utf-8")
            data = json.loads(resp)["data"]["children"]
            self.error = ""
        except Exception as e:
            self.loading = False
            self.need_update = True
            self.error = "Error opening reddit json file at %s. %s [Line %s]." % (self.uri, e, sys.exc_info()[-1].tb_lineno)
            return
        for datum in data:
            datum = datum["data"]
            if datum["hidden"]: continue
            # Entry already known? Then only refresh its volatile counters.
            index = [entry for entry in self.entries if entry.data["id"] == datum["id"]]
            if index:
                entry = index[0]
                entry.data["num_comments"] = datum["num_comments"]
                entry.data["score"] = datum["score"]
                entry.data["ups"] = datum["ups"]
                entry.data["downs"] = datum["downs"]
            else:
                self.entries.append(Entry(datum))
                if datum["id"] not in Seen:
                    Seen.append(datum["id"])
                    seen_write()
        if Config["sort"]:
            # A leading "-" on the sort key means descending order.
            sort = Config["sort"]
            if sort.startswith("-"):
                sort = sort[1:]
            self.entries.sort(key=(lambda entry: entry.data[sort] if sort in entry.data else 0))
            if Config["sort"].startswith("-"):
                self.entries.reverse()
        self.loading = False
        self.need_update = True
        self.total = len(self.entries)
        self.new = len([entry for entry in self.entries if not entry.read])
    def load(self, use_thread=True):
        """Kick off a fetch, on a daemon thread unless use_thread is False."""
        if not self.loading:
            self.loading = True
            self.need_update = True
            if use_thread:
                t = threading.Thread(target=self._load, args=())
                t.daemon = True
                t.start()
            else:
                self._load(False)
# }}}
# {{{ Entry
class Entry(object):
    """One reddit listing item plus its display/read state."""
    def __init__(self, data):
        self.data = data
        # An entry counts as read once its id is in the persistent Seen cache.
        self.read = "id" in self.data and self.data["id"] in Seen
        self.reval_data()
    def reval_data(self):
        """Revaluate data dictionary to set some convenience vars."""
        if "title" not in self.data:
            return
        self.data["title"] = self.data["title"].replace("\r\n", "").replace("\n", "").replace("&amp;","&")
        self.data["read"] = " " if self.read else "[New]"
        self.data["nsfw"] = "NSFW" if self.data["over_18"] else " "
    def __repr__(self):
        if "title" not in self.data:
            return "<>"
        return "<"+self.data["title"]+">"
    def __eq__(self, other):
        # Entries compare equal iff both carry the same reddit id.
        if "id" not in self.data or "id" not in other.data:
            return False;
        return other.data["id"] == self.data["id"]
    def _line_format(self, line, width):
        """Split *line* at the right-align marker and pad/truncate the left
        part so that left+right exactly fills *width* columns."""
        if hasattr(str, "format"):
            line = line.split("%>")
        else:
            line = line.split("{>}")
        left, right = (line[0], line[1]) if len(line) >= 2 else (line[0], "")
        right_len = len(right)
        left_target = width - right_len
        if len(left) > left_target:
            left = left[:left_target - 3] + "..."
        else:
            left = left + " " * (left_target - len(left))
        return left, right
    def format(self, display_type, width):
        """Render the entry using the configured format strings.

        Returns (left, right) for "minimal" display, or the 4-tuple
        (topleft, topright, bottomleft, bottomright) for full display.
        """
        if not len(self.data):
            return ""
        if display_type == "minimal":
            if hasattr(str, "format"):
                return self._line_format(Config["entry-format-minimal"].format(**self.data), width)
            else:
                return self._line_format(Config["entry-format-minimal"] % self.data, width)
        else:
            # Full format may contain a %n / {n} line break marker.
            if hasattr(str, "format"):
                lines = Config["entry-format-full"].format(**self.data).split("%n")
            else:
                lines = (Config["entry-format-full"] % self.data).split("{n}")
            if len(lines) >= 2:
                top, bottom = lines[0], lines[1]
                bottom = self._line_format(bottom, width)
            else:
                top, bottom = lines[0], ("", "")
            top = self._line_format(top, width)
            return top[0], top[1], bottom[0], bottom[1]
    @property
    def comment_uri(self):
        """Get the comment uri associated with this entry."""
        if "id" not in self.data:
            return ""
        uri = (SUB_URI % self.data["subreddit"])
        if uri[-1] != "/":
            uri += "/"
        return uri+"comments/"+self.data["id"]
    def open(self):
        """Open entry link in browser and mark the entry as read."""
        if "url" in self.data:
            if UseHTTPS:
                self.data["url"] = self.data["url"].replace("http:", "https:")
            browseropen(self.data["url"])
            self.read = True
            self.data["read"] = " "
    def opencomments(self):
        """Open comments link in browser."""
        if self.comment_uri:
            browseropen(self.comment_uri)
    def openboth(self):
        """Open both the entry link and its comments page; mark as read."""
        if self.comment_uri and "url" in self.data:
            browseropen([self.data["url"], self.comment_uri])
            self.read = True
            self.data["read"] = " "
# }}}
# {{{ Interface
class Interface(object):
    """Manage on-screen display through curses."""
    def __init__(self, data):
        self._quit = False
        self.scr = None                # curses screen, created in init()
        self.data = data               # Data instance being displayed
        self.scroll = 0                # index of the first visible entry
        self.selected = 0              # index of the highlighted entry
        self.need_update = False       # refresh requested for next loop turn
        self.need_redraw = False       # full clear+redraw requested
        self.messages = 0              # unread inbox message count
        self.displayed_entries = 0     # entry count after HideOld filtering
        # Periodically reload articles (and the inbox, if configured).
        self.timer = Timer(int(Config["update-interval"])*60, self._timeout)
        self.timer.start()
    def _timeout(self):
        """Timer callback: reload data, poll the inbox, and re-arm itself."""
        # Load data
        self.data.load(False)
        # Check inbox if applicable
        if Config["inbox"]:
            threading.Thread(target=check_inbox, daemon=True, args=(self,)).start()
        # Restart timer
        del self.timer
        self.timer = Timer(int(Config["update-interval"])*60, self._timeout)
        self.timer.start()
    def init(self):
        """Initialise curses screen"""
        # Initialise curses
        self.scr = curses.initscr()
        self.scr.keypad(1)
        self.scr.timeout(GETCH_TIMEOUT)
        curses.curs_set(0)
        curses.noecho()
        curses.start_color()
        curses.use_default_colors()
        # Initialise colors; a "bright" foreground maps to the bold attribute.
        self.colors = {}
        pairnum = 1
        for cname in Colors:
            fg, bg = Colors[cname]
            if fg.startswith("bright"):
                fbright = True
                fg = fg[6:]
            else:
                fbright = False
            try:
                curses.init_pair(pairnum, COLOR_NAME[fg], COLOR_NAME[bg])
            except IndexError:
                curses.init_pair(pairnum, 0, 0)
            self.colors[cname] = curses.color_pair(pairnum)
            if fbright:
                self.colors[cname] |= curses.A_BOLD
            pairnum += 1
    @property
    def entry(self):
        """Get the selected entry (skipping read ones when HideOld is set)."""
        if HideOld:
            num_entries = self.data.total
            i = 0
            r = 0
            while i < num_entries:
                entry = self.data.entries[i]
                i += 1
                if HideOld and entry.read:
                    self.displayed_entries -= 1
                    continue
                if r == self.selected:
                    return entry
                r += 1
            return Entry({})
        elif self.selected < self.data.total:
            return self.data.entries[self.selected]
        else:
            return Entry({})
    def cleanup(self):
        """Stop the refresh timer and restore the terminal."""
        # Cancel timer
        self.timer.cancel()
        # Cleanup curses
        curses.endwin()
    def main(self):
        """Run a main loop and handle key events."""
        self.refresh()
        # Remember the terminal size so resizes can be detected. (The
        # original referenced y/x before assignment here, raising NameError
        # the first time the resize branch was reached.)
        y, x = self.scr.getmaxyx()
        while not self._quit:
            key = self.scr.getch()
            if key in Binds:
                cmd = getcommand(Binds[key])
                if callable(cmd):
                    cmd(self.data, self)
            if self.need_update or self.data.need_update:
                self.refresh()
                self.need_update = self.data.need_update = False
            elif curses.is_term_resized(y, x):
                self.refresh()
            y, x = self.scr.getmaxyx()
    def quit(self):
        """Quit the interface and application."""
        self._quit = True
    def setline(self, y, x, line, color=None, offset=0, char=' '):
        """Fill a line completely with text."""
        height, width = self.scr.getmaxyx()
        line = str(line).ljust(width-offset, char)
        try:
            if color and color in self.colors:
                self.scr.addstr(y, x, line, self.colors[color])
            else:
                self.scr.addstr(y, x, line)
        except Exception:
            # curses raises when writing to the bottom-right cell or during
            # a resize; deliberately keep drawing (narrowed from bare except
            # so KeyboardInterrupt/SystemExit still propagate).
            pass
    def refresh(self):
        """Refresh the curses interface."""
        # Get dimensions
        height, width = self.scr.getmaxyx()
        if self.need_redraw:
            self.need_redraw = False
            self.scr.clear()
        # Title line
        if hasattr(str, "format"):
            self.setline(0, 0, Config["title-format"].format(total=self.data.total,
                new=self.data.new, title=self.data.title), color="title")
        else:
            self.setline(0, 0, Config["title-format"] % dict(total=self.data.total,
                new=self.data.new, title=self.data.title), color="title")
        # Unread messages count
        if Config["inbox"]:
            text = str(self.messages)+" unread messages"
            self.setline(0, width-len(text)-1, text, color = "messages")
        # Display entries
        y = 1
        i = 0
        r = 0
        num_entries = self.data.total
        self.displayed_entries = num_entries
        if num_entries:
            # Keep the selection inside the visible window by adjusting scroll.
            displayed = int((height-1-y)/(1 if Minimal else 2))
            if self.selected <= self.scroll:
                self.scroll = self.selected
            elif self.selected >= self.scroll+displayed:
                self.scroll = self.selected-displayed+1
        while y < height-1:
            if num_entries and i < num_entries:
                entry = self.data.entries[i]
                i += 1
                if HideOld and entry.read:
                    self.displayed_entries -= 1
                    continue
                if r < self.scroll:
                    r += 1
                    continue
                sel = "-selected" if self.selected == r else ""
                r += 1
                if Minimal:
                    left, right = entry.format("minimal", width-1)
                    self.setline(y, 0, left, "entry"+sel)
                    self.setline(y, len(left), right, "entry-data"+sel)
                else:
                    topleft, topright, bottomleft, bottomright = entry.format("full", width-1)
                    self.setline(y, 0, topleft, "entry"+sel)
                    self.setline(y, len(topleft), topright, "entry-data"+sel)
                    if y+1 < height-1:
                        y += 1
                        self.setline(y, 0, bottomleft, "entry-bottom"+sel)
                        self.setline(y, len(bottomleft), bottomright, "entry-bottom-data"+sel)
            else:
                self.setline(y, 0, "", "normal")
            y += 1
        # Status line: last error, or a loading indicator.
        if self.data.error:
            self.setline(y, 0, self.data.error.center(width-1))
        else:
            self.setline(y, 0, "Loading...".center(width-1) if self.data.loading else " "*(width-1))
        # Refresh the screen
        self.scr.refresh()
    def prev_line(self):
        """Move the selection up one entry, wrapping to the last."""
        total = self.displayed_entries
        self.selected = total-1 if self.selected <= 0 else self.selected-1
        self.need_update = True
    def next_line(self):
        """Move the selection down one entry, wrapping to the first."""
        total = self.displayed_entries
        self.selected = 0 if self.selected >= total-1 else self.selected+1
        self.need_update = True
    def prev_page(self):
        """Move the selection up one screenful (wraps to the last entry)."""
        height, width = self.scr.getmaxyx()
        total = self.displayed_entries
        displayed = int((height-2)/(1 if Minimal else 2))
        self.selected -= displayed
        self.need_update = True
        if self.selected < 0:
            self.selected = total-1
    def next_page(self):
        """Move the selection down one screenful (wraps to the first entry)."""
        height, width = self.scr.getmaxyx()
        total = self.displayed_entries
        displayed = int((height-2)/(1 if Minimal else 2))
        self.selected += displayed
        self.need_update = True
        if self.selected >= total-1:
            self.selected = 0
    def first_line(self):
        """Jump to the first entry."""
        self.selected = 0
        self.need_update = True
    def last_line(self):
        """Jump to the last entry."""
        self.selected = self.data.total-1
        self.need_update = True
# }}}
# {{{ Commands
@command(name="quit")
def cm_quit(data, interface):
interface.quit()
@command(name="first-line")
def cm_first_line(data, interface):
interface.first_line()
@command(name="last-line")
def cm_last_line(data, interface):
interface.last_line()
@command(name="prev-line")
def cm_prev_line(data, interface):
interface.prev_line()
@command(name="next-line")
def cm_next_line(data, interface):
interface.next_line()
@command(name="prev-page")
def cm_prev_page(data, interface):
interface.prev_page()
@command(name="next-page")
def cm_next_page(data, interface):
interface.next_page()
@command(name="update")
def cm_update(data, interface):
# Update articles
data.load()
# Update inbox
if Config["inbox"]:
threading.Thread(target=check_inbox, daemon=True, args=(interface, )).start()
@command(name="open")
def cm_open(data, interface):
interface.entry.open()
interface.need_update = True
if not BackgroundBrowser:
interface.need_redraw = True
@command(name="mark-read")
def cm_mark_read(data, interface):
if not interface.entry.read:
interface.entry.read = True
interface.entry.reval_data()
data.new -= 1
data.need_update = True
@command(name="open-comments")
def cm_opencomments(data, interface):
interface.entry.opencomments()
interface.need_update = True
if not BackgroundBrowser:
interface.need_redraw = True
@command(name="open-both")
def cm_openboth(data, interface):
interface.entry.openboth()
interface.need_update = True
if not BackgroundBrowser:
interface.need_redraw = True
@command(name="open-inbox")
def cm_openinbox(data, interface):
browseropen(INBOX_URI)
if not BackgroundBrowser:
interface.need_redraw = True
interface.need_update = True
interface.messages = 0
@command(name="mark-all-read")
def cm_mark_all_read(data, interface):
for entry in data.entries:
entry.read = True
entry.reval_data()
data.new = 0
data.need_update = True
@command(name="toggle-minimal")
def cm_toggle_minimal(data, interface):
global Minimal
Minimal = not Minimal
interface.need_update = True
@command(name="toggle-hideold")
def cm_toggle_hideold(data, interface):
global HideOld
HideOld = not HideOld
interface.need_update = True
@command(name="frontpage")
def cm_frontpage(data, interface):
if data.uri != Config["frontpage"]:
del interface.data
data = Data(Config["frontpage"])
data.title = "Frontpage"
data.load()
interface.data = data
@command(name="subreddit")
def cm_subreddit(data, interface):
curses.curs_set(1)
curses.echo()
interface.scr.timeout(-1)
height, width = interface.scr.getmaxyx()
interface.setline(height-1, 0, " "*(width-1))
interface.setline(height-1, 1, "Display Subreddit: ")
sub = interface.scr.getstr(height-1, 20).decode("utf-8")
interface.scr.timeout(GETCH_TIMEOUT)
curses.noecho()
curses.curs_set(0)
if not sub:
return
else:
newloc = (SUB_URI % sub)+".json"
if data.uri != newloc:
del interface.data
data = Data(newloc)
data.title = sub
data.load()
interface.data = data
@command(name="search")
def cm_search(data, interface):
curses.curs_set(1)
curses.echo()
interface.scr.timeout(-1)
height, width = interface.scr.getmaxyx()
interface.setline(height-1, 0, " "*(width-1))
interface.setline(height-1, 1, "Search: ")
sub = interface.scr.getstr(height-1, 9).decode("utf-8")
interface.scr.timeout(GETCH_TIMEOUT)
curses.noecho()
curses.curs_set(0)
if not sub:
return
else:
newloc = SEARCH_URI % quote(sub)
if data.uri != newloc:
del interface.data
data = Data(newloc)
data.title = "Search -- " + sub
data.load()
interface.data = data
@command(name="subreddit-go")
def cm_subreddit_go(data, interface):
sub = interface.entry.data["subreddit"]
newloc = (SUB_URI % sub)+".json"
if data.uri == newloc:
newloc = Config["frontpage"]
del interface.data
data = Data(newloc)
data.title = sub
data.load()
interface.data = data
@command(name="redraw")
def cm_redraw(data, interface):
interface.need_update = True
# }}}
# Set LC_ALL locale to force utf-8 to work in curses
import locale
locale.setlocale(locale.LC_ALL,"")
# {{{ Main function
def main():
    """Entry point: handle --version/--help, otherwise start the UI.

    An optional positional argument names a subreddit to browse; without
    one the configured frontpage listing is loaded.
    """
    if len(sys.argv) > 1 and sys.argv[1] in ("-v", "--version"):
        print(VERSIONTEXT)
    elif len(sys.argv) > 1 and sys.argv[1] in ("--help", "-h"):
        print(HELPTEXT)
    else:
        if len(sys.argv) > 1:
            data = Data((SUB_URI % sys.argv[1])+".json")
            data.title = sys.argv[1]
        else:
            data = Data(Config["frontpage"])
            data.title = "Frontpage"
        data.load()
        interface = Interface(data)
        try:
            try:
                interface.init()
                interface.main()
            finally:
                # Restore the terminal exactly once, before any traceback is
                # printed (the original duplicated cleanup() in both paths).
                interface.cleanup()
        except Exception:
            import traceback
            traceback.print_exc()
# }}}
if __name__ == '__main__':
    # Run the client only when executed directly, not on import.
    main()
# vim: fdm=marker:
| [
"miguelmota2@gmail.com"
] | miguelmota2@gmail.com | |
37743ac45717ca0451bcb513c7fe7005ee067bb0 | dd19ab77e237a80e32b25247b514b34bd99e22be | /scripts/export_collection.py | e787c2d4347fb40871293636b2b5fa6bd65eb39d | [] | no_license | aekazakov/seed2kegg | 54d65ab13c660a71c21dfd4dc1426fad9ecbb76c | b41aeda511d017eefe75231121c86d77fa796c55 | refs/heads/master | 2021-09-11T00:03:37.866608 | 2018-04-04T16:21:11 | 2018-04-04T16:21:11 | 125,553,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | #!/usr/bin/python
import sys
import argparse
from context import seed2kegg
from seed2kegg import db_utils
from seed2kegg import data_analysis
def get_args():
    """Build the CLI parser and return the parsed arguments.

    With no arguments at all the usage text is printed and the process
    exits with status 1.
    """
    parser = argparse.ArgumentParser(
        description='This script creates FASTA file with proteins from functional collection.')
    for flag, text in (
            ('--seed_db', 'SEED sqlite DB path'),
            ('--kegg_db', 'SEED sqlite DB path'),
            ('--kegg_prots', 'KEGG proteins in FASTA format'),
            ('--seed_prots', 'SEED proteins in FASTA format'),
            ('--name', 'Collection name'),
            ('--ver', 'Collection version'),
            ('--outfile', 'Output file name'),
    ):
        parser.add_argument(flag, help=text)
    parsed = parser.parse_args()
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parsed
def main():
    """Export proteins of a named functional collection to FASTA.

    Opens the SEED SQLite database, attaches the KEGG database, collects
    the gene list for the requested collection/version, and writes the
    matching proteins from both FASTA inputs into the output file.
    """
    args = get_args()
    # Open database
    conn = db_utils.connect_local_database(args.seed_db)
    c = conn.cursor()
    db_utils.attach_local_database(c, args.kegg_db, 'kegg_data')
    print ('Finding genes...')
    gene_collection = data_analysis.make_collection_gene_list(c, args.name, args.ver)
    print(len(gene_collection), 'genes found. Writing output...')
    # Both exports append into the same outfile: SEED proteins, then KEGG.
    data_analysis.export_collection_proteins(gene_collection,args.seed_prots,args.outfile)
    data_analysis.export_collection_proteins(gene_collection,args.kegg_prots,args.outfile)
    conn.close()
    print ('done.')
if __name__=='__main__':
    main()
| [
"aekazakov@iseq.lbl.gov"
] | aekazakov@iseq.lbl.gov |
0700257c7977571409c3931f54630b1087bc2915 | ac4bbbe815e171029181d1ecf2ab37a57c43f9f5 | /base_api/edit_mobile_api.py | 5b4af84f57f751a496b9f7d4fcf06b49c97ae132 | [] | no_license | Xishanqingxue/sanyecai-video-api-test | cb39f5c7975732627ad0de671a67225acf456b4f | 960598ed52c9f000b45b688a4093e3f76b3ba242 | refs/heads/master | 2020-03-23T04:19:20.772741 | 2018-07-19T10:20:59 | 2018-07-19T10:20:59 | 141,076,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | # -*- coding:utf-8 -*-
from base.login_base import LoginBaseApi
class EditMobileApi(LoginBaseApi):
    """API wrapper for changing the mobile number bound to a user account."""
    url = "/finan/editModel"

    def build_custom_param(self, data):
        """Pick the endpoint's required fields out of the *data* dict."""
        return {field: data[field] for field in ('mobile', 'verCode', 'type')}
| [
"gaoyinglong@kong.net"
] | gaoyinglong@kong.net |
8aa017b49485a93529f5842ebd6c1605b6019aba | e63c45db069ea20b41fb850c5940e6f99db94914 | /TranskribusDU/tasks/TablePrototypes/DU_Table_Row.py | c69734cdcc09f2b14bb86df4a56c86e3b895773d | [
"BSD-3-Clause"
] | permissive | Transkribus/TranskribusDU | 669607cc32af98efe7380831d15b087b3fc326c9 | 9f2fed81672dc222ca52ee4329eac3126b500d21 | refs/heads/master | 2021-12-29T10:14:49.153914 | 2021-12-22T10:53:10 | 2021-12-22T10:53:10 | 72,862,342 | 24 | 6 | BSD-3-Clause | 2019-07-22T08:49:02 | 2016-11-04T15:52:04 | Python | UTF-8 | Python | false | false | 5,449 | py | # -*- coding: utf-8 -*-
"""
*** Same as its parent apart that text baselines are reflected as a LineString (instead of its centroid)
DU task for ABP Table:
doing jointly row BIO and near horizontal cuts SIO
block2line edges do not cross another block.
The cut are based on baselines of text blocks, with some positive or negative inclination.
- the labels of cuts are SIO
Copyright Naver Labs Europe(C) 2018 JL Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os
import math
try: #to ease the use without proper Python installation
import TranskribusDU_version
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
import TranskribusDU_version
TranskribusDU_version
from common.trace import traceln
from tasks import _exit
from tasks.DU_CRF_Task import DU_CRF_Task
from tasks.DU_Table.DU_ABPTableSkewed import GraphSkewedCut, main
from tasks.DU_Table.DU_ABPTableSkewed_CutAnnotator import SkewedCutAnnotator
from tasks.DU_Table.DU_ABPTableSkewed_txtBIO_sepSIO_line import DU_ABPTableSkewedRowCutLine
from tasks.DU_Table.DU_ABPTableSkewed_txtBIOH_sepSIO_line import DU_ABPTableSkewedRowCutLine_BIOH
# ----------------------------------------------------------------------------
if __name__ == "__main__":
version = "v.01"
usage, description, parser = DU_CRF_Task.getBasicTrnTstRunOptionParser(sys.argv[0], version)
# parser.add_option("--annotate", dest='bAnnotate', action="store_true",default=False, help="Annotate the textlines with BIES labels")
#FOR GCN
# parser.add_option("--revertEdges", dest='bRevertEdges', action="store_true", help="Revert the direction of the edges")
parser.add_option("--detail", dest='bDetailedReport', action="store_true", default=False,help="Display detailed reporting (score per document)")
parser.add_option("--baseline", dest='bBaseline', action="store_true", default=False, help="report baseline method")
parser.add_option("--line_see_line", dest='iLineVisibility', action="store",
type=int, default=GraphSkewedCut.iLineVisibility,
help="seeline2line: how far in pixel can a line see another cut line?")
parser.add_option("--block_see_line", dest='iBlockVisibility', action="store",
type=int, default=GraphSkewedCut.iBlockVisibility,
help="seeblock2line: how far in pixel can a block see a cut line?")
parser.add_option("--height", dest="fCutHeight", default=GraphSkewedCut.fCutHeight
, action="store", type=float, help="Minimal height of a cut")
# parser.add_option("--cut-above", dest='bCutAbove', action="store_true", default=False
# ,help="Each object defines one or several cuts above it (instead of below as by default)")
parser.add_option("--angle", dest='lsAngle'
, action="store", type="string", default="-1,0,+1"
,help="Allowed cutting angles, in degree, comma-separated")
parser.add_option("--graph", dest='bGraph', action="store_true", help="Store the graph in the XML for displaying it")
parser.add_option("--bioh", "--BIOH", dest='bBIOH', action="store_true", help="Text are categorised along BIOH instead of BIO")
parser.add_option("--text", "--txt", dest='bTxt', action="store_true", help="Use textual features.")
# ---
#parse the command line
(options, args) = parser.parse_args()
options.bCutAbove = True # Forcing this!
if options.bBIOH:
DU_CLASS = DU_ABPTableSkewedRowCutLine_BIOH
else:
DU_CLASS = DU_ABPTableSkewedRowCutLine
if options.bGraph:
import os.path
# hack
DU_CLASS.bCutAbove = options.bCutAbove
traceln("\t%s.bCutAbove=" % DU_CLASS.__name__, DU_CLASS.bCutAbove)
DU_CLASS.lRadAngle = [math.radians(v) for v in [float(s) for s in options.lsAngle.split(",")]]
traceln("\t%s.lRadAngle=" % DU_CLASS.__name__, DU_CLASS.lRadAngle)
for sInputFilename in args:
sp, sf = os.path.split(sInputFilename)
sOutFilename = os.path.join(sp, "graph-" + sf)
doer = DU_CLASS("debug", "."
, iBlockVisibility=options.iBlockVisibility
, iLineVisibility=options.iLineVisibility
, fCutHeight=options.fCutHeight
, bCutAbove=options.bCutAbove
, lRadAngle=[math.radians(float(s)) for s in options.lsAngle.split(",")]
, bTxt=options.bTxt)
o = doer.cGraphClass()
o.parseDocFile(sInputFilename, 9)
o.addEdgeToDoc()
print('Graph edges added to %s'%sOutFilename)
o.doc.write(sOutFilename, encoding='utf-8',pretty_print=True,xml_declaration=True)
SkewedCutAnnotator.gtStatReport()
exit(0)
# ---
try:
sModelDir, sModelName = args
except Exception as e:
traceln("Specify a model folder and a model name!")
_exit(usage, 1, e)
main(DU_CLASS, sModelDir, sModelName, options)
| [
"jean-luc.meunier@naverlabs.com"
] | jean-luc.meunier@naverlabs.com |
94381092a516f8a08784fe6c13b24cdface82703 | 4fb6ff52a99bf2b784fd5458bd9b8c8eda03cd79 | /main.py | c4229dc8aad8c427c27388bd793bb9a90021ca7b | [] | no_license | nahum-smith/Text2Learn-CLI | b34aa0166093081f0ec86911eb8b3f5badd9c2d7 | 94b8a5af7cc86dc741d4c537fe74836b522d359b | refs/heads/master | 2021-05-15T21:47:27.333389 | 2017-10-11T17:13:15 | 2017-10-11T17:13:15 | 106,585,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from question_entry import run_program # import the app
if __name__ == '__main__':
    # Launch the question-entry CLI when run as a script.
    run_program()
| [
"nahumsmith@gmail.com"
] | nahumsmith@gmail.com |
d05741d3e944002c02f7a9053ec5c74173e1d430 | b99f403ef2336491946770b482daf072f46a70f0 | /bin/generate-readme.py | c187aee46f9987be589587d7c4ec72ca2f47985a | [
"CC0-1.0"
] | permissive | Ambrosiani/awesome-openaccess | b1727778c70eb2fe4b3beb951c2fe7d827541ba7 | 84d191721e03d5f104e18c6d33a581bdd66e0a3f | refs/heads/master | 2020-04-17T23:49:16.125271 | 2018-07-05T19:48:46 | 2018-07-05T19:48:46 | 167,052,172 | 0 | 0 | CC0-1.0 | 2019-01-22T19:15:12 | 2019-01-22T19:15:12 | null | UTF-8 | Python | false | false | 3,216 | py | #!/usr/bin/env python
import sys
import os
import os.path
import yaml
import json
import glob
import pprint
def text2emoji(text):
emoji = ''
for thing in sorted(text):
if thing == 'images':
emoji = emoji + ':camera: '
if thing == 'iiif':
emoji = emoji + ':minidisc: '
if thing == 'csv':
emoji = emoji + ':blue_book: '
if thing == 'tsv':
emoji = emoji + ':green_book: '
if thing == 'xml':
emoji = emoji + ':closed_book: '
if thing == 'xls':
emoji = emoji + ':notebook: '
if thing == 'json':
emoji = emoji + ':orange_book: '
if thing == 'api':
emoji = emoji + ':computer: '
print emoji
return emoji
if __name__ == '__main__':
whoami = os.path.abspath(sys.argv[0])
bindir = os.path.dirname(whoami)
datadir = os.path.dirname(bindir)
datadir = os.path.join(datadir, 'data')
readme = open('README.md', 'w')
readme.write('# Awesome OpenAccess\n')
readme.write('[](https://awesome.re)\n')
readme.write('\n')
readme.write('*An awesome list of awesome OpenAccess projects* \n')
readme.write('\n')
readme.write('This is an awesome list of awesome OpenAccess projects. If you\'d like to contribute, please read the guidlines in [CONTRIBUTING.md](https://github.com/micahwalterstudio/awesome-openaccess/blob/master/CONTRIBUTING.md)\n')
## legend
readme.write('### Key\n')
readme.write('| Emoji | Meaning |\n')
readme.write('| --- | --- |\n')
readme.write('| :octocat: | GitHub Repo |\n')
readme.write('| :globe_with_meridians: | Documentation Website |\n')
readme.write('| :camera: | Images |\n')
readme.write('| :minidisc: | IIIF Images |\n')
readme.write('| :blue_book: | CSV Dataset |\n')
readme.write('| :green_book: | TSV Dataset |\n')
readme.write('| :closed_book: | XML Dataset |\n')
readme.write('| :notebook: | XLS Dataset |\n')
readme.write('| :orange_book: | JSON Dataset |\n')
readme.write('| :computer: | API |\n')
readme.write('### Datasets\n')
## read in all the yml files
datafiles = {}
for filename in glob.glob(datadir + '/*.yml'):
stream = file(filename, 'r')
yml = yaml.load(stream)
head, tail = os.path.split(filename)
tail = os.path.splitext(tail)[0]
datafiles[tail] = yml
readme.write('| Organization | Location | Data |\n')
readme.write('| --- | --- | --- |\n')
for org in sorted(datafiles.iterkeys()):
pprint.pprint(datafiles[org]['name'])
links = ''
if 'url' in datafiles[org]:
links = links + '[:globe_with_meridians:](' + datafiles[org]['url'] +') '
if 'github' in datafiles[org]:
links = links + '[:octocat:](' + datafiles[org]['github'] +') '
emoji = text2emoji(datafiles[org]['data'])
readme.write('| [' + datafiles[org]['name'] + '](' + datafiles[org]['url'] + ') | ' + datafiles[org]['location'] + ' | ' + links + emoji +' | \n' )
readme.close()
| [
"micah@micahwalter.com"
] | micah@micahwalter.com |
200e9917ea1a71489173315c12ac6c736aac3a7c | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/PyBox/pybox2d/library/Box2D/examples/chain.py | c1f19e55dbac3e2fa63532f8b24c48d5d1e22b19 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e79af0d06dbe1710b8ba767355096adc26f63f6435e754284e2a3caa01b35291
size 2366
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
09ea0a5ceb9d26ce0cf39216249a5e7adcb992b0 | ca5e4643520750ac718dc5bfd89fe59109c02ed8 | /src/app/oanda.py | e70889a490a352ac360bb66c5ee8962c869be893 | [] | no_license | albertomassa/oanda-broker-python | 3d1da569ccdfa9204ecd5ff8de02b03a75496add | a20837db985964505f2c6806616bda107679982e | refs/heads/master | 2023-07-27T15:13:55.451626 | 2021-09-13T09:26:08 | 2021-09-13T09:26:08 | 383,616,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,990 | py |
import requests
from requests.structures import CaseInsensitiveDict
import configparser
import json
from model import Instrument, CandleStick
config = configparser.ConfigParser()
config.read('src/ConfigFile.properties')
broker_url = config['OANDABroker']['broker.url']
api_token = config['OANDABroker']['broker.api-token']
headers = CaseInsensitiveDict()
headers['Accept'] = 'application/json'
headers['Authorization'] = 'Bearer ' + api_token
def get(resource_url):
return requests.get(broker_url + resource_url, headers=headers).json()
def post(resource_url, body=None):
# todo
return
def put(resource_url, body=None):
return requests.put(broker_url + resource_url, headers=headers).json()
def delete(resource_url, body=None):
#todo
return
def get_candles(instrument, count, granularity):
instrument = instrument.strip()
url = config['OANDAResources']['broker.resources.candles'].replace('$INSTRUMENT_ID$', instrument)
count = count.strip()
url += '?count=' + count
granularity = granularity.strip()
url += '&granularity=' + granularity
response = get(url)
candles = response.get('candles')
if(candles == None):
print('no candles for query')
return None
list = []
for candle in candles:
c = CandleStick(json.dumps(candle))
list.append(c)
return list
def get_instruments(account):
url = config['OANDAResources']['broker.resources.instruments'].replace('$ACCOUNT_ID$', account)
response = get(url)
instruments = response.get('instruments')
if(instruments == None):
print('no instruments with account: ' + account)
return None
list = []
for instrument in instruments:
i = Instrument(json.dumps(instrument))
list.append(i)
return list
def get_name_instruments(account):
instruments = get_instruments(account)
list = []
for instrument in instruments:
list.append(instrument.name)
return list | [
"albertomassa.info@gmail.com"
] | albertomassa.info@gmail.com |
d3d7b7867e5897701e57c5e1df34476ee66921cd | 02e45339965b0b5e47744c37f85edc1f337ac615 | /aoc/day_16/scanner.py | 9cbf6a7aa31e0cc285a42170b44ee507497dfa5c | [] | no_license | kwinso/thm-writeups | 4ef5a7f0dea91d85067c6ac09a1cae993d49b91d | b6a0c290d3dfa6b08efe09b5ca2f12c03e7e4848 | refs/heads/main | 2023-04-06T17:20:26.011170 | 2021-04-13T19:02:36 | 2021-04-13T19:02:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | import os
import exiftool
import zipfile
files = os.listdir('.') # listing this dir
def scan(filesList):
for file in files:
if file.endswith(".zip");
with zipfilep.ZipFile(file) as zip_file:
zip_file.extract(file)
| [
"pythonisajoke@gmail.com"
] | pythonisajoke@gmail.com |
dbf3663efd36d123f447869cc49f5d273ae52d95 | 2fe9ae409767cf5b10e32ae8243679be86e3cf93 | /test2.py | 2402089344d4af2d20f543586e2b39b3f802ae6b | [] | no_license | Collapseyu/hkStock_monthDataAndReport | 5c26d2e1114eb0d3c502e62f391d75b5726ea096 | 106ffe82ca5870d6e583cba33ba3c75547669256 | refs/heads/master | 2020-05-07T17:01:13.524339 | 2019-04-15T03:38:55 | 2019-04-15T03:38:55 | 180,710,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,977 | py | from socket import *
import sys
from lxml import etree
import datetime,time
import csv
import re
import requests
from bs4 import BeautifulSoup
csv_file=csv.reader(open('goodOrder.csv','r'))
a=[]
for i in csv_file:
tmp=''
for j in i:
tmp=tmp+j
if(tmp>'01100' and tmp<'01201'):
a.append(tmp)
print(a)
totalData=[]
betterOrder=[]
count_t=0
headers= {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko'}
for count in a:
url='http://services1.aastocks.com/web/bjty/CompanyFundamental.aspx?Language=Chn&category=FinRatio&symbol='+a[count_t]+'&yearType=Interim'
html=requests.get(url)
#htm=etree.HTML(message)
soup = BeautifulSoup(html.content,'lxml')
#comfund > table > tbody > tr.R0 > td > table > tbody > tr.R2 > td > table > tbody > tr.R2 > td > table > tbody > tr:nth-child(3) > td.IC1
tmp1=soup.select('#comfund > table > tr.R0 > td > table > tr.R2 > td > table > tr.R2 > td > table > tr:nth-child(3) > td.IC0')
if(tmp1==[] or tmp1[0].get_text()!='流动比率(倍)'):
count_t+=1
continue
tmp2 = soup.select('#comfund > table > tr.R0 > td > table > tr.R2 > td > table > tr.R2 > td > table > tr:nth-child(4) > td.IC0')
if (tmp2==[] or tmp2[0].get_text() != '速动比率(倍)'):
count_t+=1
continue
dat=soup.select('#comfund > table > tr.R0 > td > table > tr.R2 > td > table > tr.R2 > td > table > tr.IRTitle >td.IC1')
n=0
for i in dat:
tmp=[a[count_t]]
tmp.append(i.get_text())
datas=soup.select('#comfund > table > tr.R0 > td > table > tr.R2 > td > table > tr.R2 > td > table > tr:nth-child(3) > td.IC'+str(n+1))
for data in datas:
tmp.append(float(data.get_text()))
datas2=soup.select('#comfund > table > tr.R0 > td > table > tr.R2 > td > table > tr.R2 > td > table > tr:nth-child(4) > td.IC'+str(n+1))
for j in datas2:
tmp.append(float(j.get_text()))
totalData.append(tmp)
n+=1
time.sleep(0.5)
url='http://services1.aastocks.com/web/bjty/CompanyFundamental.aspx?Language=Chn&category=FinRatio&symbol='+a[count_t]+'&yearType=Annual'
html=requests.get(url)
#htm=etree.HTML(message)
soup = BeautifulSoup(html.content,'lxml')
tmp1 = soup.select(
'#comfund > table > tr.R0 > td > table > tr.R2 > td > table > tr.R2 > td > table > tr:nth-child(3) > td.IC0')
if (tmp1==[] or tmp1[0].get_text() != '流动比率(倍)'):
count_t+=1
continue
tmp2 = soup.select(
'#comfund > table > tr.R0 > td > table > tr.R2 > td > table > tr.R2 > td > table > tr:nth-child(4) > td.IC0')
if (tmp2==[] or tmp2[0].get_text() != '速动比率(倍)'):
count_t+=1
continue
#comfund > table > tbody > tr.R0 > td > table > tbody > tr.R2 > td > table > tbody > tr.R2 > td > table > tbody > tr:nth-child(3) > td.IC1
dat=soup.select('#comfund > table > tr.R0 > td > table > tr.R2 > td > table > tr.R2 > td > table > tr.IRTitle >td.IC1')
n=0
for i in dat:
tmp=[a[count_t]]
tmp.append(i.get_text())
datas=soup.select('#comfund > table > tr.R0 > td > table > tr.R2 > td > table > tr.R2 > td > table > tr:nth-child(3) > td.IC'+str(n+1))
for data in datas:
tmp.append(float(data.get_text()))
datas2=soup.select('#comfund > table > tr.R0 > td > table > tr.R2 > td > table > tr.R2 > td > table > tr:nth-child(4) > td.IC'+str(n+1))
for j in datas2:
tmp.append(float(j.get_text()))
totalData.append(tmp)
n+=1
betterOrder.append(a[count_t])
print(a[count_t])
count_t+=1
time.sleep(0.5)
with open('financialReport.csv', 'a+', newline='') as f:
writer = csv.writer(f)
for row in totalData:
writer.writerow(row)
f.close()
with open('betterOrder.csv', 'a+', newline='') as f:
writer = csv.writer(f)
for row in betterOrder:
writer.writerow(row)
f.close()
| [
"yuyicong@yuyicongdeMacBook-Pro.local"
] | yuyicong@yuyicongdeMacBook-Pro.local |
a26ec63f56bad3f7991ace4eb345ea52f222d5e9 | 44032f82bcb767175cf86aeccee623eb6cfbd40e | /deploy/compose/gpu/__init__.py | 2303c0b0cf1621e03ddbbda08853f070befb4247 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | veyselkoparal/DeepVideoAnalytics | 3628d41f8e06547e177a7badd20b399bd7f9028a | 013f7e1efcc11f9ed5762192a91589aa6b4df359 | refs/heads/master | 2020-03-16T04:22:46.603989 | 2018-05-07T06:55:47 | 2018-05-07T06:55:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,462 | py | """
Code in this file assumes that it is being run via dvactl and git repo root as current directory
"""
CONFIG = {
"deploy/gpu/docker-compose-2-gpus.yml": {"global_model_gpu_id": 0,
"global_model_memory_fraction": 0.1,
"workers":
[(0, 0.25, "LAUNCH_BY_NAME_indexer_inception", "inception"),
(0, 0.2, "LAUNCH_BY_NAME_analyzer_crnn", "crnn"),
(0, 0.5, "LAUNCH_BY_NAME_detector_coco", "coco"),
(1, 0.5, "LAUNCH_BY_NAME_detector_textbox", "textbox"),
(1, 0.19, "LAUNCH_BY_NAME_detector_face", "face"),
(1, 0.15, "LAUNCH_BY_NAME_indexer_facenet", "facenet"),
(1, 0.15, "LAUNCH_BY_NAME_analyzer_tagger", "tagger")]
},
"deploy/gpu/docker-compose-4-gpus.yml": {"global_model_gpu_id": 2,
"global_model_memory_fraction": 0.29,
"workers":
[(0, 0.3, "LAUNCH_BY_NAME_indexer_inception", "inception"),
(0, 0.4, "LAUNCH_BY_NAME_analyzer_tagger", "tagger"),
(0, 0.2, "LAUNCH_BY_NAME_analyzer_crnn", "crnn"),
(1, 1.0, "LAUNCH_BY_NAME_detector_coco", "coco"),
(2, 0.7, "LAUNCH_BY_NAME_detector_face", "face"),
(3, 0.5, "LAUNCH_BY_NAME_detector_textbox", "textbox"),
(3, 0.45, "LAUNCH_BY_NAME_indexer_facenet", "facenet")
]
},
}
SKELETON = """ version: '3'
services:
db:
image: postgres:9.6.6
container_name: dva-pg
volumes:
- dvapgdata:/var/lib/postgresql/data
env_file:
- ../../../custom.env
rabbit:
image: rabbitmq
container_name: dva-rmq
env_file:
- ../../../custom.env
volumes:
- dvarabbit:/var/lib/rabbitmq
redis:
image: bitnami/redis:latest
container_name: dva-redis
env_file:
- ../../../custom.env
volumes:
- dvaredis:/bitnami
webserver:
image: akshayubhat/dva-auto:gpu
container_name: webserver
env_file:
- ../../../custom.env
environment:
- LAUNCH_SERVER_NGINX=1
- LAUNCH_NOTEBOOK=1
command: bash -c "git reset --hard && git pull && sleep 10 && ./start_container.py"
ports:
- "127.0.0.1:8000:80"
- "127.0.0.1:8888:8888"
depends_on:
- db
- redis
- rabbit
volumes:
- dvadata:/root/media
non-gpu-workers:
image: akshayubhat/dva-auto:gpu
env_file:
- ../../../custom.env
environment:
- LAUNCH_BY_NAME_retriever_inception=1
- LAUNCH_BY_NAME_retriever_facenet=1
- LAUNCH_Q_qextract=1
- LAUNCH_Q_qstreamer=1
- LAUNCH_SCHEDULER=1
- LAUNCH_Q_GLOBAL_RETRIEVER=1
command: bash -c "git reset --hard && git pull && sleep 45 && ./start_container.py"
depends_on:
- db
- redis
- rabbit
volumes:
- dvadata:/root/media
{gpu_workers}
global-model:
image: akshayubhat/dva-auto:gpu
env_file:
- ../../../custom.env
environment:
- GPU_AVAILABLE=1
- NVIDIA_VISIBLE_DEVICES={global_model_gpu_id}
- GPU_MEMORY={global_model_memory_fraction}
- LAUNCH_Q_GLOBAL_MODEL=1
command: bash -c "git reset --hard && git pull && sleep 45 && ./start_container.py"
depends_on:
- db
- redis
- rabbit
volumes:
- dvadata:/root/media
volumes:
dvapgdata:
dvadata:
dvarabbit:
dvaredis:
"""
BLOCK = """ {worker_name}:
image: akshayubhat/dva-auto:gpu
env_file:
- ../../../custom.env
environment:
- GPU_AVAILABLE=1
- NVIDIA_VISIBLE_DEVICES={gpu_id}
- GPU_MEMORY={memory_fraction}
- {env_key}={env_value}
command: bash -c "git reset --hard && git pull && sleep 45 && ./start_container.py"
depends_on:
- db
- redis
- rabbit
volumes:
- dvadata:/root/media"""
def generate_multi_gpu_compose():
for fname in CONFIG:
blocks = []
worker_specs = CONFIG[fname]['workers']
for gpu_id, fraction, env_key, worker_name, in worker_specs:
blocks.append(
BLOCK.format(worker_name=worker_name, gpu_id=gpu_id, memory_fraction=fraction, env_key=env_key,
env_value=1))
with open(fname, 'w') as out:
out.write(SKELETON.format(gpu_workers="\n".join(blocks),
global_model_gpu_id=CONFIG[fname]['global_model_gpu_id'],
global_model_memory_fraction=CONFIG[fname]['global_model_memory_fraction']))
| [
"akshayubhat@gmail.com"
] | akshayubhat@gmail.com |
5fb152a03b97239720932a800dcb93ed2841278e | fd6fab64e64031b319b7dc88b66ad960d30fdfc7 | /assignment02_ModelQueryProcess/run_assignment.py | 12b99e32a4e8faed2c013945d46efacf258c313c | [] | no_license | mkadhirvel/DSC650 | 297fa63da3668f91d9ce17c6195522dc21d8b5f2 | 75556e3a11a3b5801cad7df124dcc19df219934d | refs/heads/master | 2023-03-17T12:19:34.332707 | 2021-02-11T00:29:11 | 2021-02-11T00:29:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | """
Author: Alan Danque
Date: 20201205
Class: DSC 650
Exercise: Week 2 Assignment - Run all assignments
"""
import os
os.system('python ./kvdb.py')
os.system('python ./documentdb.py')
os.system('python ./objectdb.py')
os.system('python ./rdbms.py')
| [
"adanque@gmail.com"
] | adanque@gmail.com |
7266db340ad3c001b2476e0d9677e9d1a795cf48 | 46a5df524f1d96baf94f6eb0f6222f2b856235f3 | /src/data/image/sliced_grid.py | 7612a11c9ffd5b6b038a1658df956563308349f9 | [
"MIT"
] | permissive | PhilHarnish/forge | 5dfbb0aa2afdb91e55d85187bd86fbeb9b6b2888 | c544fb8b499e1e13793c94159f4c35bce187311e | refs/heads/master | 2023-03-11T17:23:46.569359 | 2023-02-25T15:09:01 | 2023-02-25T15:09:01 | 1,818,598 | 2 | 0 | MIT | 2023-02-25T15:09:02 | 2011-05-29T19:36:53 | Jupyter Notebook | UTF-8 | Python | false | false | 2,215 | py | import math
from typing import Iterable
import cv2
import numpy as np
from data.image import coloring, image, model
from puzzle.constraints.image import sliced_grid_constraints
from util.geometry import np2d
class SlicedGrid(model.LineSpecification):
_source: image.Image
_constraints: sliced_grid_constraints.SlicedGridConstraints
def __init__(
self,
source: image.Image,
constraints: sliced_grid_constraints) -> None:
self._source = source
self._constraints = constraints
def set_source(self, source: image.Image) -> None:
self._source = source
self._constraints.set_source(source)
def get_debug_data(self) -> np.ndarray:
data = cv2.cvtColor(self._source.get_debug_data(), cv2.COLOR_GRAY2RGB)
c = self._constraints.center
cv2.circle(data, c, 3, coloring.WHITE, thickness=3)
for (theta, distances, divisions), color in zip(
self._constraints.get_specs(),
coloring.colors(self._constraints.slices)):
for distance in distances:
x, y = np2d.move_from(c, theta, distance)
cv2.circle(data, (round(x), round(y)), 3, color, thickness=3)
return data
def __iter__(self) -> Iterable[model.Divisions]:
c = self._constraints.center
max_distance = sum(self._source.shape)
for theta, distances, divisions in self._constraints.get_specs():
endpoints = []
total_distance = 0
for distance in distances:
moved = np2d.move_from(c, theta, distance)
endpoints.append(moved)
total_distance += abs(distance)
start, end = endpoints
division_distance = math.copysign(
total_distance / divisions, -distances[0])
right_angle = theta + math.pi / 2
dx = round(math.cos(right_angle) * max_distance)
dy = round(math.sin(right_angle) * max_distance)
result = []
for i in range(0, divisions + 1): # n_divisions requires n+1 iterations.
x, y = np2d.move_from(start, theta, division_distance * i)
result.append((
theta,
(round(x - dx), round(y - dy)), (round(x + dx), round(y + dy)),
i / divisions))
yield result
def __len__(self) -> int:
return self._constraints.slices
| [
"philharnish@gmail.com"
] | philharnish@gmail.com |
09093b345eb9e6d22ad30b1184a90e5f08c97ffb | 94ac54dca0e5ec8051c9c2611fb5bf51ac40185e | /test/integrationtest/conftest.py | 0910a5352094fb92cef99b556fd798385735e763 | [
"BSD-3-Clause"
] | permissive | bphillab/conda-env-tracker | 1ec893af594249971e1ee51e4e432769a7e4f3e8 | 0e13fe08ec6747dc70b8b5863235f84092331f04 | refs/heads/master | 2020-07-31T19:24:48.779199 | 2019-08-20T20:16:22 | 2019-08-20T20:16:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,664 | py | """Taken from pytest-ordering, https://github.com/ftobia/pytest-ordering/blob/develop/pytest_ordering/__init__.py"""
import shutil
from pathlib import Path
import pytest
from conda_env_tracker.errors import CondaEnvTrackerCondaError
from conda_env_tracker.gateways.conda import delete_conda_environment
from conda_env_tracker.gateways.io import USER_ENVS_DIR
from conda_env_tracker.main import create, setup_remote
@pytest.fixture(scope="module")
def end_to_end_setup(request):
"""Setup and teardown for tests."""
name = "end_to_end_test"
channels = ["defaults"]
env_dir = USER_ENVS_DIR / name
remote_path = Path(__file__).parent.absolute() / "remote_test_dir"
if remote_path.exists():
shutil.rmtree(remote_path)
remote_path.mkdir()
def teardown():
delete_conda_environment(name=name)
if env_dir.is_dir():
shutil.rmtree(env_dir)
if remote_path.is_dir():
shutil.rmtree(remote_path)
request.addfinalizer(teardown)
try:
env = create(
name=name, specs=["python=3.6", "colorama"], channels=channels, yes=True
)
setup_remote(name=name, remote_dir=remote_path, yes=True)
except CondaEnvTrackerCondaError as err:
teardown()
raise err
channel_command = (
"--override-channels --strict-channel-priority --channel "
+ " --channel ".join(channels)
)
return {
"name": name,
"env": env,
"env_dir": env_dir,
"channels": channels,
"channel_command": channel_command,
"remote_dir": remote_path,
}
@pytest.fixture(scope="module")
def r_end_to_end_setup(request):
"""Setup and teardown for R end to end tests."""
name = "end_to_end_test"
channels = ["r", "defaults"]
env_dir = USER_ENVS_DIR / name
remote_path = Path(__file__).parent.absolute() / "remote_test_dir"
if remote_path.exists():
shutil.rmtree(remote_path)
remote_path.mkdir()
def teardown():
delete_conda_environment(name=name)
if env_dir.is_dir():
shutil.rmtree(env_dir)
if remote_path.is_dir():
shutil.rmtree(remote_path)
request.addfinalizer(teardown)
try:
env = create(
name=name, specs=["r-base", "r-devtools"], channels=channels, yes=True
)
setup_remote(name=name, remote_dir=remote_path, yes=True)
except CondaEnvTrackerCondaError as err:
teardown()
raise err
return {
"name": name,
"env": env,
"env_dir": env_dir,
"channels": channels,
"remote_dir": remote_path,
}
| [
"jesse.lord@allstate.com"
] | jesse.lord@allstate.com |
b9fc285d949d0b051b5a31996a5dd0c8a128fe54 | 5b47158a85428490511eecc9e2d18454d914d47e | /yasql/apps/sqlorders/api/generateRollbacksql.py | 99a6a7d1a70ae8e1a1bd3b2cb3784cbaebb6dc8b | [
"Apache-2.0"
] | permissive | dbtool/YaSQL | 946dacede73811351ecfeb26d1907acf4b46e83c | deaa144d7b66756b38fc476ccbfb8ab6e5f66ad9 | refs/heads/master | 2023-03-25T01:26:53.224918 | 2021-03-17T06:56:03 | 2021-03-17T06:56:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,172 | py | # -*- coding:utf-8 -*-
# edit by fuzongfei
import datetime
import json
import logging
import simplejson
from pymysql import escape_string
from pymysql.constants import FIELD_TYPE
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.event import QueryEvent
from pymysqlreplication.row_event import DeleteRowsEvent, UpdateRowsEvent, WriteRowsEvent
logger = logging.getLogger('main')
class ReadRemoteBinlog(object):
"""
binlog_file:读取的binlog文件
start_pos:开始读取的position
end_pos:结束读取的position
trx_timestamp: 事务开始的时间
affected_rows:事务影响的行数
返回数据:
success: {'status': 'success', 'data': [rollbacksql]}
fail: {'status': 'fail', 'msg': str(err)}
"""
def __init__(self, binlog_file=None, start_pos=None, end_pos=None,
host=None, port=None, user=None, password=None, thread_id=None,
only_schema=None, only_tables=None):
self.binlog_file = binlog_file
self.start_pos = start_pos
self.end_pos = end_pos
self.thread_id = thread_id
# only_schema和only_table必须为list类型
self.only_schemas = only_schema
self.only_tables = only_tables
# 目标数据库配置
self.mysql_setting = {'host': host,
'port': port,
'user': user,
'passwd': password,
}
def _handler_date(self, obj):
"""格式化时间"""
if type(obj) == datetime.datetime:
return '{0.year:04d}-{0.month:02d}-{0.day:02d} {0.hour:02d}:{0.minute:02d}:{0.second:02d}'.format(obj)
if type(obj) == datetime.date:
return '{0.year:04d}-{0.month:02d}-{0.day:02d} 00:00:00'.format(obj)
if type(obj) == datetime.timedelta:
return str(obj)
def _val_join(self, items):
"""组合column name, column value"""
k, v = items
if v is None:
return f"{k} IS NULL"
else:
if isinstance(v, int):
return f"`{k}`={v}"
else:
return f"`{k}`=\"{v}\""
def _del_join(self, items):
"""
type == 'DELETE'类型
对values进行处理
"""
v = items
if isinstance(v, type(None)):
return 'NULL'
elif isinstance(v, int):
return f'{v}'
elif isinstance(v, str):
# 使用pymysql.escape_string对数据中的引号进行转义
escape_v = escape_string(v)
return f"\"{escape_v}\""
else:
return f"\"{v}\""
def _upd_join(self, items):
"""
type == 'UPDATE'类型
组合column name, column value
"""
k, v = items
if v is None:
return f"{k}=NUll"
else:
if isinstance(v, int):
return f"`{k}`={v}"
elif isinstance(v, str):
# 使用pymysql.escape_string对数据中的引号进行转义
escape_v = escape_string(v)
return f"`{k}`=\"{escape_v}\""
else:
return f"`{k}`=\"{v}\""
def _format_binlog(self, row):
return simplejson.dumps(row, default=self._handler_date)
def _geometry(self, row):
"""解码Geometry类型
列类型:{'column': 'GEO_LOCATION', 'type': 255}
# pymysqlreplication返回的原始数据
> a = b'\x00\x00\x00\x00\x01\x01\x00\x00\x00\xcd#\x7f0\xf0\x19]@\xb0\x1e\xf7\xad\xd6\xf3C@'
> bytes.hex(a)
Out[54]: '000000000101000000cd237f30f0195d40b01ef7add6f34340'
# 在数据库存储的原始数据为:unhex('000000000101000000cd237f30f0195d40b01ef7add6f34340')
# 需要将回滚语句里面的"unhex('xxx')"改写为unhex('xxx')插入即可
mysql> select AsText(unhex('000000000101000000cd237f30f0195d40b01ef7add6f34340'));
+------------------------------------------------------------------------+
| AsText(unhex('000000000101000000cd237f30f0195d40b01ef7add6f34340')) |
+------------------------------------------------------------------------+
| POINT(116.405285 39.904989) |
+------------------------------------------------------------------------+
然后在插入
"""
for col in row['columns']:
if col['type'] == FIELD_TYPE.GEOMETRY:
name = col['column']
if row['type'] in ['INSERT', 'DELETE']:
row['values'][name] = f"unhex('{bytes.hex(row['values'][name])}')"
if row['type'] == 'UPDATE':
row['before'][name] = f"unhex('{bytes.hex(row['before'][name])}')"
row['after'][name] = f"unhex('{bytes.hex(row['after'][name])}')"
return row
def _generate_rollback_sql(self, rows):
rollback_statement = []
for row in rows:
row = self._geometry(row)
format_row = json.loads(self._format_binlog(row))
type = format_row['type']
database = format_row['database']
table = format_row['table']
# 主键可能由一个字段或多个字段组成
primary_key = ([format_row.get('primary_key')] if isinstance(format_row.get('primary_key'), str) else list(
format_row.get('primary_key'))) if format_row.get('primary_key') else []
sql = ''
if type == 'INSERT':
if primary_key:
where = ' AND '.join(
['='.join((primary, str(row['values'].get(primary)))) for primary in primary_key])
else:
where = ' AND '.join(map(self._val_join, row['values'].items()))
sql = f"DELETE FROM `{database}`.`{table}` WHERE {where} LIMIT 1;"
elif type == 'DELETE':
column_name = ', '.join(map(lambda key: f'`{key}`', row['values'].keys()))
column_value = ', '.join(map(self._del_join, row['values'].values()))
sql = f"INSERT INTO `{database}`.`{table}`({column_name}) VALUES ({column_value});"
elif type == 'UPDATE':
before_values = ', '.join(map(self._upd_join, row['before'].items()))
if primary_key:
where = ' AND '.join(
['='.join((primary, str(row['after'].get(primary)))) for primary in primary_key])
else:
where = ' AND '.join(map(self._val_join, row['after'].items()))
sql = f"UPDATE `{database}`.`{table}` SET {before_values} WHERE {where};"
rollback_statement.append(sql)
return rollback_statement
def run_by_rows(self):
try:
server_id = 6666666 + int(self.thread_id)
stream = BinLogStreamReader(connection_settings=self.mysql_setting,
server_id=server_id,
only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent, QueryEvent],
resume_stream=True,
blocking=False,
log_file=f'{self.binlog_file}',
log_pos=self.start_pos,
only_schemas=f'{self.only_schemas}',
only_tables=f'{self.only_tables}'
)
rows = []
thread_id = query = None
for binlogevent in stream:
log_pos = binlogevent.packet.log_pos
if log_pos >= self.end_pos:
# 当当前的binlogevent日志位置大于结束的binlog时,退出
stream.close()
break
else:
if isinstance(binlogevent, QueryEvent):
thread_id = binlogevent.slave_proxy_id
query = binlogevent.query
if not isinstance(binlogevent, QueryEvent):
if self.thread_id == thread_id and query == 'BEGIN':
for row in binlogevent.rows:
columns = [{'column': x.name, 'type': x.type} for x in binlogevent.columns]
binlog = {'database': binlogevent.schema,
'table': binlogevent.table,
'primary_key': binlogevent.primary_key,
'columns': columns
}
if isinstance(binlogevent, DeleteRowsEvent):
binlog['values'] = row["values"]
binlog['type'] = 'DELETE'
rows.append(binlog)
if isinstance(binlogevent, UpdateRowsEvent):
binlog["before"] = row["before_values"]
binlog["after"] = row["after_values"]
binlog['type'] = 'UPDATE'
rows.append(binlog)
if isinstance(binlogevent, WriteRowsEvent):
binlog['values'] = row["values"]
binlog['type'] = 'INSERT'
rows.append(binlog)
stream.close()
result = {'status': 'success', 'data': self._generate_rollback_sql(rows)}
except Exception as err:
# print("Exception in user code:")
# print('-' * 60)
# traceback.print_exc(file=sys.stdout)
# print('-' * 60)
print(err)
result = {'status': 'fail', 'msg': str(err)}
return result
| [
"zongfei.fu@yunzhanghu.com"
] | zongfei.fu@yunzhanghu.com |
be81ee524aa81dde5ccebbd21e988d98106b13a7 | e9caa3bdbc563f2a09363ab0cd59c2e7d749a502 | /tuyochat_flask/api_commands_test.py | 1d80d88ad61631f3f5b84f21575b922e403140a8 | [
"Apache-2.0"
] | permissive | von-dee/tuyochatapi | d903d941ec233b8f90e3257e67394bb58655cdec | 55dadc24fb2077c25f4bb5d489c86778639a10a6 | refs/heads/master | 2022-12-14T10:49:20.615173 | 2019-05-31T15:17:24 | 2019-05-31T15:17:24 | 189,615,356 | 0 | 0 | Apache-2.0 | 2022-08-06T05:30:55 | 2019-05-31T15:15:23 | Python | UTF-8 | Python | false | false | 1,376 | py | """
Demo the Bebop indoors (sets small speeds and then flies just a small amount)
Note, the bebop will hurt your furniture if it hits it. Even though this is a very small
amount of flying, be sure you are doing this in an open area and are prepared to catch!
Author: Amy McGovern
"""
from pyparrot.pyparrot.Bebop import Bebop
def simpleflight():
bebop = Bebop(drone_type="Bebop2")
print("connecting")
success = bebop.connect(10)
print(success)
if (success):
print("turning on the video")
bebop.start_video_stream()
print("sleeping")
bebop.smart_sleep(2)
bebop.ask_for_state_update()
bebop.safe_takeoff(10)
# set safe indoor parameters
bebop.set_max_tilt(5)
bebop.set_max_vertical_speed(1)
# trying out the new hull protector parameters - set to 1 for a hull protection and 0 without protection
# bebop.set_hull_protection(1)
print("Flying direct: Slow move for indoors")
bebop.fly_direct(roll=0, pitch=20, yaw=0, vertical_movement=0, duration=2)
bebop.smart_sleep(5)
bebop.safe_land(10)
print("DONE - disconnecting")
bebop.stop_video_stream()
bebop.smart_sleep(5)
print(bebop.sensors.battery)
bebop.disconnect()
def battery_status():
response = "77%"
return response
| [
"vondeesela@gmail.com"
] | vondeesela@gmail.com |
1bb7b97ff0d7ed871f4280d115fe7d2651c8300f | e2334e514d9a0321fc834d6398519fa86dc1ba93 | /cira_ml_short_course/utils/upconvnet.py | 2c80a660190e61d2e1945a456101ea1ecc85d46e | [
"MIT"
] | permissive | ChanJeunlam/cira_ml_short_course | 4fc99da5a6e051a51fe7fdc307df17eeb06516eb | 23741f7ebba9dde8e4f5985ed43bed50b4f99cc3 | refs/heads/master | 2023-04-30T20:33:37.974674 | 2021-05-10T17:14:36 | 2021-05-10T17:14:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,151 | py | """Helper methods for upconvnets (upconvolutional networks)."""
import copy
import numpy
import keras.models
from cira_ml_short_course.utils import cnn, utils, image_utils, \
image_normalization
KERNEL_INITIALIZER_NAME = cnn.KERNEL_INITIALIZER_NAME
BIAS_INITIALIZER_NAME = cnn.BIAS_INITIALIZER_NAME
PLATEAU_PATIENCE_EPOCHS = cnn.PLATEAU_PATIENCE_EPOCHS
PLATEAU_LEARNING_RATE_MULTIPLIER = cnn.PLATEAU_LEARNING_RATE_MULTIPLIER
PLATEAU_COOLDOWN_EPOCHS = cnn.PLATEAU_COOLDOWN_EPOCHS
EARLY_STOPPING_PATIENCE_EPOCHS = cnn.EARLY_STOPPING_PATIENCE_EPOCHS
LOSS_PATIENCE = cnn.LOSS_PATIENCE
DEFAULT_INPUT_DIMENSIONS = numpy.array([4, 4, 256], dtype=int)
DEFAULT_CONV_BLOCK_LAYER_COUNTS = numpy.array([2, 2, 2, 2], dtype=int)
DEFAULT_CONV_CHANNEL_COUNTS = numpy.array(
[256, 128, 128, 64, 64, 32, 32, 4], dtype=int
)
DEFAULT_CONV_DROPOUT_RATES = numpy.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0])
DEFAULT_CONV_FILTER_SIZES = numpy.full(8, 3, dtype=int)
DEFAULT_INNER_ACTIV_FUNCTION_NAME = copy.deepcopy(utils.RELU_FUNCTION_NAME)
DEFAULT_INNER_ACTIV_FUNCTION_ALPHA = 0.2
DEFAULT_OUTPUT_ACTIV_FUNCTION_NAME = None
DEFAULT_OUTPUT_ACTIV_FUNCTION_ALPHA = 0.
DEFAULT_L1_WEIGHT = 0.
DEFAULT_L2_WEIGHT = 0.001
def _get_transposed_conv_layer(
        num_rows_in_filter, num_columns_in_filter, upsampling_factor,
        num_filters, weight_regularizer=None):
    """Creates a 2-D transposed-convolution (learned-upsampling) layer.

    :param num_rows_in_filter: Number of rows in each filter (kernel).
    :param num_columns_in_filter: Number of columns in each filter (kernel).
    :param upsampling_factor: Upsampling factor (integer >= 1), applied as the
        stride in both spatial dimensions.
    :param num_filters: Number of filters (output channels).
    :param weight_regularizer: Instance of `keras.regularizers` applied to
        both kernel and bias, or None for no regularization.
    :return: layer_object: Instance of `keras.layers.Conv2DTranspose`.
    """

    kernel_shape = (num_rows_in_filter, num_columns_in_filter)
    stride_shape = (upsampling_factor, upsampling_factor)

    return keras.layers.Conv2DTranspose(
        filters=num_filters, kernel_size=kernel_shape, strides=stride_shape,
        padding='same', dilation_rate=(1, 1), activation=None, use_bias=True,
        kernel_initializer=KERNEL_INITIALIZER_NAME,
        bias_initializer=BIAS_INITIALIZER_NAME,
        kernel_regularizer=weight_regularizer,
        bias_regularizer=weight_regularizer
    )
def _get_upsampling_layer(upsampling_factor):
    """Creates layer for 2-D (non-learned) upsampling.

    :param upsampling_factor: Upsampling factor (integer >= 1).
    :return: layer_object: Instance of `keras.layers.UpSampling2D`.
    """

    # Older Keras versions do not accept the `interpolation` keyword and raise
    # TypeError ("unexpected keyword argument").  Catch only TypeError instead
    # of a bare `except:`, which would also swallow unrelated failures such as
    # KeyboardInterrupt or a bad `upsampling_factor` value.
    try:
        return keras.layers.UpSampling2D(
            size=(upsampling_factor, upsampling_factor),
            data_format='channels_last', interpolation='bilinear'
        )
    except TypeError:
        return keras.layers.UpSampling2D(
            size=(upsampling_factor, upsampling_factor),
            data_format='channels_last'
        )
def setup_upconvnet(
        input_dimensions=DEFAULT_INPUT_DIMENSIONS,
        conv_block_layer_counts=DEFAULT_CONV_BLOCK_LAYER_COUNTS,
        conv_layer_channel_counts=DEFAULT_CONV_CHANNEL_COUNTS,
        conv_layer_dropout_rates=DEFAULT_CONV_DROPOUT_RATES,
        conv_layer_filter_sizes=DEFAULT_CONV_FILTER_SIZES,
        inner_activ_function_name=DEFAULT_INNER_ACTIV_FUNCTION_NAME,
        inner_activ_function_alpha=DEFAULT_INNER_ACTIV_FUNCTION_ALPHA,
        output_activ_function_name=DEFAULT_OUTPUT_ACTIV_FUNCTION_NAME,
        output_activ_function_alpha=DEFAULT_OUTPUT_ACTIV_FUNCTION_ALPHA,
        l1_weight=DEFAULT_L1_WEIGHT, l2_weight=DEFAULT_L2_WEIGHT,
        use_transposed_conv=True, use_batch_norm_inner=True,
        use_batch_norm_output=True):
    """Sets up (but does not train) upconvnet.

    This method sets up the architecture, loss function, and optimizer.

    B = number of convolutional blocks
    C = number of convolutional layers
    D = number of dense layers

    :param input_dimensions: numpy array with dimensions of input data. Entries
        should be (num_grid_rows, num_grid_columns, num_channels).
    :param conv_block_layer_counts: length-B numpy array with number of
        convolutional layers in each block. Remember that each conv block
        except the last upsamples the image by a factor of 2.
    :param conv_layer_channel_counts: length-C numpy array with number of
        channels (filters) produced by each convolutional layer.
    :param conv_layer_dropout_rates: length-C numpy array of dropout rates. To
        turn off dropout for a given layer, use NaN or a non-positive number.
    :param conv_layer_filter_sizes: length-C numpy array of filter sizes. All
        filters will be square (num rows = num columns).
    :param inner_activ_function_name: Name of activation function for all inner
        (non-output) layers.
    :param inner_activ_function_alpha: Alpha (slope parameter) for
        activation function for all inner layers. Applies only to ReLU and eLU.
    :param output_activ_function_name: Same as `inner_activ_function_name` but
        for output layer. This may be None.
    :param output_activ_function_alpha: Same as `inner_activ_function_alpha`
        but for output layer.
    :param l1_weight: Weight for L_1 regularization.
    :param l2_weight: Weight for L_2 regularization.
    :param use_transposed_conv: Boolean flag. If True (False), will use
        transposed convolution (upsampling followed by normal convolution).
    :param use_batch_norm_inner: Boolean flag. If True, will use batch
        normalization after each inner layer.
    :param use_batch_norm_output: Same but for output layer.
    :return: model_object: Untrained instance of `keras.models.Model`.
    """

    # Every conv layer must belong to exactly one conv block.
    num_conv_layers = len(conv_layer_channel_counts)
    assert numpy.sum(conv_block_layer_counts) == num_conv_layers

    num_input_rows = input_dimensions[0]
    num_input_columns = input_dimensions[1]
    num_input_channels = input_dimensions[2]

    # Input is the CNN's flattened feature vector; it is reshaped back into a
    # (rows, columns, channels) grid before the conv blocks.
    input_layer_object = keras.layers.Input(
        shape=(numpy.prod(input_dimensions),)
    )
    regularizer_object = utils._get_weight_regularizer(
        l1_weight=l1_weight, l2_weight=l2_weight
    )

    layer_object = keras.layers.Reshape(
        target_shape=(num_input_rows, num_input_columns, num_input_channels)
    )(input_layer_object)

    for i in range(num_conv_layers):
        # The last layer of each conv block -- except the network's final
        # layer -- upsamples the grid by a factor of 2.
        if (
                i + 1 in numpy.cumsum(conv_block_layer_counts)
                and i != num_conv_layers - 1
        ):
            if use_transposed_conv:
                # Learned upsampling via strided transposed convolution.
                layer_object = _get_transposed_conv_layer(
                    num_rows_in_filter=conv_layer_filter_sizes[i],
                    num_columns_in_filter=conv_layer_filter_sizes[i],
                    upsampling_factor=2,
                    num_filters=conv_layer_channel_counts[i],
                    weight_regularizer=regularizer_object
                )(layer_object)
            else:
                # Fixed upsampling followed by a normal convolution.
                layer_object = _get_upsampling_layer(
                    upsampling_factor=2
                )(layer_object)

                layer_object = cnn._get_2d_conv_layer(
                    num_rows_in_filter=conv_layer_filter_sizes[i],
                    num_columns_in_filter=conv_layer_filter_sizes[i],
                    num_rows_per_stride=1, num_columns_per_stride=1,
                    num_filters=conv_layer_channel_counts[i],
                    use_edge_padding=True,
                    weight_regularizer=regularizer_object
                )(layer_object)
        else:
            # Interior layer of a block: plain stride-1 convolution.
            layer_object = cnn._get_2d_conv_layer(
                num_rows_in_filter=conv_layer_filter_sizes[i],
                num_columns_in_filter=conv_layer_filter_sizes[i],
                num_rows_per_stride=1, num_columns_per_stride=1,
                num_filters=conv_layer_channel_counts[i], use_edge_padding=True,
                weight_regularizer=regularizer_object
            )(layer_object)

        if i == num_conv_layers - 1:
            # Output layer: activation is optional (None means linear output).
            if output_activ_function_name is not None:
                layer_object = utils._get_activation_layer(
                    function_name=output_activ_function_name,
                    slope_param=output_activ_function_alpha
                )(layer_object)
        else:
            layer_object = utils._get_activation_layer(
                function_name=inner_activ_function_name,
                slope_param=inner_activ_function_alpha
            )(layer_object)

        # Non-positive (or NaN) dropout rate disables dropout for this layer.
        if conv_layer_dropout_rates[i] > 0:
            layer_object = utils._get_dropout_layer(
                dropout_fraction=conv_layer_dropout_rates[i]
            )(layer_object)

        if i != num_conv_layers - 1 and use_batch_norm_inner:
            layer_object = utils._get_batch_norm_layer()(layer_object)

        if i == num_conv_layers - 1 and use_batch_norm_output:
            layer_object = utils._get_batch_norm_layer()(layer_object)

    model_object = keras.models.Model(
        inputs=input_layer_object, outputs=layer_object
    )

    # Reconstruction task => mean squared error against the original image.
    model_object.compile(
        loss=keras.losses.mean_squared_error,
        optimizer=keras.optimizers.Adam()
    )

    model_object.summary()
    return model_object
def create_data(image_file_names, normalization_dict, cnn_model_object):
    """Creates input data for upconvnet.

    E = number of examples (storm objects)
    M = number of rows in each storm-centered grid
    N = number of columns in each storm-centered grid
    C = number of channels (predictor variables)
    Z = number of features (from CNN's flattening layer)

    :param image_file_names: 1-D list of paths to input files (readable by
        `image_utils.read_file`).
    :param normalization_dict: Dictionary with params used to normalize
        predictors.  See doc for `image_normalization.normalize_data`.
    :param cnn_model_object: Trained CNN (instance of `keras.models.Model` or
        `keras.models.Sequential`).  Inputs for upconvnet will be outputs from
        CNN's flattening layer.
    :return: feature_matrix: E-by-Z numpy array of features.  These are inputs
        for the upconvnet.
    :return: target_matrix: E-by-M-by-N-by-C numpy array of target values.
        These are targets for the upconvnet but inputs for the CNN.
    """

    image_dict = image_utils.read_many_files(image_file_names)

    # The upconvnet reconstructs the *normalized* images, so the normalized
    # predictor matrix doubles as the target matrix.
    target_matrix, _ = image_normalization.normalize_data(
        predictor_matrix=image_dict[image_utils.PREDICTOR_MATRIX_KEY],
        predictor_names=image_dict[image_utils.PREDICTOR_NAMES_KEY],
        normalization_dict=normalization_dict
    )

    # Upconvnet inputs = CNN activations at the flattening layer.
    feature_matrix = cnn.apply_model(
        model_object=cnn_model_object, predictor_matrix=target_matrix,
        verbose=True,
        output_layer_name=cnn.get_flattening_layer(cnn_model_object)
    )

    return feature_matrix, target_matrix
def train_model_sans_generator(
        model_object, cnn_model_object, training_file_names,
        validation_file_names, num_examples_per_batch, normalization_dict,
        num_epochs, output_dir_name):
    """Trains upconvnet without generator.

    :param model_object: Untrained upconvnet (instance of `keras.models.Model`
        or `keras.models.Sequential`).
    :param cnn_model_object: Trained CNN (instance of `keras.models.Model` or
        `keras.models.Sequential`).
    :param training_file_names: 1-D list of paths to training files (readable
        by `image_utils.read_file`).
    :param validation_file_names: Same but for validation files.
    :param num_examples_per_batch: Batch size.
    :param normalization_dict: See doc for `create_data`.
    :param num_epochs: Number of epochs.
    :param output_dir_name: Path to output directory (model will be saved
        here).
    """

    utils._mkdir_recursive_if_necessary(directory_name=output_dir_name)

    # Checkpoint file names encode the epoch and validation loss.
    model_file_name = (
        output_dir_name + '/model_epoch={epoch:03d}_val-loss={val_loss:.6f}.h5'
    )

    history_object = keras.callbacks.CSVLogger(
        filename='{0:s}/history.csv'.format(output_dir_name),
        separator=',', append=False
    )
    # Save only the model with the best (lowest) validation loss so far.
    checkpoint_object = keras.callbacks.ModelCheckpoint(
        filepath=model_file_name, monitor='val_loss', verbose=1,
        save_best_only=True, save_weights_only=False, mode='min', period=1
    )
    early_stopping_object = keras.callbacks.EarlyStopping(
        monitor='val_loss', min_delta=LOSS_PATIENCE,
        patience=EARLY_STOPPING_PATIENCE_EPOCHS, verbose=1, mode='min'
    )
    # Shrink the learning rate when validation loss plateaus.
    plateau_object = keras.callbacks.ReduceLROnPlateau(
        monitor='val_loss', factor=PLATEAU_LEARNING_RATE_MULTIPLIER,
        patience=PLATEAU_PATIENCE_EPOCHS, verbose=1, mode='min',
        min_delta=LOSS_PATIENCE, cooldown=PLATEAU_COOLDOWN_EPOCHS
    )

    list_of_callback_objects = [
        history_object, checkpoint_object, early_stopping_object, plateau_object
    ]

    # Features (CNN activations) are the upconvnet inputs; the normalized
    # images are its reconstruction targets.
    training_feature_matrix, training_target_matrix = create_data(
        image_file_names=training_file_names,
        normalization_dict=normalization_dict,
        cnn_model_object=cnn_model_object
    )
    print('\n')

    validation_feature_matrix, validation_target_matrix = create_data(
        image_file_names=validation_file_names,
        normalization_dict=normalization_dict,
        cnn_model_object=cnn_model_object
    )
    print('\n')

    model_object.fit(
        x=training_feature_matrix, y=training_target_matrix,
        batch_size=num_examples_per_batch, epochs=num_epochs,
        steps_per_epoch=None, shuffle=True, verbose=1,
        callbacks=list_of_callback_objects,
        validation_data=(validation_feature_matrix, validation_target_matrix),
        validation_steps=None
    )
def read_model(hdf5_file_name):
    """Reads model from HDF5 file.

    :param hdf5_file_name: Path to input file.
    :return: model_object: Instance of `keras.models.Model` (or
        `keras.models.Sequential`) loaded from the file.
    """

    return keras.models.load_model(hdf5_file_name)
def apply_model(model_object, cnn_model_object, cnn_predictor_matrix,
                verbose=True):
    """Applies trained upconvnet to new data.

    E = number of examples (storm objects)
    M = number of rows in each storm-centered grid
    N = number of columns in each storm-centered grid
    C = number of channels (predictor variables)

    :param model_object: Trained upconvnet (instance of `keras.models.Model`
        or `keras.models.Sequential`).
    :param cnn_model_object: Trained CNN (instance of `keras.models.Model` or
        `keras.models.Sequential`).
    :param cnn_predictor_matrix: E-by-M-by-N-by-C numpy array of predictor
        values for CNN.
    :param verbose: Boolean flag.  If True, will print progress messages.
    :return: reconstructed_predictor_matrix: Upconvnet reconstruction of
        `cnn_predictor_matrix`.
    """

    num_examples = cnn_predictor_matrix.shape[0]
    batch_size = 1000

    # Output starts as all-NaN and is filled in batch by batch.
    reconstructed_predictor_matrix = numpy.full(
        cnn_predictor_matrix.shape, numpy.nan
    )

    for first_index in range(0, num_examples, batch_size):
        last_index = min([first_index + batch_size - 1, num_examples - 1])

        if verbose:
            print((
                'Applying upconvnet to examples {0:d}-{1:d} of {2:d}...'
            ).format(
                first_index, last_index, num_examples
            ))

        batch_indices = numpy.arange(first_index, last_index + 1, dtype=int)

        # CNN features for this batch are the upconvnet's inputs.
        batch_feature_matrix = cnn.apply_model(
            model_object=cnn_model_object,
            predictor_matrix=cnn_predictor_matrix[batch_indices, ...],
            verbose=False,
            output_layer_name=cnn.get_flattening_layer(cnn_model_object)
        )

        reconstructed_predictor_matrix[batch_indices, ...] = (
            model_object.predict(
                batch_feature_matrix, batch_size=len(batch_indices)
            )
        )

    if verbose:
        print('Have applied upconvnet to all {0:d} examples!'.format(
            num_examples
        ))

    return reconstructed_predictor_matrix
| [
"lagerqui@ualberta.ca"
] | lagerqui@ualberta.ca |
a52d6c5f00d2ac38e53b3c57ba2e432d9ccc0772 | 1505d132eae07c4e148b1a6ec897742e73bf03a2 | /contacts/migrations/0001_initial.py | 1dde64cf08cab546c7fab3838da56a069035b37f | [] | no_license | ankurmogera/Django-Webapp | e9809525e7157882e909994c82ecad5ceea2d902 | 24a5b992ab48a05f25184e83b1eda1e95388abb7 | refs/heads/main | 2022-12-31T16:34:20.366689 | 2020-10-23T20:31:28 | 2020-10-23T20:31:28 | 303,438,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | # Generated by Django 3.1 on 2020-08-28 17:06
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='contactRecords',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=25)),
('last_name', models.CharField(max_length=25)),
('phone_number', models.CharField(max_length=15)),
('email', models.CharField(max_length=25)),
('dob', models.DateField()),
('address', models.CharField(max_length=100)),
],
),
]
| [
"noreply@github.com"
] | ankurmogera.noreply@github.com |
31c0c7fd583c2dddb1ea5eee74eed5aa2c69d932 | 6ec8bc3773de45c8905dac428f99f69daab1e187 | /Source_Code/mutate.py | c9040f41a8cca82a0aa84095a026c561283b0f85 | [] | no_license | Lampedrazago29/ARM_Input_Generator-LMPG | 20c3c46ac00bd5f79a30f0b98f273ea26a5efb57 | e82ca81195a2e12894ada759a780426ba6e1a761 | refs/heads/master | 2020-03-18T21:15:13.322212 | 2018-06-04T11:33:09 | 2018-06-04T11:33:09 | 135,269,666 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,595 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Laura Pedraza-Gonzalez | March 2018
import os
import sys
import glob # The glob module provides a function for making file lists from directory wildcard searches
import tempfile
import re
import numpy as np
import commands
import time
import textwrap
modellerScript = "mod9.19"
scrl4Script = "Scwrl4"
threeToOneDic = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
'ALA': 'A', 'VAL': 'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M', 'UNK': '*'}
oneToTreeDic = {}
for key in threeToOneDic:
oneToTreeDic.update({threeToOneDic[key] : key})
#############################
#This step generates the getpir.py script. This script is then executed to obtain the .pir file.
#############################
def get_pir_Script(pdbFile, resNumID, chainName, FormatPDB1, moveFile):
    """Generates <pdb>.pir (PIR alignment) for `pdbFile` via MODELLER, then
    rewrites it so the sequence covers residues 1..resNumID, with '-' marking
    residues missing from the PDB.

    Side effects: writes getpyr.py and <pdb>.pir in the current directory;
    sets module globals `sequenceWT`, `sequenceWTList` and `realResNumDic`;
    deletes the temporary PDB when done.

    :param pdbFile: Name of input PDB file (a 3-character extension is
        assumed by the `[:-3]` slicing below).
    :param resNumID: Highest residue number expected in the chain.
    :param chainName: Chain identifier.  NOTE(review): appears unused in this
        function -- confirm with callers.
    :param FormatPDB1: Caller-supplied helper that reformats a PDB file.
    :param moveFile: Caller-supplied helper that renames/moves a file.
    """
    global sequenceWT
    pdbFileTemp = pdbFile[:-3]+"temp"
    pirFile = pdbFile[:-3]+"pir"
    pirFileTemp = pirFile+".temp"

    # Lines of the small MODELLER script that dumps the PIR alignment.
    # NOTE(review): there is no comma after "aln = alignment(env) \n", so that
    # string and the following one are implicitly concatenated into a single
    # list element.  The written script is still correct because both lines
    # keep their own "\n".
    getpir = ["import os \n",
              "import sys \n \n",
              "from modeller import * \n \n",
              "env = environ() \n",
              "aln = alignment(env) \n"
              "mdl = model(env, file='"+pdbFile+"') \n",
              "aln.append_model (mdl, align_codes='wt_"+pdbFile+"',atom_files='"+pdbFile+"') \n",
              "aln.write(file='"+pirFile+"', alignment_format='PIR') \n" ]

    getpirScript = "getpyr.py"
    with open(getpirScript, "w") as getpirFile:
        getpirFile.writelines(getpir)
    # Run MODELLER on the script just written.
    os.system(modellerScript +" "+getpirScript )

    #Identify missing residues and write the pir file in correct format
    FormatPDB1(pdbFile, pdbFileTemp)

    # Map residue number -> one-letter code for every ATOM record present.
    realResNumDic = {}
    with open(pdbFileTemp, "r") as file:
        for line in file:
            if "ATOM" in line:
                realResNumDic.update({int(line.split()[5]) : threeToOneDic[str(line.split()[3])]})
    globals().update({ "realResNumDic": realResNumDic})

    sequence = ''
    sequenceWT = ''
    sequenceWTList = []
    with open(pirFile, "r") as pir, open(pirFileTemp, "w") as temp:
        # Keep only the PIR header lines that mention the PDB file name.
        for line in pir:
            if str(pdbFile) in line:
                temp.writelines(line)
        # Rebuild the sequence residue by residue; '-' marks gaps (missing
        # residues), lower-case letters are residues present in the PDB.
        missResList = []
        for i in range(0,resNumID):
            i = i+1
            if i not in realResNumDic:
                missResList.append(i)
                sequence = sequence+"-"
                sequenceWTList.append("-")
            else:
                res = str(realResNumDic[i]).lower()
                sequence = sequence+res
                sequenceWT = sequenceWT+res
                sequenceWTList.append(res)
        # PIR format: wrap the sequence at 75 columns and terminate with '*'.
        temp.writelines('\n'.join(textwrap.wrap(sequence, width=75)))
        temp.writelines('* \n')
    globals().update({ "sequenceWTList": sequenceWTList})

    moveFile(pirFileTemp, pirFile)
    print "\n The file "+pirFile+" has been generated using the MODELLER 9.19 software. The missing residues "+str(missResList)+" were considered."
    os.remove(pdbFileTemp)
#############################
#This step generates the mutate_model.py script. This script is then executed to *****
#############################
def mutate_model_Script():
    """Writes the helper script `mutate_model.py` to the current directory.

    The script (the standard MODELLER "mutate_model" example) is later run
    with MODELLER to build and refine one point mutation.  The strings below
    are MODELLER input code written verbatim -- do not edit them casually.
    Sets the module global `mutate_modelScript` to the file name written.
    """
    global mutate_modelScript
    mutate_modelScript = "mutate_model.py"

    mutate_model = ["import sys \n",
                    "import os \n",
                    " \n",
                    "from modeller import * \n",
                    "from modeller.optimizers import molecular_dynamics, conjugate_gradients \n",
                    "from modeller.automodel import autosched \n",
                    " \n",
                    "# \n",
                    "# mutate_model.py \n",
                    "# \n",
                    "# Usage: python mutate_model.py modelname respos resname chain > logfile \n",
                    "# \n",
                    "# Example: python mutate_model.py 1t29 1699 LEU A > 1t29.log \n",
                    "# \n",
                    "# \n",
                    "# Creates a single in silico point mutation to sidechain type and at residue position \n",
                    "# input by the user, in the structure whose file is modelname.pdb \n",
                    "# The conformation of the mutant sidechain is optimized by conjugate gradient and \n",
                    "# refined using some MD. \n",
                    "# \n",
                    "# Note: if the model has no chain identifier, specify "" for the chain argument. \n",
                    "# \n",
                    " \n",
                    " \n",
                    "def optimize(atmsel, sched): \n",
                    "    #conjugate gradient \n",
                    "    for step in sched: \n",
                    "        step.optimize(atmsel, max_iterations=200, min_atom_shift=0.001) \n",
                    "    #md \n",
                    "    refine(atmsel) \n",
                    "    cg = conjugate_gradients() \n",
                    "    cg.optimize(atmsel, max_iterations=200, min_atom_shift=0.001) \n",
                    " \n",
                    " \n",
                    "#molecular dynamics \n",
                    "def refine(atmsel): \n",
                    "    # at T=1000, max_atom_shift for 4fs is cca 0.15 A. \n",
                    "    md = molecular_dynamics(cap_atom_shift=0.39, md_time_step=4.0, \n",
                    "                            md_return='FINAL') \n",
                    "    init_vel = True \n",
                    "    for (its, equil, temps) in ((200, 20, (150.0, 250.0, 400.0, 700.0, 1000.0)), \n",
                    "                                (200, 600, \n",
                    "                                 (1000.0, 800.0, 600.0, 500.0, 400.0, 300.0))): \n",
                    "        for temp in temps: \n",
                    "            md.optimize(atmsel, init_velocities=init_vel, temperature=temp, \n",
                    "                         max_iterations=its, equilibrate=equil) \n",
                    "            init_vel = False \n",
                    " \n",
                    " \n",
                    "#use homologs and dihedral library for dihedral angle restraints \n",
                    "def make_restraints(mdl1, aln): \n",
                    "    rsr = mdl1.restraints \n",
                    "    rsr.clear() \n",
                    "    s = selection(mdl1) \n",
                    "    for typ in ('stereo', 'phi-psi_binormal'): \n",
                    "        rsr.make(s, restraint_type=typ, aln=aln, spline_on_site=True) \n",
                    "    for typ in ('omega', 'chi1', 'chi2', 'chi3', 'chi4'): \n",
                    "        rsr.make(s, restraint_type=typ+'_dihedral', spline_range=4.0, \n",
                    "                 spline_dx=0.3, spline_min_points = 5, aln=aln, \n",
                    "                 spline_on_site=True) \n",
                    " \n",
                    "#first argument \n",
                    "modelname, respos, restyp, chain, = sys.argv[1:] \n",
                    " \n",
                    " \n",
                    "log.verbose() \n",
                    " \n",
                    "# Set a different value for rand_seed to get a different final model \n",
                    "env = environ(rand_seed=-49837) \n",
                    " \n",
                    "env.io.hetatm = True \n",
                    "#soft sphere potential \n",
                    "env.edat.dynamic_sphere=False \n",
                    "#lennard-jones potential (more accurate) \n",
                    "env.edat.dynamic_lennard=True \n",
                    "env.edat.contact_shell = 4.0 \n",
                    "env.edat.update_dynamic = 0.39 \n",
                    " \n",
                    "# Read customized topology file with phosphoserines (or standardd one) \n",
                    "env.libs.topology.read(file='$(LIB)/top_heav.lib') \n",
                    " \n",
                    "# Read customized CHARMM parameter library with phosphoserines (or standardd one) \n",
                    "env.libs.parameters.read(file='$(LIB)/par.lib') \n",
                    " \n",
                    " \n",
                    "# Read the original PDB file and copy its sequence to the alignment array: \n",
                    "mdl1 = model(env, file=modelname) \n",
                    "ali = alignment(env) \n",
                    "ali.append_model(mdl1, atom_files=modelname, align_codes=modelname) \n",
                    " \n",
                    "#set up the mutate residue selection segment \n",
                    "s = selection(mdl1.chains[chain].residues[respos]) \n",
                    " \n",
                    "#perform the mutate residue operation \n",
                    "s.mutate(residue_type=restyp) \n",
                    "#get two copies of the sequence. A modeller trick to get things set up \n",
                    "ali.append_model(mdl1, align_codes=modelname) \n",
                    " \n",
                    "# Generate molecular topology for mutant \n",
                    "mdl1.clear_topology() \n",
                    "mdl1.generate_topology(ali[-1]) \n",
                    " \n",
                    " \n",
                    "# Transfer all the coordinates you can from the template native structure \n",
                    "# to the mutant (this works even if the order of atoms in the native PDB \n",
                    "# file is not standardd): \n",
                    "#here we are generating the model by reading the template coordinates \n",
                    "mdl1.transfer_xyz(ali) \n",
                    " \n",
                    "# Build the remaining unknown coordinates \n",
                    "mdl1.build(initialize_xyz=False, build_method='INTERNAL_COORDINATES') \n",
                    " \n",
                    "#yes model2 is the same file as model1. It's a modeller trick. \n",
                    "mdl2 = model(env, file=modelname) \n",
                    " \n",
                    "#required to do a transfer_res_numb \n",
                    "#ali.append_model(mdl2, atom_files=modelname, align_codes=modelname) \n",
                    "#transfers from 'model 2' to 'model 1' \n",
                    "mdl1.res_num_from(mdl2,ali) \n",
                    " \n",
                    "#It is usually necessary to write the mutated sequence out and read it in \n",
                    "#before proceeding, because not all sequence related information about MODEL \n",
                    "#is changed by this command (e.g., internal coordinates, charges, and atom \n",
                    "#types and radii are not updated). \n",
                    " \n",
                    "mdl1.write(file=modelname+restyp+respos+'.tmp') \n",
                    "mdl1.read(file=modelname+restyp+respos+'.tmp') \n",
                    " \n",
                    "#set up restraints before computing energy \n",
                    "#we do this a second time because the model has been written out and read in, \n",
                    "#clearing the previously set restraints \n",
                    "make_restraints(mdl1, ali) \n",
                    " \n",
                    "#a non-bonded pair has to have at least as many selected atoms \n",
                    "mdl1.env.edat.nonbonded_sel_atoms=1 \n",
                    " \n",
                    "sched = autosched.loop.make_for_model(mdl1) \n",
                    " \n",
                    "#only optimize the selected residue (in first pass, just atoms in selected \n",
                    "#residue, in second pass, include nonbonded neighboring atoms) \n",
                    "#set up the mutate residue selection segment \n",
                    "s = selection(mdl1.chains[chain].residues[respos]) \n",
                    " \n",
                    "mdl1.restraints.unpick_all() \n",
                    "mdl1.restraints.pick(s) \n",
                    " \n",
                    "s.energy() \n",
                    " \n",
                    "s.randomize_xyz(deviation=4.0) \n",
                    " \n",
                    "mdl1.env.edat.nonbonded_sel_atoms=2 \n",
                    "optimize(s, sched) \n",
                    " \n",
                    "#feels environment (energy computed on pairs that have at least one member \n",
                    "#in the selected) \n",
                    "mdl1.env.edat.nonbonded_sel_atoms=1 \n",
                    "optimize(s, sched) \n",
                    " \n",
                    "s.energy() \n",
                    " \n",
                    "#give a proper name \n",
                    "mdl1.write(file=modelname+'_'+restyp+respos+'.pdb') \n",
                    " \n",
                    "#delete the temporary file \n",
                    "os.remove(modelname+restyp+respos+'.tmp')"]

    with open(mutate_modelScript, "w") as mutate_modelFile:
        mutate_modelFile.writelines(mutate_model)
#############################
#This step ask the user for the list of mutations. Similar to the seqmut file
#############################
def Insert_mutations(yes_no, warning, workingFolder, pdbFile, copyFile, mutation_seqmut):
    """Normalizes the requested mutation list and prepares a dedicated working
    folder (named <MUT1-MUT2-...-workingFolder>) containing a copy of the PDB.

    Side effects: chdir() into the new folder; sets module globals
    `mutation_seqmut`, `mutFile`, `mut_pdbFile`, `mut_pdbFileTemp`,
    `mutation_output`, `NumIDmutationsList`; writes a `seqmut...` file listing
    the mutations, one per line.

    :param yes_no: / :param warning: UI helpers passed through to validation.
    :param workingFolder: base name of the working folder.
    :param pdbFile: name of the wild-type PDB file to copy.
    :param copyFile: caller-supplied helper that copies a file.
    :param mutation_seqmut: list of mutation strings, e.g. 'A26T' or
        'ALA26THR' (case-insensitive; upper-cased here).
    """
    for i in range(0, len(mutation_seqmut)):
        mutation_seqmut[i] = mutation_seqmut[i].upper()
    globals().update({"mutation_seqmut" : mutation_seqmut})

    # Validate and convert every entry to the three-letter format.
    mutationsFormat(yes_no, warning)

    #Create a new working folder and a new pdb file for the mutation
    mutFile=''
    for i in range(0,len(mutation_seqmut)):
        mutFile = mutFile+mutation_seqmut[i]+"-"
    globals().update({ "mutFile" : mutFile})

    mut_Folder = mutFile+workingFolder
    os.system("mkdir "+mut_Folder)
    os.system("cp "+pdbFile+" "+mut_Folder)
    os.chdir(mut_Folder)

    global mut_pdbFile, mut_pdbFileTemp, mutation_output
    mut_pdbFile = mutFile+pdbFile
    # NOTE(review): the `[:-3]` / `[:-8]` slices assume fixed-length file
    # suffixes (".pdb" etc.) -- confirm against the expected file naming.
    mut_pdbFileTemp = mut_pdbFile[:-3]+"temp"
    mutation_output = mut_pdbFile[:-3]+"output"
    copyFile(pdbFile, mut_pdbFile) #working mutation File

    NumIDmutations = []
    with open("seqmut"+mut_pdbFile[:-8], "w") as seqmutFile:
        for i in range(0,len(mutation_seqmut)):
            seqmutFile.writelines(mutation_seqmut[i]+"\n")
            # First number in the mutation string is the residue ID.
            NumID = re.findall('\d+', str(mutation_seqmut[i]))[0]
            #List with the ResID numbers of the residues to be mutated is stored as NumIDmutationsList
            NumIDmutations.append(NumID)
    globals().update({"NumIDmutationsList" : NumIDmutations})

    print "\n ---> The following mutation(s) will be performed: "
    for i in range(0,len(mutation_seqmut)):
        print str(i+1)+") ", mutation_seqmut[i]
#############################
#This step recognices non-standard residues in the mutations and unifies the format to 1 letter amino acid
#############################
def mutationsFormat(yes_no, warning):
    """Normalizes every entry of the global `mutation_seqmut` list to the
    three-letter format OLD+resID+NEW (e.g. 'A26T' -> 'ALA26THR').

    Bug fix: the previous implementation used str.replace() on the first/last
    residue code, which replaces EVERY occurrence of that character in the
    string (e.g. 'A26A' became 'ALALALA26ALALALA' because both 'A's -- and
    then the newly inserted ones -- were substituted).  Parsing the mutation
    with a regular expression touches only the residue-code fields.

    :param yes_no: UI helper, passed through to NonStandarddResID.
    :param warning: warning prefix string, passed through.
    """
    #Recognizes non-standard residues and ask the user to insert the mutation again
    NonStandarddResID(warning, yes_no)

    for i in range(0, len(mutation_seqmut)):
        # Residue codes are either 1 or 3 letters on each side of the ID.
        match = re.match(r'^([A-Z]{3}|[A-Z])(\d+)([A-Z]{3}|[A-Z])$',
                         mutation_seqmut[i])
        if match is None:
            # NonStandarddResID already reported malformed input.
            continue
        oldRes, resID, newRes = match.groups()
        # Expand one-letter codes to the three-letter form.
        if len(oldRes) == 1:
            oldRes = oneToTreeDic[oldRes]
        if len(newRes) == 1:
            newRes = oneToTreeDic[newRes]
        mutation_seqmut[i] = oldRes + resID + newRes

    globals().update({"mutation_seqmut" : mutation_seqmut})
#SIMPLIFY!!
def NonStandarddResID(warning, yes_no):
    """Validates each entry of the global `mutation_seqmut` list: both the
    original and the target residue must be a recognized 1- or 3-letter
    amino-acid code.  On failure it re-prompts by calling Insert_mutations().

    NOTE(review): the recovery calls below reference `workingFolder`,
    `pdbFile` and `copyFile`, which are not defined in this module's scope,
    and omit Insert_mutations' sixth argument (`mutation_seqmut`); if this
    branch is ever taken it would raise NameError/TypeError.  Confirm the
    intended re-prompt behavior with the caller.
    """
    #Recognizes non-standard residues 3 letter format
    for i in range(0,len(mutation_seqmut)):
        # --- Validate the ORIGINAL residue (prefix of the mutation string).
        if (mutation_seqmut[i][0:3]).isalpha() == True:
            # Three leading letters: must be a known three-letter code.
            if (mutation_seqmut[i][0:3]) not in threeToOneDic:
                print "\n", warning, "The following residue is not recognized:", '\x1b[0;33;49m'+(mutation_seqmut[i][0:3])+'\x1b[0m', "\n Try again!"
                Insert_mutations(yes_no, warning, workingFolder, pdbFile, copyFile)
        else:
            # Otherwise a single leading letter: must be a one-letter code.
            if (mutation_seqmut[i][0]).isalpha() == True:
                if (mutation_seqmut[i][0]) not in oneToTreeDic:
                    print "\n", warning, "The following residue is not recognized:", '\x1b[0;33;49m'+(mutation_seqmut[i][0])+'\x1b[0m', "\n Try again!"
                    Insert_mutations(yes_no, warning, workingFolder, pdbFile, copyFile)
            else:
                print "\n", warning, "The following residue is not recognized:", '\x1b[0;33;49m'+(mutation_seqmut[i][0])+'\x1b[0m', "\n Try again!"
                Insert_mutations(yes_no, warning, workingFolder, pdbFile, copyFile)
        # --- Validate the TARGET residue (suffix of the mutation string).
        if (mutation_seqmut[i][-3:]).isalpha() == True:
            if (mutation_seqmut[i][-3:]) not in threeToOneDic:
                print "\n", warning, "The following residue is not recognized:", '\x1b[0;33;49m'+mutation_seqmut[i][-3:]+'\x1b[0m', "\n Try again!"
                Insert_mutations(yes_no, warning, workingFolder, pdbFile, copyFile)
        else:
            if (mutation_seqmut[i][-1:]).isalpha() == True:
                if (mutation_seqmut[i][-1:]) not in oneToTreeDic:
                    print "\n", warning, "The following residue is not recognized:", '\x1b[0;33;49m'+(mutation_seqmut[i][-1:])+'\x1b[0m', "\n Try again!"
                    Insert_mutations(yes_no, warning, workingFolder, pdbFile, copyFile)
            else:
                print "\n", warning, "The following residue is not recognized:", '\x1b[0;33;49m'+(mutation_seqmut[i][-1:])+'\x1b[0m', "\n Try again!"
                Insert_mutations(yes_no, warning, workingFolder, pdbFile, copyFile)
#############################
#This step ask the user for select the software for the mutations
#############################
def Select_mutation_Software(ChooseNumOption, pdbFile, copyFile):
    """Prompts the user (via the injected ChooseNumOption helper) to pick the
    software used to build the mutations.

    `pdbFile` and `copyFile` are accepted for call-site compatibility but are
    not used here.
    """
    availableSoftware = ["Modeller", "Scwrl4"]
    ChooseNumOption(
        availableSoftware, "mutation_Software", "mutation_Software",
        '\n Choose the ', 'to perform the mutations:',
        'will be used to perform the mutations.', True)
#############################
#Modeller and SCWR4 complete the missing atoms of the amino acids.
#To preserve the structure of the wild type is necessary to include the geometry of the substitution in the wild type file.
#############################
def MutatedToARMFormat(pdbFile, moveFile, FormatPDB1, FormatPDB, NumIDmut, chainName):
    """Splices the mutated residue's coordinates (from the global
    `mutation_output` file) into the wild-type geometry `mut_pdbFile`,
    replacing the old residue, then re-formats the result.

    MODELLER/SCWRL4 also complete missing side-chain atoms, so the mutated
    residue may have a different atom count than the residue it replaces;
    the whole residue is therefore swapped in as one unit.

    :param NumIDmut: residue number (ID) of the mutated residue.
    :param chainName: chain identifier used to match 'ATOM' records.
    :param moveFile: / :param FormatPDB1: / :param FormatPDB: caller-supplied
        file helpers.  `pdbFile` is unused here apart from the signature.
    """
    #Obtain the geometry of the new residue mutation
    geometry_Mutation=[]
    with open(mutation_output, "r") as oldfile, open(mut_pdbFileTemp, "w") as newfile:
        for line in oldfile:
            # Match ATOM records of the mutated residue: chain letter followed
            # by the residue number right-justified in 4 columns.
            if 'ATOM' in line and chainName+'{:>4}'.format(str(NumIDmut)) in line:
                newfile.writelines(line)
                geometry_Mutation.append(line)
    moveFile(mut_pdbFileTemp, mutation_output)

    #Calculate the number of atoms of old residue
    i = 0
    numAtomOldRes = ''
    with open(mut_pdbFile, "r") as oldfile:
        for line in oldfile:
            if 'ATOM' in line and chainName+'{:>4}'.format(str(NumIDmut)) in line:
                i = i+1
                numAtomOldRes = i

    #Insert the geometry of the new residue mutation in the wild type geometry
    # Old-residue ATOM lines are dropped; on the last one, the full mutated
    # residue is written in its place.
    i = 0
    with open(mut_pdbFile, "r") as oldfile, open(mut_pdbFileTemp, "w") as newfile:
        for line in oldfile:
            if 'ATOM' in line and chainName+'{:>4}'.format(str(NumIDmut)) in line:
                i = i+1
                if i == numAtomOldRes:
                    newfile.writelines(geometry_Mutation)
            else:
                newfile.writelines(line)

    #Write the new pdb using the correct format
    moveFile(mut_pdbFileTemp, mut_pdbFile)
    FormatPDB1(mut_pdbFile, mut_pdbFileTemp)
    FormatPDB(mut_pdbFileTemp, mut_pdbFile, mut_pdbFile)
#############################
#Modeller routine
#############################
def Modeller_mutations(chainName, pdbFile, copyFile, moveFile, FormatPDB1, FormatPDB):
    """Runs MODELLER's mutate_model script once per requested mutation, then
    splices each mutated residue back into the working PDB.

    Relies on module globals set by Insert_mutations (`mutation_seqmut`,
    `mut_pdbFile`, `NumIDmutationsList`) and ends by chdir()'ing back to the
    parent directory.
    """
    from mutate import mutate_model_Script
    # Write mutate_model.py (sets global `mutate_modelScript`).
    mutate_model_Script()
    for i in range(0,len(mutation_seqmut)):
        print str("Running MODELLER9.19 for the mutation number " + str(i+1)).rjust(100, '.')
        # mutate_model.py usage: modelname respos resname chain.
        os.system(modellerScript+" "+mutate_modelScript+" "+mut_pdbFile[:-4]+" "+str(NumIDmutationsList[i])+" "+str(mutation_seqmut[i][-3:])+" "+chainName)
        # MODELLER writes <model>_<RES><pos>.pdb; rename to the common name.
        moveFile(mut_pdbFile[:-4]+"_"+str(mutation_seqmut[i][-3:])+str(NumIDmutationsList[i])+".pdb", mutation_output)
        NumIDmut = NumIDmutationsList[i]
        print "\n The mutation ", mutation_seqmut[i], "has been succesfully generated!"
        MutatedToARMFormat(pdbFile, moveFile, FormatPDB1, FormatPDB, NumIDmut, chainName)
    os.chdir("../")
#############################
#SCWRL4 routine
#############################
def Scwrl4_mutations(resNumID, chainName, FormatPDB1, moveFile, pdbFile, FormatPDB):
    """Builds the requested mutations with SCWRL4 and splices each mutated
    residue back into the working PDB.

    Relies on module globals set by Insert_mutations / get_pir_Script
    (`mut_pdbFile`, `mutation_output`, `mutation_seqmut`,
    `NumIDmutationsList`, `sequenceWTList`).
    """
    # Extract HETATM records: passed to SCWRL4 via -f so ligands/ions are
    # considered during side-chain placement.
    pdbHETATM = mut_pdbFile[:-3]+"HETATM.pdb"
    with open(mut_pdbFile, "r") as pdb, open(pdbHETATM, "w") as hetatm:
        for line in pdb:
            if "HETATM" in line:
                hetatm.writelines(line)

    global sequenceWTList
    seqFileName = mut_pdbFile[:-7]+"seqFileName"
    # Builds `sequenceWTList` (lower-case one-letter codes, '-' for gaps).
    get_pir_Script(mut_pdbFile, resNumID, chainName, FormatPDB1, moveFile)

    for i in range(0,len(mutation_seqmut)):
        NumIDmut = NumIDmutationsList[i]
        # Mark the mutated position with its UPPER-case one-letter code.
        # NOTE(review): presumably SCWRL4's -s convention (upper-case =
        # remodel, lower-case = keep fixed) -- confirm against SCWRL4 docs.
        sequenceWTList[int(NumIDmut)-1] = threeToOneDic[mutation_seqmut[i][-3:]]
        with open(seqFileName, "w") as seqFile:
            for j in range(0, len(sequenceWTList)):
                if sequenceWTList[j] != "-":
                    seqFile.writelines(sequenceWTList[j])
        print str("Running SCWRL4 for the mutation number " + str(i+1)).rjust(100, '.')
        os.system(scrl4Script+" -i "+mut_pdbFile+" -o "+mutation_output+" -h -f "+pdbHETATM+" -s "+seqFileName+" > scwrl4_mut.log" )
        print (scrl4Script+" -i "+mut_pdbFile+" -o "+mutation_output+" -h -f "+pdbHETATM+" -s "+seqFileName+" > scwrl4_mut.log" )
        print "\n The mutation ", mutation_seqmut[i], "has been succesfully generated!"
        # Restore the lower-case marker so later iterations keep it fixed.
        sequenceWTList[int(NumIDmut)-1] = sequenceWTList[int(NumIDmut)-1].lower()

        #Fix the format of the mutation_ouput file
        # NOTE(review): the literal file name "mutation_output" below differs
        # from the *variable* `mutation_output`; FormatPDB then reads the
        # literal file and writes the variable-named one.
        FormatPDB1(mutation_output, "mut_temp")
        with open("mut_temp", "r") as out, open("mutation_output", "w") as temp:
            for line in out:
                if "ATOM" in line:
                    # Rebuild each ATOM record tab-separated, forcing the
                    # occupancy-like column to "0.0".
                    temp.writelines(line.split()[0]+"\t"+line.split()[1]+"\t"+line.split()[2]+"\t"+line.split()[3]+"\t"+line.split()[4]+"\t"+line.split()[5]+"\t"+line.split()[6]+"\t"+line.split()[7]+"\t"+line.split()[8]+"\t"+line.split()[9]+"\t"+str("0.0")+"\t"+line.split()[10]+"\n")
        FormatPDB("mutation_output", mutation_output, mutation_output)
        os.remove("mut_temp")
        MutatedToARMFormat(pdbFile, moveFile, FormatPDB1, FormatPDB, NumIDmut, chainName)
    os.system("pwd")
#############################
# Propka
#############################
def protonation_mutant(protonation, numberCounterions, protAA, HISName, protResDictionary, replaceLine, chainName, Step):
def protonation_mut(mut_pdbFile, protAA, protResDictionary,replaceLine, chainName, Step):
for key in protResDictionary:
if protResDictionary[key] == "HIS":
replaceLine(mut_pdbFile, "ATOM", chainName+'{:>4}'.format(key), {"HIS" : HISName})
else:
replaceLine(mut_pdbFile, "ATOM", chainName+'{:>4}'.format(key), protAA)
protonation_mut(mut_pdbFile, protAA, protResDictionary,replaceLine, chainName, Step)
numberCounterions(mut_pdbFile)
Step('The '+'\x1b[0;33;49m'+mut_pdbFile+' file is ready to be used as input for the ARM protocol! \x1b[0m')
os.chdir("../")
| [
"la.pedrazagonzalez@student.unisi.it"
] | la.pedrazagonzalez@student.unisi.it |
a6b8557de93800110cd9a1e4bf3c2635091d5b3d | 70f4171d3024d22de8686ffff5b4bfee9bffa4b0 | /uri-online-judge/1079.py | 029bdb1edd0d86bcead7a3126383a1cae72715e6 | [] | no_license | raquelsouto/python | 1ea105a5a3844cf1827da10b4b74b8366cfed39c | dbafa504e83a20c8eb3ed453069b49d631a13d2c | refs/heads/master | 2022-11-21T23:36:28.973184 | 2020-07-23T02:08:12 | 2020-07-23T02:10:01 | 281,823,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | n = int(input())
cont_media = 0
cont_print = 0
lista_medias = []
while cont_media < n:
nota1, nota2, nota3 = map(float, input().split())
media = (nota1*2 + nota2*3 + nota3*5)/10
lista_medias.append(media)
cont_media += 1
while cont_print < n:
print('%.1f' %lista_medias[cont_print])
cont_print += 1 | [
"kelsouto@gmail.com"
] | kelsouto@gmail.com |
a424866970aa0002e806903b5cbd09b518cbb260 | ddbe13cc82934230dc2dc2f46d2cb491ca98b2f6 | /ex03.py | 65c64549da03931b9f11fddb0da483a7adad6f9b | [] | no_license | key70/day0411 | ff7c2f3a14f04550c1aa4c291689504e6b415ec8 | 3646822537cba18a763cfabedf4c61a122d0faa4 | refs/heads/master | 2020-05-07T18:17:24.683487 | 2019-04-11T09:36:27 | 2019-04-11T09:36:27 | 180,760,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,372 | py |
# 어떤 feature가 그것을 결정하는 가장 중요한 요인인가를 파악중요하다.
# 그것을 결정하는데 필요한 데이터를 수집하는것이 중요
import numpy as np
import pandas as pd
from sklearn import linear_model, model_selection
names = ['age','workclass','fnlwgt','education','education-num','marital-status',
'occupation','relationship','race','sex','capital-gain','capital-loss','hours-per-week',
'native-country','income']
df = pd.read_csv("../Data/adult.data.txt", header=None, names=names)
df = df[['age','workclass','education','occupation',
'sex','race','hours-per-week','income'] ]
new_df = pd.get_dummies(df)
print(new_df.head())
print(new_df.columns)
x = new_df.iloc[:,:-2]
y = new_df.iloc[:,-1]
#문제와 답의 차수를 확인해 봅시다.
print(x.shape) #(32561, 44) 2차원
print(y.shape) #(32561,) 1차원
train_x, test_x, train_y, test_y = model_selection.train_test_split(x,y)
lr = linear_model.LogisticRegression()
lr.fit(train_x,train_y) #훈련용 데이터와 답을 갖고 학습을 시킨다.
n = [[47, ' Private', ' Prof-school',' Prof-specialty', ' Female',' White',60, ' <=50K']]
n_df = pd.DataFrame(n, columns=['age','workclass','education','occupation',
'sex','race','hours-per-week','income'])
df2 = df.append(n_df)
#알고자하는 데이터를 훈련시킨 feature의 수와 동일하게 하기 위하여
#원래 원본데이터의 맨마지막에 추가시키고
#one-hot Encoding을 합시다.
one_hot = pd.get_dummies(df2)
print(len(one_hot.columns)) #51
print(len(new_df.columns)) #51
pred_x = np.array( one_hot.iloc[-1, :-2]).reshape(1,-1)
pred_y = lr.predict(pred_x)
print(pred_y)
# n_df = pd.DataFrame(n, columns=['age','workclass','education','occupation',
# 'sex','race','hours-per-week','income'])
#연습) 고객의 나이, 직업분류, 학력, 직업, 성별, 인종, 주당근무시간을
# 입력받아 연봉이 50000달러 이상이면 "대출가능"
# 그렇지 않으면 "대출불가능"을 출력하는 웹어프리케션을 구현합니다.
# 단, 직업분류, 학력, 직업, 성별, 인종은
# 우리가 훈련시킬 데이터 adult.data.txt의 내용으로 제한하도록 합니다.
| [
"rolakim70@naver.com"
] | rolakim70@naver.com |
4299a22e5f834c8e5eed9f8ddf958ec3d0cda2fa | 9ae04b301adaa77558c3cbd1fe6b9adc2f9d7e9f | /flaskblog/users/forms.py | 2720ae2fe8d6f4d1779e37ae43f3ce66aaabbf67 | [] | no_license | soumyaevan/Blog_App | 3401daa6e79e5a457e9e81540f00e5af046fdffd | 488b27d5d1ddf57b7be9a4f2e2c493fa6bd06f41 | refs/heads/master | 2022-12-24T01:50:01.164796 | 2020-09-17T20:00:56 | 2020-09-17T20:00:56 | 293,937,873 | 0 | 0 | null | 2020-09-17T20:00:57 | 2020-09-08T22:06:37 | HTML | UTF-8 | Python | false | false | 3,126 | py | from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError, Regexp
from flask_login import current_user
from flaskblog.models import User
class RegistrationForm(FlaskForm):
username = StringField('Username',validators=[DataRequired(), Length(min=2, max=20),
Regexp("^[a-zA-Z]+[\._-]?[a-zA-Z0-9]+")])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Passsword', validators=[DataRequired(), Length(min=3, max=15)])
confirm_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Sign Up')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('This username already exists. Please choose a different one')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('This email address already exists. Please choose a different one')
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Passsword', validators=[DataRequired(), Length(min=3, max=15)])
remember = BooleanField('Remember Me')
submit = SubmitField('Login')
class UpdateAccountForm(FlaskForm):
username = StringField('Username',validators=[DataRequired(), Length(min=2, max=20),
Regexp("^[a-zA-Z]+[\._-]?[a-zA-Z0-9]+")])
email = StringField('Email', validators=[DataRequired(), Email()])
picture = FileField('Update Profile Picture', validators=[FileAllowed(['jpg','png'])])
submit = SubmitField('Update')
def validate_username(self, username):
if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('This username already exists. Please choose a different one')
def validate_email(self, email):
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('This email address already exists. Please choose a different one')
class RequestResetForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
submit = SubmitField('Reset Password')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is None:
raise ValidationError('This Email Address is not found!!!')
class PassswordResetForm(FlaskForm):
password = PasswordField('Passsword', validators=[DataRequired(), Length(min=3, max=15)])
confirm_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Reset') | [
"sensoumya94@gmail.com"
] | sensoumya94@gmail.com |
6e14b2e0b728e21378e28a1927f97c4935ef9061 | 6af7dedd9f8335f1bd1f2b40b5050d0cafa6411a | /Basic/format.py | 8559a7875e16b96db5a4c2799e79858aef530804 | [] | no_license | Gruci/python | 0944e8de684af2ed158356bb9d4c230aca50b4d5 | 8bfb98a8cd865a17dc9621344fc791908aa8f56a | refs/heads/master | 2020-04-17T06:20:16.069026 | 2016-11-14T09:17:43 | 2016-11-14T09:17:43 | 67,502,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | number = 20
greeting = '안녕 안녕'
place = '문자열 포맷'
welcome = '하이 하이'
print (number,'번 손님', greeting, '.', place, '이다', welcome)
base = '{}번 손님, {}. {}이다 {}'
new_way = base.format(number, greeting, place, welcome)
print(base)
print(new_way)
print ('나는 {} ,너는 {}, 그래서 {}'.format
(greeting, welcome, place)) | [
"skeksksk@gmail.com"
] | skeksksk@gmail.com |
a17465ee9bd77c044fdbc517bdc07720e69bb280 | aaa79e1992c9ea57a80f8bba98e2251cc2814cc3 | /project/final/random_forest.py | 4b9a88c9372d2e8e6108bd4df57dd7e7df02e74f | [] | no_license | Xin-Cheng/CS412 | abcba546383af359290035d8e231e2d601f949a9 | 252662ac1b9c527eaad31de3cff1a75a1790b62f | refs/heads/master | 2021-03-30T16:31:09.316692 | 2017-04-29T06:27:27 | 2017-04-29T06:27:27 | 83,969,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,764 | py | import numpy as np
import pandas as pd
import pickle
import itertools
from math import *
from numpy import *
from collections import deque
class Decision_Tree:
def __init__(self, name, condition, is_label):
self.name = name # name of each tree node
self.condition = condition
self.constraint = None
if condition is None or is_label:
num_of_children = 1
else:
num_of_children = 2 if isinstance(condition, int) or isinstance(condition, float) else len(condition)
self.children = [None]*num_of_children
def set_constraint(self, constraint):
self.constraint = constraint
# load data and preprocess
def preprocess():
# load data
users = pd.read_csv('data/user.txt')
movies = pd.read_csv('data/movie.txt')
train = pd.read_csv('data/train.txt')
test = pd.read_csv('data/test.txt')
# preprocessing data
fill_na(users)
fill_na(movies)
# training data
user_train = pd.merge(users, train, how='inner', left_on='ID', right_on='user-Id')
whole_train_data = pd.merge(user_train, movies, how='inner', left_on='movie-Id', right_on='Id')
train_data = whole_train_data[['Gender', 'Age', 'Occupation', 'Year', 'Genre', 'rating']]
# build random forest
forest = build_forest(train_data)
# test data
user_test = pd.merge(users, test, how='inner', left_on='ID', right_on='user-Id')
whole_test_data = pd.merge(user_test, movies, how='inner', left_on='movie-Id', right_on='Id')
test_data = whole_test_data[['Id_x', 'Gender', 'Age', 'Occupation', 'Year', 'Genre']]
test_data = test_data.rename(index=str, columns={'Id_x': 'Id'})
predict(test_data, forest)
def predict(test_data, forest):
test_data['rating_str'] = ''
for decision_tree in forest:
queries = build_queries(decision_tree)
for q in queries:
exec(q)
# test_data.to_csv('forest_prediction.csv',index=False)
rating = []
for index, row in test_data.iterrows():
votes = array(map(int, list(row['rating_str'])))
rating.append(bincount(votes).argmax())
test_data['rating'] = rating
result = test_data[['Id', 'rating']]
result.sort(['rating'], inplace = True)
result.to_csv('forest_prediction.csv',index=False)
def build_queries(decision_tree):
queries = []
prefix = 'test_data.loc['
suffix = ', "rating_str"]= test_data["rating_str"] + '
node_list = deque([])
node_list.append(decision_tree)
while node_list:
curr_node = node_list.popleft()
if curr_node.name == 'label' and curr_node.constraint is not None:
queries.append(prefix + curr_node.constraint + suffix + '\"' + str(curr_node.condition) + '\"')
for node in curr_node.children:
if node is not None:
node_list.append(node)
return queries
def build_forest(train_data):
# features = list(train_data)[0 : train_data.shape[1] - 1]
# combinations = itertools.combinations(features, 3)
# forest = []
# for c in combinations:
# tr = list(c)
# tr.append('rating')
# # build single decision tree
# root = Decision_Tree('root', None, False)
# build_decision_tree(train_data[tr], root)
# forest.append(root)
# pickle.dump( forest, open( 'forest-3.p', 'wb' ) )
random_forest = pickle.load( open( 'forest-3.p', 'rb' ) )
return random_forest
# find split feature according to information gain
def find_split(train_data):
size = train_data.groupby('rating').size().shape[0]
if size == 1:
return Decision_Tree('label', train_data['rating'].tolist()[0], True)
# go for majority vote
elif train_data.shape[1] == 1:
return Decision_Tree('label', bincount(train_data['rating']).argmax(), True)
# find split feature
# calculate infomation of each feature
feature_names = list(train_data)
information = zeros(len(feature_names) - 1)
information_split = zeros([len(feature_names) - 1, 2])
for i in range(0, len(feature_names) - 1):
if feature_names[i] == 'Gender':
information[i] = discrete_information(train_data, feature_names[i])
elif feature_names[i] == 'Genre':
information[i] = combined_discrete_info(train_data, feature_names[i])
else:
info, split = continuous_info(train_data, feature_names[i])
information_split[i, :] = [info, split]
information[i] = info
# choose the feature with lowest infomation as current tree node
node_name = feature_names[argmin(information)]
if node_name == 'Gender':
condition = ['M', 'F']
elif node_name == 'Genre':
condition = unique(('|'.join(train_data[node_name].unique())).split('|'))
else:
condition = information_split[argmin(information)][1]
return Decision_Tree(node_name, condition, False)
# build decision tree
def build_decision_tree(train_data, tree_root):
if tree_root.condition is None:
tree_root.children[0] = find_split(train_data)
build_decision_tree(train_data, tree_root.children[0])
elif tree_root.name == 'label':
return
else:
condition = tree_root.condition
name = tree_root.name
prev_constraint = tree_root.constraint + ' & ' if tree_root.constraint is not None else ''
if name != 'Genre':
left = (train_data[train_data[name] <= condition] if name != 'Gender' else train_data.groupby('Gender').get_group('M')).drop(name, axis=1)
right = (train_data[train_data[name] > condition] if name != 'Gender' else train_data.groupby('Gender').get_group('F')).drop(name, axis=1)
tree_root.children[0] = find_split(left)
tree_root.children[1] = find_split(right)
if name != 'Gender':
tree_root.children[0].set_constraint(prev_constraint + '(test_data[\"' + name + '\"]' + '<=' + str(condition) + ')')
tree_root.children[1].set_constraint(prev_constraint + '(test_data[\"' + name + '\"]' + '>' + str(condition) + ')')
else:
tree_root.children[0].set_constraint(prev_constraint + '(test_data["Gender"] == \"M\")')
tree_root.children[1].set_constraint(prev_constraint + '(test_data["Gender"] == \"F\")')
build_decision_tree(left, tree_root.children[0])
build_decision_tree(right, tree_root.children[1])
else:
for i in range(len(condition)):
group = (train_data[train_data['Genre'].str.contains(condition[i])]).drop(name, axis=1)
tree_root.children[i] = find_split(group)
tree_root.children[i].set_constraint(prev_constraint + '(test_data[\"' + name + '\"]' + '.str.contains(' + '\"' + condition[i] + '\")' + ')')
build_decision_tree(group, tree_root.children[i])
# calculate continuous feature, 'Age', 'Occupation', and 'Year' in this project
def continuous_info(train_data, f_name):
size = train_data.shape[0]
features = train_data[f_name].unique()
sorted_features = sort(features)
split_info = zeros(len(sorted_features) - 1, dtype=float)
split_points = zeros(len(sorted_features) - 1, dtype=float)
# find split point
for i in range(len(sorted_features) - 1):
split = (sorted_features[i] + sorted_features[i + 1])/2
split_points[i] = split
left = train_data[train_data[f_name] <= split]
right = train_data[train_data[f_name] > split]
info = entropy(left)*(float(left.shape[0])/size) + entropy(right)*(float(right.shape[0])/size)
split_info[i] = info
min_split = argmin(split_info)
return split_info[min_split], split_points[min_split]
# calculate combined discrete feature, genre in this project
def combined_discrete_info(train_data, f_name):
size = train_data.shape[0]
# get distinct genres
genres_str = '|'.join(train_data['Genre'].unique())
genres = np.unique(genres_str.split('|'))
# calculate entropy of each distinct value
counts = zeros(len(genres), dtype=float)
eps = zeros(len(genres))
for i in range(len(genres)):
group = train_data[train_data['Genre'].str.contains(genres[i])]
counts[i] = group.shape[0]
eps[i] = entropy(group)
group_probability = (counts/size)/sum(counts/size)
info = dot(group_probability, eps)
return info
# calculate information of discrete feature, gender in this project
def discrete_information(train_data, f_name):
size = train_data.shape[0]
# calculate the probability of each distinct value of this feature
groups = train_data.groupby(f_name)
counts = groups.size().reset_index(name='count')
group_probability = array(counts['count'], dtype=float)/size
# calculate entropy of each distinct value
distinct_names = train_data[f_name].unique()
eps = zeros(len(distinct_names))
for i in range(len(distinct_names)):
eps[i] = entropy(groups.get_group(distinct_names[i]))
info = dot(group_probability, eps)
return info
# calculate entropy
def entropy(group):
size = group.shape[0]
groups = group.groupby('rating').size().reset_index(name='count')
ratings = array(groups['rating'])
counts = array(groups['count'], dtype=float)
probabilities = counts/size
log_probabilities = -log2(probabilities)
entropy = dot(probabilities, log_probabilities)
return entropy
# assign the most common value of the attribute to missing values
def fill_na(dataframe):
for column in dataframe:
dataframe[column].fillna(value=dataframe[column].value_counts().idxmax(), inplace = True)
def main():
preprocess()
if __name__ == "__main__":
main() | [
"x_cheng@outlook.com"
] | x_cheng@outlook.com |
bcf478a008f68005da747ea683705b87c418a752 | e1e977ffa4324d4f61da66a4d4b0869a87a6f049 | /tests/test_postcode_parser.py | c37075c84bf47e1453feffa10c027ffbd36d21d4 | [
"MIT"
] | permissive | WintersDeep/wintersdeep_postcode | 5bfa24eaa4df94e5a765e4dedd0a58168b840a3d | b5f53484d2209d48919a4db663f05c9d39a396fa | refs/heads/master | 2022-11-03T20:19:35.737929 | 2020-06-19T01:11:51 | 2020-06-19T01:11:51 | 272,838,311 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,963 | py | # python3 imports
from os.path import abspath, dirname, join
from sys import path as python_path
from unittest import TestCase
from re import compile
# determine where we are running (needed to patch PYTHON_PATH)
TEST_CASE_PATH = abspath( __file__ )
TEST_CASE_DIRECTORY = dirname( TEST_CASE_PATH )
PROJECT_ROOT_DIRECTORY = abspath( join( TEST_CASE_DIRECTORY, ".." ) )
# patch up PYTHON_PATH if required.
if not PROJECT_ROOT_DIRECTORY in python_path:
python_path.insert(0, PROJECT_ROOT_DIRECTORY)
# project imports
from wintersdeep_postcode.postcode_parser import PostcodeParser
## Unit Test class for PostcodeParser
class TestPostcodeParser(TestCase):
## test that we throw an error if excess / unrecognised keywords are recevied.
def test__PostcodeParser_ctor_excess_keywords(self):
self.assertRaises(TypeError, PostcodeParser, unused="value")
self.assertRaises(TypeError, PostcodeParser, whitespace="tolerant", unused="value")
## tests that the PostcodeParser::_get_whitespace_pattern raises an exception when
# unrecognised type strings are provided to the method.
def test__PostcodeParser_get_whitespace_pattern__unsupported(self):
self.assertRaises(ValueError, PostcodeParser._get_whitespace_pattern, 'unsupported')
self.assertRaises(ValueError, PostcodeParser._get_whitespace_pattern, 'TOLERANT')
self.assertRaises(ValueError, PostcodeParser._get_whitespace_pattern, 'tolerant ')
self.assertRaises(ValueError, PostcodeParser._get_whitespace_pattern, '')
self.assertRaises(ValueError, PostcodeParser._get_whitespace_pattern, False)
self.assertRaises(ValueError, PostcodeParser._get_whitespace_pattern, None)
self.assertRaises(ValueError, PostcodeParser._get_whitespace_pattern, 1)
## tests that the PostcodeParser::_get_whitespace_pattern creates a suitable pattern
# when created with the 'strict' keyword.
def test__PostcodeParser_get_whitespace_pattern__strict(self):
whitespace_pattern = PostcodeParser._get_whitespace_pattern('strict')
test_regex = compile(f"^{whitespace_pattern}$")
self.assertTrue( test_regex.match(" ") )
self.assertFalse( test_regex.match("") )
self.assertFalse( test_regex.match(" ") )
self.assertFalse( test_regex.match("\t") )
self.assertFalse( test_regex.match("\t ") )
self.assertFalse( test_regex.match(" \t ") )
self.assertFalse( test_regex.match("-") )
self.assertFalse( test_regex.match("TEXT") )
## tests that the PostcodeParser::_get_whitespace_pattern creates a suitable pattern
# when created with the 'tolerant' keyword.
def test__PostcodeParser_get_whitespace_pattern__tolerant(self):
whitespace_pattern = PostcodeParser._get_whitespace_pattern('tolerant')
test_regex = compile(f"^{whitespace_pattern}$")
self.assertTrue( test_regex.match(" ") )
self.assertTrue( test_regex.match("") )
self.assertFalse( test_regex.match(" ") )
self.assertFalse( test_regex.match("\t") )
self.assertFalse( test_regex.match("\t ") )
self.assertFalse( test_regex.match(" \t ") )
self.assertFalse( test_regex.match("-") )
self.assertFalse( test_regex.match("TEXT") )
## tests that the PostcodeParser::_get_whitespace_pattern creates a suitable pattern
# when created with the 'lenient' keyword.
def test__PostcodeParser_get_whitespace_pattern__lenient(self):
whitespace_pattern = PostcodeParser._get_whitespace_pattern('lenient')
test_regex = compile(f"^{whitespace_pattern}$")
self.assertTrue( test_regex.match(" ") )
self.assertTrue( test_regex.match("") )
self.assertTrue( test_regex.match(" ") )
self.assertTrue( test_regex.match("\t") )
self.assertTrue( test_regex.match("\t ") )
self.assertTrue( test_regex.match(" \t ") )
self.assertFalse( test_regex.match("-") )
self.assertFalse( test_regex.match("TEXT") )
## tests that the PostcodeParser::_build_input_translater method creates functions
# that perform the expected actions - no operation pipeline
def test__PostcodeParser_build_input_translator__nop(self):
pipeline = PostcodeParser._build_input_translater(trim=False, uppercase=False)
self.assertEqual( pipeline("NO CHANGE"), "NO CHANGE" )
self.assertEqual( pipeline(" TRIM TEST\t "), " TRIM TEST\t " )
self.assertEqual( pipeline("Uppercase Test"), "Uppercase Test" )
## tests that the PostcodeParser::_build_input_translater method creates functions
# that perform the expected actions - trim only pipeline
def test__PostcodeParser_build_input_translator__trim(self):
pipeline = PostcodeParser._build_input_translater(trim=True, uppercase=False)
self.assertEqual( pipeline("NO CHANGE"), "NO CHANGE" )
self.assertEqual( pipeline(" TRIM TEST\t "), "TRIM TEST" )
self.assertEqual( pipeline("Uppercase Test"), "Uppercase Test" )
## tests that the PostcodeParser::_build_input_translater method creates functions
# that perform the expected actions - uppercase only pipeline
def test__PostcodeParser_build_input_translator__uppercase(self):
pipeline = PostcodeParser._build_input_translater(trim=False, uppercase=True)
self.assertEqual( pipeline("NO CHANGE"), "NO CHANGE" )
self.assertEqual( pipeline(" TRIM TEST\t "), " TRIM TEST\t " )
self.assertEqual( pipeline("Uppercase Test"), "UPPERCASE TEST" )
## tests that the PostcodeParser::_build_input_translater method creates functions
# that perform the expected actions - full pipeline
def test__PostcodeParser_build_input_translator__full(self):
pipeline = PostcodeParser._build_input_translater(trim=True, uppercase=True)
self.assertEqual( pipeline("NO CHANGE"), "NO CHANGE" )
self.assertEqual( pipeline(" TRIM TEST\t "), "TRIM TEST" )
self.assertEqual( pipeline("Uppercase Test"), "UPPERCASE TEST" )
## This test to make sure we throw if we try and create a parser with an unknown
# method of handling whitespace in a predicable manner
def test__PostcodeParser_ctor__with_bad_whitespace_handler(self):
from wintersdeep_postcode.exceptions import ParseError
self.assertRaises( ValueError, PostcodeParser,
trim_whitespace=False,
force_case=False,
whitespace='error'
)
## This test is for the parser in its most strict configuration - strict whitepace
# handling, and no input translation. This is to ensure that in this mode, only
# well formed postcodes are parsed.
def test__PostcodeParser_parse__with_no_translation(self):
from wintersdeep_postcode.exceptions import ParseError
postcode_parser = PostcodeParser(trim_whitespace=False, force_case=False, whitespace='strict', validate=False)
self.assertRaises(ParseError, postcode_parser, "aa1 1aa")
self.assertRaises(ParseError, postcode_parser, "AA11AA")
self.assertRaises(ParseError, postcode_parser, "AA1 1AA")
self.assertRaises(ParseError, postcode_parser, "AA1\t1AA")
self.assertRaises(ParseError, postcode_parser, "AA1\t 1AA")
self.assertRaises(ParseError, postcode_parser, " AA1 1AA")
self.assertRaises(ParseError, postcode_parser, "AA1 1AA ")
self.assertRaises(ParseError, postcode_parser, " AA1 1AA ")
self.assertRaises(ParseError, postcode_parser, 1)
self.assertRaises(ParseError, postcode_parser, False)
self.assertIsNotNone( postcode_parser("A1 1AA") )
self.assertIsNotNone( postcode_parser("A11 1AA") )
self.assertIsNotNone( postcode_parser("A1A 1AA") )
self.assertIsNotNone( postcode_parser("AA1 1AA") )
self.assertIsNotNone( postcode_parser("AA11 1AA") )
self.assertIsNotNone( postcode_parser("AA1A 1AA") )
## This test is for the parser in a strict configuration - strict whitepace
# handling, and only case correction enabled. This is to ensure that in
# this mode, well formed postcodes of any case are parsed.
def test__PostcodeParser_parse__with_caps_correction(self):
from wintersdeep_postcode.exceptions import ParseError
postcode_parser = PostcodeParser(trim_whitespace=False, force_case=True, whitespace='strict', validate=False)
self.assertIsNotNone( postcode_parser("aa1 1aa") )
self.assertRaises(ParseError, postcode_parser, "AA11AA")
self.assertRaises(ParseError, postcode_parser, "AA1 1AA")
self.assertRaises(ParseError, postcode_parser, "AA1\t1AA")
self.assertRaises(ParseError, postcode_parser, "AA1\t 1AA")
self.assertRaises(ParseError, postcode_parser, " AA1 1AA")
self.assertRaises(ParseError, postcode_parser, "AA1 1AA ")
self.assertRaises(ParseError, postcode_parser, " AA1 1AA ")
self.assertRaises(ParseError, postcode_parser, 1)
self.assertRaises(ParseError, postcode_parser, False)
self.assertIsNotNone( postcode_parser("A1 1AA") )
self.assertIsNotNone( postcode_parser("A11 1AA") )
self.assertIsNotNone( postcode_parser("A1A 1AA") )
self.assertIsNotNone( postcode_parser("AA1 1AA") )
self.assertIsNotNone( postcode_parser("AA11 1AA") )
self.assertIsNotNone( postcode_parser("AA1A 1AA") )
## This test is for the parser in a strict configuration - strict whitepace
# handling, and ony whitepace trimming enabled. This is to ensure that in this
# mode, well formed postcodes with whitespace padding are parsed correctly.
def test__PostcodeParser_parse__with_trimmed_whitespace(self):
from wintersdeep_postcode.exceptions import ParseError
postcode_parser = PostcodeParser(trim_whitespace=True, force_case=False, whitespace='strict', validate=False)
self.assertRaises(ParseError, postcode_parser, "aa1 1aa")
self.assertRaises(ParseError, postcode_parser, "AA11AA")
self.assertRaises(ParseError, postcode_parser, "AA1 1AA")
self.assertRaises(ParseError, postcode_parser, "AA1\t1AA")
self.assertRaises(ParseError, postcode_parser, "AA1\t 1AA")
self.assertIsNotNone( postcode_parser(" AA1 1AA") )
self.assertIsNotNone( postcode_parser("AA1 1AA ") )
self.assertIsNotNone( postcode_parser(" AA1 1AA ") )
self.assertRaises(ParseError, postcode_parser, 1)
self.assertRaises(ParseError, postcode_parser, False)
self.assertIsNotNone( postcode_parser("A1 1AA") )
self.assertIsNotNone( postcode_parser("A11 1AA") )
self.assertIsNotNone( postcode_parser("A1A 1AA") )
self.assertIsNotNone( postcode_parser("AA1 1AA") )
self.assertIsNotNone( postcode_parser("AA11 1AA") )
self.assertIsNotNone( postcode_parser("AA1A 1AA") )
## This test is for the parser in a severe configuration - strict whitepace
# handling, but full pre-processing enabled. This is to ensure that in this
# mode, well formed postcodes which may be slightly "dirty" are parsed.
def test__PostcodeParser_parse__with_full_translation(self):
from wintersdeep_postcode.exceptions import ParseError
postcode_parser = PostcodeParser(trim_whitespace=True, force_case=True, whitespace='strict', validate=False)
self.assertIsNotNone( postcode_parser("aa1 1aa") )
self.assertRaises(ParseError, postcode_parser, "AA11AA")
self.assertRaises(ParseError, postcode_parser, "AA1 1AA")
self.assertRaises(ParseError, postcode_parser, "AA1\t1AA")
self.assertRaises(ParseError, postcode_parser, "AA1\t 1AA")
self.assertIsNotNone( postcode_parser(" AA1 1AA") )
self.assertIsNotNone( postcode_parser("AA1 1AA ") )
self.assertIsNotNone( postcode_parser(" AA1 1AA ") )
self.assertRaises(ParseError, postcode_parser, 1)
self.assertRaises(ParseError, postcode_parser, False)
self.assertIsNotNone( postcode_parser("A1 1AA") )
self.assertIsNotNone( postcode_parser("A11 1AA") )
self.assertIsNotNone( postcode_parser("A1A 1AA") )
self.assertIsNotNone( postcode_parser("AA1 1AA") )
self.assertIsNotNone( postcode_parser("AA11 1AA") )
self.assertIsNotNone( postcode_parser("AA1A 1AA") )
## This test is for the parser in a tolerant configuration - tolerant whitepace
# handling, and full pre-processing enabled. This is to ensure that in this
# mode, well formed postcodes which may be slightly "dirty" are parsed.
def test__PostcodeParser_parse__tolerant(self):
from wintersdeep_postcode.exceptions import ParseError
postcode_parser = PostcodeParser(trim_whitespace=True, force_case=True, whitespace='tolerant', validate=False)
self.assertIsNotNone( postcode_parser("aa1 1aa") )
self.assertIsNotNone( postcode_parser("AA11AA") )
self.assertRaises(ParseError, postcode_parser, "AA1 1AA")
self.assertRaises(ParseError, postcode_parser, "AA1\t1AA")
self.assertRaises(ParseError, postcode_parser, "AA1\t 1AA")
self.assertIsNotNone( postcode_parser(" AA1 1AA") )
self.assertIsNotNone( postcode_parser("AA1 1AA ") )
self.assertIsNotNone( postcode_parser(" AA1 1AA ") )
self.assertRaises(ParseError, postcode_parser, 1)
self.assertRaises(ParseError, postcode_parser, False)
self.assertIsNotNone( postcode_parser("A1 1AA") )
self.assertIsNotNone( postcode_parser("A11 1AA") )
self.assertIsNotNone( postcode_parser("A1A 1AA") )
self.assertIsNotNone( postcode_parser("AA1 1AA") )
self.assertIsNotNone( postcode_parser("AA11 1AA") )
self.assertIsNotNone( postcode_parser("AA1A 1AA") )
## This test is for the parser in a lenient configuration - lenient whitepace
# handling, and full pre-processing enabled. This is to ensure that in this
# mode, well formed postcodes which may be slightly "dirty" are parsed.
def test__PostcodeParser_parse__lenient(self):
from wintersdeep_postcode.exceptions import ParseError
postcode_parser = PostcodeParser(trim_whitespace=True, force_case=True, whitespace='lenient', validate=False)
self.assertIsNotNone( postcode_parser("aa1 1aa") )
self.assertIsNotNone( postcode_parser("AA11AA") )
self.assertIsNotNone( postcode_parser("AA1 1AA") )
self.assertIsNotNone( postcode_parser("AA1\t1AA") )
self.assertIsNotNone( postcode_parser("AA1\t 1AA") )
self.assertIsNotNone( postcode_parser(" AA1 1AA") )
self.assertIsNotNone( postcode_parser("AA1 1AA ") )
self.assertIsNotNone( postcode_parser(" AA1 1AA ") )
self.assertRaises(ParseError, postcode_parser, 1)
self.assertRaises(ParseError, postcode_parser, False)
self.assertIsNotNone( postcode_parser("A1 1AA") )
self.assertIsNotNone( postcode_parser("A11 1AA") )
self.assertIsNotNone( postcode_parser("A1A 1AA") )
self.assertIsNotNone( postcode_parser("AA1 1AA") )
self.assertIsNotNone( postcode_parser("AA11 1AA") )
self.assertIsNotNone( postcode_parser("AA1A 1AA") )
## tests that the _get_parser_regex_list throws as expected when given bad params
def test__PostcodeParser_get_parser_regex_list__bad_args(self):
self.assertRaises(ValueError, PostcodeParser._get_parser_regex_list, type_list=[])
## tests that when we ask for the default parser (passing None, or ommiting) we
# get back a parser that loads all postcode types.
def test__PostcodeParser_get_parser_regex_list__all_types(self):
from re import compile
from wintersdeep_postcode.postcode_types import postcode_type_objects
parser_list = PostcodeParser._get_parser_regex_list(type_list=None)
compile_regex_type = compile("^$").__class__
# make sure it appears we loaded all types (basic count check only)
self.assertEqual( len(postcode_type_objects), len(parser_list) )
# and that the returned list appears usable
for regex, factory in parser_list:
self.assertIsInstance(regex, compile_regex_type)
self.assertTrue( callable(factory) )
# and that the default list, is still the same as the None call.
self.assertListEqual( parser_list, PostcodeParser._get_parser_regex_list() )
## tests that when we ask for a selective parser (passing a specific list) we
# get back a parser that is loaded correctly
def test__PostcodeParser_get_parser_regex_list__specific_type(self):
from wintersdeep_postcode.postcode_types import postcode_type_objects
test_type = postcode_type_objects[0]
parser_list = PostcodeParser._get_parser_regex_list(
type_list=[ test_type.PostcodeType ]
)
# make sure it appears we loaded all types (basic count check only)
self.assertEqual( len(parser_list), 1 )
self.assertIs( parser_list[0][1], test_type)
## tests that the postcode parser respects the validate keyword argument
def test__PostcodeParser_ctor__validate_keyword(self):
from wintersdeep_postcode.postcode_types import StandardPostcode
from wintersdeep_postcode.exceptions import ParseError, ValidationError
# one postcode to check that validation errors are thrown, and another for parser errors
postcode_invalid = "LL9 2XX"
postcode_valid = "LL20 2XX"
postcode_malformed = "LL20 XXX"
postcode_parser = PostcodeParser(validate=True)
self.assertRaises(ParseError, postcode_parser.parse, postcode_malformed)
self.assertRaises(ValidationError, postcode_parser.parse, postcode_invalid)
postcode = postcode_parser.parse(postcode_valid)
self.assertIsInstance(postcode, StandardPostcode)
self.assertFalse(postcode.validation_faults)
self.assertTrue(postcode.is_validated)
postcode_parser = PostcodeParser(validate=False)
self.assertRaises(ParseError, postcode_parser.parse, postcode_malformed)
postcode = postcode_parser.parse(postcode_invalid)
self.assertIsInstance(postcode, StandardPostcode)
self.assertFalse(postcode.validation_faults)
self.assertFalse(postcode.is_validated)
## tests that the ignore faults keyword allows people to mask postcode faults they
# wish to supress (future troubleshooting, errors, or to be more forgiving)
def test__PostcodeParser_ctor__ignore_faults(self):
from wintersdeep_postcode.postcode_types import StandardPostcode
from wintersdeep_postcode.exceptions import ParseError, ValidationError
# one postcode to check that validation errors are thrown, and another for parser errors
postcode_invalid_but_ignored = "LL9 2XX"
postcode_invalid_not_ignored = "HX10 2XX"
postcode_valid = "LL20 2XX"
postcode_malformed = "LL20 XXX"
supress_error = StandardPostcode.ExpectedDoubleDigitDistrict
for test_error in [ supress_error, int(supress_error) ]:
postcode_parser = PostcodeParser(validate=True, ignored_faults=[ test_error ])
self.assertRaises(ParseError, postcode_parser.parse, postcode_malformed)
postcode = postcode_parser.parse(postcode_valid)
self.assertIsInstance(postcode, StandardPostcode)
self.assertFalse(postcode.validation_faults)
self.assertTrue(postcode.is_validated)
postcode = postcode_parser.parse(postcode_invalid_but_ignored)
self.assertIsInstance(postcode, StandardPostcode)
self.assertEqual( len(postcode.validation_faults), 1)
self.assertTrue(postcode.is_validated)
try:
postcode_parser.parse(postcode_invalid_not_ignored)
self.fail(f"Parsing '{postcode_invalid_not_ignored}' was expected to trigger an exception.")
except ValidationError as ex:
self.assertTrue( int(StandardPostcode.ExpectedSingleDigitDistrict) in ex.postcode.validation_faults)
self.assertEqual( len(ex.postcode.validation_faults), 1)
self.assertFalse(ex.postcode.is_validated)
## attempts to parse every postcode in the UK to check we are good.
# @remarks will only do this if the relevant file is available.
def test_parse_all_current_uk_postcodes__if_available(self):
from os.path import exists
root_relative_file_path = join("reference", "current-uk-postcodes.txt")
file_path = join(PROJECT_ROOT_DIRECTORY, root_relative_file_path)
if not exists(file_path):
error_major = f"Can't run test without {root_relative_file_path}"
error_minor = "this may not be checked-in/available for licencing or file size reasons."
self.skipTest(f"{error_major}; {error_minor}")
from wintersdeep_postcode.exceptions import ParseError
with open(file_path, 'r') as file_handle:
parser = PostcodeParser()
postcode_string = file_handle.readline()
while postcode_string:
try:
postcode = parser(postcode_string)
self.assertTrue(postcode.is_validated)
except ParseError as ex:
print(str(ex))
postcode_string = file_handle.readline()
if __name__ == "__main__":
##
## if this file is the main entry point, run the contained tests.
##
from unittest import main as unit_test_entry_point
unit_test_entry_point() | [
"admin@wintersdeep.com"
] | admin@wintersdeep.com |
1beeb283036f8942d827ce37f399f0e69c19519f | ad5f3ed89e0fed30fa3e2eff6a4baa12e8391504 | /tensorflow/python/keras/applications/mobilenet.py | 224e8c84496ef63c1a35e1597b4b253dc1747dab | [
"Apache-2.0"
] | permissive | DunyaELBASAN/Tensorflow-C- | aa5c66b32f7e5dcfc93092021afee1bf3c97e04b | 7a435c0946bdd900e5c0df95cad64005c8ad22f9 | refs/heads/master | 2022-11-29T23:37:53.695820 | 2020-02-21T18:16:44 | 2020-02-21T18:21:51 | 242,206,767 | 1 | 0 | Apache-2.0 | 2022-11-21T22:39:51 | 2020-02-21T18:38:41 | C++ | UTF-8 | Python | false | false | 19,201 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""MobileNet v1 models for Keras.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNets support any input size greater than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 16 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
1.0 (also called 100 % MobileNet), 0.75, 0.5 and 0.25.
For each of these `alpha` values, weights for 4 different input image sizes
are provided (224, 192, 160, 128).
The following table describes the size and accuracy of the 100% MobileNet
on size 224 x 224:
----------------------------------------------------------------------------
Width Multiplier (alpha) | ImageNet Acc | Multiply-Adds (M) | Params (M)
----------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 0.75 MobileNet-224 | 68.4 % | 325 | 2.6 |
| 0.50 MobileNet-224 | 63.7 % | 149 | 1.3 |
| 0.25 MobileNet-224 | 50.6 % | 41 | 0.5 |
----------------------------------------------------------------------------
The following table describes the performance of
the 100 % MobileNet on various input sizes:
------------------------------------------------------------------------
Resolution | ImageNet Acc | Multiply-Adds (M) | Params (M)
------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 1.0 MobileNet-192 | 69.1 % | 529 | 4.2 |
| 1.0 MobileNet-160 | 67.2 % | 529 | 4.2 |
| 1.0 MobileNet-128 | 64.4 % | 529 | 4.2 |
------------------------------------------------------------------------
Reference paper:
- [MobileNets: Efficient Convolutional Neural Networks for
Mobile Vision Applications](https://arxiv.org/abs/1704.04861)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.keras import backend
from tensorflow.python.keras import layers
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/mobilenet/')
@keras_export('keras.applications.mobilenet.MobileNet',
'keras.applications.MobileNet')
def MobileNet(input_shape=None,
alpha=1.0,
depth_multiplier=1,
dropout=1e-3,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
"""Instantiates the MobileNet architecture.
Reference paper:
- [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
Applications](https://arxiv.org/abs/1704.04861)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in the `tf.keras.backend.image_data_format()`.
Arguments:
input_shape: Optional shape tuple, only to be specified if `include_top`
is False (otherwise the input shape has to be `(224, 224, 3)` (with
`channels_last` data format) or (3, 224, 224) (with `channels_first`
data format). It should have exactly 3 inputs channels, and width and
height should be no smaller than 32. E.g. `(200, 200, 3)` would be one
valid value. Default to `None`.
`input_shape` will be ignored if the `input_tensor` is provided.
alpha: Controls the width of the network. This is known as the width
multiplier in the MobileNet paper. - If `alpha` < 1.0, proportionally
decreases the number of filters in each layer. - If `alpha` > 1.0,
proportionally increases the number of filters in each layer. - If
`alpha` = 1, default number of filters from the paper are used at each
layer. Default to 1.0.
depth_multiplier: Depth multiplier for depthwise convolution. This is
called the resolution multiplier in the MobileNet paper. Default to 1.0.
dropout: Dropout rate. Default to 0.001.
include_top: Boolean, whether to include the fully-connected layer at the
top of the network. Default to `True`.
weights: One of `None` (random initialization), 'imagenet' (pre-training
on ImageNet), or the path to the weights file to be loaded. Default to
`imagenet`.
input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`) to
use as image input for the model. `input_tensor` is useful for sharing
inputs between multiple different networks. Default to None.
pooling: Optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` (default) means that the output of the model will be
the 4D tensor output of the last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
classes: Optional number of classes to classify images into, only to be
specified if `include_top` is True, and if no `weights` argument is
specified. Defaults to 1000.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
**kwargs: For backwards compatibility only.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
if 'layers' in kwargs:
global layers
layers = kwargs.pop('layers')
if kwargs:
raise ValueError('Unknown argument(s): %s' % (kwargs,))
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape and default size.
if input_shape is None:
default_size = 224
else:
if backend.image_data_format() == 'channels_first':
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if backend.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if depth_multiplier != 1:
raise ValueError('If imagenet weights are being loaded, '
'depth multiplier must be 1')
if alpha not in [0.25, 0.50, 0.75, 1.0]:
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of'
'`0.25`, `0.50`, `0.75` or `1.0` only.')
if rows != cols or rows not in [128, 160, 192, 224]:
rows = 224
logging.warning('`input_shape` is undefined or non-square, '
'or `rows` is not in [128, 160, 192, 224]. '
'Weights for input shape (224, 224) will be'
' loaded as the default.')
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, strides=(2, 2))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
x = _depthwise_conv_block(
x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
x = _depthwise_conv_block(
x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
x = _depthwise_conv_block(
x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
x = _depthwise_conv_block(
x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
if include_top:
if backend.image_data_format() == 'channels_first':
shape = (int(1024 * alpha), 1, 1)
else:
shape = (1, 1, int(1024 * alpha))
x = layers.GlobalAveragePooling2D()(x)
x = layers.Reshape(shape, name='reshape_1')(x)
x = layers.Dropout(dropout, name='dropout')(x)
x = layers.Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
x = layers.Reshape((classes,), name='reshape_2')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Activation(activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))
# Load weights.
if weights == 'imagenet':
if alpha == 1.0:
alpha_text = '1_0'
elif alpha == 0.75:
alpha_text = '7_5'
elif alpha == 0.50:
alpha_text = '5_0'
else:
alpha_text = '2_5'
if include_top:
model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir='models')
else:
model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir='models')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
"""Adds an initial convolution layer (with batch normalization and relu6).
Arguments:
inputs: Input tensor of shape `(rows, cols, 3)` (with `channels_last`
data format) or (3, rows, cols) (with `channels_first` data format).
It should have exactly 3 inputs channels, and width and height should
be no smaller than 32. E.g. `(224, 224, 3)` would be one valid value.
filters: Integer, the dimensionality of the output space (i.e. the
number of output filters in the convolution).
alpha: controls the width of the network. - If `alpha` < 1.0,
proportionally decreases the number of filters in each layer. - If
`alpha` > 1.0, proportionally increases the number of filters in each
layer. - If `alpha` = 1, default number of filters from the paper are
used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the width and
height of the 2D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the width and height. Can be a single integer
to specify the same value for all spatial dimensions. Specifying any
stride value != 1 is incompatible with specifying any `dilation_rate`
value != 1. # Input shape
4D tensor with shape: `(samples, channels, rows, cols)` if
data_format='channels_first'
or 4D tensor with shape: `(samples, rows, cols, channels)` if
data_format='channels_last'. # Output shape
4D tensor with shape: `(samples, filters, new_rows, new_cols)` if
data_format='channels_first'
or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if
data_format='channels_last'. `rows` and `cols` values might have
changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
filters = int(filters * alpha)
x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)), name='conv1_pad')(inputs)
x = layers.Conv2D(
filters,
kernel,
padding='valid',
use_bias=False,
strides=strides,
name='conv1')(
x)
x = layers.BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
return layers.ReLU(6., name='conv1_relu')(x)
def _depthwise_conv_block(inputs,
pointwise_conv_filters,
alpha,
depth_multiplier=1,
strides=(1, 1),
block_id=1):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
Arguments:
inputs: Input tensor of shape `(rows, cols, channels)` (with
`channels_last` data format) or (channels, rows, cols) (with
`channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network. - If `alpha` < 1.0,
proportionally decreases the number of filters in each layer. - If
`alpha` > 1.0, proportionally increases the number of filters in each
layer. - If `alpha` = 1, default number of filters from the paper are
used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel. The total number of depthwise convolution
output channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the width and height. Can be a single integer
to specify the same value for all spatial dimensions. Specifying any
stride value != 1 is incompatible with specifying any `dilation_rate`
value != 1.
block_id: Integer, a unique identification designating the block number.
# Input shape
4D tensor with shape: `(batch, channels, rows, cols)` if
data_format='channels_first'
or 4D tensor with shape: `(batch, rows, cols, channels)` if
data_format='channels_last'. # Output shape
4D tensor with shape: `(batch, filters, new_rows, new_cols)` if
data_format='channels_first'
or 4D tensor with shape: `(batch, new_rows, new_cols, filters)` if
data_format='channels_last'. `rows` and `cols` values might have
changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
if strides == (1, 1):
x = inputs
else:
x = layers.ZeroPadding2D(((0, 1), (0, 1)), name='conv_pad_%d' % block_id)(
inputs)
x = layers.DepthwiseConv2D((3, 3),
padding='same' if strides == (1, 1) else 'valid',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=False,
name='conv_dw_%d' % block_id)(
x)
x = layers.BatchNormalization(
axis=channel_axis, name='conv_dw_%d_bn' % block_id)(
x)
x = layers.ReLU(6., name='conv_dw_%d_relu' % block_id)(x)
x = layers.Conv2D(
pointwise_conv_filters, (1, 1),
padding='same',
use_bias=False,
strides=(1, 1),
name='conv_pw_%d' % block_id)(
x)
x = layers.BatchNormalization(
axis=channel_axis, name='conv_pw_%d_bn' % block_id)(
x)
return layers.ReLU(6., name='conv_pw_%d_relu' % block_id)(x)
@keras_export('keras.applications.mobilenet.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.mobilenet.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
086d2e43f6dc73531c6c4ac008738208df450b36 | 97a3ef213466f439dd4ab46e5389aad03354813f | /romanos.py | 2e005c6b22d0ce79c95a0db734b84ecb7767bce7 | [] | no_license | MaikolEdu/CODING-DOJO | 425487727b904379418db16937b00b98a30739c1 | 28a0c1ca7dcf8a72fb0aeeb8fdc287339b0688cb | refs/heads/master | 2020-04-29T09:32:09.781147 | 2013-01-27T00:07:31 | 2013-01-27T00:07:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | class Romanos:
resultado=""
def ingresar(yo):
numero= raw_input('\tIngresar numero :')
yo.numero= int (numero)
def caso1(yo,letra,num):
return letra*num
def caso2(yo,letra,num,letra2):
if num==4:
return letra+letra2
return letra+"X"
def caso3(yo,letra,numero):
if numero==5:
return letra
numero-=5
return letra + yo.caso1("I",numero)
def proceso(yo):
numero= yo.numero
if yo.numero==4 or yo.numero==9:
yo.resultado+=yo.caso2("I",numero,"V")
elif yo.numero >= 5:
yo.resultado+=yo.caso3("V",numero)
elif yo.numero>=1:
yo.resultado+=yo.caso1("I",numero)
print yo.resultado
def __init__(yo):
yo.ingresar()
yo.proceso()
objeto = Romanos()
| [
"edwin@ubuntu.(none)"
] | edwin@ubuntu.(none) |
c724bfac75d44df03b34bc2e2ab182a750aeed1c | b324da25b3feef8a90b2b080535cffaa72968b24 | /django intek/main/views.py | 5a609e031f6a71299a3102d1a1acbb78cb65c2a0 | [] | no_license | vfelinis/Code-examples | b1c0821b9a4db84df3dc403b3aec67a8d8d8d9ff | 1f7e610b07b1969f0f5f12f1ab6bade89f9d93c5 | refs/heads/master | 2021-01-13T14:47:42.930088 | 2016-09-24T15:32:49 | 2016-09-24T15:32:49 | 69,037,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,195 | py | # -*- coding: utf-8 -*-
# Create your views here.
from django.shortcuts import render, render_to_response, redirect, get_object_or_404, get_list_or_404
from django.http.response import HttpResponse
from main.models import Home, About, Contact, Service, Article, Social
from main.forms import ContactForm
from django.core.mail import send_mail
from django.conf import settings
from django.core.mail.message import BadHeaderError
from django.http import Http404
def test(request):
try:
contents = Home.objects.get(id=2)
except Home.DoesNotExist:
raise Http404
return render(request, "main/home.html", {'contents' : contents})
def home(request):
args = {}
args['contents'] = get_object_or_404(Home, id=1)
args['sliders'] = get_list_or_404(Service)
args['navbar'] = get_list_or_404(Service)
args['contacts'] = get_object_or_404(Contact, id=2)
args['socials'] = get_list_or_404(Social)
return render(request, "main/home.html", args)
def message(request):
form = ContactForm(request.POST or None)
if form.is_valid():
theme = form.cleaned_data.get("subject")
email = form.cleaned_data.get("sender")
name = form.cleaned_data.get("full_name")
text = form.cleaned_data.get("message")
from_email = settings.EMAIL_HOST_USER
to_email = [settings.EMAIL_HOST_USER, 'stroyintek@mail.ru']
contact_message = "%s: %s от %s"%(name, text, email)
try:
send_mail(theme,
contact_message,
from_email,
to_email,
fail_silently=False)
except BadHeaderError:
return HttpResponse('Invalid header found')
context = {
"form" : form,
"title" : "Спасибо, ваша заявка отправлена!",
"contacts" : get_object_or_404(Contact, id=2),
"navbar" : get_list_or_404(Service),
"socials" : get_list_or_404(Social)
}
return render(request, "main/form.html", context)
context = {
"form" : form,
"contacts" : get_object_or_404(Contact, id=2),
"navbar" : get_list_or_404(Service),
"socials" : get_list_or_404(Social)
}
return render(request, "main/form.html", context)
def about(request):
args = {}
args['contents'] = get_object_or_404(About, id=1)
args['contacts'] = get_object_or_404(Contact, id=2)
args['navbar'] = get_list_or_404(Service)
args['socials'] = get_list_or_404(Social)
return render(request, "main/about.html", args)
def contact(request):
args = {}
args['contents'] = get_object_or_404(Contact, id=2)
args['contacts'] = get_object_or_404(Contact, id=2)
args['navbar'] = get_list_or_404(Service)
args['socials'] = get_list_or_404(Social)
return render(request, "main/contact.html", args)
def get_service(request, service_id):
args = {}
args['contents'] = get_object_or_404(Service, id=service_id)
args['contacts'] = get_object_or_404(Contact, id=2)
args['navbar'] = get_list_or_404(Service)
args['links'] = get_list_or_404(Service)
args['socials'] = get_list_or_404(Social)
return render(request, "main/service.html", args)
def services(request):
args = {}
args['contents'] = get_list_or_404(Service)
args['contacts'] = get_object_or_404(Contact, id=2)
args['navbar'] = get_list_or_404(Service)
args['socials'] = get_list_or_404(Social)
return render(request, "main/services.html", args)
def articles(request):
args = {}
args['contents'] = Article.objects.all()
args['contacts'] = get_object_or_404(Contact, id=2)
args['navbar'] = get_list_or_404(Service)
args['socials'] = get_list_or_404(Social)
return render(request, "main/articles.html", args)
def get_article(request, article_id):
args = {}
args['contents'] = get_object_or_404(Article, id=article_id)
args['contacts'] = get_object_or_404(Contact, id=2)
args['navbar'] = get_list_or_404(Service)
args['socials'] = get_list_or_404(Social)
return render(request, "main/article.html", args)
def yandex(request):
return render_to_response("main/yandex_6379745acdb3f8f4.html")
def google(request):
return render_to_response("main/google69bbe4774885e620.html")
def yandex2(request):
return render_to_response("main/yandex_69b37a055164dea3.html") | [
"vfelinis@yandex.ru"
] | vfelinis@yandex.ru |
d1d7b71b580026eddf95b6c544a982e2a444d43e | 3740c26f3f96115c9d56e2faa415fa9b77cb9120 | /app_linkup/urls.py | aaed9a1899d9ced05d475eb6245007a00e68aa0c | [] | no_license | JMVasquezR/proeyctLinkUp | 39e955df5529690085c7847722509a99e2908fd2 | d2646cca82ae370ff4688b824318d5649f5550ed | refs/heads/master | 2020-05-01T07:35:56.788262 | 2019-03-24T00:57:48 | 2019-03-24T00:57:48 | 177,355,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib.auth.views import LogoutView
from app_linkup.views import LoginView, EncuestaViewSet
urlpatterns = [
url(r'^api/', include(('app_linkup.api.urls', 'api'), namespace='api')),
url(r'^login/', LoginView.as_view(), name='login'),
url(r'^encuesta/', EncuestaViewSet.as_view(), name='encuesta'),
url(r'^logout/', LogoutView.as_view(), name='salida'),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"josemartivr@gmail.com"
] | josemartivr@gmail.com |
b002d21553131485b9e090708656d8985086fad2 | 24021fe2eb98aca9bc35f5b017fa8c631581bb54 | /vulnerabilities/migrations/0027_alter_vulnerabilityreference_url.py | 90fb447c112ed0ec23e2b7f3c1430bf60bc48afa | [
"Apache-2.0",
"CC-BY-SA-4.0",
"Python-2.0"
] | permissive | nexB/vulnerablecode | 0253160c1b04cd992899bf5b74ad76ac125a68ae | eec05bb0f796d743e408a1b402df8abfc8344669 | refs/heads/main | 2023-09-03T21:48:21.368810 | 2023-08-31T16:52:41 | 2023-08-31T16:52:41 | 91,780,998 | 371 | 181 | Apache-2.0 | 2023-09-11T19:24:24 | 2017-05-19T07:56:17 | Python | UTF-8 | Python | false | false | 495 | py | # Generated by Django 4.0.7 on 2022-09-09 16:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vulnerabilities', '0026_alter_vulnerabilityreference_unique_together'),
]
operations = [
migrations.AlterField(
model_name='vulnerabilityreference',
name='url',
field=models.URLField(help_text='URL to the vulnerability reference', max_length=1024, unique=True),
),
]
| [
"pombredanne@nexb.com"
] | pombredanne@nexb.com |
dcc47b23a0a71640a7d9beee45059f336eaf00b9 | 2926780ac13b74882b906d920968c00c48d7b181 | /simple_social_clone/simplesocial/accounts/models.py | e2ed8c06f4c4463e24d4a1591ecc00c341b30a42 | [] | no_license | SodimuDemilade/my_space | 58aaf42701d277cd75de8fef64a55ce5b8f032b3 | 11918964bb382667746a63158869b2dd75bacaac | refs/heads/master | 2023-06-23T04:35:03.784930 | 2021-07-13T21:46:44 | 2021-07-13T21:46:44 | 385,739,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from django.db import models
from django.contrib import auth
# Create your models here.
class User(auth.models.User,auth.models.PermissionsMixin):
def __str__(self):
return "@{}".format(self.username)
# username is an attribute that comes built in with user
| [
"demisodimu@gmail.com"
] | demisodimu@gmail.com |
af115a58f063c98093648abb62f32c0fd485085a | 038ac28874902d5cab51837b750e27a092f68b7e | /quant_sim/sources/yeod_source.py | 1c9773e4dfe26441f5f3af921f95b7aeb9ff8f6c | [] | no_license | TotallyBullshit/quant_sim | ef13e229254e8cc2882d8f2363db0852cbe7ab10 | 2d246819d2a5a3ca0f514951ecde8194b3196a33 | refs/heads/master | 2021-01-13T02:03:53.398806 | 2013-10-01T23:59:01 | 2013-10-01T23:59:01 | 21,032,661 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,973 | py |
import re
import urllib
import datetime as dt
from copy import copy
from collections import OrderedDict
from stock_eod import EOD_Data
class YEOD_Source(object):
def __init__(self, source_dir):
self.source_dir = source_dir
def get(self,sids):
def get_eod(sid):
now = dt.datetime.now()
starty, startm, startd = '1950', '01', '01'
endy, endm, endd = now.year, now.month, now.day
url_str = 'http://ichart.finance.yahoo.com/table.csv?s=%s&a=%s&b=%s&c=%s&d=%02d&e=%02d&f=%d&g=d&ignore=.csv'%(sid,startm,startd,starty,endm,endd,endy)
eod_raw = urllib.urlopen(url_str)
lines = eod_raw.readlines()
eod_raw.close()
if not lines or len(lines)<1: return None
if lines[0][0] == "<":
print "Error loading Yahoo / Cannot find %s"%(sid)
return None
f = open('%s%s_eod.csv'%(self.source_dir,sid),"w")
lines.reverse()
for line in lines[:-1]:
f.write(line)
f.close()
def get_div(sid):
now = dt.datetime.now()
starty, startm, startd = '1950', '01', '01'
endy, endm, endd = now.year, now.month, now.day
url_str = 'http://ichart.finance.yahoo.com/table.csv?s=%s&a=%s&b=%s&c=%s&d=%02d&e=%02d&f=%d&g=v&ignore=.csv'%(sid,startm,startd,starty,endm,endd,endy)
div_raw = urllib.urlopen(url_str)
lines = div_raw.readlines()
div_raw.close()
if (not lines) or (len(lines) < 1): return 0
if lines[0][0] == "<": return ""
f = open('%s%s_div.csv'%(self.source_dir,sid),"w")
lines.reverse()
for line in lines[:-1]:
f.write(line)
f.close()
def get_split(sid):
url_str = 'http://getsplithistory.com/'+sid
f = urllib.urlopen(url_str)
splits_raw = f.read()
f.close()
splitpat = re.compile('<tr class="([0-9][0-9]?[0-9]?\.?[0-9]?[0-9]?[0-9]?[0-9]?[0-9]?)">')
datepat = re.compile('<td>([A-z][a-z][a-z] [0-9][0-9], [0-9][0-9][0-9][0-9])</td>')
splits = splitpat.findall(splits_raw)
dates = datepat.findall(splits_raw)
if len(dates) > 0:
dates = [dt.datetime.strptime(d,'%b %d, %Y') for d in dates]
f = open('%s%s_split.csv'%(self.source_dir,sid),"w")
for i,v in enumerate(splits[0:-1]):
f.write('%s,%s\n'%(dates[i].strftime('%Y-%m-%d'),v))
f.close()
get_eod(sid)
get_div(sid)
#get_split(sid)
def load(self,sid):
def load_div(sid):
f = open('%s%s_div.csv'%(self.source_dir,sid),"r")
div_dict = {}
for line in f:
d,amt = line.rstrip().split(',')
div_dict[dt.datetime.strptime(d,'%Y-%m-%d')] = float(amt)
f.close()
return div_dict
def load_split(sid):
f = open('%s%s_split.csv'%(self.source_dir,sid),"r")
split_dict = {}
for line in f:
d,amt = line.rstrip().split(',')
split_dict[dt.datetime.strptime(d,'%Y-%m-%d')] = float(amt)
f.close()
return split_dict
def load_eod(eod_dict,sid,div_dict,split_dict):
f = open('%s%s_eod.csv'%(self.source_dir,sid),"r")
prev_eod = None
for line in f:
d,o,h,l,c,v,ac = line.rstrip().split(',')
now = dt.datetime.strptime(d,'%Y-%m-%d')
eod_obj = EOD_Data(sid,d,o,h,l,c,v,ac,div_dict.get(now),split_dict.get(now),prev_eod)
if prev_eod != None: prev_eod.next = eod_obj
prev_eod = eod_obj
if now not in eod_dict:
eod_dict[now] = {}
eod_dict[now] = eod_obj
f.close()
return eod_dict
eod_dict = OrderedDict()
div_dict = {}
split_dict = {}
try: div_dict = load_div(sid)
except: pass
try: split_dict = load_split(sid)
except: pass
return load_eod(eod_dict,sid,div_dict,split_dict)
if __name__ == '__main__':
    # CLI entry point: "python yeod_source.py <data_dir> get|load SID [SID...]"
    import sys
    if len(sys.argv) < 4 or (sys.argv[2] not in ['get','load']):
        print 'Usage: python yeod_source.py dir [get|load] [sid]+'
        print 'Example: python yeod_source.py J:/LanahanMain/code_projects/data get SPY DIA'
        sys.exit()
    sids = sys.argv[3:]
    action = sys.argv[2]
    data_dir = sys.argv[1]
    # CSV caches live under <data_dir>/eod_data/.
    yeod_source = YEOD_Source(data_dir+'/eod_data/')
    if action == 'get':
        # Refresh the cache for every requested symbol.
        for sid in sids:
            print 'Updating %s' % (sid)
            yeod_source.get(sid)
    elif action == 'load':
        # Only the first symbol is loaded; print its date coverage.
        print 'Loading:',sids[0]
        data = yeod_source.load(sids[0]).keys()
        print 'Starts:',data[0]
        print 'Ends:',data[-1]
| [
"quantistician@quantistician.com"
] | quantistician@quantistician.com |
f56a71654436830f620bcdd6213f4cf4422e7649 | cb1b108d97e8c039bdcfe039da00ab8ff5290448 | /main/forms.py | 4bf73137a247cbc4a9cddb176a1c3d77ddeda3ca | [] | no_license | Maia-Nguyen/CPSC-362-Group-Project | 6de1cb8300534d0512a8af593cbadd233a99d26b | 538760b39aa23b62942bf0f5dd0fc56c6a20bda4 | refs/heads/master | 2023-04-08T13:41:56.004472 | 2021-04-13T04:57:47 | 2021-04-13T04:57:47 | 344,632,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | from django import forms
from .models import MyModel
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.contrib import messages
# Profile Form
class MyForm(forms.ModelForm):
    """Profile edit form backed by MyModel."""
    # Defines form attributes
    class Meta:
        model = MyModel
        fields = ["first_name", "middle_name", "last_name", "age", ]
        # Customizes form field labels to display
        labels = {'first_name': "Name", 'middle_name': "Middle", 'last_name': "Last", 'age': "Age", }
# Register Form
class RegisterForm(UserCreationForm):
    """Sign-up form: Django's UserCreationForm plus a required email field."""
    email = forms.EmailField()

    class Meta:
        model = User
        fields = ["username", "first_name", "last_name", "email", "password1", "password2"]

    def clean_username(self):
        # Bug fix: this hook was named clean_user, but Django only invokes
        # clean_<fieldname>() methods, so it was never called for "username".
        username = self.cleaned_data.get("username")
        # Query the database instead of iterating every user in Python.
        if User.objects.filter(username=username).exists():
            # Bug fix: the original raised forms.ValidationError() with no
            # message, which itself fails (message is required); supply one.
            raise forms.ValidationError('Username is already in use')
        return username

    def clean_email(self):
        email = self.cleaned_data.get("email")
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError('Email is already in use')
        return email
# User List Form
# UI Home
| [
"noreply@github.com"
] | Maia-Nguyen.noreply@github.com |
e3bc863f35e9d360be7dcaeed446c1f6fb3fe5bb | 777150135a2970ca3d7459515436da9e5920ba88 | /app/database/queries.py | c7de448fd7ac0c20f3865ae6647ae17749b75a72 | [] | no_license | Naatoo/emissions-visualization | 5c30810fb5131639f297f7cb95b7f8b309c76ca0 | f045576ee30a2274356e7a0bb75c6ad038e034f9 | refs/heads/master | 2023-07-23T23:18:56.723798 | 2019-09-04T16:42:37 | 2019-09-04T16:42:37 | 185,286,377 | 1 | 0 | null | 2023-07-06T21:41:13 | 2019-05-06T23:33:00 | Python | UTF-8 | Python | false | false | 4,556 | py | import secrets
from flask import current_app as app
from sqlalchemy import and_
from app.database.database import db
from app.models.dataset import DatasetInfo, DatasetValues
from app.models.countries import Countries
from app.models.auth import User
from app.tools.exceptions import LonLatResolutionException, ZoomingRelativeDataException
def insert_new_file_data(parser, **kwargs):
    """Persist a new dataset: one DatasetInfo row plus one DatasetValues row
    per (lon, lat, value) triple produced by ``parser.rows_generator()``.

    kwargs must provide: compound, physical_quantity, unit, year, name,
    lon_resolution, lat_resolution, relative_data.
    """
    # Random 32-hex-char identifier linking info and value rows.
    dataset_hash = secrets.token_hex(nbytes=16)
    db.session.add(DatasetInfo(
        dataset_hash=dataset_hash,
        compound=kwargs["compound"],
        physical_quantity=kwargs["physical_quantity"],
        unit=kwargs["unit"],
        year=kwargs["year"],
        name=kwargs["name"],
        lon_resolution=kwargs["lon_resolution"],
        lat_resolution=kwargs["lat_resolution"],
        relative_data=kwargs["relative_data"],
    ))
    # Commit the metadata row first so value rows reference a stored dataset.
    db.session.commit()
    for (lon, lat, value) in parser.rows_generator():
        db.session.add(DatasetValues(dataset_hash=dataset_hash,
                                     lon=lon,
                                     lat=lat,
                                     value=value
                                     ))
        # Flush per row; single commit at the end.
        db.session.flush()
    db.session.commit()
def delete_data(dataset_hash):
    """Delete a dataset's metadata row and every one of its value rows."""
    info_row = DatasetInfo.query.filter_by(dataset_hash=dataset_hash).one()
    db.session.delete(info_row)
    db.session.commit()
    value_rows = DatasetValues.query.filter_by(dataset_hash=dataset_hash).all()
    for value_row in value_rows:
        db.session.delete(value_row)
        db.session.flush()
    db.session.commit()
def get_dataset(dataset_hash, rows_limit: int=None):
    """Return the dataset's rows as (lon, lat, value) tuples, optionally
    truncated to the first ``rows_limit`` rows."""
    query = DatasetValues.query.filter_by(dataset_hash=dataset_hash)
    if rows_limit:
        query = query.limit(rows_limit)
    rows = []
    for record in query.all():
        rows.append((record.lon, record.lat, record.value))
    return rows
def get_dataset_by_coordinates(dataset_hash, boundary_coordinates: dict):
    """Return (lon, lat, value) tuples for a dataset filtered by the given
    coordinate constraints.

    Bug fix: the original called filter_by(and_(dataset_hash=..., **kw)),
    which raises a TypeError -- filter_by() accepts only keyword arguments
    and and_() only positional clauses.  filter_by already ANDs its keyword
    filters together.
    """
    # NOTE(review): assumes boundary_coordinates keys are DatasetValues
    # column names -- confirm against callers.
    dataset = DatasetValues.query.filter_by(dataset_hash=dataset_hash,
                                            **boundary_coordinates)
    return [(row.lon, row.lat, row.value) for row in dataset.all()]
def get_data_metadata(dataset_hash):
    """Return the DatasetInfo row for the given dataset hash."""
    return DatasetInfo.query.filter_by(dataset_hash=dataset_hash).one()
def get_country_bounding_box(code: str) -> tuple:
    """Return (lon_min, lon_max, lat_min, lat_max) for a country code."""
    country = Countries.query.filter_by(code=code).one()
    return (country.box_lon_min, country.box_lon_max,
            country.box_lat_min, country.box_lat_max)
def get_country_centroid(code: str) -> tuple:
    """Return the (lat, lon) centroid of a country."""
    country = Countries.query.filter_by(code=code).one()
    return (country.centroid_lat, country.centroid_lon)
def get_country_name(code: str) -> str:
    """Return the display name of the country with the given code."""
    country = Countries.query.filter_by(code=code).one()
    return country.name
def get_selected_data_str():
    """Return a human-readable summary of the currently selected dataset,
    or "No data selected" when no dataset hash is configured."""
    # App-wide pointer to the dataset currently chosen in the UI.
    dataset_hash = app.config.get('CURRENT_DATA_HASH')
    if dataset_hash:
        metadata = get_data_metadata(dataset_hash)
        boundary_values = get_boundary_values_for_dataset(dataset_hash)
        selected_data_str = f"{metadata.name}, {metadata.physical_quantity}, " \
                            f" {metadata.compound}, {metadata.unit}, {metadata.year}, " \
                            f"Longitude=({boundary_values['lon_min']}, {boundary_values['lon_max']}), " \
                            f"Latitude=({boundary_values['lat_min']},{boundary_values['lat_max']})"
    else:
        selected_data_str = "No data selected"
    return selected_data_str
def assert_lon_lat_resolution_identical(dataset_hash):
    """Raise LonLatResolutionException unless the dataset's longitude and
    latitude resolutions are equal."""
    metadata = get_data_metadata(dataset_hash)
    lon_res = float(metadata.lon_resolution)
    lat_res = float(metadata.lat_resolution)
    if lon_res != lat_res:
        raise LonLatResolutionException
def assert_zooming_relative_data(dataset_hash, zoom_value: int):
    """Raise ZoomingRelativeDataException when a non-zero zoom is requested
    on a dataset whose values are not relative."""
    metadata = get_data_metadata(dataset_hash)
    zoom_requested = zoom_value != 0
    if zoom_requested and metadata.relative_data is False:
        raise ZoomingRelativeDataException
def get_boundary_values_for_dataset(dataset_hash: str) -> dict:
    """Return the extreme lon/lat values present in a dataset as a dict with
    keys lon_min, lon_max, lat_min and lat_max."""
    def _edge_row(column, descending):
        # First row of the dataset ordered by the given column.
        query = DatasetValues.query.filter_by(dataset_hash=dataset_hash)
        ordering = column.desc() if descending else column
        return query.order_by(ordering).first()
    return {
        "lon_min": _edge_row(DatasetValues.lon, False).lon,
        "lon_max": _edge_row(DatasetValues.lon, True).lon,
        "lat_min": _edge_row(DatasetValues.lat, False).lat,
        "lat_max": _edge_row(DatasetValues.lat, True).lat,
    }
def get_user(username: str):
    """Return the User with the given username, or None when absent."""
    return User.query.filter_by(username=username).first()
#
# def insert_user(username, password):
# db.session.add(User(username=username, password=password))
# db.session.commit()
| [
"thenatooorat@gmail.com"
] | thenatooorat@gmail.com |
0e892b46364b177bd07fcbe10de8bc758f9ecbf6 | 9d4bd8f2b64f0ff5e8c0fbb0cea434f2ce4a2e46 | /conference/models.py | 265970179b9fb0d91f35f9de73f5b7f7c5524804 | [] | no_license | jskonst/ivbit | 7f05acaa4d93b614a001d37fd973b36a9f74e379 | 9bb8c1ebaddba838ac855a039ff480aa41ca3405 | refs/heads/master | 2020-06-05T15:16:09.842720 | 2014-11-15T07:23:07 | 2014-11-15T07:23:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | #-*- coding=utf8 -*-
from django.db import models
from accounts.models import UserProfile
# Create your models here.
class Section(models.Model):
    """A conference section (track)."""
    # Human-readable section title.
    section = models.CharField(max_length=100, verbose_name=u'Название секции')
    def __unicode__(self):
        return self.section
class Doklad(models.Model):
    """A conference talk (report) submitted to a section."""
    section = models.ForeignKey(Section)
    title = models.CharField(max_length=1024, verbose_name=u'Назвние доклада')
    # Free-text co-author names, as opposed to registered users below.
    authors = models.CharField(max_length=1024, blank=True, verbose_name=u'Соавторы')
    # Registered user profiles attached as authors.
    author = models.ManyToManyField(UserProfile, blank=True)
    text = models.TextField(verbose_name=u'Текст доклада (можно использовать HTML разметку, заголовки не выше h3)')
    # When True the talk is visible on the public site.
    publish = models.BooleanField(blank=True, verbose_name=u'Опубликовать')
    def __unicode__(self):
        return self.title
| [
"jskonst@yandex.ru"
] | jskonst@yandex.ru |
42e9fe3ab57bd3c1e296f665413fc82fba5070e3 | 21e6a09131ac76d734102c829260c3b8e3a0094b | /solutions/21_textfsm/task_21_4.py | 9986cf1ad1531aef03cb29f28f968dc09e18cec7 | [] | no_license | Egor-Ozhmegoff/Python-for-network-engineers | 5fbe8f3a754263ab65c28093fed667684ae76ded | 6b70f4f9df658698ea0d770a064ee0e12b4e4de2 | refs/heads/master | 2023-08-11T20:52:12.999495 | 2021-09-09T14:42:14 | 2021-09-09T14:42:14 | 306,354,285 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,879 | py | # -*- coding: utf-8 -*-
"""
Задание 21.4
Создать функцию send_and_parse_show_command.
Параметры функции:
* device_dict - словарь с параметрами подключения к одному устройству
* command - команда, которую надо выполнить
* templates_path - путь к каталогу с шаблонами TextFSM
* index - имя индекс файла, значение по умолчанию "index"
Функция должна подключаться к одному устройству, отправлять команду show с помощью netmiko,
а затем парсить вывод команды с помощью TextFSM.
Функция должна возвращать список словарей с результатами обработки вывода команды (как в задании 21.1a):
* ключи - имена переменных в шаблоне TextFSM
* значения - части вывода, которые соответствуют переменным
Проверить работу функции на примере вывода команды sh ip int br и устройствах из devices.yaml.
"""
import os
from pprint import pprint
from netmiko import ConnectHandler
import yaml
def send_and_parse_show_command(device_dict, command, templates_path):
    """Connect to one device, run a show command and let netmiko parse the
    output through TextFSM; returns the parsed structure."""
    # Point netmiko/textfsm at the template directory unless already set.
    os.environ.setdefault("NET_TEXTFSM", templates_path)
    with ConnectHandler(**device_dict) as connection:
        connection.enable()
        parsed_output = connection.send_command(command, use_textfsm=True)
    return parsed_output
if __name__ == "__main__":
full_pth = os.path.join(os.getcwd(), "templates")
with open("devices.yaml") as f:
devices = yaml.safe_load(f)
for dev in devices:
result = send_and_parse_show_command(
dev, "sh ip int br", templates_path=full_pth
)
pprint(result, width=120)
# Второй вариант без использования use_textfsm в netmiko
from task_21_3 import parse_command_dynamic
def send_and_parse_show_command(device_dict, command, templates_path, index="index"):
    """Variant that collects raw output via netmiko and parses it with
    parse_command_dynamic using the TextFSM index file."""
    lookup_attributes = {"Command": command, "Vendor": device_dict["device_type"]}
    with ConnectHandler(**device_dict) as connection:
        connection.enable()
        raw_output = connection.send_command(command)
    return parse_command_dynamic(
        raw_output, lookup_attributes, templ_path=templates_path, index_file=index
    )
if __name__ == "__main__":
full_pth = os.path.join(os.getcwd(), "templates")
with open("devices.yaml") as f:
devices = yaml.safe_load(f)
for dev in devices:
result = send_and_parse_show_command(
dev, "sh ip int br", templates_path=full_pth
)
pprint(result, width=120)
| [
"nataliya.samoylenko@gmail.com"
] | nataliya.samoylenko@gmail.com |
40cdb096e1d4bdd8c6d0a26a83bfe1ce360d32d6 | ba845b92ce116f5fea43c99dd67815011d3b3295 | /ExtractDocInfo/sentest.py | 36505c4be3c0e0c02a8f2aca7f037cae918984e5 | [
"MIT"
] | permissive | abhishek9sharma/apibot | 7cac6fccc6c322c195c728672b2bd8a789bf25ac | 2469d3b08f476678ef4d54f7ff58a475a48d8620 | refs/heads/master | 2020-03-06T22:03:53.321473 | 2019-04-08T11:17:46 | 2019-04-08T11:17:46 | 127,093,814 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | from os import listdir
from os.path import isfile, join
mypath = '/home/.../indexbuildingtest/.../FACTS/'

# Plain files in the FACTS directory.
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]

cnt = 0             # number of ' see ' lines encountered (duplicates included)
sinceList = []      # "<cleaned sentence>,<filename>" rows for the CSV
seen_lines = set()  # raw lines already recorded, for de-duplication

for fi in onlyfiles:
    # Bug fix: the original opened the file outside a try, used a bare
    # except, and on failure silently reused the previous file's lines
    # (stale `data`) and leaked the handle.  Read with a context manager
    # and skip unreadable files instead.
    try:
        with open(mypath + fi, 'r') as src:
            data = src.readlines()
    except OSError:
        print("Exception occured for" + fi)
        continue
    for d in data:
        if ' see ' in d:
            cnt += 1
            # Bug fix: the original tested `d not in sinceList`, but
            # sinceList holds transformed rows (commas stripped, filename
            # appended), so the membership test never matched and no
            # de-duplication happened.  Track raw lines separately.
            if d not in seen_lines:
                seen_lines.add(d)
                x = d.replace(',', '').replace('\n', '') + ',' + str(fi)
                print(x)
                sinceList.append(x)
    #print(str(cnt) +' files processed ')

# Append all collected rows to the output CSV.
with open('sentence2.csv', 'a') as fout:
    for s in sinceList:
        fout.write(s + '\n')
| [
"abhi0270@gmail.com"
] | abhi0270@gmail.com |
4425e109b0efe53b2e51a04bcddab969c531489c | d27bf22683710ff090642c05c1df2d13b18c2509 | /allauth/openid/admin.py | 0967c5c39ae1d4e1a60416bffb65e3f68ea3ecd1 | [
"MIT"
] | permissive | snswa/django-allauth | b8db554519111e5d022fb137d259e272db9998f4 | 0b58191f5d954d7f5a7c4e5bc8c33cf6fdf0c416 | refs/heads/master | 2021-01-18T10:29:31.434368 | 2010-10-21T18:24:56 | 2010-10-21T18:24:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from django.contrib import admin
from models import OpenIDAccount
class OpenIDAccountAdmin(admin.ModelAdmin):
    """Admin configuration for OpenIDAccount."""
    # Render the user FK as a raw-ID input instead of a full select widget.
    raw_id_fields = ('user',)
admin.site.register(OpenIDAccount, OpenIDAccountAdmin)
| [
"raymond.penners@intenct.nl"
] | raymond.penners@intenct.nl |
ca6d4ae02b1eb34437dddfe4b0feb3d50e46d002 | 9952b652b9c72eb951f639f29b14e1c2b27aa489 | /calculator.py | c53fe25ca753efefd7c21c37963785e1906fa531 | [] | no_license | JeeheeHan/calculator-1 | 66f7ffe1d75c06930b08a424bed78f81560659fc | 800a1bbf9ec967ad19eeec03a4de2f2e7c2239c3 | refs/heads/master | 2023-03-07T22:28:54.948273 | 2021-02-25T00:09:13 | 2021-02-25T00:09:13 | 342,030,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,920 | py | """CLI application for a prefix-notation calculator."""
from arithmetic import *
while True:
    # Prefix notation: "<operator> <num1> [<num2> [<num3>]]".
    user_input = input("Enter your equation > ")
    tokens = user_input.split(" ")
    # Quit whenever "q" appears among the tokens.
    if "q" in tokens:
        print("You will exit.")
        break
    elif len(tokens) < 2:
        print("Not enough inputs.")
        continue
    operator = tokens[0]
    num1 = tokens[1]
    # Missing operands default to "0".
    if len(tokens) < 3:
        num2 = "0"
    else:
        num2 = tokens[2]
    # Bug fix: num3 was only assigned when four tokens were present, so an
    # "x+" with three tokens crashed with NameError; give it a default too.
    if len(tokens) > 3:
        num3 = tokens[3]
    else:
        num3 = "0"
    # A place to store the return value of the math function we call,
    # to give us one clear place where that result is printed.
    result = None
    if not num1.isdigit() or not num2.isdigit():
        print("Those aren't numbers!")
        continue
    # We have to cast each value we pass to an arithmetic function from a
    # a string into a numeric type. If we use float across the board, all
    # results will have decimal points, so let's do that for consistency.
    elif operator == "+":
        result = add(float(num1), float(num2))
    elif operator == "-":
        result = subtract(float(num1), float(num2))
    elif operator == "*":
        result = multiply(float(num1), float(num2))
    elif operator == "/":
        result = divide(float(num1), float(num2))
    elif operator == "square":
        result = square(float(num1))
    elif operator == "cube":
        result = cube(float(num1))
    elif operator == "pow":
        result = power(float(num1), float(num2))
    elif operator == "mod":
        result = mod(float(num1), float(num2))
    elif operator == "x+":
        result = add_mult(float(num1), float(num2), float(num3))
    elif operator == "cubes+":
        result = add_cubes(float(num1), float(num2))
    else:
        result = "Please enter an operator followed by two integers."
    print(result)
#adding a comment so we can git commit- Emily & Jenny
"han.jennyjh@outlook.com"
] | han.jennyjh@outlook.com |
9e0feb731caaba41e7d4c0be8458f4abd8456f0e | f061602595a78bdbdbf32e2dfdcfe623db5b8efd | /graph/migrations/0001_initial.py | b23196dead9cd932bf09ce3bbb6e066e1eb0df9d | [] | no_license | NorbertMichalski/utilities | b9e0643d4b8e0097e0c774d63adbeaa66d3da06b | da27a23add9c42d62ae21a5e74eef920bbd3d839 | refs/heads/master | 2020-05-14T19:04:23.262384 | 2014-01-27T13:45:28 | 2014-01-27T13:45:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,771 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration creating the OverviewGraph and OverviewStat tables."""
    def forwards(self, orm):
        # Adding model 'OverviewGraph'
        db.create_table(u'graph_overviewgraph', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('brand', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
        ))
        db.send_create_signal(u'graph', ['OverviewGraph'])
        # Adding model 'OverviewStat'
        db.create_table(u'graph_overviewstat', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('graph', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['graph.OverviewGraph'])),
            ('price', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=6, decimal_places=2)),
            ('rank', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=5, decimal_places=2)),
            ('visits', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('sales', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('date', self.gf('django.db.models.fields.DateField')(default=datetime.date.today)),
        ))
        db.send_create_signal(u'graph', ['OverviewStat'])
    def backwards(self, orm):
        # Deleting model 'OverviewGraph'
        db.delete_table(u'graph_overviewgraph')
        # Deleting model 'OverviewStat'
        db.delete_table(u'graph_overviewstat')
    # Frozen ORM state used by South when running this migration.
    models = {
        u'graph.overviewgraph': {
            'Meta': {'ordering': "['id']", 'object_name': 'OverviewGraph'},
            'brand': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'graph.overviewstat': {
            'Meta': {'object_name': 'OverviewStat'},
            'date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
            'graph': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['graph.OverviewGraph']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '6', 'decimal_places': '2'}),
            'rank': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '5', 'decimal_places': '2'}),
            'sales': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'visits': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        }
    }
complete_apps = ['graph'] | [
"Norbertmichalski16@gmail.com"
] | Norbertmichalski16@gmail.com |
f1979087cd1398a523b893f6bdb223fc4f3c142e | 65585dce782bb50d92caa69be2431e094ac36a1f | /examples/recursive_dirtree_generator.py | 50307af4a1c3021c3703469a8d1c6028f5d8ab66 | [
"Apache-2.0"
] | permissive | vishalbelsare/treelib | 6e52f594cecb69210332b7092abcf1456be14666 | 12d7efd50829a5a18edaab01911b1e546bff2ede | refs/heads/master | 2023-08-31T07:38:06.461212 | 2022-04-13T15:07:52 | 2022-04-13T15:07:52 | 153,905,842 | 0 | 0 | NOASSERTION | 2023-03-27T15:17:00 | 2018-10-20T12:59:18 | Python | UTF-8 | Python | false | false | 1,691 | py | #!/usr/bin/env python
"""
Example of treelib usage to generate recursive tree of directories.
It could be useful to implement Directory Tree data structure
2016 samuelsh
"""
import treelib
import random
import hashlib
from string import digits, letters
MAX_FILES_PER_DIR = 10
def get_random_string(length):
return ''.join(random.choice(digits + letters) for _ in range(length))
def build_recursive_tree(tree, base, depth, width):
"""
Args:
tree: Tree
base: Node
depth: int
width: int
Returns:
"""
if depth >= 0:
depth -= 1
for i in xrange(width):
directory = Directory()
tree.create_node("{0}".format(directory.name), "{0}".format(hashlib.md5(directory.name)),
parent=base.identifier, data=directory) # node identifier is md5 hash of it's name
dirs_nodes = tree.children(base.identifier)
for dir in dirs_nodes:
newbase = tree.get_node(dir.identifier)
build_recursive_tree(tree, newbase, depth, width)
else:
return
class Directory(object):
def __init__(self):
self._name = get_random_string(64)
self._files = [File() for _ in xrange(MAX_FILES_PER_DIR)] # Each directory contains 1000 files
@property
def name(self):
return self._name
@property
def files(self):
return self._files
class File(object):
def __init__(self):
self._name = get_random_string(64)
@property
def name(self):
return self._name
tree = treelib.Tree()
base = tree.create_node('Root', 'root')
build_recursive_tree(tree, base, 2, 10)
tree.show()
| [
"noreply@github.com"
] | vishalbelsare.noreply@github.com |
28b36d3a5573396d06f4fc040e80c83c507ad99a | c81b830732b77519657fde7766200d61bb53e5fd | /python/images.duckduckgo.py | c0930cb72c21eb03492017e8ce45865a4de564e9 | [] | no_license | ericaxu/skinwise-old | 851c2c5b5ae5e94d5be5da029fde2c71273b8c30 | 9026c3a36e68f8580c540450b74b3dc3abca6e71 | refs/heads/master | 2021-05-27T15:39:12.073259 | 2014-11-24T07:03:15 | 2014-11-24T19:06:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,936 | py | import sys
from util import (web, db, parser, util)
db = db.DB("cache/duckduckgo.cache.db")
crawler = web.Crawler(db)
#File in
file_data_json = "data/data.json.txt"
file_images_duckduckgo_corrections_json = "data/images.duckduckgo.corrections.json.txt"
#File out
file_images_duckduckgo_json = "data/images.duckduckgo.json.txt"
#Crawled URLs
url_image_search = "https://duckduckgo.com/i.js?o=json&q=%s"
bad_urls = [
"ebaystatic.com",
"wp-content",
"blogspot"
]
data = util.json_read(file_data_json, "{}")
if 'products' not in data:
sys.exit(0)
products = data['products']
image_corrections = util.json_read(file_images_duckduckgo_corrections_json, "{}")
result = dict()
result['images'] = dict()
for key, product in products.items():
query = "%s %s" % (product['brand'], product['name'])
query = web.urlencode(parser.regex_remove("[^0-9a-zA-Z ]", query))
result_json = crawler.crawl(key="search/%s" % query, url=url_image_search % query)
if not result_json:
print(query)
continue
result_object = util.json_decode(result_json)
results = result_object['results']
final_image = None
if key in image_corrections:
image = dict()
image['source'] = "Correction"
image['width'] = 0
image['height'] = 0
image['url'] = image_corrections[key]
final_image = image
else:
for img in results:
image = dict()
image['source'] = img['s']
image['width'] = int(img['iw'])
image['height'] = int(img['ih'])
image['url'] = img['j']
# size chech
if image['width'] < 300 and image['height'] < 300:
continue
# wordpress
good_url = True
for bad_url in bad_urls:
if bad_url in image['url']:
good_url = False
if not good_url:
continue
final_image = image
break
if final_image is None:
print(query + " not found")
else:
result['images'][key] = final_image
util.json_write(result, file_images_duckduckgo_json)
parser.print_count(result['images'], "Images")
| [
"lishid@gmail.com"
] | lishid@gmail.com |
3fd67b7af28ef9703b6434a9587193792e90f96a | 99868e01a8fcd9830597e46399efe29368097a19 | /5.py | 385ba8848477a3643abe514be578e64e6a22a281 | [] | no_license | bhavdipaakhaja/SDemo0 | 2e0d1acb81d9df4bf9a731f56e6a62e79c948186 | b11d84a1755a5cb16325b778d4fdced9ee52ea5d | refs/heads/master | 2020-05-20T10:42:48.054345 | 2015-08-07T10:01:16 | 2015-08-07T10:01:16 | 40,353,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | color = ['red','blue',12]
color.append(2)
color1 = ['blue','orange']
color2 = [1,2,3,4]
color2.reverse()
color2.remove(3)
color2.pop()
| [
"adminsyss@SPBWSCSEE31.SFU.Local"
] | adminsyss@SPBWSCSEE31.SFU.Local |
a2ac395d9c7b07a78e647acedd851fd2705ba2aa | 538f002a5ec265a56bf81067edc4d23343448336 | /Majid786.py | f9c53ee3007de4208a4adf8c5d4480f9320dceb3 | [] | no_license | majidkan/majid786 | 4d4a38a32bc2eca4a709bfe35484cc3997e67c2d | 475d66837a10f37af829f0da143c00e573229c7e | refs/heads/main | 2023-02-07T00:29:02.146980 | 2020-12-25T05:18:38 | 2020-12-25T05:18:38 | 324,292,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,711 | py | # Compiled By : Majid
# GitHub : https://github.com/Majidkan
# YouTube Channel : Tech Qaiser
import marshal,zlib,base64
exec(marshal.loads(zlib.decompress(base64.b64decode("eJztWVtvW8cR3kNJlERJ1sW2ZNmufZzEipxEvEmULDtKqosvQmxJpiQrpSAQhzxL8lDnQp1dWpJrFyjcvjepk5cG7UNeAgRNUvQlRYH+kgIF3KeiL/0H6czsOSRlKUnThz6Vl+XsZXZnZ+ebmT0ssuDVAd8fw1f8XWPs8K/MZMzUmM1YjoW0xnJaSEdYLhLSbSzXFtLtLNce0h0s1xHSUZaLhnQny3WGdBfLdYV0N8t1h3SM5WIh3cNyPSHdy3K9Id3Hcn1ER5h9ijn9LNfPNKy3MXuAOYMsN6jq7cjnDLHcENO23OusnZ9muzHmD2vw4oxVzzCzgz3TWFiJtlY6qeJq7H1+lnFNdWmsOszMLlXpZdUh9gxUNcL4CKueY3xUdUDlPMPuC6x6EUeYsMEY9GmaeY6VNRye/xEz+9gvgPsSM08RcZmZ/UTozBwg4gozYTuvMHOIqq8y8zTLvcbKQF9l5hlqHGPmWSJeZ+YwEeOMX2PmCNuNMP9vGh9DCTSXlLc+PgqHbX0Dr5VxOHEmY1BsVHxumGueZ6u2figWPdflRWl57i3f93zV0QnFgu/tC+6Ls0DXrFpat1whDdvWfb5X50IKMfxyj8OLFcO1HnOBM9cOZcVz0/p70BaHSju01WXpuuwCwjEO8tJyuIXrCRSuDotNGGXuSnEJGpcM+5G1m0jFp+NJffye5dYPbuqbN/V51/Q9y9Sn4lPx9E195f1MRl+oW7aZeG91I5NJTl/Tt28vzK8kbi9Mzd8E6mEilYQ54J2ejk9fh6aFh4mpzGxyKjWdhNrS/cRPTe4KSx7OTcaTb+1bpqzMpZLXk29VuFWuyLnUbDr5FEbeW0xYMr+8AWT2yBSL2cSaJyS/7xUsm0PD/dsJQ9QFrrUUUmsriaLnxEtGkRc8bze+a0jDNVCAh4n59c31fC6ZnF+C+vrDRCaO066uJVI4+3zi4Pr0DcN3uFGwJh7NGDd3iiGuUX2LqEI8DDj4O3clmDWYb4SsAeCxPh6BrhWBWt++sqPfOrDkeBueMraD2EiLQ0Hnw7ET520WAoeZ5SzaxHmst9O6HVoR18ZKVyjDupLhqYaYfRphBzcYSLO0k2ZP29iTCCL3icYkSQeolVQFJAE6zz2LsNGRJ21sZAYY906zLZAHWJ6hLUdxA2SURkWaj6tFiSJIXP+KhSRZ0XhH2HggkcM3XNNziA1Jy5W0VRssDEcWuTR2x9tDngKV+1SaVFrHdEAqMorG7uXQaJkWhXefllTawHk7Q238rKGNg3nc6NLOLCoFqCoLtoZqacfjAn1ByzAoSJJqUBHQPwxaedrO9vrYFowDNc1ALeCIklOjfthXFRo7sAv8GXKqk8eTWcliKXDrV64KgeJd2L4qbqYc63J4nBe2kw7t8UpS4D5ipHJSk+Wa/EApkddsMF5lL9LP0pAoVUyvLmn0vm9JTgeRHcBiEIvToY6r2Z4TTQu9xetYPxWotE/r18a1mDak1IrD2kK1YnHwNntCyh1Z2kmihhtGrwxKC+ptBAJJujFJ49peD9si3bQRKmi35a8v/fKfD77+7bu0bdpY9hwWo6EWSnZdVOj40WlRk7A5rxG2aG+PqeTHbQbHVgznsZHCxi7a4aA2AHsUcNwsdmE7dXM247z46Pcvnv88+Hz0RaP5+bOg56MvgG5txp4/EPFpyPlH+jToFuLojE1Wmh2nhjk+Dzk+py4isPfLI8J8RdN92cLaKsGzJmuLtIop2MSnyI0tf6aP4mshWtYDIYPPV9isf99LMU46Ez/sFVN8086iZ3JTXzjU9Rs6NU05GxDT9AeGBdEpHHXHkpV6oWVMRcqauJFIlKkjDm4+gWxHuW4Hjr/Bpbr1+ULBEOGgnwCQ6gX+nctvcJuXfcPRX5rpyEQrnmyZZYXDtu5Yvl3yLe6asdiLjz/8js8D+M4rcln9rDf6JhtUFr4
TzZHwWTjyc6QLJ2ioeckSRdsAIPkNCZd12JDrScguRM2DaFywuV7yfN1wD3XHEkKH/CD2X54vRcsjTuS28s0mxaRyG3sKPiWJPmVpZ5ziVzt6kpE7R4KpcrNBKrk3HPgRCk7kReNggYqIN4i4LoaA6Nt+8cnzHf2eVy5bbllfdvVmyGrxNxewuIjFj7A3ErofaUlrlxyMd9zBUCy3dlebMakfnEsX62MUG2nzOCYabj4JE6mNmYG3jGDe0Ba4SNDFE8qyMa+8d8JIVA5jI6CIkTt33VEMRcCunKzKpiEsBApTOTRoFNQJ2TFO+afvmxJSaFS+2YOVYN5elaWPYjJ98mKHOuatkGJDcv2M0o9gmQEateVeYu2QespuVorQ9WAIrweQF1PWfJ69Dyn4yTPTMQ9RDkKJg80NX+ABbf/rd89/tdOA6urqPX1z/VZ2Zf7+rRu6xFi3R8g0EJmK48Unv97RFUfqZQ46PX286Pk+ZOTXKPPGjG3ZfWTYkPJuwlSu4fC4SpovNSU4IsLa/Pr61mp2CURAG8T0+9vWbg4VKK3eXNxC4zomwRpsY9/zzbjoxkzKA2OOywNJlukLDPZh0h/HZdOQ9osz3zLHOK6Y7WgkEZCBcofIggFXEl/iEr6xn7fcWl02UUFDJK1N+/NqkNBhWH2PH9IVhpKV5VVF4yIQsCueSRwq8cXR9UCZdKQ1lEkhzdvl8niG8ib8bGH9OgGsD9LfXnjj70CkA347I2G9D95Bm9YGUBzUTms9VG8gseGG+k9EoklFm4IFFB0BOqMt6OwkKN1HfJHRkhH/hboUyoroOaj1M2qNUes/vgX4PQFKQ+B2IuIAagpIkJpWu8LUCivdChenkDvGqj2EuUbaFSAuqAwq5L5CUgyRFN+wUOJevOk2NkBAQ3tZyb6G6nkVTScVAGzSQfdpufoWRFl9eSnRNMYMjkuHtj3pqHGbAl3tBhyoGx/XVzx9Ga554EkP9TXfg/Di6NfIZLcnm5x4OYqLKGEBXwI9ckz1pp3FiucJrq/W8Lqsv/OOrmfRSMVAYOL6lu+RdweDJUykqEyLiwFUP2ji7471iCvhIEwfhRNlzWIMVz6KWQwfEMgheqzXi0UuRKlu24eBO5jHvNgsTyAe9DAj2d/fjx+qnILSEryfu9xObC7e5XJvfq++uetVDx4+WuOFB7VCsnwyhmkXSbpi4jYDaVo2O94bYjmLsMlew+INLDrDEEa7y+KJZt9iral10QadtgQ9QigWFU/dSeG3FhA14zg0E4wQ0QrN8I2Q7IBrRC99FWg7KTrGGkCFPgAnQbOzNUh+/DI0Mb5EgowBYdHobg/iS0FrB7lB0iC+fEDx5QjcCMnRMIVAKtqgVMhReO5EFDfiJmAP4IsYVWGtMwiHcHuUCokIwgjiFcA5ArCkizRgEm+CeD/sYdVevBxitQPx+aSD7UaZv6wFWxqkuaPsSZRV+7AXxowS2qHhlALnEErWz6oDiNkWgJ9prbRevwLN7H2omWdpzmE2iqvWNXOkOdc5xTFIi+DY0WPrqD58REce4nzTQxzFDd0hT45C114K19OBk4CE29fD1FyfLxa9uivFxLHRSx4mpxCBFcca94XnGnaDI3WMA8ca+grfPzY9wF65MnEVuP6jDJaSyO03d/SVulPgfuKWY1g2BHgEZuCE+oMRoVtEt/IBoiZwBYUJo2Y1Hz+hN1CBMWHUZSVOWnzXILeSxyjozqUnZ2Yys7PJ2cxsajqTuZrOpDMzi8lSaippGAVulgrTGaOYnjFmJme5mTLS6enJQmoMsnbHkHNVUNCYMHfzj0BX4C/nUmMcpZ4TIyDVmO0VDZvPcTe/uT5WC2SeE3exD7jmLE+MlTnkAYbkeQFCwRT5IkhucQFTCas8N1nKZDKl2VmQI1UqmjOGkSxOTZUy10uZdJqXpiV6pdYdKcd6telY/3d+FUMEMrXqnzIcjmlK3hFlZUJN94rZnn6/LqT+kPtW6bBhPQscNMy
VBcUtehozoVgDzsBGaIJlM9G0iGURRKgrlM62OGxyyThLNt7w33rDiaPjJ9dNjwwLPnljPF8ibM8wyZ1nZ1jr9SU7ja66M2STBfLqlmVSFX6wWts3aRLTkEb2UjhWnJCC4Wy/wfpC4OfD5KqPfHyMfHqvNkQ+/jL09VJ/6OnR1w8QHaNkjJLAfB4s0s3n6ab1/6fQP/Qp9PgbDSuKhkdPeoXT5PigLIsIpHy8YoiKbRXodH1Oli/p3wjI0bLvsiCtr/s2DsJeBXWsIXuZS/QR1NP4l4GWCv+NkJg3OnVbWjXfQwzDxPGa59lZNBd5umVonB8UOeVwIosQz5J54k1h2anBnYFuDoQL9RyZo4VLRIzg0uQlA1bhbtEj2d/BYYOqLw9ymTbPQ3rpgUSomNuGDR1DL/Xzks9FhQbk0aWQ8Hc3NtayqmdNbQFkQZs0TLMCmgI3SulUFgfTM1MF0WsNdCKQCkZxN4sZDN1ZKDsi6AT2jlcesPcGuo48ScAhbzueWbf5O/TQGBnbtM+0AXifCWA02BaDEtOpi1pvpLuju6s71n0xClRUg/Jq9+Xu13q1fwPoK52T"))))
| [
"noreply@github.com"
] | majidkan.noreply@github.com |
729ca8bfabaa02472ca043bba11652e65f0b5189 | 2b01909b0023e0d1a18b773f00f10d3e5b03070f | /models/hr_holidays.py | f9559f03bc30e056dddb8a1e1b61e6a4b9338200 | [] | no_license | guesmihela/Holidays_Before | f50d7c3edbe98c9712980ee3f629fa388ac8af75 | 6ff345ba359cdf3c3f0a69826fc47c90f3db48c1 | refs/heads/master | 2021-05-07T08:03:09.719536 | 2017-11-14T10:12:00 | 2017-11-14T10:12:00 | 109,246,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,128 | py | # -*- coding: utf-8 -*-
# © 2015 iDT LABS (http://www.@idtlabs.sl)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import logging
import math
from datetime import timedelta
from datetime import datetime
from werkzeug import url_encode
from odoo import api, fields, models
from odoo.exceptions import UserError, AccessError, ValidationError
from openerp.tools import float_compare
from odoo.tools.translate import _
_logger = logging.getLogger(__name__)
class HrHolidays(models.Model):
    """hr.holidays extended with a minimum-notice ("before") constraint.

    When the leave type is flagged ``is_before``, a leave request must be
    submitted at least ``val_before`` days ahead of its start date.  The
    notice actually given is stored in ``diff`` and the rule is enforced at
    every workflow entry point (create, write, confirm, approve, validate,
    resource-leave creation).  Allocation requests are exempt — see
    :meth:`not_allocation`.
    """
    _inherit = 'hr.holidays'

    # Mirrors of the configuration carried by the leave type (hr.holidays.status).
    is_before = fields.Boolean('Apply Double Validation', related='holiday_status_id.is_before')
    # The default must be a callable: ``datetime.now()`` would be evaluated a
    # single time when the model class is loaded, and every record created
    # afterwards would share that stale timestamp.
    current_date = fields.Datetime(string='current Date', default=fields.Datetime.now)
    diff = fields.Float('Before Number of Days', readonly=True, copy=False,
                        states={'draft': [('readonly', False)], 'confirm': [('readonly', False)]})
    val_before = fields.Float('nbr days before', related='holiday_status_id.val_before')

    def not_allocation(self):
        """Return True for leave requests (context ``default_type`` == 'remove').

        Allocation requests get None (falsy), which exempts them from the
        minimum-notice check.
        """
        type = self._context.get('default_type', False)
        if type == 'remove':
            return True

    @api.multi
    def _before_number_of_days(self):
        """Store in ``diff`` the notice (in days) between now and date_from."""
        for holiday in self:
            # per-record values: the original read self.* inside the loop,
            # which breaks on multi-record recordsets
            from_dt = holiday.date_from
            now_dt = holiday.current_date
            holiday.diff = holiday._get_number_of_days(now_dt, from_dt, holiday.employee_id.id)

    @api.onchange('date_from')
    def _onchange_date_from(self):
        """ If there are no date set for date_to, automatically set one 8 hours later than
            the date_from. Also update the number_of_days.
        """
        date_from = self.date_from
        now_dt = self.current_date
        # Compute and update the number of days before
        if (now_dt and date_from) and (now_dt <= date_from):
            self.diff = self._get_number_of_days(now_dt, date_from, self.employee_id.id)
        else:
            self.diff = 0

    @api.multi
    def action_draft(self):
        """Reset refused / to-approve requests back to draft, along with
        their linked requests (which are deleted after being reset)."""
        for holiday in self:
            if not holiday.can_reset:
                raise UserError(_('Only an HR Manager or the concerned employee can reset to draft.'))
            if holiday.state not in ['confirm', 'refuse']:
                raise UserError(_('Leave request state must be "Refused" or "To Approve" in order to reset to Draft.'))
            holiday.write({
                'state': 'draft',
                'manager_id': False,
                'manager_id2': False,
            })
            linked_requests = holiday.mapped('linked_request_ids')
            for linked_request in linked_requests:
                linked_request.action_draft()
            linked_requests.unlink()
        return True

    @api.multi
    def action_confirm(self):
        """Move draft requests to 'confirm', enforcing the minimum notice."""
        for holiday in self:
            if holiday.is_before and holiday.diff < holiday.val_before and self.not_allocation():
                raise UserError("demande de congé doit etre avant : %r" " " "jours" " " % holiday.val_before)
        if self.filtered(lambda holiday: holiday.state != 'draft'):
            raise UserError(_('Leave request must be in Draft state ("To Submit") in order to confirm it.'))
        return self.write({'state': 'confirm'})

    @api.multi
    def action_approve(self):
        # if double_validation: this method is the first approval approval
        # if not double_validation: this method calls action_validate() below
        for holiday in self:
            if holiday.is_before and holiday.diff < holiday.val_before and self.not_allocation():
                raise UserError("demande de congé doit etre avant : %r" " " "jours" " " % holiday.val_before)
        if not self.env.user.has_group('hr_holidays.group_hr_holidays_user'):
            raise UserError(_('Only an HR Officer or Manager can approve leave requests.'))
        manager = self.env['hr.employee'].search([('user_id', '=', self.env.uid)], limit=1)
        for holiday in self:
            if holiday.state != 'confirm':
                raise UserError(_('Leave request must be confirmed ("To Approve") in order to approve it.'))
            if holiday.double_validation:
                return holiday.write({'state': 'validate1', 'manager_id': manager.id if manager else False})
            else:
                holiday.action_validate()

    @api.multi
    def action_validate(self):
        """Final validation, after re-checking the minimum-notice rule."""
        self.write({'pending_approver': None})
        for holiday in self:
            if holiday.is_before and holiday.diff < holiday.val_before and self.not_allocation():
                raise UserError("demande de congé doit etre avant : %r" " " "jours" " " % holiday.val_before)
        super(HrHolidays, self).action_validate()

    @api.model
    def create(self, values):
        """ Override to avoid automatic logging of creation """
        employee_id = values.get('employee_id', False)
        # NOTE(review): on create ``self`` is an empty recordset, so
        # ``self.is_before`` is always falsy here and the guard never fires;
        # the effective checks happen in action_confirm/approve/validate.
        if self.is_before and self.diff < self.val_before and self.not_allocation():
            raise UserError("demande de congé doit etre avant : %r" " " "jours" " " % self.val_before)
        if not self._check_state_access_right(values):
            raise AccessError(_('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % values.get('state'))
        if not values.get('department_id'):
            values.update({'department_id': self.env['hr.employee'].browse(employee_id).department_id.id})
        holiday = super(HrHolidays, self.with_context(mail_create_nolog=True, mail_create_nosubscribe=True)).create(values)
        holiday.add_follower(employee_id)
        return holiday

    @api.multi
    def write(self, values):
        """Enforce the minimum-notice rule and access rights on updates."""
        employee_id = values.get('employee_id', False)
        # NOTE(review): reads singleton fields on ``self``; assumes writes
        # arrive one record at a time — confirm for batch writes.
        if self.is_before and self.diff < self.val_before and self.not_allocation():
            raise UserError("demande de congé doit etre avant : %r" " " "jours" " " % self.val_before)
        if not self._check_state_access_right(values):
            raise AccessError(_('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % values.get('state'))
        result = super(HrHolidays, self).write(values)
        self.add_follower(employee_id)
        return result

    @api.multi
    def _create_resource_leave(self):
        """ This method will create entry in resource calendar leave object at the time of holidays validated """
        for leave in self:
            if leave.is_before and leave.diff < leave.val_before and self.not_allocation():
                raise UserError("demande de congé doit etre avant : %r" " " "jours" " " % leave.val_before)
            self.env['resource.calendar.leaves'].create({
                'name': leave.name,
                'date_from': leave.date_from,
                'holiday_id': leave.id,
                'date_to': leave.date_to,
                'resource_id': leave.employee_id.resource_id.id,
                'calendar_id': leave.employee_id.resource_id.calendar_id.id
            })
        return True
| [
"guesmihela"
] | guesmihela |
79d3a9ba5b62c1237b2519311bfcb5ab91e2ae08 | 74e80f4d38032eb6c40c52907854deefad82fcd2 | /app_mvc/migrations/0001_initial.py | 69b1a4efa62e3f14eb4192235a11d3e3c5b9006f | [] | no_license | PinSz/MVC-Cofee_Comsci | 5263f2594ac9823cc40a3180e0ce484436f8b3ec | 23bf0219e499154848a6baa585c313483a60adca | refs/heads/master | 2020-06-16T10:17:12.359695 | 2019-07-06T12:31:51 | 2019-07-06T12:31:51 | 195,536,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-06-02 06:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 1.11 makemigrations).
    # Do not hand-edit after it has been applied; add a new migration instead.

    # First migration of the app: marks the start of the migration graph.
    initial = True
    # No dependencies on other apps' migrations.
    dependencies = [
    ]
    operations = [
        # Department: 2-char code plus a display name.
        migrations.CreateModel(
            name='Department',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('DeptCode', models.CharField(max_length=2)),
                ('DeptName', models.CharField(max_length=20)),
            ],
        ),
        # Employee: personal data plus a FK to Department (cascade delete).
        migrations.CreateModel(
            name='Employee',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('EmpNo', models.CharField(default='', max_length=3)),
                ('FName', models.CharField(max_length=20)),
                ('LName', models.CharField(max_length=20)),
                ('Sex', models.CharField(blank=True, choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
                ('Salary', models.IntegerField(default=0)),
                ('StartDate', models.DateField(blank=True)),
                ('published', models.BooleanField(default=True)),
                ('DeptCode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_mvc.Department')),
            ],
        ),
    ]
| [
"wuttinantsz@gmail.com"
] | wuttinantsz@gmail.com |
76c02fc854fd9add11653cb0b03442602595a47b | 7237fd4dcfcd62804abb8294aeeac127449974e9 | /net/__init__.py | a71d476ea123bd4f056d53b6d01379a52003d0c9 | [] | no_license | riya729/CRVOS | 6984a8540e057a351485217c33c6e4a4fcf012e1 | 5f68c0c590755d25447d3eddf2de44e6cf178b2f | refs/heads/main | 2023-01-03T02:20:48.389425 | 2020-11-01T04:20:17 | 2020-11-01T04:20:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | from .backbone import *
from .CRVOS_model import CRVOS
| [
"saswatsubhajyotimallick@gmail.com"
] | saswatsubhajyotimallick@gmail.com |
88199abd4462b61b8c1e468a0166393a1ea355c4 | 699cad5fee497cce94463decf1bf2b811e3fd244 | /06프로그램의 흐름 제어하기/if.py | 95d092e7f3d31f5adce1aa2a57ab88f03995c7b0 | [] | no_license | Jeonghwan-Yoo/brain_python3 | 91974019a29013abe8c9f9ed132c48b404259e2f | a22e870515e760aaa497cbc99305977cf2f01a3d | refs/heads/master | 2020-07-27T00:02:29.604848 | 2019-09-16T13:16:09 | 2019-09-16T13:16:09 | 208,802,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | import sys #파이썬 프로그램을 종료하는 exit()을 사용하기 위해
# Prompt the user for a number (the message text is Korean: "Enter a number:").
print('수를 입력하세요 : ')
a=int(input())
if a==0:
    print('0은 나눗셈에 이용할 수 없습니다.') # print a warning ("0 cannot be used for division"), then
    sys.exit(0) # terminate the program.
print('3 /', a, '=', 3/a) | [
"dwgbjdhks2@gmail.com"
] | dwgbjdhks2@gmail.com |
aa9171138de652dc51a2a0671c4687f5705b0780 | a4053d5e06608d6568f9591d807b3b1157b1c228 | /src/eggcounter/api.py | ade16c1136ed63326bf749992f0cca09ffb8350f | [] | no_license | david-furminieux/eggcounter | 1035500a5e00961fe019898b36f87990457f7027 | cc3e4c7e996cf83059fcfb972548b32981287499 | refs/heads/master | 2022-04-21T13:47:09.097730 | 2020-03-26T11:39:08 | 2020-03-26T11:39:08 | 250,238,900 | 0 | 0 | null | 2020-03-26T11:39:09 | 2020-03-26T11:23:57 | Python | UTF-8 | Python | false | false | 107 | py | class EggCounterExcption(Exception):
pass
class ConfigurationException(EggCounterExcption):
    """Raised when the egg counter is misconfigured.

    NOTE(review): the base class name "EggCounterExcption" is misspelled
    ("Exception"); renaming it would break existing importers, so it is
    referenced as-is here.
    """
    pass
| [
"david@galileo.rue33"
] | david@galileo.rue33 |
0da6b21adfab15c88a34f97366287cb8d7180489 | ca3cade826747bdec02bd8fa664ba42f85dfeea2 | /edi.py | 1fbc6a2c1c6d96cab1db84f8a1369f1b6c9a6e4f | [] | no_license | YO5ER/logXchecker | c4c62180845cef0410d3a8d1d0439e5fcd404563 | ad242e3f78ed2c22a65d6186e998359389000725 | refs/heads/master | 2021-01-11T01:50:10.942649 | 2016-10-06T20:09:54 | 2016-10-06T20:09:54 | 70,852,380 | 0 | 0 | null | 2016-10-13T22:13:48 | 2016-10-13T22:13:46 | null | UTF-8 | Python | false | false | 5,780 | py | import re
from collections import namedtuple
class Operator(object):
    """A ham radio operator (team) together with its parsed logs.

    Attributes
    ----------
    callsign : str
        The operator's callsign.
    info : dict
        Free-form metadata about the operator.
    logs : list
        Parsed :class:`Log` objects added via :meth:`add_log`.
    """

    def __init__(self, callsign):
        self.callsign = callsign
        # Created per instance: the original class-level ``info = {}`` was a
        # single dict shared by every Operator.
        self.info = {}
        self.logs = []

    def add_log(self, path):
        """Parse the log file at *path* and append it to ``self.logs``."""
        self.logs.append(Log(path))
class Log(object):
    """A single log: header information plus a list of LogQso records.

    The file at *path* is read and parsed on construction; query header
    fields with :meth:`get_field` and the parsed records via ``self.qsos``.
    """
    path = None
    log_content = None  # full content of the log, as a list of raw lines
    callsign = None
    maidenhead_locator = None
    band = None
    section = None
    # shape of a validated qso entry (kept for API compatibility)
    qsos_tuple = namedtuple('qso_tuple', ['linenr', 'qso', 'valid', 'error'])

    def __init__(self, path, checklog=False):
        # NOTE(review): *checklog* is accepted but not used yet — presumably
        # meant to mark check-logs; confirm before removing it.
        self.path = path
        self.log_content = self.read_file_content(self.path)
        self.qsos = []
        self.get_qsos()

    def read_file_content(self, path):
        """Return the content of the file at *path* as a list of lines.

        Any IOError (missing/unreadable file) propagates to the caller; the
        original ``except: raise`` handlers were no-ops and were removed.
        The *path* argument is now honoured instead of always re-reading
        ``self.path``.
        """
        with open(path, 'r') as f:
            content = f.readlines()
        return content

    def validate_log_content(self):
        """Validate the log header (not implemented yet)."""
        pass

    def get_field(self, field):
        """Return every value of header *field*, or None when absent.

        Matching is case-insensitive on ``FIELD=value`` lines; values are
        returned stripped, in file order.
        """
        if self.log_content is None:
            raise FileNotFoundError("Log content is not available")
        _field = str(field).upper() + '='
        value = [line.split('=', 1)[1].strip()
                 for line in self.log_content
                 if line.upper().startswith(_field)]
        return value or None

    def get_qsos(self):
        """Fill ``self.qsos`` with a LogQso for every QSO record line."""
        qso_record_start = "[QSORECORDS"
        qso_record_end = "[END;"
        qso_lines = []
        do_read_qso = False
        # collect (line number, stripped line) between the record markers
        for (index, line) in enumerate(self.log_content):
            if line.upper().startswith(qso_record_start):
                do_read_qso = True
                continue
            if line.upper().startswith(qso_record_end):
                do_read_qso = False
                continue
            if do_read_qso:
                qso_lines.append((index, line.strip()))
        # each LogQso validates itself on construction, so the separate
        # valid_qso_line() pre-check (whose result was discarded) is gone
        for qso in qso_lines:
            self.qsos.append(LogQso(qso[1], qso[0]))

    def dump_summary(self):
        """Output a log summary based on the output format (text, html...) — todo."""
        pass
class LogQso(object):
    """A single QSO record parsed from one .edi log line.

    The line is validated on construction; when valid it is parsed into
    ``self.qsoFields`` and ``valid_qso`` / ``error_message`` reflect the
    result.
    """
    regexMinimalQsoCheck = '(?P<date>.*?);(?P<hour>.*?);(?P<call>.*?);(?P<mode>.*?);' \
                           '(?P<rst_sent>.*?);(?P<nr_sent>.*?);(?P<rst_recv>.*?);(?P<nr_recv>.*?);' \
                           '(?P<exchange_recv>.*?);(?P<wwl>.*?);(?P<points>.*?);' \
                           '(?P<new_exchange>.*?);(?P<new_wwl>.*?);(?P<new_dxcc>.*?);(?P<duplicate_qso>.*?)'
    # raw string: avoids invalid-escape warnings for \d on Python 3
    regexMediumQsoCheck = r'^\d{6};\d{4};.*?;\d;\d{2,3};\d{2,4};\d{2,3};\d{2,4};.*?;.*?;.*?;.*?;.*?;.*?;.*?'
    qso_line_number = 0
    qso_line = None
    valid_qso = False
    error_message = None
    # template of the parsed fields; each instance gets its own copy
    qsoFields = {'date': None,
                 'hour': None,
                 'call': None,
                 'mode': None,
                 'rst_sent': None,
                 'nr_sent': None,
                 'rst_recv': None,
                 'nr_recv': None,
                 'exchange_recv': None,
                 'wwl': None,
                 'points': None,
                 'new_exchange': None,
                 'new_wwl': None,
                 'new_dxcc': None,
                 'duplicate_qso': None,
                 }

    def __init__(self, qso_line, qso_line_number):
        self.qso_line = qso_line
        self.qso_line_number = qso_line_number
        # own copy: writing into the class-level template (as the original
        # qso_parser did) leaked parsed values between LogQso instances
        self.qsoFields = dict(LogQso.qsoFields)
        self.error_message = self.valid_qso_line(qso_line) or None
        self.valid_qso = False if self.error_message else True
        if self.valid_qso:
            self.qso_parser()
            self.error_message = self.validate_qso() or None

    def qso_parser(self):
        """Parse the qso line into ``self.qsoFields`` based on the log format."""
        res = re.match(self.regexMinimalQsoCheck, self.qso_line)
        if res:
            for key in self.qsoFields.keys():
                self.qsoFields[key] = res.group(key)

    def validate_qso(self):
        """Validate the parsed qso against generic or contest rules (todo)."""
        pass

    @classmethod
    def valid_qso_line(cls, line):
        """Validate a single qso line from an .edi log.

        :param line: raw qso line
        :return: None when the line passes all checks, an error message otherwise
        """
        qso_min_line_lenght = 40
        if len(line) < qso_min_line_lenght:
            return 'QSO line is too short'
        res = re.match(cls.regexMinimalQsoCheck, line)
        if not res:
            return 'Minimal QSO checks didn\'t pass'
        res = re.match(cls.regexMediumQsoCheck, line)
        if not res:
            return 'QSO checks didn\'t pass'
        return None
class LogException(Exception):
    """Error raised while parsing a log, carrying the offending line number."""

    def __init__(self, message, line):
        # pass the message to Exception so str(exc) and exc.args are populated
        super(LogException, self).__init__(message)
        self.message = message
        self.line = line
| [
"cpsorin@gmail.com"
] | cpsorin@gmail.com |
fc91c17e532a9d7b10313aa5cda9af48b6579f20 | ec5797038dcf612b1066a671016c2d31dbc0316d | /Practice/files_exercise_more.py | a5875f6e356be41f39b8fa870ab08d77f911b19b | [] | no_license | elorisraeli/pythonProject1 | fdf6164df3bbce3a5dd6228b1c71ac1eaa6be200 | 6681d3dbdf7a3d38c150d7495df358118a30b9bb | refs/heads/master | 2023-08-29T04:56:22.251888 | 2021-10-14T16:31:12 | 2021-10-14T16:31:12 | 368,580,738 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | def read_file():
    # Counters over the whole file:
    count_chars = 0   # total characters in all words
    count_the = 0     # occurrences of the word 'the' (case-insensitive)
    count_end_e = 0   # words ending in 'e' (case-insensitive)
    count_less_4 = 0  # words shorter than 4 characters
    count_upper = 0   # upper-case ASCII letters
    # NOTE(review): hard-coded absolute path — only works on the author's machine.
    file = open(r'C:\Users\Elor Israeli\Desktop\מבוא למחשבים\homework/poem.txt', 'r', encoding='utf-8')
    for line in file:
        # echo the file while scanning it
        print(line, end='')
        words_regular = line.split()
        for original_word in words_regular:
            for char in original_word:
                # 65..90 are the ASCII codes of 'A'..'Z'
                if 64 < ord(char) < 91:  # (Upper case)
                    count_upper += 1
        # lower-cased copy so the word tests are case-insensitive
        words_lower = line.lower().split()
        for word in words_lower:
            count_chars += len(word)
            if word == 'the':
                count_the += 1
            if word[-1] == 'e':
                count_end_e += 1
            if len(word) < 4:
                count_less_4 += 1
    print(f"\n\nThe number of chars in file: {count_chars}", end='')
    print(f"\nThe number of the word 'the' in file: {count_the}", end='')
    print(f"\nThe number of words end with 'e' in file: {count_end_e}", end='')
    print(f"\nThe number of words less then 4 chars in file: {count_less_4}", end='')
    print(f"\nThe number of upper chars in file: {count_upper}", end='')
    file.close()
read_file()
| [
"53333654+elorisraeli@users.noreply.github.com"
] | 53333654+elorisraeli@users.noreply.github.com |
d212b119feedd836b1965727e519777fd8b95557 | fea44d5ca4e6c9b2c7950234718a4531d453849e | /sktime/forecasting/tests/test_all_forecasters.py | c528a23d1d8d1d4b7fe5fc87dd17cbf747f4fa26 | [
"BSD-3-Clause"
] | permissive | mlgig/sktime | 288069ab8c9b0743113877032dfca8cf1c2db3fb | 19618df351a27b77e3979efc191e53987dbd99ae | refs/heads/master | 2023-03-07T20:22:48.553615 | 2023-02-19T18:09:12 | 2023-02-19T18:09:12 | 234,604,691 | 1 | 0 | BSD-3-Clause | 2020-01-17T17:50:12 | 2020-01-17T17:50:11 | null | UTF-8 | Python | false | false | 28,833 | py | # -*- coding: utf-8 -*-
"""Tests for BaseForecaster API points.
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""
__author__ = ["mloning", "kejsitake", "fkiraly"]
import numpy as np
import pandas as pd
import pytest
from sktime.datatypes import check_is_mtype
from sktime.datatypes._utilities import get_cutoff
from sktime.exceptions import NotFittedError
from sktime.forecasting.base._delegate import _DelegatedForecaster
from sktime.forecasting.model_selection import (
ExpandingWindowSplitter,
SlidingWindowSplitter,
temporal_train_test_split,
)
from sktime.forecasting.tests._config import (
TEST_ALPHAS,
TEST_FHS,
TEST_OOS_FHS,
TEST_STEP_LENGTHS_INT,
TEST_WINDOW_LENGTHS_INT,
VALID_INDEX_FH_COMBINATIONS,
)
from sktime.performance_metrics.forecasting import mean_absolute_percentage_error
from sktime.tests.test_all_estimators import BaseFixtureGenerator, QuickTester
from sktime.utils._testing.forecasting import (
_assert_correct_columns,
_assert_correct_pred_time_index,
_get_expected_index_for_update_predict,
_get_n_columns,
_make_fh,
make_forecasting_problem,
)
from sktime.utils._testing.series import _make_series
from sktime.utils.validation.forecasting import check_fh
# get all forecasters
FH0 = 1
INVALID_X_INPUT_TYPES = [list("foo"), tuple()]
INVALID_y_INPUT_TYPES = [list("bar"), tuple()]
# testing data
y = make_forecasting_problem()
y_train, y_test = temporal_train_test_split(y, train_size=0.75)
# names for index/fh combinations to display in tests
index_fh_comb_names = [f"{x[0]}-{x[1]}-{x[2]}" for x in VALID_INDEX_FH_COMBINATIONS]
pytest_skip_msg = (
"ForecastingHorizon with timedelta values "
"is currently experimental and not supported everywhere"
)
class ForecasterFixtureGenerator(BaseFixtureGenerator):
    """Fixture generator for forecasting tests.
    Fixtures parameterized
    ----------------------
    estimator_class: estimator inheriting from BaseObject
        ranges over all estimator classes not excluded by EXCLUDED_TESTS
    estimator_instance: instance of estimator inheriting from BaseObject
        ranges over all estimator classes not excluded by EXCLUDED_TESTS
        instances are generated by create_test_instance class method
    scenario: instance of TestScenario
        ranges over all scenarios returned by retrieve_scenarios
    """
    # note: this should be separate from TestAllForecasters
    # additional fixtures, parameters, etc should be added here
    # TestAllForecasters should contain the tests only
    estimator_type_filter = "forecaster"
    # order in which fixtures are generated; later fixtures may depend on
    # earlier ones (e.g. step_length depends on update_params)
    fixture_sequence = [
        "estimator_class",
        "estimator_instance",
        "n_columns",
        "scenario",
        # "fh",
        "update_params",
        "step_length",
    ]
    def _generate_n_columns(self, test_name, **kwargs):
        """Return number of columns for series generation in positive test cases.
        Fixtures parameterized
        ----------------------
        n_columns: int
            1 for univariate forecasters, 2 for multivariate forecasters
            ranges over 1 and 2 for forecasters which are both uni/multivariate
        """
        # class fixtures expose tags at class level, instance fixtures via get_tag
        if "estimator_class" in kwargs.keys():
            scitype_tag = kwargs["estimator_class"].get_class_tag("scitype:y")
        elif "estimator_instance" in kwargs.keys():
            scitype_tag = kwargs["estimator_instance"].get_tag("scitype:y")
        else:
            # no estimator fixture available: nothing to parameterize over
            return []
        n_columns_list = _get_n_columns(scitype_tag)
        # suppress the fixture-name suffix when there is only one variant
        if len(n_columns_list) == 1:
            n_columns_names = ["" for x in n_columns_list]
        else:
            n_columns_names = [f"y:{x}cols" for x in n_columns_list]
        return n_columns_list, n_columns_names
    def _generate_update_params(self, test_name, **kwargs):
        """Return update_params for update calls.
        Fixtures parameterized
        ----------------------
        update_params: bool
            whether to update parameters in update; ranges over True, False
        """
        return [True, False], ["update_params=True", "update_params=False"]
    def _generate_step_length(self, test_name, **kwargs):
        """Return step length for window.
        Fixtures parameterized
        ----------------------
        step_length: int
            1 if update_params=True; TEST_STEP_LENGTH_INT if update_params=False
        """
        update_params = kwargs["update_params"]
        # when parameters are updated at each step, only step length 1 is generated
        if update_params:
            return [1], [""]
        else:
            return TEST_STEP_LENGTHS_INT, [f"step={a}" for a in TEST_STEP_LENGTHS_INT]
class TestAllForecasters(ForecasterFixtureGenerator, QuickTester):
"""Module level tests for all sktime forecasters."""
def test_get_fitted_params(self, estimator_instance, scenario):
"""Test get_fitted_params."""
scenario.run(estimator_instance, method_sequence=["fit"])
try:
params = estimator_instance.get_fitted_params()
assert isinstance(params, dict)
except NotImplementedError:
pass
# todo: should these not be checked in test_all_estimators?
def test_raises_not_fitted_error(self, estimator_instance):
"""Test that calling post-fit methods before fit raises error."""
# We here check extra method of the forecaster API: update and update_predict.
with pytest.raises(NotFittedError):
estimator_instance.update(y_test, update_params=False)
with pytest.raises(NotFittedError):
cv = SlidingWindowSplitter(fh=1, window_length=1, start_with_window=False)
estimator_instance.update_predict(y_test, cv=cv)
try:
with pytest.raises(NotFittedError):
estimator_instance.get_fitted_params()
except NotImplementedError:
pass
def test_y_multivariate_raises_error(self, estimator_instance):
"""Test that wrong y scitype raises error (uni/multivariate not supported)."""
if estimator_instance.get_tag("scitype:y") == "multivariate":
y = _make_series(n_columns=1)
with pytest.raises(ValueError, match=r"two or more variables"):
estimator_instance.fit(y, fh=FH0)
if estimator_instance.get_tag("scitype:y") in ["univariate", "both"]:
# this should pass since "both" allows any number of variables
# and "univariate" automatically vectorizes, behaves multivariate
pass
# todo: should these not be "negative scenarios", tested in test_all_estimators?
@pytest.mark.parametrize("y", INVALID_y_INPUT_TYPES)
def test_y_invalid_type_raises_error(self, estimator_instance, y):
"""Test that invalid y input types raise error."""
with pytest.raises(TypeError, match=r"type"):
estimator_instance.fit(y, fh=FH0)
# todo: should these not be "negative scenarios", tested in test_all_estimators?
@pytest.mark.parametrize("X", INVALID_X_INPUT_TYPES)
def test_X_invalid_type_raises_error(self, estimator_instance, n_columns, X):
"""Test that invalid X input types raise error."""
y_train = _make_series(n_columns=n_columns)
try:
with pytest.raises(TypeError, match=r"type"):
estimator_instance.fit(y_train, X, fh=FH0)
except NotImplementedError as e:
msg = str(e).lower()
assert "exogenous" in msg
# todo: refactor with scenarios. Need to override fh and scenario args for this.
@pytest.mark.parametrize(
"index_fh_comb", VALID_INDEX_FH_COMBINATIONS, ids=index_fh_comb_names
)
@pytest.mark.parametrize("fh_int", TEST_FHS, ids=[f"fh={fh}" for fh in TEST_FHS])
def test_predict_time_index(
self, estimator_instance, n_columns, index_fh_comb, fh_int
):
"""Check that predicted time index matches forecasting horizon.
Tests predicted time index for predict and predict_residuals.
"""
index_type, fh_type, is_relative = index_fh_comb
if fh_type == "timedelta":
return None
# todo: ensure check_estimator works with pytest.skip like below
# pytest.skip(
# "ForecastingHorizon with timedelta values "
# "is currently experimental and not supported everywhere"
# )
y_train = _make_series(
n_columns=n_columns, index_type=index_type, n_timepoints=50
)
cutoff = get_cutoff(y_train, return_index=True)
fh = _make_fh(cutoff, fh_int, fh_type, is_relative)
try:
estimator_instance.fit(y_train, fh=fh)
y_pred = estimator_instance.predict()
_assert_correct_pred_time_index(y_pred.index, cutoff, fh=fh_int)
_assert_correct_columns(y_pred, y_train)
y_test = _make_series(
n_columns=n_columns, index_type=index_type, n_timepoints=len(y_pred)
)
y_test.index = y_pred.index
y_res = estimator_instance.predict_residuals(y_test)
_assert_correct_pred_time_index(y_res.index, cutoff, fh=fh)
except NotImplementedError:
pass
@pytest.mark.parametrize(
"index_fh_comb", VALID_INDEX_FH_COMBINATIONS, ids=index_fh_comb_names
)
@pytest.mark.parametrize(
"fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
)
def test_predict_time_index_with_X(
self, estimator_instance, n_columns, index_fh_comb, fh_int_oos
):
"""Check that predicted time index matches forecasting horizon."""
index_type, fh_type, is_relative = index_fh_comb
if fh_type == "timedelta":
return None
# todo: ensure check_estimator works with pytest.skip like below
# pytest.skip(
# "ForecastingHorizon with timedelta values "
# "is currently experimental and not supported everywhere"
# )
z, X = make_forecasting_problem(index_type=index_type, make_X=True)
# Some estimators may not support all time index types and fh types, hence we
# need to catch NotImplementedErrors.
y = _make_series(n_columns=n_columns, index_type=index_type)
cutoff = get_cutoff(y.iloc[: len(y) // 2], return_index=True)
fh = _make_fh(cutoff, fh_int_oos, fh_type, is_relative)
y_train, _, X_train, X_test = temporal_train_test_split(y, X, fh=fh)
try:
estimator_instance.fit(y_train, X_train, fh=fh)
y_pred = estimator_instance.predict(X=X_test)
cutoff = get_cutoff(y_train, return_index=True)
_assert_correct_pred_time_index(y_pred.index, cutoff, fh)
_assert_correct_columns(y_pred, y_train)
except NotImplementedError:
pass
@pytest.mark.parametrize(
"index_fh_comb", VALID_INDEX_FH_COMBINATIONS, ids=index_fh_comb_names
)
def test_predict_time_index_in_sample_full(
self, estimator_instance, n_columns, index_fh_comb
):
"""Check that predicted time index equals fh for full in-sample predictions."""
index_type, fh_type, is_relative = index_fh_comb
if fh_type == "timedelta":
return None
# todo: ensure check_estimator works with pytest.skip like below
# pytest.skip(
# "ForecastingHorizon with timedelta values "
# "is currently experimental and not supported everywhere"
# )
y_train = _make_series(n_columns=n_columns, index_type=index_type)
cutoff = get_cutoff(y_train, return_index=True)
steps = -np.arange(len(y_train))
fh = _make_fh(cutoff, steps, fh_type, is_relative)
try:
estimator_instance.fit(y_train, fh=fh)
y_pred = estimator_instance.predict()
_assert_correct_pred_time_index(y_pred.index, cutoff, fh)
except NotImplementedError:
pass
def test_predict_series_name_preserved(self, estimator_instance):
"""Test that fit/predict preserves name attribute and type of pd.Series."""
# skip this test if estimator needs multivariate data
# because then it does not take pd.Series at all
if estimator_instance.get_tag("scitype:y") == "multivariate":
return None
y_train = _make_series(n_timepoints=15)
y_train.name = "foo"
estimator_instance.fit(y_train, fh=[1, 2, 3])
y_pred = estimator_instance.predict()
_assert_correct_columns(y_pred, y_train)
def _check_pred_ints(
self, pred_ints: pd.DataFrame, y_train: pd.Series, y_pred: pd.Series, fh_int
):
# make iterable
if isinstance(pred_ints, pd.DataFrame):
pred_ints = [pred_ints]
for pred_int in pred_ints:
# check column naming convention
assert list(pred_int.columns) == ["lower", "upper"]
# check time index
cutoff = get_cutoff(y_train, return_index=True)
_assert_correct_pred_time_index(pred_int.index, cutoff, fh_int)
# check values
assert np.all(pred_int["upper"] >= pred_int["lower"])
# check if errors are weakly monotonically increasing
# pred_errors = y_pred - pred_int["lower"]
# # assert pred_errors.is_mononotic_increasing
# assert np.all(
# pred_errors.values[1:].round(4) >= pred_errors.values[:-1].round(4)
# )
@pytest.mark.parametrize("index_type", [None, "range"])
@pytest.mark.parametrize(
"coverage", TEST_ALPHAS, ids=[f"alpha={a}" for a in TEST_ALPHAS]
)
@pytest.mark.parametrize(
"fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
)
def test_predict_interval(
self, estimator_instance, n_columns, index_type, fh_int_oos, coverage
):
"""Check prediction intervals returned by predict.
Arguments
---------
estimator_instance : BaseEstimator class descendant instance, forecaster to test
n_columns : number of columns for the test data
index_type : index type of the test data
fh_int_oos : forecasting horizon to test the forecaster at, all out of sample
coverage: float, coverage at which to make prediction intervals
Raises
------
AssertionError - if Forecaster test instance has "capability:pred_int"
and pred. int are not returned correctly when asking predict for them
AssertionError - if Forecaster test instance does not have "capability:pred_int"
and no NotImplementedError is raised when asking predict for pred.int
"""
y_train = _make_series(n_columns=n_columns, index_type=index_type)
estimator_instance.fit(y_train, fh=fh_int_oos)
if estimator_instance.get_tag("capability:pred_int"):
pred_ints = estimator_instance.predict_interval(
fh_int_oos, coverage=coverage
)
valid, msg, _ = check_is_mtype(
pred_ints, mtype="pred_interval", scitype="Proba", return_metadata=True
) # type: ignore
assert valid, msg
else:
with pytest.raises(NotImplementedError, match="prediction intervals"):
estimator_instance.predict_interval(fh_int_oos, coverage=coverage)
def _check_predict_quantiles(
self, pred_quantiles: pd.DataFrame, y_train: pd.Series, fh, alpha
):
# check if the input is a dataframe
assert isinstance(pred_quantiles, pd.DataFrame)
# check time index (also checks forecasting horizon is more than one element)
cutoff = get_cutoff(y_train, return_index=True)
_assert_correct_pred_time_index(pred_quantiles.index, cutoff, fh)
# Forecasters where name of variables do not exist
# In this cases y_train is series - the upper level in dataframe == 'Quantiles'
if isinstance(y_train, pd.Series):
expected = pd.MultiIndex.from_product([["Quantiles"], [alpha]])
else:
# multiply variables with all alpha values
expected = pd.MultiIndex.from_product([y_train.columns, [alpha]])
found = pred_quantiles.columns.to_flat_index()
assert all(expected == found)
if isinstance(alpha, list):
# sorts the columns that correspond to alpha values
pred_quantiles = pred_quantiles.reindex(
columns=pred_quantiles.columns.reindex(sorted(alpha), level=1)[0]
)
# check if values are monotonically increasing
for var in pred_quantiles.columns.levels[0]:
for index in range(len(pred_quantiles.index)):
assert pred_quantiles[var].iloc[index].is_monotonic_increasing
@pytest.mark.parametrize(
"alpha", TEST_ALPHAS, ids=[f"alpha={a}" for a in TEST_ALPHAS]
)
@pytest.mark.parametrize(
"fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
)
def test_predict_quantiles(self, estimator_instance, n_columns, fh_int_oos, alpha):
"""Check prediction quantiles returned by predict.
Arguments
---------
Forecaster: BaseEstimator class descendant, forecaster to test
fh: ForecastingHorizon, fh at which to test prediction
alpha: float, alpha at which to make prediction intervals
Raises
------
AssertionError - if Forecaster test instance has "capability:pred_int"
and pred. int are not returned correctly when asking predict for them
AssertionError - if Forecaster test instance does not have "capability:pred_int"
and no NotImplementedError is raised when asking predict for pred.int
"""
y_train = _make_series(n_columns=n_columns)
estimator_instance.fit(y_train, fh=fh_int_oos)
try:
quantiles = estimator_instance.predict_quantiles(fh=fh_int_oos, alpha=alpha)
self._check_predict_quantiles(quantiles, y_train, fh_int_oos, alpha)
except NotImplementedError:
pass
def test_pred_int_tag(self, estimator_instance):
"""Checks whether the capability:pred_int tag is correctly set.
Arguments
---------
estimator_instance : instance of BaseForecaster
Raises
------
ValueError - if capability:pred_int is True, but neither
predict_interval nor predict_quantiles have implemented content
this can be by direct implementation of _predict_interval/_predict_quantiles
or by defaulting to each other and/or _predict_proba
"""
f = estimator_instance
# we skip the _DelegatedForecaster, since it implements delegation methods
# which may look like the method is implemented, but in fact it is not
if isinstance(f, _DelegatedForecaster):
return None
# check which methods are implemented
implements_interval = f._has_implementation_of("_predict_interval")
implements_quantiles = f._has_implementation_of("_predict_quantiles")
implements_proba = f._has_implementation_of("_predict_proba")
pred_int_works = implements_interval or implements_quantiles or implements_proba
if not pred_int_works and f.get_class_tag("capability:pred_int", False):
raise ValueError(
f"{type(f).__name__} does not implement probabilistic forecasting, "
'but "capability:pred_int" flag has been set to True incorrectly. '
'The flag "capability:pred_int" should instead be set to False.'
)
if pred_int_works and not f.get_class_tag("capability:pred_int", False):
raise ValueError(
f"{type(f).__name__} does implement probabilistic forecasting, "
'but "capability:pred_int" flag has been set to False incorrectly. '
'The flag "capability:pred_int" should instead be set to True.'
)
    @pytest.mark.parametrize(
        "fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
    )
    def test_score(self, estimator_instance, n_columns, fh_int_oos):
        """Check score method.

        Verifies that score() equals the non-symmetric MAPE between the test
        series at the forecast horizon and the point forecasts.
        """
        y = _make_series(n_columns=n_columns)
        y_train, y_test = temporal_train_test_split(y)
        estimator_instance.fit(y_train, fh=fh_int_oos)
        y_pred = estimator_instance.predict()
        fh_idx = check_fh(fh_int_oos).to_indexer()  # get zero based index
        expected = mean_absolute_percentage_error(
            y_test.iloc[fh_idx], y_pred, symmetric=False
        )
        # compare expected score with actual score
        # NOTE(review): exact float equality assumes score() evaluates the
        # identical expression internally — consider a tolerance if this flakes
        actual = estimator_instance.score(y_test.iloc[fh_idx], fh=fh_int_oos)
        assert actual == expected
    @pytest.mark.parametrize(
        "fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
    )
    def test_update_predict_single(
        self, estimator_instance, n_columns, fh_int_oos, update_params
    ):
        """Check correct time index of update-predict.

        Fits on the training fold, then calls update_predict_single with the
        test fold and checks that the returned forecast carries the expected
        time index and the same columns as the training data.
        """
        y = _make_series(n_columns=n_columns)
        y_train, y_test = temporal_train_test_split(y)
        estimator_instance.fit(y_train, fh=fh_int_oos)
        y_pred = estimator_instance.update_predict_single(
            y_test, update_params=update_params
        )
        # expected prediction index is validated relative to the training cutoff
        cutoff = get_cutoff(y_train, return_index=True)
        _assert_correct_pred_time_index(y_pred.index, cutoff, fh_int_oos)
        _assert_correct_columns(y_pred, y_train)
    @pytest.mark.parametrize(
        "fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
    )
    @pytest.mark.parametrize("initial_window", TEST_WINDOW_LENGTHS_INT)
    def test_update_predict_predicted_index(
        self,
        estimator_instance,
        n_columns,
        fh_int_oos,
        step_length,
        initial_window,
        update_params,
    ):
        """Check predicted index in update_predict.

        Rolls the forecaster over the test fold with an expanding-window
        splitter and checks that the concatenated forecasts carry exactly the
        index computed by _get_expected_index_for_update_predict.
        """
        y = _make_series(n_columns=n_columns, all_positive=True, index_type="datetime")
        y_train, y_test = temporal_train_test_split(y)
        # cv defines how update_predict rolls over y_test
        cv = ExpandingWindowSplitter(
            fh=fh_int_oos,
            initial_window=initial_window,
            step_length=step_length,
        )
        estimator_instance.fit(y_train, fh=fh_int_oos)
        y_pred = estimator_instance.update_predict(
            y_test, cv=cv, update_params=update_params
        )
        assert isinstance(y_pred, (pd.Series, pd.DataFrame))
        expected = _get_expected_index_for_update_predict(
            y_test, fh_int_oos, step_length, initial_window
        )
        actual = y_pred.index
        np.testing.assert_array_equal(actual, expected)
    def test__y_and_cutoff(self, estimator_instance, n_columns):
        """Check that _y and cutoff are set by fit and extended by update."""
        f = estimator_instance
        y = _make_series(n_columns=n_columns)
        y_train, y_test = temporal_train_test_split(y, train_size=0.75)
        # _y and cutoff must be empty right after construction
        assert f._y is None
        assert f.cutoff is None
        # fit must populate _y and set cutoff to the end of the training data
        f.fit(y_train, fh=FH0)
        # TODO: also assert the type of f._y; currently skipped because it is
        # a pd.Series for univariate but a pd.DataFrame for multivariate data —
        # needs a check that covers both representations
        assert len(f._y) > 0
        assert f.cutoff == y_train.index[-1]
        # check data pointers: stored index must match the training index
        np.testing.assert_array_equal(f._y.index, y_train.index)
        # update must append the new observations and advance the cutoff
        f.update(y_test, update_params=False)
        np.testing.assert_array_equal(
            f._y.index, np.append(y_train.index, y_test.index)
        )
        assert f.cutoff == y_test.index[-1]
def test__y_when_refitting(self, estimator_instance, n_columns):
"""Test that _y is updated when forecaster is refitted."""
y_train = _make_series(n_columns=n_columns)
estimator_instance.fit(y_train, fh=FH0)
estimator_instance.fit(y_train[3:], fh=FH0)
# using np.squeeze to make the test flexible to shape differeces like
# (50,) and (50, 1)
assert np.all(np.squeeze(estimator_instance._y) == np.squeeze(y_train[3:]))
    def test_fh_attribute(self, estimator_instance, n_columns):
        """Check fh attribute and error handling if two different fh are passed."""
        f = estimator_instance
        y_train = _make_series(n_columns=n_columns)
        f.fit(y_train, fh=FH0)
        np.testing.assert_array_equal(f.fh, FH0)
        # fh given in fit must persist through predict, whether predict is
        # called without fh or with the same fh again
        f.predict()
        np.testing.assert_array_equal(f.fh, FH0)
        f.predict(FH0)
        np.testing.assert_array_equal(f.fh, FH0)
        # if fh is not required in fit, test this again with fh passed late
        if not f.get_tag("requires-fh-in-fit"):
            f.fit(y_train)
            f.predict(FH0)
            np.testing.assert_array_equal(f.fh, FH0)
def test_fh_not_passed_error_handling(self, estimator_instance, n_columns):
"""Check that not passing fh in fit/predict raises correct error."""
f = estimator_instance
y_train = _make_series(n_columns=n_columns)
if f.get_tag("requires-fh-in-fit"):
# if fh required in fit, should raise error if not passed in fit
with pytest.raises(ValueError):
f.fit(y_train)
else:
# if fh not required in fit, should raise error if not passed until predict
f.fit(y_train)
with pytest.raises(ValueError):
f.predict()
    def test_different_fh_in_fit_and_predict_error_handling(
        self, estimator_instance, n_columns
    ):
        """Check that fh different in fit and predict raises correct error.

        Only applies to forecasters that require fh in fit; for those, the
        fh fixed at fit time may not be changed at predict time.
        """
        f = estimator_instance
        # if fh is not required in fit, can be overwritten, should not raise error
        if not f.get_tag("requires-fh-in-fit"):
            return None
        y_train = _make_series(n_columns=n_columns)
        f.fit(y_train, fh=FH0)
        np.testing.assert_array_equal(f.fh, FH0)
        # changing fh during predict should raise error
        with pytest.raises(ValueError):
            f.predict(fh=FH0 + 1)
    def test_hierarchical_with_exogeneous(self, estimator_instance, n_columns):
        """Check that hierarchical forecasting works, also see bug #3961.

        Arguments
        ---------
        estimator_instance : instance of BaseForecaster
        n_columns : number of columns, of the endogeneous data y_train

        Raises
        ------
        Exception - if fit/predict does not complete without error
        AssertionError - if forecast is not expected mtype pd_multiindex_hier,
            and does not have expected row and column indices
        """
        from sktime.datatypes import check_is_mtype
        from sktime.datatypes._utilities import get_window
        from sktime.utils._testing.hierarchical import _make_hierarchical
        # endogeneous panel: 2x4 hierarchy, 22 time points per series
        y_train = _make_hierarchical(
            hierarchy_levels=(2, 4),
            n_columns=n_columns,
            min_timepoints=22,
            max_timepoints=22,
            index_type="period",
        )
        # exogeneous panel covers 24 time points, i.e. 2 more than y_train
        X = _make_hierarchical(
            hierarchy_levels=(2, 4),
            n_columns=2,
            min_timepoints=24,
            max_timepoints=24,
            index_type="period",
        )
        X.columns = ["foo", "bar"]
        # split X into a training window and a 2-period test window
        # (see sktime.datatypes._utilities.get_window for lag/window semantics)
        X_train = get_window(X, lag=2)
        X_test = get_window(X, window_length=2)
        fh = [1, 2]
        estimator_instance.fit(y=y_train, X=X_train, fh=fh)
        y_pred = estimator_instance.predict(X=X_test)
        assert isinstance(y_pred, pd.DataFrame)
        assert check_is_mtype(y_pred, "pd_multiindex_hier")
        msg = (
            "returned columns after predict are not as expected. "
            f"expected: {y_train.columns}. Found: {y_pred.columns}"
        )
        assert np.all(y_pred.columns == y_train.columns), msg
        # check consistency of forecast hierarchy with training data
        # some forecasters add __total levels, e.g., ReconcilerForecaster
        # if = not such a forecaster; else = levels are added
        if len(y_pred.index) == len(X_test.index):
            # the indices should be equal iff no levels are added
            assert np.all(y_pred.index == X_test.index)
        else:
            # if levels are added, all expected levels and times should be contained
            assert set(X_test.index).issubset(y_pred.index)
| [
"noreply@github.com"
] | mlgig.noreply@github.com |
91ff95988bce1d58997328ad6d6def469c878d07 | 452c33c0622ec36e93e6ff6637533a15a067a8a4 | /samples/client/petstore/python-experimental/petstore_api/models/outer_composite.py | f3887c8a3267c6a6532d498e3de2a32c135c4da3 | [
"Apache-2.0"
] | permissive | eric-erki/openapi-generator | 40c4294433bada9f693aca0c32326609e2234f9c | 0ea1ead59e41e4e8a959235dc8234d44447a9658 | refs/heads/master | 2023-01-07T03:33:36.315459 | 2019-09-20T18:13:33 | 2019-09-20T18:13:33 | 209,955,560 | 1 | 3 | Apache-2.0 | 2023-01-04T10:58:25 | 2019-09-21T09:09:49 | Java | UTF-8 | Python | false | false | 4,876 | py | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class OuterComposite(object):
    """Model class for the ``OuterComposite`` OpenAPI schema.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.

    Attributes:
      openapi_types (dict): attribute name -> attribute type
      attribute_map (dict): attribute name -> json key in definition
    """

    openapi_types = {
        'my_number': 'float',
        'my_string': 'str',
        'my_boolean': 'bool',
    }

    attribute_map = {
        'my_number': 'my_number',  # noqa: E501
        'my_string': 'my_string',  # noqa: E501
        'my_boolean': 'my_boolean',  # noqa: E501
    }

    def __init__(self, my_number=None, my_string=None, my_boolean=None):  # noqa: E501
        """OuterComposite - a model defined in OpenAPI

        Keyword Args:
            my_number (float): [optional]  # noqa: E501
            my_string (str): [optional]  # noqa: E501
            my_boolean (bool): [optional]  # noqa: E501
        """
        self._my_number = None
        self._my_string = None
        self._my_boolean = None
        self.discriminator = None
        # route initial values through the property setters, skipping None
        if my_number is not None:
            self.my_number = my_number
        if my_string is not None:
            self.my_string = my_string
        if my_boolean is not None:
            self.my_boolean = my_boolean

    @property
    def my_number(self):
        """Gets the my_number of this OuterComposite.  # noqa: E501

        :return: The my_number of this OuterComposite.  # noqa: E501
        :rtype: float
        """
        return self._my_number

    @my_number.setter
    def my_number(self, my_number):
        """Sets the my_number of this OuterComposite.

        :param my_number: The my_number of this OuterComposite.  # noqa: E501
        :type: float
        """
        self._my_number = my_number

    @property
    def my_string(self):
        """Gets the my_string of this OuterComposite.  # noqa: E501

        :return: The my_string of this OuterComposite.  # noqa: E501
        :rtype: str
        """
        return self._my_string

    @my_string.setter
    def my_string(self, my_string):
        """Sets the my_string of this OuterComposite.

        :param my_string: The my_string of this OuterComposite.  # noqa: E501
        :type: str
        """
        self._my_string = my_string

    @property
    def my_boolean(self):
        """Gets the my_boolean of this OuterComposite.  # noqa: E501

        :return: The my_boolean of this OuterComposite.  # noqa: E501
        :rtype: bool
        """
        return self._my_boolean

    @my_boolean.setter
    def my_boolean(self, my_boolean):
        """Sets the my_boolean of this OuterComposite.

        :param my_boolean: The my_boolean of this OuterComposite.  # noqa: E501
        :type: bool
        """
        self._my_boolean = my_boolean

    def to_dict(self):
        """Returns the model properties as a dict"""

        def _convert(value):
            # recursively serialize nested models inside lists and dicts
            if isinstance(value, list):
                return [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            return value

        return {attr: _convert(getattr(self, attr)) for attr in self.openapi_types}

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, OuterComposite):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"wing328hk@gmail.com"
] | wing328hk@gmail.com |
ba497dd3afdf87eae4b1e1d9fa84bbe788335f77 | 385ed58325dd0cc75bdb9fd3e61c5e005f7a4f28 | /source/hall/src/hall/entity/hallfree.py | 63e7e839d8986e8730bf43df1ef165e4c0acc70a | [] | no_license | csirui/hall37 | 17dfa4e4f1f8bf719d0c11ac7738fa4c14fd06db | 5c4eb4b2bf57bbbee4731470c830d8d81915d603 | refs/heads/master | 2021-09-04T03:55:12.460035 | 2018-01-15T15:12:30 | 2018-01-15T15:12:30 | 117,560,615 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,420 | py | # -*- coding=utf-8 -*-
from sre_compile import isstring
from datetime import datetime
import freetime.util.log as ftlog
import poker.entity.events.tyeventbus as pkeventbus
from hall.entity import hallconf, hallpopwnd, datachangenotify
from hall.entity.hallconf import HALL_GAMEID
from hall.entity.hallusercond import UserConditionRegister
from poker.entity.biz.exceptions import TYBizConfException
from poker.entity.events.tyevent import EventConfigure, ItemCountChangeEvent
class HallFree(object):
    """Configuration entry describing one item of the hall "free" list."""

    def __init__(self):
        self.freeItemId = None  # unique id of this free item
        self.iconRes = None  # icon resource shown by the client
        self.itemName = None  # text displayed on the client-side icon
        self.states = []  # list of HallFreeState decoded from config

    def decodeFromDict(self, d):
        """Populate this item from the config dict ``d`` and return self."""
        self.freeItemId = d.get('freeItemId')
        self.iconRes = d.get('iconRes')
        self.itemName = d.get("itemName", "")
        self.states = []
        for state in d.get('states', []):
            self.states.append(HallFreeState().decodeFromDict(state))
        return self
class HallFreeState(object):
    """One display state of a free item: text, button, flags, conditions."""

    def __init__(self):
        # str: description text
        self.desc = ''
        # str: button caption
        self.btnText = ''
        # bool: 'hasMark' flag from config (defaults to False)
        self.hasMark = False
        # bool: 'enable' flag from config (defaults to True)
        self.enable = True
        # bool: 'visible' flag from config (defaults to True)
        self.visible = True
        # list of user conditions decoded via UserConditionRegister
        self.conditionList = None
        # list of todotask factories decoded via hallpopwnd
        self.todotaskList = None

    def decodeFromDict(self, d):
        """Populate this state from the config dict ``d`` and return self."""
        self.desc = d.get('desc', '')
        self.btnText = d.get('btnText', '')
        self.hasMark = d.get('hasMark', False)
        self.enable = d.get('enable', True)
        self.visible = d.get('visible', True)
        self.conditionList = UserConditionRegister.decodeList(d.get('conditions', []))
        self.todotaskList = []
        for todotaskDict in d.get('todotasks', []):
            self.todotaskList.append(hallpopwnd.decodeTodotaskFactoryByDict(todotaskDict))
        return self
class HallFreeTemplate(object):
    """A named template grouping a subset of the configured free items."""

    def __init__(self):
        self.name = None
        self.freeItems = None

    def decodeFromDict(self, d, freeItemMap):
        """Populate this template from config dict ``d`` and return self.

        ``freeItemMap`` maps freeItemId -> HallFree; ids not present in the
        map are silently skipped.

        Raises TYBizConfException if 'name' is missing or not a string.
        """
        self.name = d.get('name')
        if not isstring(self.name) or not self.name:
            raise TYBizConfException(d, 'HallFreeTemplate.name must be not empty string')
        self.freeItems = []
        for itemId in d.get('freeItems', []):
            # use the "in" operator instead of dict.has_key: equivalent on
            # Python 2 and still valid on Python 3, where has_key was removed
            if itemId in freeItemMap:
                self.freeItems.append(freeItemMap[itemId])
        return self
# module initialization guard, set by _initialize()
_inited = False
# key=freeItemId, value=HallFree (decoded from the 'freeItems' config section)
_freeItemMap = {}
# key=templateName, value=HallFreeTemplate
_templateMap = {}
def _reloadConf():
    """Reload free-item and template maps from the hall free config.

    Both maps are built into local dicts first and only assigned to the
    module globals once decoding succeeded, so the globals are never left
    partially populated if a TYBizConfException is raised.
    """
    global _freeItemMap
    global _templateMap
    freeItemMap = {}
    templateMap = {}
    conf = hallconf.getFreeConf()
    for freeDict in conf.get('freeItems', []):
        freeItem = HallFree().decodeFromDict(freeDict)
        # duplicate ids are a configuration error
        if freeItem.freeItemId in freeItemMap:
            raise TYBizConfException(freeDict, 'Duplicate freeId %s' % (freeItem.freeItemId))
        freeItemMap[freeItem.freeItemId] = freeItem
    if ftlog.is_debug():
        ftlog.debug('hallfree._reloadConf freeIds=', freeItemMap.keys())
    for templateDict in conf.get('templates', []):
        template = HallFreeTemplate().decodeFromDict(templateDict, freeItemMap)
        # duplicate template names are a configuration error
        if template.name in templateMap:
            raise TYBizConfException(templateDict, 'Duplicate templateName %s' % (template.name))
        templateMap[template.name] = template
    _freeItemMap = freeItemMap
    _templateMap = templateMap
    ftlog.debug('hallfree._reloadConf successed freeIds=', _freeItemMap.keys(),
                'templateNames=', _templateMap.keys())
def _onConfChanged(event):
    """Reload the free config when the hall free config key changes."""
    if not _inited:
        return
    if event.isChanged('game:9999:free:tc'):
        ftlog.debug('hallfree._onConfChanged')
        _reloadConf()
def _onItemCountChanged(event):
    """Notify the client that free/promotion data may have changed."""
    if not _inited:
        return
    ftlog.debug('hallfree._onItemCountChanged', event.userId)
    datachangenotify.sendDataChangeNotify(HALL_GAMEID, event.userId, ['free', 'promotion_loc'])
def _initialize():
    """Initialize the module: load config and subscribe to global events.

    Safe to call more than once; only the first call has any effect.
    """
    ftlog.debug('hallfree._initialize begin')
    global _inited
    if not _inited:
        _inited = True
        _reloadConf()
        # reload on config change; refresh client data on item count change
        pkeventbus.globalEventBus.subscribe(EventConfigure, _onConfChanged)
        pkeventbus.globalEventBus.subscribe(ItemCountChangeEvent, _onItemCountChanged)
    ftlog.debug('hallfree._initialize end')
# Get the free-item list configuration matching a user's client
def getFree(gameId, userId, clientId, timestamp):
    """Return the list of HallFree items for the template of ``clientId``.

    Falls back to the 'default' template when no template is configured for
    the client; returns an empty list when neither exists.
    """
    ret = []
    templateName = hallconf.getFreeTemplateName(clientId)
    template = _templateMap.get(templateName)
    if ftlog.is_debug():
        ftlog.debug('hallfree.getFree gameId=', gameId,
                    'userId=', userId,
                    'clientId=', clientId,
                    'timestamp=', datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S'),
                    'templateName=', templateName)
    if not template:
        template = _templateMap.get('default')
    if ftlog.is_debug():
        ftlog.debug('hallfree.getFree gameId=', gameId,
                    'userId=', userId,
                    'clientId=', clientId,
                    'timestamp=', datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S'),
                    'freeItems=', [fi.freeItemId for fi in template.freeItems] if template else [])
    if template:
        for freeItem in template.freeItems:
            ret.append(freeItem)
    return ret
| [
"cg@ibenxi.com"
] | cg@ibenxi.com |
bcbae855606de4d9f3d8ab32e41605114787899e | 5d044dff51b77b1f569c1c8ec4e2f59abe521bdd | /fnss/topologies/randmodels.py | 0aefdf1d3793eefcfc3b08cc1239efb5f03258f0 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | brucespang/fnss | b84a91c4848fbc3243167ac392cace682ff11230 | 8e1d95744347afa77383092e6f144980d84e222d | refs/heads/master | 2021-01-14T08:36:13.313871 | 2020-03-26T20:29:23 | 2020-03-26T20:29:23 | 18,644,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,991 | py | """Functions to generate random topologies according to a number of models.
The generated topologies are either Topology or DirectedTopology objects.
"""
import math
import random
import networkx as nx
from fnss.util import random_from_pdf
from fnss.topologies.topology import Topology
__all__ = [
'erdos_renyi_topology',
'waxman_1_topology',
'waxman_2_topology',
'barabasi_albert_topology',
'extended_barabasi_albert_topology',
'glp_topology'
]
def erdos_renyi_topology(n, p, seed=None, fast=False):
    r"""Return a random graph :math:`G_{n,p}` (Erdos-Renyi graph, binomial
    graph).

    Each of the possible edges is included independently with probability p.

    Parameters
    ----------
    n : int
        The number of nodes.
    p : float
        Probability for edge creation.
    seed : int, optional
        Seed for random number generator (default=None).
    fast : boolean, optional
        Uses the algorithm proposed by [3]_, which is faster for small p

    References
    ----------
    .. [1] P. Erdos and A. Renyi, On Random Graphs, Publ. Math. 6, 290 (1959).
    .. [2] E. N. Gilbert, Random Graphs, Ann. Math. Stat., 30, 1141 (1959).
    .. [3] Vladimir Batagelj and Ulrik Brandes,
       "Efficient generation of large random networks",
       Phys. Rev. E, 71, 036113, 2005.
    """
    # validate input parameters
    if not isinstance(n, int) or n < 0:
        raise ValueError('n must be a positive integer')
    if p > 1 or p < 0:
        raise ValueError('p must be a value in (0,1)')
    # pick the generator: the "fast" variant is preferable for sparse graphs
    generator = nx.fast_gnp_random_graph if fast else nx.gnp_random_graph
    G = Topology(generator(n, p, seed=seed))
    G.name = "erdos_renyi_topology(%s, %s)" % (n, p)
    G.graph['type'] = 'er'
    return G
def waxman_1_topology(n, alpha=0.4, beta=0.1, L=1.0,
                      distance_unit='Km', seed=None):
    r"""
    Return a Waxman-1 random topology.

    The Waxman-1 model connects each pair of nodes with probability

    .. math::
            p = \alpha*exp(-d/(\beta*L)).

    where the distance *d* is chosen randomly in *[0,L]*.

    Parameters
    ----------
    n : int
        Number of nodes
    alpha : float
        Model parameter chosen in *(0,1]* (higher alpha increases link density)
    beta : float
        Model parameter chosen in *(0,1]* (higher beta increases difference
        between density of short and long links)
    L : float
        Maximum distance between nodes.
    seed : int, optional
        Seed for random number generator (default=None).

    Returns
    -------
    G : Topology

    Notes
    -----
    Each edge of G has the attribute *length*, expressed in *distance_unit*.

    References
    ----------
    .. [1] B. M. Waxman, Routing of multipoint connections.
       IEEE J. Select. Areas Commun. 6(9),(1988) 1617-1622.
    """
    # validate input parameters
    if not isinstance(n, int) or n <= 0:
        raise ValueError('n must be a positive integer')
    if alpha > 1 or alpha <= 0 or beta > 1 or beta <= 0:
        raise ValueError('alpha and beta must be float values in (0,1]')
    if L <= 0:
        raise ValueError('L must be a positive number')
    if seed is not None:
        random.seed(seed)
    topology = Topology(type='waxman_1', distance_unit=distance_unit)
    topology.name = "waxman_1_topology(%s, %s, %s, %s)" % (n, alpha, beta, L)
    topology.add_nodes_from(range(n))
    # visit every unordered node pair; the pair ordering (pop from the end,
    # then scan the remainder) matches the original so that seeded runs
    # reproduce identical topologies
    remaining = list(topology.nodes())
    while remaining:
        u = remaining.pop()
        for v in remaining:
            d = L * random.random()  # distance drawn uniformly in [0, L)
            if random.random() < alpha * math.exp(-d / (beta * L)):
                topology.add_edge(u, v, length=d)
    return topology
def waxman_2_topology(n, alpha=0.4, beta=0.1, domain=(0, 0, 1, 1),
                      distance_unit='Km', seed=None):
    r"""Return a Waxman-2 random topology.

    The Waxman-2 random topology models place n nodes uniformly at random
    in a rectangular domain. Two nodes u, v are connected with a link
    with probability

    .. math::
            p = \alpha*exp(-d/(\beta*L)).

    where the distance *d* is the Euclidean distance between the nodes u and v.
    and *L* is the maximum distance between all nodes in the graph.

    Parameters
    ----------
    n : int
        Number of nodes
    alpha : float
        Model parameter chosen in *(0,1]* (higher alpha increases link density)
    beta : float
        Model parameter chosen in *(0,1]* (higher beta increases difference
        between density of short and long links)
    domain : tuple of numbers, optional
         Domain size (xmin, ymin, xmax, ymax)
    seed : int, optional
        Seed for random number generator (default=None).

    Returns
    -------
    G : Topology

    Notes
    -----
    Each node of G has the attributes *latitude* and *longitude*, expressed
    in *distance_unit* (not degrees). Each edge of G has the attribute
    *length*, also expressed in *distance_unit*.

    References
    ----------
    .. [1] B. M. Waxman, Routing of multipoint connections.
       IEEE J. Select. Areas Commun. 6(9),(1988) 1617-1622.
    """
    # validate input parameters
    if not isinstance(n, int) or n <= 0:
        raise ValueError('n must be a positive integer')
    if alpha > 1 or alpha <= 0 or beta > 1 or beta <= 0:
        raise ValueError('alpha and beta must be float values in (0,1]')
    if not isinstance(domain, tuple) or len(domain) != 4:
        raise ValueError('domain must be a tuple of 4 number')
    (xmin, ymin, xmax, ymax) = domain
    if xmin > xmax:
        raise ValueError('In domain, xmin cannot be greater than xmax')
    if ymin > ymax:
        raise ValueError('In domain, ymin cannot be greater than ymax')
    if seed is not None:
        random.seed(seed)
    G = Topology(type='waxman_2', distance_unit=distance_unit)
    G.name = "waxman_2_topology(%s, %s, %s)" % (n, alpha, beta)
    G.add_nodes_from(range(n))
    for v in G.nodes():
        # BUGFIX: draw each coordinate uniformly in [min, max).
        # The previous expression "(ymin + (ymax - ymin)) * random.random()"
        # evaluates to ymax * random(), which ignores the domain lower bound.
        G.node[v]['latitude'] = ymin + (ymax - ymin) * random.random()
        G.node[v]['longitude'] = xmin + (xmax - xmin) * random.random()
    # compute the Euclidean distance between all unordered node pairs
    distances = {}
    nodes = list(G.nodes())
    while nodes:
        u = nodes.pop()
        for v in nodes:
            x_u = G.node[u]['longitude']
            x_v = G.node[v]['longitude']
            y_u = G.node[u]['latitude']
            y_v = G.node[v]['latitude']
            distances[(u, v)] = math.sqrt((x_u - x_v) ** 2 + (y_u - y_v) ** 2)
    # L is the maximum distance observed between any two nodes
    L = max(distances.values())
    for (u, v), d in distances.items():
        if random.random() < alpha * math.exp(-d / (beta * L)):
            G.add_edge(u, v, length=d)
    return G
# This is the classical BA model, without rewiring and add
def barabasi_albert_topology(n, m, m0, seed=None):
    r"""
    Return a random topology using Barabasi-Albert preferential attachment
    model.

    A topology of n nodes is grown by attaching new nodes each with m links
    that are preferentially attached to existing nodes with high degree:
    first a line topology with m0 nodes is created, then one node is added at
    each step and connected to m existing nodes selected with probability

    .. math::
        \Pi(i) = \frac{deg(i)}{sum_{v \in V} deg V}.

    where i is the selected node and V is the set of nodes of the graph.

    Parameters
    ----------
    n : int
        Number of nodes
    m : int
        Number of edges to attach from a new node to existing nodes
    m0 : int
        Number of nodes initially attached to the network
    seed : int, optional
        Seed for random number generator (default=None).

    Returns
    -------
    G : Topology

    Notes
    -----
    The initial m0 nodes are connected in a line, so the final topology has
    no disconnected subgraphs. NetworkX's own Barabasi-Albert generator is
    not used because it does not allow specifying the *m0* parameter.

    References
    ----------
    .. [1] A. L. Barabasi and R. Albert "Emergence of scaling in
       random networks", Science 286, pp 509-512, 1999.
    """
    # input parameters
    if n < 1 or m < 1 or m0 < 1:
        raise ValueError('n, m and m0 must be positive integers')
    if m >= m0:
        raise ValueError('m must be <= m0')
    if n < m0:
        raise ValueError('n must be > m0')
    if seed is not None:
        random.seed(seed)

    def preferential_pdf(graph):
        """Return the BA attachment probability for every node of *graph*."""
        deg = dict(graph.degree())
        total = float(sum(deg.values()))
        return {node: deg[node] / total for node in graph.nodes()}

    # Step 1: seed topology of m0 nodes connected in a line, so no node
    # ends up isolated at the end
    G = Topology(nx.path_graph(m0))
    G.name = "ba_topology(%d,%d,%d)" % (n, m, m0)
    G.graph['type'] = 'ba'
    # Step 2: grow one node at a time, wiring it to m distinct existing nodes
    for new_node in range(m0, n):
        pdf = preferential_pdf(G)
        G.add_node(new_node)
        attached = 0
        while attached < m:
            target = random_from_pdf(pdf)
            if not G.has_edge(new_node, target):
                G.add_edge(new_node, target)
                attached += 1
    return G
# This is the extended BA model, with rewiring and add
def extended_barabasi_albert_topology(n, m, m0, p, q, seed=None):
    r"""
    Return a random topology using the extended Barabasi-Albert preferential
    attachment model.

    Differently from the original Barabasi-Albert model, this model takes into
    account the presence of local events, such as the addition of new links or
    the rewiring of existing links.

    More precisely, the Barabasi-Albert topology is built as follows. First, a
    topology with *m0* isolated nodes is created. Then, at each step:

    with probability *p* add *m* new links between existing nodes, selected
    with probability:

    .. math::
            \Pi(i) = \frac{deg(i) + 1}{\sum_{v \in V} (deg(v) + 1)}

    with probability *q* rewire *m* links. Each link to be rewired is selected as
    follows: a node i is randomly selected and a link is randomly removed from
    it. The node i is then connected to a new node randomly selected with
    probability :math:`\Pi(i)`,

    with probability :math:`1-p-q` add a new node and attach it to m nodes of
    the existing topology selected with probability :math:`\Pi(i)`

    Repeat the previous step until the topology comprises n nodes in total.

    Parameters
    ----------
    n : int
        Number of nodes
    m : int
        Number of edges to attach from a new node to existing nodes
    m0 : int
        Number of edges initially attached to the network
    p : float
        The probability that new links are added
    q : float
        The probability that existing links are rewired
    seed : int, optional
        Seed for random number generator (default=None).

    Returns
    -------
    G : Topology

    References
    ----------
    .. [1] A. L. Barabasi and R. Albert "Topology of evolving networks: local
       events and universality", Physical Review Letters 85(24), 2000.
    """
    def calc_pi(G):
        """Calculate extended-BA Pi function for all nodes of the graph"""
        degree = dict(G.degree())
        den = float(sum(degree.values()) + G.number_of_nodes())
        return {node: (degree[node] + 1) / den for node in G.nodes()}

    # input parameters
    if n < 1 or m < 1 or m0 < 1:
        raise ValueError('n, m and m0 must be a positive integer')
    if m >= m0:
        raise ValueError('m must be <= m0')
    if n < m0:
        raise ValueError('n must be > m0')
    if p > 1 or p < 0:
        raise ValueError('p must be included between 0 and 1')
    if q > 1 or q < 0:
        raise ValueError('q must be included between 0 and 1')
    if p + q > 1:
        raise ValueError('p + q must be <= 1')
    if seed is not None:
        random.seed(seed)
    G = Topology(type='extended_ba')
    G.name = "ext_ba_topology(%d, %d, %d, %f, %f)" % (n, m, m0, p, q)
    # Step 1: Add m0 isolated nodes
    G.add_nodes_from(range(m0))
    while G.number_of_nodes() < n:
        pi = calc_pi(G)
        r = random.random()
        if r <= p:
            # add m new links with probability p
            n_nodes = G.number_of_nodes()
            n_edges = G.number_of_edges()
            max_n_edges = (n_nodes * (n_nodes - 1)) / 2
            if n_edges + m > max_n_edges:  # cannot add m links
                continue  # rewire or add nodes
            new_links = 0
            while new_links < m:
                u = random_from_pdf(pi)
                v = random_from_pdf(pi)
                # BUGFIX: compare node values with != rather than "is not",
                # which tests object identity and only works for small ints
                # because of CPython's small-integer caching
                if u != v and not G.has_edge(u, v):
                    G.add_edge(u, v)
                    new_links += 1
        elif r > p and r <= p + q:
            # rewire m links with probability q
            rewired_links = 0
            while rewired_links < m:
                i = random.choice(list(G.nodes()))  # pick up node randomly (uniform)
                # BUGFIX: "len(...) is 0" relied on int identity; use ==
                if len(G.adj[i]) == 0:  # if i has no edges, I cannot rewire
                    break
                j = random.choice(list(G.adj[i].keys()))  # node to be disconnected
                k = random_from_pdf(pi)  # new node to be connected
                # BUGFIX: value comparison instead of identity comparison
                if i != k and j != k and not G.has_edge(i, k):
                    G.remove_edge(i, j)
                    G.add_edge(i, k)
                    rewired_links += 1
        else:
            # add a new node with probability 1 - p - q
            new_node = G.number_of_nodes()
            G.add_node(new_node)
            new_links = 0
            while new_links < m:
                existing_node = random_from_pdf(pi)
                if not G.has_edge(new_node, existing_node):
                    G.add_edge(new_node, existing_node)
                    new_links += 1
    return G
def glp_topology(n, m, m0, p, beta, seed=None):
    r"""
    Return a random topology using the Generalized Linear Preference (GLP)
    preferential attachment model.

    It differs from the extended Barabasi-Albert model in that there is no
    link rewiring and a beta parameter is introduced to fine-tune
    preferential attachment.

    More precisely, the GLP topology is built as follows. First, a
    line topology with *m0* nodes is created. Then, at each step:

    with probability *p*, add *m* new links between existing nodes, selected
    with probability:

    .. math::
            \Pi(i) = \frac{deg(i) - \beta 1}{\sum_{v \in V} (deg(v) - \beta)}

    with probability :math:`1-p`, add a new node and attach it to m nodes of
    the existing topology selected with probability :math:`\Pi(i)`

    Repeat the previous step until the topology comprises n nodes in total.

    Parameters
    ----------
    n : int
        Number of nodes
    m : int
        Number of edges to attach from a new node to existing nodes
    m0 : int
        Number of edges initially attached to the network
    p : float
        The probability that new links are added
    beta : float
        Parameter to fine-tune preferential attachment: beta < 1
    seed : int, optional
        Seed for random number generator (default=None).

    Returns
    -------
    G : Topology

    References
    ----------
    .. [1] T. Bu and D. Towsley "On distinguishing between Internet power law
       topology generators", Proceedings of the 21st IEEE INFOCOM conference.
       IEEE, volume 2, pages 638-647, 2002.
    """
    def calc_pi(G, beta):
        """Calculate GLP Pi function for all nodes of the graph"""
        # validate input parameter
        if beta >= 1:
            raise ValueError('beta must be < 1')
        degree = dict(G.degree())
        den = float(sum(degree.values()) - (G.number_of_nodes() * beta))
        return {node: (degree[node] - beta) / den for node in G.nodes()}

    def add_m_links(G, pi):
        """Add m links between existing nodes to the graph"""
        n_nodes = G.number_of_nodes()
        n_edges = G.number_of_edges()
        # note: true division on Python 3 yields a float here; the
        # subsequent ">" comparison still behaves correctly
        max_n_edges = (n_nodes * (n_nodes - 1)) / 2
        if n_edges + m > max_n_edges:  # cannot add m links
            add_node(G, pi)  # add a new node instead
            # return in any case because before doing another operation
            # (add node or links) we need to recalculate pi
            return
        new_links = 0
        while new_links < m:
            u = random_from_pdf(pi)
            v = random_from_pdf(pi)
            if u != v and not G.has_edge(u, v):
                G.add_edge(u, v)
                new_links += 1

    def add_node(G, pi):
        """Add one node to the graph and connect it to m existing nodes"""
        new_node = G.number_of_nodes()
        G.add_node(new_node)
        new_links = 0
        while new_links < m:
            existing_node = random_from_pdf(pi)
            if not G.has_edge(new_node, existing_node):
                G.add_edge(new_node, existing_node)
                new_links += 1

    # validate input parameters
    if n < 1 or m < 1 or m0 < 1:
        raise ValueError('n, m and m0 must be a positive integers')
    if beta >= 1:
        raise ValueError('beta must be < 1')
    if m >= m0:
        raise ValueError('m must be <= m0')
    if p > 1 or p < 0:
        raise ValueError('p must be included between 0 and 1')
    if seed is not None:
        random.seed(seed)
    # step 1: create a line graph of m0 nodes connected by m0 - 1 edges
    G = Topology(nx.path_graph(m0))
    G.graph['type'] = 'glp'
    G.name = "glp_topology(%d, %d, %d, %f, %f)" % (n, m, m0, p, beta)
    # Add nodes and links now; pi is recalculated before every operation
    while G.number_of_nodes() < n:
        pi = calc_pi(G, beta)
        if random.random() < p:
            # add m new links with probability p
            add_m_links(G, pi)
        else:
            # add a new node with m new links with probability 1 - p
            add_node(G, pi)
    return G
| [
"l.saino@ucl.ac.uk"
] | l.saino@ucl.ac.uk |
1195a89a339b0dc730a05c8cde236cfa2de98c2a | 9af6ceb19c4288d989c7c73e7bcd31104a4dfd1b | /test.py | ea1d3166c28ece50f99365c383046d54e054df32 | [] | no_license | shaoboly/cluster_test | 80d1a8b8aadfd7ed0792db889baef591b444f18e | 9b4f27c5b152e1de94543c390dbb2d7a495db292 | refs/heads/master | 2020-04-06T07:12:04.337236 | 2016-09-02T16:43:26 | 2016-09-02T16:43:26 | 65,451,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | import numpy as np
# Demo: stacking a row onto a NumPy array, then building a 10x10 grid of zeros.
ma = np.ones((3, 2))
# print(ma)
add = np.array([0, 1])
data = np.vstack((ma, add))
# print(data)
# Build each row independently. The previous `[[0]*10]*10` repeated ONE row
# object ten times, so `grid[0][0] = 1` showed up in every row; `grid` also
# replaces the name `list`, which shadowed the builtin.
grid = [[0 for y in range(10)] for x in range(10)]
grid[0][0] = 1
print(grid)
"287743414@qq.com"
] | 287743414@qq.com |
ffdd682b1dce0eb902dcdae7bb9cb5c74a4b0a53 | 3cf50bf21226e3cde0b224a178961237c2971a3b | /scripts/runsim.py | c7d5ccd4c3ee4a809c06bcbcdec00aa715541375 | [] | no_license | cutefish/geods-analyze | e04ada343a93a9491ae07309aebc058e14acdf4b | 4d8f881f64d4a83c280f0cb35f10d8bee5a660d8 | refs/heads/master | 2020-12-24T13:28:35.222423 | 2014-10-19T19:47:51 | 2014-10-19T19:47:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,123 | py | import os
import subprocess
import sys
import time
import datetime
def run(indir, r, numProcs=1):
    """Run sim.py for every numbered config dir in *indir* within range *r*.

    r is a 'lower-upper' string (inclusive bounds). Each selected run dir gets
    its stdout/stderr captured to files inside it; on completion a
    __success__ marker with a timestamp is written into *indir*.
    Requires the PYTHON_SIM_VERSION environment variable to be set.
    """
    commands = []
    lb, ub = parseRange(r)
    for cfgdir in os.listdir(indir):
        # only numbered sub-directories are run configurations
        if not os.path.isdir('%s/%s'%(indir, cfgdir)):
            continue
        try:
            index = int(cfgdir)
        except:
            continue
        if lb > index or index > ub:
            continue
        rundir = '%s/%s'%(indir, cfgdir)
        command = 'python%s sim.py %s --verify' %(
            os.environ['PYTHON_SIM_VERSION'], rundir)
        commands.append((command, '%s/stdout'%rundir, '%s/stderr'%rundir))
    runCommands(commands, numProcs)
    #write a success mark
    fh = open('%s/__success__'%indir, 'w')
    fh.write(str(datetime.datetime.now()))
    fh.write('\n')
    fh.close()
def parseRange(r):
    """Split a 'lower-upper' string into an integer (lower, upper) pair."""
    lower, upper = r.split('-')
    return int(lower), int(upper)
def runCommands(commands, numProcs):
    """Run shell commands, at most *numProcs* subprocesses at a time.

    commands is a list of (command, stdout_path, stderr_path) tuples; the
    list is consumed (popped) as commands are launched. Polls every 10s and
    returns once every process has finished.
    """
    procs = []
    while True:
        #first remove processes that are finished
        for entry in list(procs):
            proc, outfh, errfh = entry
            if proc.poll() is not None:
                procs.remove(entry)
                outfh.close()
                # previously the stderr handle was never tracked or closed,
                # leaking one file descriptor per launched command
                errfh.close()
        #check terminate condition
        if len(procs) == 0 and len(commands) == 0:
            break
        #try to launch new commands if we can
        if len(procs) < numProcs and len(commands) != 0:
            for i in range(len(procs), numProcs):
                command, outfile, errfile = commands.pop()
                # parenthesized single-argument print works identically on
                # Python 2 and 3 and prints the same space-separated tokens
                print('%s 1> %s 2> %s' % (command, outfile, errfile))
                outfh = open(outfile, 'w')
                errfh = open(errfile, 'w')
                proc = subprocess.Popen(
                    command.split(' '), stdout=outfh, stderr=errfh)
                # keep both log handles so they can be closed on completion
                procs.append((proc, outfh, errfh))
                if len(commands) == 0:
                    break
        time.sleep(10)
def main():
    """CLI entry point: run.py <in dir> <range> [num procs]."""
    argc = len(sys.argv)
    if argc < 3:
        print('run.py <in dir> <range> [num procs]')
        sys.exit(-1)
    if argc == 3:
        run(sys.argv[1], sys.argv[2])
    else:
        run(sys.argv[1], sys.argv[2], int(sys.argv[3]))

if __name__ == '__main__':
    main()
| [
"cutefish.yx@gmail.com"
] | cutefish.yx@gmail.com |
97ce23377d03ef4d1bb7883b3f75f781278d8ef8 | c07db2fc2a4918037b3fb8810cc78f6f07588f6f | /PTTLibrary/Version.py | 1c36392d32f0668c77c831b3f1f6c1dde1d2a96d | [
"MIT"
] | permissive | shihyu/PTTLibrary | 4d0d7b01990ed0de02883a3fab54c09b21cde242 | b40f0d45e1ca2fc8e072473ece63a8628ac7168e | refs/heads/master | 2020-03-27T11:23:53.891419 | 2018-08-24T09:44:25 | 2018-08-24T09:44:25 | 146,483,955 | 1 | 0 | MIT | 2018-08-28T17:39:47 | 2018-08-28T17:39:47 | null | UTF-8 | Python | false | false | 13 | py | Ver = '0.7.5' | [
"truth@changingtec.com"
] | truth@changingtec.com |
aba8fcd3ea58d7fe66b3bbe8099f8f60d5f4097d | b64fcb9da80d12c52bd24a7a1b046ed9952b0026 | /client_sdk_python/providers/eth_tester/main.py | 68fdf1d3a68dcfcbb67e83434e4836cccf5581b6 | [
"MIT"
] | permissive | PlatONnetwork/client-sdk-python | e59f44a77690806c8763ed6db938ed8447d42417 | 94ad57bb34b5ee7bb314ac858071686382c55402 | refs/heads/master | 2022-07-09T08:49:07.312759 | 2021-12-24T08:15:46 | 2021-12-24T08:15:46 | 173,032,954 | 7 | 16 | MIT | 2022-08-31T02:19:42 | 2019-02-28T03:18:03 | Python | UTF-8 | Python | false | false | 1,773 | py | from client_sdk_python.providers import (
BaseProvider,
)
from .middleware import (
default_transaction_fields_middleware,
ethereum_tester_fixture_middleware,
ethereum_tester_middleware,
)
class EthereumTesterProvider(BaseProvider):
    """Web3 provider backed by an in-memory eth-tester chain.

    RPC calls are dispatched through a two-level ``api_endpoints`` mapping
    (namespace -> endpoint -> delegator callable) instead of a network.
    """
    # order matters: these run around every request made through the provider
    middlewares = [
        default_transaction_fields_middleware,
        ethereum_tester_fixture_middleware,
        ethereum_tester_middleware,
    ]
    # both attributes are populated in __init__
    ethereum_tester = None
    api_endpoints = None

    def __init__(self, ethereum_tester=None, api_endpoints=None):
        """Create the provider, lazily importing eth-tester defaults when
        no tester / endpoint table is supplied by the caller."""
        if ethereum_tester is None:
            # do not import eth_tester until runtime, it is not a default dependency
            from eth_tester import EthereumTester
            self.ethereum_tester = EthereumTester()
        else:
            self.ethereum_tester = ethereum_tester

        if api_endpoints is None:
            # do not import eth_tester derivatives until runtime, it is not a default dependency
            from .defaults import API_ENDPOINTS
            self.api_endpoints = API_ENDPOINTS
        else:
            self.api_endpoints = api_endpoints

    def make_request(self, method, params):
        """Dispatch an RPC *method* (e.g. 'eth_blockNumber') to its delegator.

        Returns a dict with either a 'result' key or an 'error' key; never
        raises for unknown or unimplemented endpoints.
        """
        # 'eth_blockNumber' -> namespace 'eth', endpoint 'blockNumber'
        namespace, _, endpoint = method.partition('_')
        try:
            delegator = self.api_endpoints[namespace][endpoint]
        except KeyError:
            return {
                "error": "Unknown RPC Endpoint: {0}".format(method),
            }
        # kept in a separate try block so a KeyError raised *inside* the
        # delegator is not mistaken for an unknown endpoint
        try:
            response = delegator(self.ethereum_tester, params)
        except NotImplementedError:
            return {
                "error": "RPC Endpoint has not been implemented: {0}".format(method),
            }
        else:
            return {
                'result': response,
            }

    def isConnected(self):
        """Always True: the backing chain lives in-process."""
        return True
| [
"hietel366435@163.com"
] | hietel366435@163.com |
0915102cfa0343f989eef246184cd916f8cc46c4 | 4bdbec7ad33b31c392b9d1f88ddf84e4b9230467 | /cross_origin_test/cross_origin_test/wsgi.py | 5bf61a3cc71d9dc0d96e87531d460711a5070d70 | [
"BSD-2-Clause",
"MIT"
] | permissive | mohawkhq/django-cross-origin | 4aa775b15612e505404a9eb6cfe24a568561d265 | f73f5c9a49d4044c34e443153c071b6bb0acda31 | refs/heads/master | 2020-06-08T20:13:02.690261 | 2013-11-19T15:33:34 | 2013-11-19T15:33:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for cross_origin_test project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cross_origin_test.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"dave@etianen.com"
] | dave@etianen.com |
207bca020ba796da46c19aed0c9f68d207511a4c | a124d710c0fab2ea5868baa04933c6315590ed11 | /HW_OceanOptics/OceanOpticsStepperMotor_Scan.py | d3f2b5a193620b7eb16a4062aca52845f4577ed2 | [
"MIT"
] | permissive | GingerLabUW/Microscope_App | 346d0b0882de3182af996d36d675f5e64ac2da3a | d7a52d23aa58daa6fbdb68473d2ad45eff6c45d0 | refs/heads/master | 2023-06-20T12:32:45.487992 | 2020-10-02T05:04:50 | 2020-10-02T05:04:50 | 192,818,664 | 3 | 1 | MIT | 2020-10-02T05:04:51 | 2019-06-19T23:48:21 | Python | UTF-8 | Python | false | false | 7,889 | py | from HW_StepperMotor.StepperMotor_Scan import StepperMotor_Scan
from ScopeFoundry import Measurement
from ScopeFoundry.helper_funcs import sibling_path, load_qt_ui_file
import pyqtgraph as pg
import numpy as np
import time
import pickle
import os.path
from pyqtgraph.Qt import QtGui, QtCore
from pyqtgraph.Point import Point
import customplotting.mscope as cpm
class OceanOpticsStepperMotor_Scan(StepperMotor_Scan):
    """Stepper-motor raster scan that records one OceanOptics spectrum per pixel.

    Extends the base StepperMotor_Scan with spectrometer settings, live Qt/
    pyqtgraph displays and pickle export of the acquired spectra.
    Arrays are sized for 2048-element spectra (appears to be the detector
    pixel count of the spectrometer in use -- confirm for other hardware).
    """
    name = "OceanOpticsStepperMotor_Scan"
    def setup(self):
        """Register spectrometer-specific settings on top of the base scan setup."""
        StepperMotor_Scan.setup(self)
        self.settings.New("intg_time",dtype=int, unit='ms', initial=3, vmin=3)
        self.settings.New('correct_dark_counts', dtype=bool, initial=True)
        self.settings.New("scans_to_avg", dtype=int, initial=1, vmin=1)
    def setup_figure(self):
        """Build the UI: settings widgets, live spectrum plot and image view."""
        StepperMotor_Scan.setup_figure(self)
        #setup ui for ocean optics specific settings
        spec_hw = self.app.hardware['oceanoptics']
        details_groupBox = self.set_details_widget(widget = self.settings.New_UI(include=["intg_time", "correct_dark_counts", "scans_to_avg"]))
        widgets = details_groupBox.findChildren(QtGui.QWidget)
        # NOTE(review): widget indices depend on New_UI's generated layout --
        # verify [1] and [4] still map to the intended controls if settings change
        intg_time_spinBox = widgets[1]
        correct_dark_counts_checkBox = widgets[4]
        #scans_to_avg_spinBox = widgets[6]
        #connect settings to ui
        spec_hw.settings.intg_time.connect_to_widget(intg_time_spinBox)
        spec_hw.settings.correct_dark_counts.connect_to_widget(correct_dark_counts_checkBox)
        intg_time_spinBox.valueChanged.connect(self.update_estimated_scan_time)
        #save data buttons
        self.ui.save_image_pushButton.clicked.connect(self.save_intensities_image)
        self.ui.save_array_pushButton.clicked.connect(self.save_intensities_data)
        #spectrometer plot
        self.graph_layout=pg.GraphicsLayoutWidget()
        self.plot = self.graph_layout.addPlot(title="Spectrometer Live Reading")
        self.plot.setLabel('left', 'Intensity', unit='a.u.')
        self.plot.setLabel('bottom', 'Wavelength', unit='nm')
        # # Create PlotDataItem object ( a scatter plot on the axes )
        self.optimize_plot_line = self.plot.plot([0])
        #setup imageview
        self.imv = pg.ImageView()
        self.imv.getView().setAspectLocked(lock=False, ratio=1)
        self.imv.getView().setMouseEnabled(x=True, y=True)
        self.imv.getView().invertY(False)
        roi_plot = self.imv.getRoiPlot().getPlotItem()
        roi_plot.getAxis("bottom").setLabel(text="Wavelength (nm)")
    def update_estimated_scan_time(self):
        """Recompute and display the estimated total scan time from settings."""
        try:
            self.overhead = self.x_range * self.y_range * .058 #determined by running scans and timing
            scan_time = self.x_range * self.y_range * self.settings["intg_time"] * 1e-3 + self.overhead #s
            self.ui.estimated_scan_time_label.setText("Estimated scan time: " + "%.2f" % scan_time + "s")
        except:
            # ranges may not exist yet before a scan is configured
            pass
    def update_display(self):
        """Refresh live spectrum, image maps, time estimate and progress bar."""
        StepperMotor_Scan.update_display(self)
        if hasattr(self, 'spec') and hasattr(self, 'pi_device') and hasattr(self, 'y'): #first, check if setup has happened
            if not self.interrupt_measurement_called:
                seconds_left = ((self.x_range * self.y_range) - self.pixels_scanned) * self.settings["intg_time"] * 1e-3 + self.overhead
                self.ui.estimated_time_label.setText("Estimated time remaining: " + "%.2f" % seconds_left + "s")
            #plot wavelengths vs intensity
            self.plot.plot(self.spec.wavelengths(), self.y, pen='r', clear=True) #plot wavelength vs intensity
            self.graph_layout.show()
            self.graph_layout.window().setWindowFlag(QtCore.Qt.WindowCloseButtonHint, False) #disable closing image view window
            self.img_item.setImage(self.sum_intensities_image_map) #update stage image
            #update imageview
            self.imv.setImage(img=self.spectrum_image_map, autoRange=False, autoLevels=True, xvals=self.spec.wavelengths()) #adjust roi plot x axis
            self.imv.show()
            self.imv.window().setWindowFlag(QtCore.Qt.WindowCloseButtonHint, False) #disable closing image view window
            #update progress bar
            progress = 100 * ((self.pixels_scanned+1)/np.abs(self.x_range*self.y_range))
            self.ui.progressBar.setValue(progress)
            self.set_progress(progress)
            pg.QtGui.QApplication.processEvents()
    def pre_run(self):
        """Allocate data arrays and grab the spectrometer before scanning."""
        try:
            StepperMotor_Scan.pre_run(self) #setup scan parameters
            self.spec = self.spec_hw.spec
            self.check_filename("_raw_PL_spectra_data.pkl")
            # Define empty array for saving intensities
            self.data_array = np.zeros(shape=(self.x_range*self.y_range,2048))
            # Define empty array for image map
            self.sum_intensities_image_map = np.zeros((self.x_range, self.y_range), dtype=float) #store sum of intensities for each pixel
            self.spectrum_image_map = np.zeros((2048, self.x_range, self.y_range), dtype=float) #Store spectrum for each pixel
            scan_time = self.x_range * self.y_range * self.settings["intg_time"] * 1e-3 #s
            self.ui.estimated_scan_time_label.setText("Estimated scan time: " + "%.2f" % scan_time + "s")
        except:
            # NOTE(review): this silently swallows all setup failures -- the
            # scan will then run without data arrays; consider logging here
            pass
    def scan_measure(self):
        """
        Data collection for each pixel.
        """
        self._read_spectrometer()
        self.data_array[self.pixels_scanned,:] = self.y
        self.sum_intensities_image_map[self.index_x, self.index_y] = self.y.sum()
        self.spectrum_image_map[:, self.index_x, self.index_y] = self.y
    def post_run(self):
        """
        Export data.
        """
        StepperMotor_Scan.post_run(self)
        save_dict = {"Wavelengths": self.spec.wavelengths(), "Intensities": self.data_array,
                     "Scan Parameters":{"X scan start (um)": self.x_start, "Y scan start (um)": self.y_start,
                                        "X scan size (um)": self.x_scan_size, "Y scan size (um)": self.y_scan_size,
                                        "X step size (um)": self.x_step, "Y step size (um)": self.y_step},
                     "OceanOptics Parameters":{"Integration Time (ms)": self.spec_hw.settings['intg_time'],
                                               "Scans Averages": self.spec_measure.settings['scans_to_avg'],
                                               "Correct Dark Counts": self.spec_hw.settings['correct_dark_counts']}
                     }
        pickle.dump(save_dict, open(self.app.settings['save_dir']+"/"+self.app.settings['sample']+"_raw_PL_spectra_data.pkl", "wb"))
    def _read_spectrometer(self):
        '''
        Read spectrometer according to settings and update self.y (intensities array)
        '''
        if hasattr(self, 'spec'):
            intg_time_ms = self.spec_hw.settings['intg_time']
            self.spec.integration_time_micros(intg_time_ms*1e3) #seabreeze error checking
            scans_to_avg = self.spec_measure.settings['scans_to_avg']
            Int_array = np.zeros(shape=(2048,scans_to_avg))
            for i in range(scans_to_avg): #software average
                data = self.spec.spectrum(correct_dark_counts=self.spec_hw.settings['correct_dark_counts'])#acquire wavelengths and intensities from spec
                Int_array[:,i] = data[1]
            self.y = np.mean(Int_array, axis=-1)
    def save_intensities_data(self):
        """Save the summed-intensity map as an array (transposed for display)."""
        transposed = np.transpose(self.sum_intensities_image_map) #transpose so data visually makes sense
        StepperMotor_Scan.save_intensities_data(self, transposed, 'oo')
    def save_intensities_image(self):
        """Save the summed-intensity map as an image via the base class."""
        StepperMotor_Scan.save_intensities_image(self, self.sum_intensities_image_map, 'oo')
"lindat18@lakesideschool.org"
] | lindat18@lakesideschool.org |
0dc5599022c2a784725abe29a63cdbbb2a1cc756 | 2bf4b4f8a8c8c2d1278e2e7cc91eb75076001748 | /core/templatetags/functions.py | f8c93b436eb6d358383de5bf4ddb6ac77aa5f592 | [] | no_license | diegolirio/bolao | 7dd8ef95363c6bf13e90b2a28751a35798c33821 | 1a0c0a45d99256f84ad3af9c1ebee19ef93d4b09 | refs/heads/master | 2020-04-19T01:55:46.212385 | 2013-10-29T19:19:22 | 2013-10-29T19:19:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,991 | py | __author__ = 'diegolirio'
from django import template
from core.models import *
from core.const import *
register = template.Library()
@register.filter('hello')
def hello(obj):
    """Template filter: prefix *obj* with the greeting 'Ola '."""
    greeting = 'Ola ' + obj
    return greeting
@register.filter('cut')
def cut(value, arg):
    """Template filter: remove every occurrence of *arg* from *value*."""
    cleaned = value.replace(arg, '')
    return cleaned
@register.filter('get_patrocinador_principal_display')
def get_patrocinador_principal_display(competicao):
    """Return the display name of the competition's principal sponsor, or ''."""
    try:
        patrocinador = Competicao_Patrocinadores.objects.filter(competicao=competicao, principal=True)[0:1].get()
    except:
        # bare except keeps template rendering alive when no principal sponsor exists
        return ''
    return patrocinador.patrocinador.nome_visual
@register.filter('get_patrocinador_principal_link')
def get_patrocinador_principal_link(competicao):
    """Return the website URL of the competition's principal sponsor, or ''."""
    try:
        patrocinador = Competicao_Patrocinadores.objects.filter(competicao=competicao, principal=True)[0:1].get()
    except:
        # bare except keeps template rendering alive when no principal sponsor exists
        return ''
    return patrocinador.patrocinador.url_site
@register.filter('get_patrocinador_principal_img')
def get_patrocinador_principal_img(competicao):
    """Return the sidebar image of the competition's principal sponsor, or ''."""
    try:
        patrocinador = Competicao_Patrocinadores.objects.filter(competicao=competicao, principal=True)[0:1].get()
    except:
        # bare except keeps template rendering alive when no principal sponsor exists
        return ''
    return patrocinador.patrocinador.image_aside
@register.filter('get_comentarios_atividade')
def get_comentarios_atividade(atividade):
    """Return the queryset of comments attached to *atividade*."""
    return ComentarioAtividade.objects.filter(atividade=atividade)
@register.filter('get_qtde_comentarios')
def get_qtde_comentarios(atividade):
    """Return the number of comments attached to *atividade*."""
    return ComentarioAtividade.objects.filter(atividade=atividade).count()
@register.filter('get_aproveitamento')
def get_aproveitamento(inscricao):
    """Return the entry's points as a percentage of the maximum attainable.

    NOTE(review): under Python 2 the `/` below performs integer division, so
    the percentage is truncated -- confirm whether that is intended.
    """
    grupos = Grupo.objects.filter(campeonato=inscricao.competicao.campeonato)
    qtde = 0
    # count every match in the championship whose status code is not 'E'
    # (presumably matches still awaiting a result -- confirm against Jogo.status)
    for g in grupos:
        jgs_aux = Jogo.objects.filter(grupo=g)
        for j in jgs_aux:
            if j.status.codigo != 'E':
                qtde = qtde + 1
    if qtde > 0:
        # maximum attainable score: exact-score points times matches counted
        pontuacao_100_ = PONTOS_PLACAR * qtde
        aproveitamento = inscricao.pontos * 100 / pontuacao_100_
    else:
        # no countable matches yet: report a full 100%
        aproveitamento = 100
    return aproveitamento
| [
"diegolirio.dl@gmail.com"
] | diegolirio.dl@gmail.com |
da52392e3afb25a84af8667209531b96235635de | a006f079586fc0748e0d4ad7f0c9c773a3852d3e | /tabulares/xlsx_parser.py | 7ab0a7278aaa8150a12be5d5cd027ad91678bcc5 | [] | no_license | GilbertoJun/raspagem_de_dados_nao_html | 81f8ec343a1ed004498bd77747022947eac0f262 | 2acea5f748ebaf3c6ccec35195c2b939814c8ee3 | refs/heads/master | 2020-04-16T04:07:42.387961 | 2017-11-21T23:53:40 | 2017-11-21T23:53:40 | 165,256,433 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | from openpyxl import load_workbook
# Load the workbook and flatten every cell of the first sheet into one list,
# then print the cell values.
doc = load_workbook('episodios.xlsx')
# NOTE(review): get_sheet_names/get_sheet_by_name are deprecated openpyxl
# APIs (modern versions use doc.sheetnames and doc[name]).
folhas = doc.get_sheet_names()
ep_folha = doc.get_sheet_by_name(folhas[0])
# sum(..., []) concatenates the per-row cell lists into a single flat list
linhas = sum([[cedula for cedula in linha]for linha in ep_folha], [])
print(list(map(lambda x: x.value, linhas)))
| [
"igordiasth@gmail.com"
] | igordiasth@gmail.com |
be677cd1dacecd8fd029698436191ff354039c02 | 718fd4c9d5aa2233e9a7a3a91e20a1266a5c7ba4 | /Source_Final/Check_Drupal/Check_CHANGELOG.py | 7d9ed32e9aa7dbf8bb06c142383f0f4c92232edc | [] | no_license | opflep/Drupalgeddon-Toolkit | b44f6ea19bf70313a55df6550029aa9fc6d96611 | 32ae1bc68d8eca037345a5f589ac6fb68ba11f1b | refs/heads/master | 2020-04-28T21:06:01.758413 | 2019-05-07T17:51:40 | 2019-05-07T17:51:40 | 175,569,436 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,071 | py | import time
import sys
import requests
import urllib3
import bb01_ultilities as util
from random import randint
from multiprocessing import Pool
# Start timer
start = time.time()
# silence the InsecureRequestWarning spam produced by verify=False requests
urllib3.disable_warnings()
# Get file input
# NOTE(review): `file` shadows the Python 2 builtin of the same name
file = open(sys.argv[1], 'r')
# Get file output
outputfile = sys.argv[2]
lines = file.readlines()
def checkVersion(url):
    """Probe one host's CHANGELOG.txt to fingerprint its Drupal version.

    Appends '<host> <newest changelog line>' to the shared output file when
    the response looks like a genuine Drupal changelog (contains the 1.0.0
    release line and is not an HTML page).
    """
    # Get host as each line of input file
    host = "http://"+url.strip()
    # Get random user agent and set to header
    headers = util.genHeader()
    try:
        # Request to CHANGELOG.txt of host
        r = requests.post(host+"/CHANGELOG.txt",
                          verify=False, headers=headers, timeout=1)
        # Case status code != 200
        if(r.status_code != 200):
            # Request to CHANGELOG.txt of host
            # (Drupal 8+ moved the changelog under /core/)
            r = requests.post(
                host+"/core/CHANGELOG.txt", verify=False, headers=headers, timeout=1)
        # Get data
        data = r.text
    except Exception as e:
        # unreachable hosts / timeouts: treat as no changelog
        data = ""
    # Case check drupal
    if "Drupal 1.0.0, 2001-01-15" in data and "<!doctype html>" not in data and "<!DOCTYPE html>" not in data:
        check = True
        sline = 0
        # walk the changelog line by line until the newest real version line
        while check:
            try:
                # Get newest version of drupal
                data = r.text.split('\n')[sline]
            except Exception as e:
                check = False
            if "Drupal" in data and "xxxx" not in data and "content=" not in data:
                check = False
            else:
                sline = sline+1
        # Concate to result
        result = host+" "+data
        # Open output file
        with open(outputfile, 'a') as f:
            # Write the result to file
            f.write("%s\n" % result.encode("utf-8"))
if __name__ == "__main__":
try:
p = Pool(processes=20)
result = p.map(checkVersion, lines)
except Exception as e:
print (e)
# Open output file and write the total time scanning
with open(outputfile, 'a') as f:
f.write("------| Total Time: %s |-------\n" % (time.time() - start))
| [
"zerozeralot@gmail.com"
] | zerozeralot@gmail.com |
87272a5edbdfced7a77242b9520a73fd7728d7ea | 8c3af6480c20bacb278b3a2d8949bf0439050e26 | /Modules/Module2Task.py | 062e8eabaa8a3ff3495e86525caf84bf804a8fa6 | [] | no_license | nhcha6/AutomationProject | f56615a7c3f7442fe810068887d5d113a15334ca | a468ca5e067ee078e98a9626e412af0b2d823a8d | refs/heads/master | 2023-01-09T18:37:06.248794 | 2020-11-12T02:04:49 | 2020-11-12T02:04:49 | 286,405,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,564 | py | from picamera import PiCamera
from picamera.array import PiRGBArray
import numpy as np
import cv2
import time
import RPi.GPIO as GPIO # Import module “RPi.GPIO” and gave it a nickname “GPIO”
import matplotlib.pyplot as plt
# setup GPIO modules
# Define button pin numbers
btn_pin_1 = 16
btn_pin_2 = 18
# declare global variables
# draw_erase = "draw", "erase", "offErase" or "offDraw"
draw_erase = "offErase"
# color_button names the currently selected pen colour; chosen_color is its value
color_button = "Blue"
drawing_color = (0, 0, 0)
thickness = 0
chosen_color = (255, 0, 0)
# currentLine accumulates [x, y] points of the stroke being drawn/erased
currentLine = []
# Suppress warnings
GPIO.setwarnings(False)
# set pin numbering to board
GPIO.setmode(GPIO.BOARD)
# set pins
GPIO.setup(btn_pin_1, GPIO.IN)
GPIO.setup(btn_pin_2, GPIO.IN)
# Interrupt handler for button 1: each press advances the mode state machine
# draw -> offDraw -> erase -> offErase -> draw. Entering "draw" or "erase"
# resets the current stroke and sets the pen colour/thickness accordingly.
def button_callback_1(channel):
    global draw_erase
    global drawing_color
    global thickness
    global currentLine
    if draw_erase == "offErase":
        # idle after erasing: start a new drawing stroke in the chosen colour
        draw_erase = "draw"
        currentLine = []
        drawing_color = chosen_color
        thickness = 5
    elif draw_erase == "draw":
        draw_erase = "offDraw"
    elif draw_erase == "offDraw":
        # enter erase mode: paint black with a wide stroke
        draw_erase = "erase"
        currentLine = []
        drawing_color = (0, 0, 0)
        thickness = 15
    elif draw_erase == "erase":
        draw_erase = "offErase"
# Interrupt handler for button 2: cycle the pen colour Blue -> Green -> Red
# -> Blue. If currently drawing, the active pen picks up the new colour and
# a fresh stroke is started.
def button_callback_2(channel):
    global chosen_color
    global drawing_color
    global color_button
    global currentLine
    transitions = {
        "Blue": ((0, 255, 0), "Green"),
        "Green": ((0, 0, 255), "Red"),
        "Red": ((255, 0, 0), "Blue"),
    }
    next_state = transitions.get(color_button)
    if next_state is not None:
        chosen_color, color_button = next_state
    # if in draw mode currently, make sure the drawing color is updated with
    # the new chosen color, and the currentLine variable is reset.
    if draw_erase == "draw":
        drawing_color = chosen_color
        currentLine = []
# function runs drawing code.
def color_segmentation(range1, range2):
    """Run the live drawing loop: track an object whose HSV colour lies in
    [range1, range2] and paint its path over the camera feed.

    Button interrupts (set up here) switch draw/erase mode and pen colour via
    the module-level globals. Press 'q' in the preview window to exit.
    """
    # checked for rising edge of either button (switch transitions closed to open)
    GPIO.add_event_detect(btn_pin_1, GPIO.RISING, callback=button_callback_1, bouncetime=50)
    GPIO.add_event_detect(btn_pin_2, GPIO.RISING, callback=button_callback_2, bouncetime=50)
    # initialise object
    camera = PiCamera()
    # configure camera setting
    camera.resolution = (640, 480)
    camera.framerate = 32
    # sleep and update settings
    time.sleep(2)
    camera.awb_mode = 'off'
    camera.awb_gains = 1.3
    # initialise the picture arrage with the corresponding size
    rawCapture = PiRGBArray(camera, size=(640, 480))
    # create an all black image to be the base for the drawing image.
    drawing = np.zeros((480, 640, 3), np.uint8)
    # continuously capture images in for loop.
    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        # extract image, convert to hsv and then apply range to get mask.
        image = frame.array
        hsv_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
        maskFinger = cv2.inRange(hsv_image, range1, range2)
        #masked_image = cv2.bitwise_and(image, image, mask=maskFinger)
        # create mask of drawn image, which has been added to in previous loops.
        drawing_gray = cv2.cvtColor(drawing, cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(drawing_gray, 10, 255, cv2.THRESH_BINARY)
        mask_inv = cv2.bitwise_not(mask)
        # add the drawing image to the captured image.
        image_masked = cv2.bitwise_and(image, image, mask = mask_inv)
        drawn_image = cv2.add(image_masked, drawing)
        # if in draw mode or erase mode, capture the centre of mass of the coloured object, display it
        # on the screen as a circle, and then add the point to the line currently being drawn.
        if draw_erase == "draw" or draw_erase == "erase":
            #if previous != draw_erase:
                #currentLine = []
            # calc moments
            try:
                M = cv2.moments(maskFinger)
                # centroid of the colour mask; raises ZeroDivisionError when
                # no pixels matched (m00 == 0), in which case the frame is skipped
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
                currentLine.append([cX, cY])
                cv2.circle(drawn_image, (cX, cY), 5, (0, 0, 255), 4, 3)
            except ZeroDivisionError:
                pass
        # draw the line on the black backed drawing image.
        cv2.polylines(drawing, [np.array(currentLine)], isClosed=False, color = drawing_color, thickness=thickness)
        # cv2.imshow('PP', masked_image)
        cv2.imshow('PP', drawn_image)
        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything done, release the capture
    # cv2.destroyAllWindows()
# function for displaying the hsv values for the middle point of the frame.
def median_hsv():
    """Preview the camera feed with a centre marker; press 'p' to print the
    HSV value at the centre pixel (used to calibrate colour ranges), 'q' to quit."""
    # initialise object
    camera = PiCamera()
    # configure camera setting
    camera.resolution = (640, 480)
    camera.framerate = 32
    # sleep and update settings
    time.sleep(2)
    camera.awb_mode = 'off'
    camera.awb_gains = 1.3
    # initialise the picture arrage with the corresponding size
    rawCapture = PiRGBArray(camera, size=(640, 480))
    # capture frames from the camera
    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        image = frame.array
        # mark the sampled centre pixel
        cv2.circle(image, (320, 240), 5, (0, 0, 255), 4, 3)
        cv2.imshow('PP', image)
        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('p'):
            hsv_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
            print(hsv_image[240, 320])
        if key == ord('q'):
            break
    # When everything done, release the capture
    # cv2.destroyAllWindows()
# range calculated for blue lid using the median_hsv() function.
range_1 = (170, 110, 50)
range_2 = (180, 230, 150)
# call drawing function using the prescribed range.
color_segmentation(range_1, range_2)
#median_hsv()+
"nhcha6@student.monash.edu"
] | nhcha6@student.monash.edu |
1c07e950336bf700663363367fa33ecf43c0f407 | 0cb1ff9d0be4387e33f1003ab5cc72bab0345e7a | /wildcard/dashboards/settings/password/tests.py | 3372ec782591fc679b4e3a892d89731e3b8335cc | [
"Apache-2.0"
] | permissive | kickstandproject/wildcard | 65995fb0090c4cfcad34f8373cfc912199ecf5da | 0ef2a15d8ac6b1d37db964d0baa7e40f9f771bc9 | refs/heads/master | 2020-05-17T00:41:09.908059 | 2015-01-27T20:25:33 | 2015-01-28T03:30:22 | 14,288,349 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,365 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Centrin Data Systems Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import NoReverseMatch # noqa
from django.core.urlresolvers import reverse # noqa
from django import http
from mox import IsA # noqa
from wildcard import api
from wildcard.test import helpers as test
# TODO(mrunge): remove, when keystone v3 supports
# change_own_password, incl. password validation
kver = api.keystone.VERSIONS.active
# the password-change panel URL only exists when running against keystone v2
if kver == 2:
    INDEX_URL = reverse('horizon:settings:password:index')
class ChangePasswordTests(test.TestCase):
    """Tests for the settings/password panel (keystone v2 only; every test
    skips itself on keystone v3, which does not support change_own_password)."""

    @test.create_stubs({api.keystone: ('user_update_own_password', )})
    def test_change_password(self):
        """A valid password change posts to keystone with no form errors."""
        if kver == 3:
            self.skipTest('Password change in keystone v3 unsupported')
        api.keystone.user_update_own_password(IsA(http.HttpRequest),
                                              'oldpwd',
                                              'normalpwd',).AndReturn(None)
        self.mox.ReplayAll()
        formData = {'method': 'PasswordForm',
                    'current_password': 'oldpwd',
                    'new_password': 'normalpwd',
                    'confirm_password': 'normalpwd'}
        res = self.client.post(INDEX_URL, formData)
        self.assertNoFormErrors(res)

    def test_change_validation_passwords_not_matching(self):
        """Mismatched new/confirm passwords produce a form validation error."""
        if kver == 3:
            self.skipTest('Password change in keystone v3 unsupported')
        formData = {'method': 'PasswordForm',
                    'current_password': 'currpasswd',
                    'new_password': 'testpassword',
                    'confirm_password': 'doesnotmatch'}
        res = self.client.post(INDEX_URL, formData)
        self.assertFormError(res, "form", None, ['Passwords do not match.'])

    @test.create_stubs({api.keystone: ('user_update_own_password', )})
    def test_change_password_shows_message_on_login_page(self):
        """After a successful change the login page shows the re-login notice."""
        if kver == 3:
            self.skipTest('Password change in keystone v3 unsupported')
        api.keystone.user_update_own_password(IsA(http.HttpRequest),
                                              'oldpwd',
                                              'normalpwd').AndReturn(None)
        self.mox.ReplayAll()
        formData = {'method': 'PasswordForm',
                    'current_password': 'oldpwd',
                    'new_password': 'normalpwd',
                    'confirm_password': 'normalpwd'}
        res = self.client.post(INDEX_URL, formData, follow=True)
        info_msg = "Password changed. Please log in again to continue."
        self.assertContains(res, info_msg)

    def test_on_keystone_v3_disabled(self):
        """On keystone v3 the panel URL must not be registered at all."""
        try:
            reverse('horizon:settings:password:index')
        except NoReverseMatch:
            pass
| [
"paul.belanger@polybeacon.com"
] | paul.belanger@polybeacon.com |
aa17155893cfb2af9988c6c15cee4adfd0f2afa4 | 5963670e6837ac26826379c4d0589181af8d5620 | /apt_dataset.py | 645eccf1eed031f3f7a9c9eb7806d67bb9080b42 | [] | no_license | lychrel/caps-net | 2ab3c40a7e6b9803e9b995a48c29eae073d9b0fe | bae6f6322b2080f6ab7d6f75e9fd8bde7d2e06fb | refs/heads/master | 2020-03-15T15:12:07.757780 | 2018-05-05T02:37:18 | 2018-05-05T02:37:18 | 132,206,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import h5py
import cv2
from glob import glob
# Resize every apartment-number photo to 32x32, re-read the resized copies
# into X_data, and build the one-hot label matrix for the six images.
X_data = []
d=0
for fn in glob('data/*.jpg'):
    im = cv2.imread(fn)
    new_im = cv2.resize(im, (32, 32))
    # write the resized image to disk and read it back so X_data holds the
    # JPEG-re-encoded pixels
    cv2.imwrite('resized/im_%d.jpg'%d, new_im)
    imag = cv2.imread ('resized/im_%d.jpg'%d)
    X_data.append(imag)
    d = d + 1
# one-hot labels: row i is image i, column is its digit (classes 0-9)
y_data = np.zeros((6,10))
y_data[0][1] = 1
y_data[1][2] = 1
y_data[2][7] = 1
y_data[3][3] = 1
y_data[4][2] = 1
y_data[5][3] = 1
print('Apt_data shape:', np.array(X_data).shape)
print('Label shape: ', y_data.shape)
def rgb2gray(images):
    """Convert a batch of RGB images to grayscale.

    Applies the ITU-R BT.601 luma weights along the last (colour) axis and
    re-inserts a trailing channel dimension of size 1 at axis 3.
    """
    luma_weights = [0.2989, 0.5870, 0.1140]
    grey = np.dot(images, luma_weights)
    return np.expand_dims(grey, axis=3)
# Transform the images to greyscale
X_u = rgb2gray(X_data).astype(np.float32)
# Create file
h5f = h5py.File('data/apt_num.h5', 'w')
# Store the datasets
h5f.create_dataset('apt_num_dataset', data=X_u)
h5f.create_dataset('apt_num_labels', data=y_data)
# Close the file
h5f.close()
| [
"jacklynchtds@gmail.com"
] | jacklynchtds@gmail.com |
2892ca34dda7c6bac350599fac9f051e71e64ce2 | f0c6b43e325064511c4e2d7ce9c59e88a12d81d5 | /Assignment/DataTypes/problem10.py | 0565ed8531943f1e8764d0ac461c28ed26bea342 | [] | no_license | kendraregmi/Assignment1 | bda8402fa216bf54789c4d3b5092a5540d4ee68d | 83a8365e508f5b83cee71fc14155b7838103b3ba | refs/heads/main | 2023-03-26T17:42:54.255731 | 2021-03-08T07:29:04 | 2021-03-08T07:29:04 | 344,406,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | # 10. Write a Python program to remove the characters which have odd index
# values of a given string.
my_string= "Kathmandu"
result=""
for i in range(len(my_string)):
if i%2==0:
result= result+my_string[i]
print(result)
| [
"noreply@github.com"
] | kendraregmi.noreply@github.com |
8c4de2642d9752e64cfff1c79de8129758f696fc | f5d0be87bad113cd3ec0dabc4db0683442c794bf | /alphastarmini/core/arch/spatial_encoder.py | 96cbd701618415f6f2794855072f3791699f3169 | [
"Apache-2.0"
] | permissive | ZHQ-air/mini-AlphaStar | 8aa22242334bd397fa398f2b865d2fc20fb1cab6 | 6039fd105bd263ee1f7c3276fea7fe7b660e0701 | refs/heads/main | 2023-07-03T16:10:13.712321 | 2021-08-17T02:59:56 | 2021-08-17T02:59:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,731 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
" Spatial Encoder."
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from alphastarmini.core.arch.entity_encoder import EntityEncoder
from alphastarmini.core.arch.entity_encoder import Entity
from alphastarmini.lib import utils as L
from alphastarmini.lib.hyper_parameters import Arch_Hyper_Parameters as AHP
from alphastarmini.lib.hyper_parameters import MiniStar_Arch_Hyper_Parameters as MAHP
__author__ = "Ruo-Ze Liu"
debug = False
class SpatialEncoder(nn.Module):
    '''
    Inputs: map, entity_embeddings
    Outputs:
        embedded_spatial - A 1D tensor of the embedded map
        map_skip - Tensors of the outputs of intermediate computations
    '''

    def __init__(self, n_resblocks=4, original_32=AHP.original_32, 
                 original_64=AHP.original_64, 
                 original_128=AHP.original_128, 
                 original_256=AHP.original_256, 
                 original_512=AHP.original_512):
        super().__init__()
        self.inplanes = AHP.map_channels
        # 1x1 conv projecting the concatenated map planes down to 32 channels.
        self.project = nn.Conv2d(self.inplanes, original_32, kernel_size=1, stride=1,
                                 padding=0, bias=True)
        # ds means downsampling; each halves the spatial resolution (stride 2).
        self.ds_1 = nn.Conv2d(original_32, original_64, kernel_size=4, stride=2,
                              padding=1, bias=True)
        self.ds_2 = nn.Conv2d(original_64, original_128, kernel_size=4, stride=2,
                              padding=1, bias=True)
        self.ds_3 = nn.Conv2d(original_128, original_128, kernel_size=4, stride=2,
                              padding=1, bias=True)
        # Stack of residual blocks applied at the lowest resolution.
        self.resblock_stack = nn.ModuleList([
            ResBlock(inplanes=original_128, planes=original_128, stride=1, downsample=None)
            for _ in range(n_resblocks)])
        if AHP == MAHP:
            # note: in mAS, we replace 128x128 to 64x64, and the result 16x16 also to 8x8
            self.fc = nn.Linear(8 * 8 * original_128, original_256)
        else:
            self.fc = nn.Linear(16 * 16 * original_128, original_256)  # position-wise
        # 1D conv reducing each entity embedding to 32 channels for scattering.
        self.conv1 = nn.Conv1d(original_256, original_32, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.map_width = AHP.minimap_size

    def preprocess(self, obs, entity_embeddings):
        # Build the stacked minimap planes for this observation.
        # `entity_embeddings` is accepted but not used here.
        map_data = get_map_data(obs)
        return map_data

    def scatter(self, entity_embeddings, entity_x_y):
        # Scatter reduced entity embeddings onto an empty map at each
        # entity's (x, y) position; returns (batch, 32, H, W).
        # `entity_embeddings` are embedded through a size 32 1D convolution, followed by a ReLU,
        print("entity_embeddings.shape:", entity_embeddings.shape) if debug else None
        reduced_entity_embeddings = F.relu(self.conv1(entity_embeddings.transpose(1, 2))).transpose(1, 2)
        print("reduced_entity_embeddings.shape:", reduced_entity_embeddings.shape) if debug else None
        # then scattered into a map layer so that the size 32 vector at a specific
        # location corresponds to the units placed there.

        def bits2value(bits):
            # change from the bits to dec values (most significant bit first).
            l = len(bits)
            v = 0
            g = 1
            for i in range(l - 1, -1, -1):
                v += bits[i] * g
                g *= 2
            return v
        # shape [batch_size x entity_size x embedding_size]
        batch_size = reduced_entity_embeddings.shape[0]
        entity_size = reduced_entity_embeddings.shape[1]
        device = next(self.parameters()).device
        scatter_map = torch.zeros(batch_size, AHP.original_32, self.map_width, self.map_width, device=device)
        print("scatter_map.shape:", scatter_map.shape) if debug else None
        for i in range(batch_size):
            for j in range(entity_size):
                # can not be masked entity (masked entries carry -1e9 markers)
                if entity_x_y[i, j, 0] != -1e9:
                    # First 8 channels encode x in binary, last 8 encode y.
                    x = entity_x_y[i, j, :8]
                    y = entity_x_y[i, j, 8:]
                    x = bits2value(x)
                    y = bits2value(y)
                    print('x', x) if debug else None
                    print('y', y) if debug else None
                    # note, we reduce 128 to 64, so the x and y should also be
                    # 128 is half of 256, 64 is half of 128, so we divide by 4
                    x = int(x / 4)
                    y = int(y / 4)
                    # Accumulate (+=) so co-located entities sum their vectors.
                    scatter_map[i, :, y, x] += reduced_entity_embeddings[i, j, :]
        #print("scatter_map:", scatter_map[0, :, 23, 19]) if 1 else None
        return scatter_map

    def forward(self, x, entity_embeddings, entity_x_y):
        # Returns (map_skip, embedded_spatial): the low-resolution skip
        # feature map and the flat size-256 spatial embedding.
        scatter_map = self.scatter(entity_embeddings, entity_x_y)
        x = torch.cat([scatter_map, x], dim=1)
        # After preprocessing, the planes are concatenated, projected to 32 channels
        # by a 2D convolution with kernel size 1, passed through a ReLU
        x = F.relu(self.project(x))
        # then downsampled from 128x128 to 16x16 through 3 2D convolutions and ReLUs
        # with channel size 64, 128, and 128 respectively. 
        # The kernel size for those 3 downsampling convolutions is 4, and the stride is 2.
        # note: in mAS, we replace 128x128 to 64x64, and the result 16x16 also to 8x8
        # note: here we should add a relu after each conv2d
        x = F.relu(self.ds_1(x))
        x = F.relu(self.ds_2(x))
        x = F.relu(self.ds_3(x))
        # 4 ResBlocks with 128 channels and kernel size 3 and applied to the downsampled map, 
        # with the skip connections placed into `map_skip`.
        map_skip = x
        for resblock in self.resblock_stack:
            x = resblock(x)
            # note if we add the follow line, it will output "can not comput gradient error"
            # map_skip += x
            # so we try to change to the follow line, which will not make a in-place operation 
            map_skip = map_skip + x
        x = x.reshape(x.shape[0], -1)
        # The ResBlock output is embedded into a 1D tensor of size 256 by a linear layer 
        # and a ReLU, which becomes `embedded_spatial`.
        x = self.fc(x)
        embedded_spatial = F.relu(x)
        return map_skip, embedded_spatial
def get_map_data(obs, map_width=AHP.minimap_size, verbose=False):
    '''Build the (N, C, H, W) minimap feature tensor from an observation.

    TODO: camera: One-hot with maximum 2 of whether a location is within the camera, this refers to mimimap
    TODO: scattered_entities: 32 float values from entity embeddings
    default map_width is 128
    '''
    feature_minimap = obs["feature_minimap"] if "feature_minimap" in obs else obs
    save_type = np.float32

    def raw_plane(name):
        # One named minimap plane reshaped to (N, H, W).
        return feature_minimap[name].reshape(-1, map_width, map_width)

    # A: height_map: Float of (height_map / 255.0)
    height_map = np.expand_dims(raw_plane("height_map") / 255.0, -1).astype(save_type)
    if verbose:
        print('height_map:', height_map)
        print('height_map.shape:', height_map.shape)
    # A: visibility: One-hot with maximum 4
    visibility = L.np_one_hot(raw_plane("visibility_map"), 4).astype(save_type)
    if verbose:
        print('visibility:', visibility)
        print('visibility.shape:', visibility.shape)
    # A: creep: One-hot with maximum 2
    creep = L.np_one_hot(raw_plane("creep"), 2).astype(save_type)
    if verbose:
        print('creep:', creep)
    # A: entity_owners: One-hot with maximum 5
    entity_owners = L.np_one_hot(raw_plane("player_relative"), 5).astype(save_type)
    if verbose:
        print('entity_owners:', entity_owners)
    # the bottom 3 maps are missed in pysc1.2 and pysc2.0
    # however, the 3 maps can be found on s2clientprotocol/spatial.proto
    # actually, the 3 maps can be found on pysc3.0
    # A: alerts: One-hot with maximum 2
    alerts = L.np_one_hot(raw_plane("alerts"), 2).astype(save_type)
    if verbose:
        print('alerts:', alerts)
    # A: pathable: One-hot with maximum 2
    pathable = L.np_one_hot(raw_plane("pathable"), 2).astype(save_type)
    if verbose:
        print('pathable:', pathable)
    # A: buildable: One-hot with maximum 2
    buildable = L.np_one_hot(raw_plane("buildable"), 2).astype(save_type)
    if verbose:
        print('buildable:', buildable)
    out_channels = 1 + 4 + 2 + 5 + 2 + 2 + 2  # total channel count, for reference
    planes = [height_map, visibility, creep, entity_owners,
              alerts, pathable, buildable]
    map_data = np.concatenate(planes, axis=3)
    # NHWC -> NCHW for the convolutional encoder.
    map_data = np.transpose(map_data, [0, 3, 1, 2])
    if verbose:
        print('map_data.shape:', map_data.shape)
    map_data = torch.tensor(map_data)
    if verbose:
        print('torch map_data.shape:', map_data.shape)
    return map_data
class ResBlock(nn.Module):
    """Post-activation 2-D residual block: (conv-BN-ReLU, conv-BN) + skip."""

    def __init__(self, inplanes=128, planes=128, stride=1, downsample=None):
        super().__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU()
        # Optional module mapping the input onto the residual branch's shape.
        self.downsample = downsample

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + shortcut)
class GatedResBlock(nn.Module):
    """Residual block with sigmoid-gated convolutions.

    Each convolution's output is modulated element-wise by the sigmoid of a
    parallel "mask" convolution of the same input, before batch-norm and the
    residual addition.
    """

    def __init__(self, inplanes=128, planes=128, stride=1, downsample=None):
        # Bug fix: the original called super(ResBlock, self).__init__(),
        # which raises TypeError because self is a GatedResBlock, not a
        # ResBlock.  Use the zero-argument form like the rest of the file.
        super().__init__()
        self.sigmoid = nn.Sigmoid()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.conv1_mask = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                                    padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        # Bug fix: conv2_mask is applied to the first stage's output, which
        # has `planes` channels; the original declared `inplanes` input
        # channels, breaking any inplanes != planes configuration (when
        # inplanes == planes, the only case that worked, this is identical).
        self.conv2_mask = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                                    padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Accepted for signature parity with ResBlock; not used in forward().
        self.downsample = downsample

    def forward(self, x):
        """Gated conv -> BN (-> ReLU) twice, plus the identity shortcut."""
        residual = x
        x = F.relu(self.bn1(self.conv1(x) * self.sigmoid(self.conv1_mask(x))))
        x = self.bn2(self.conv2(x) * self.sigmoid(self.conv2_mask(x)))
        x = x + residual
        x = F.relu(x)
        return x
class ResBlockImproved(nn.Module):
    '''Pre-activation residual block.

    From paper Identity Mappings in Deep Residual Networks.
    '''

    def __init__(self, inplanes=128, planes=128, stride=1, downsample=None):
        super().__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

    def forward(self, x):
        # Pre-activation ordering: BN -> ReLU -> conv, twice, then add skip.
        shortcut = x
        out = self.conv1(F.relu(self.bn1(x)))
        out = self.conv2(F.relu(self.bn2(out)))
        return out + shortcut
class ResBlock1D(nn.Module):
    """Pre-activation 1-D residual block normalised with LayerNorm."""

    def __init__(self, inplanes, planes, seq_len, stride=1, downsample=None):
        super().__init__()
        self.conv1 = nn.Conv1d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.ln1 = nn.LayerNorm([planes, seq_len])
        self.conv2 = nn.Conv1d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.ln2 = nn.LayerNorm([planes, seq_len])

    def forward(self, x):
        # LayerNorm -> ReLU -> conv, twice, then the identity shortcut.
        # NOTE: ln1 normalises over [planes, seq_len], so the block assumes
        # inplanes == planes.
        shortcut = x
        out = self.conv1(F.relu(self.ln1(x)))
        out = self.conv2(F.relu(self.ln2(out)))
        return out + shortcut
def test():
    # Smoke test: build a SpatialEncoder and run a dummy map through it.
    # NOTE(review): SpatialEncoder.forward takes (x, entity_embeddings,
    # entity_x_y), but this call passes only map_data -- it will raise a
    # TypeError as written.  Fixing it needs entity tensors shaped per AHP.
    spatial_encoder = SpatialEncoder()
    batch_size = 2
    # dummy map list
    map_list = []
    map_data_1 = torch.zeros(batch_size, 1, AHP.minimap_size, AHP.minimap_size)
    map_data_1_one_hot = L.to_one_hot(map_data_1, 2)
    print('map_data_1_one_hot.shape:', map_data_1_one_hot.shape) if debug else None
    map_list.append(map_data_1)
    map_data_2 = torch.zeros(batch_size, 17, AHP.minimap_size, AHP.minimap_size)
    map_list.append(map_data_2)
    map_data = torch.cat(map_list, dim=1)
    map_skip, embedded_spatial = spatial_encoder.forward(map_data)
    print('map_skip:', map_skip) if debug else None
    print('embedded_spatial:', embedded_spatial) if debug else None
    print('map_skip.shape:', map_skip.shape) if debug else None
    print('embedded_spatial.shape:', embedded_spatial.shape) if debug else None
    if debug:
        print("This is a test!")
# Run the smoke test only when executed as a script.
if __name__ == '__main__':
    test()
| [
"liuruoze@163.com"
] | liuruoze@163.com |
4e414b5883bac2396bd27f9c98ec8e8c1ef065f7 | 07e0d893b0380961febf6ba87a18a6dd0ee25d52 | /Resume/ResumeData/apps.py | 7693d62e7e489592d08994bef1e7637380879b34 | [] | no_license | skjalal123/Resume | 2480b09168a3b6360f057b12cffa48d671724940 | 7e74e3becddb3669e33f60669c49c18d25b20a2d | refs/heads/main | 2023-04-26T10:49:52.010518 | 2021-05-22T15:00:53 | 2021-05-22T15:00:53 | 369,829,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from django.apps import AppConfig
class ResumedataConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'ResumeData'
| [
"wasimakram24@live.com"
] | wasimakram24@live.com |
02c5557d76cc9ea35a84233bfabcb2655e99f072 | 4403dffe7ccfab872af99c596c908e64e62f77ee | /artvart.py | 894d9e0a3dc0df5983cf88de0d096f9f7243f0fd | [] | no_license | kuolius/TicTacToe-AI | 103d1edaff6e09e0d02226a499b6f6b6f89d1fbe | 530c8d0cab6368ab36b78fc110f8af542c761fc6 | refs/heads/master | 2021-01-17T15:05:57.140615 | 2016-10-24T15:18:05 | 2016-10-24T15:18:05 | 71,801,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,994 | py | import pickle
import random
import time
import memedit
class Win():
def __init__(self,n):
self.sp=[" " for i in range(n)]
def get_sp(self,coord):
return self.sp[coord]
def set_sp(self,coord):
self.sp[coord]="x"
def set_contr(self,coord):
self.sp[coord]="o"
class Game:
def __init__(self):
self.n=33
self.win=[[] for i in range(self.n)]
for i in range(self.n):
self.win[i]=Win(self.n)
self.gameWin=False
self.gameLost=False
self.oturn=True
self.xturn=False
self.f=open("mem.txt","rb")
self.mem=pickle.load(self.f)
self.f.close()
self.movm=[]
self.mov=[0,0]
self.omov=[0,0]
self.lbound=0
self.rbound=0
self.ubound=0
self.dbound=0
self.end=False
self.boundwidth=0
self.boundheight=0
def expand(self):
self.n+=2
ret=[[] for i in range(self.n)]
for i in range(self.n):
ret[i]=Win(self.n)
for i in range(1,self.n-1):
for j in range(1,self.n-1):
if self.win[i-1].get_sp(j-1)=="x":
ret[i].set_sp(j)
elif self.win[i-1].get_sp(j-1)=="o":
ret[i].set_contr(j)
self.win=ret
    def o_turn(self):
        """Make one move for "o" (the program) and record/learn from it.

        Strategy, in priority order:
          1. extend an own 4-in-a-row (win),
          2. block an opponent 4-in-a-row,
          3. block a stored losing pattern from self.mem ("agressive defense"),
          4. complete a stored winning pattern from self.mem ("agressive respond"),
          5. otherwise play next to a random reachable "o" (or next to an
             "x", or at (1, 1) as the very first move).
        Afterwards it checks for five-in-a-row, expands the board if a mark
        touches the edge, records the move in self.movm, and on a win saves
        the decisive pattern to mem.txt for future games.
        """
        win=self.win
        gameWin=self.gameWin
        gameLost=self.gameLost
        oturn=self.oturn
        n=self.n
        pressure=False
        #checking bounds
        o=False
        for i in range(n):
            if o:
                break
            for j in range(n):
                if self.win[j].sp[i]!=" ":
                    self.lbound=i
                    o=True
                    break
        o=False
        for i in range(n-1,-1,-1):
            if o:
                break
            for j in range(n):
                if self.win[j].sp[i]!=" ":
                    self.rbound=i
                    o=True
                    break
        o=False
        for i in range(n):
            if o:
                break
            for j in range(n):
                if self.win[i].sp[j]!=" ":
                    self.ubound=i
                    o=True
                    break
        o=False
        for i in range(n-1,-1,-1):
            if o:
                break
            for j in range(n):
                if self.win[i].sp[j]!=" ":
                    self.dbound=i
                    o=True
                    break
        #Shrinks to the bounds
        temp=[]
        for i in range(self.ubound,self.dbound+1):
            temp.append([])
            for j in range(self.lbound,self.rbound+1):
                temp[i-self.ubound].append(self.win[i].sp[j])
        #checking 4 o in a row agressive
        # (vertical, horizontal, diagonal and anti-diagonal runs; play at
        # whichever open end exists to complete five in a row)
        for i in range(n):
            if oturn==False:
                break
            for j in range(n):
                if i<n-3 and win[i].get_sp(j)=="o" and win[i+1].get_sp(j)=="o" and win[i+2].get_sp(j)=="o" and win[i+3].get_sp(j)=="o" and i+4<n and win[i+4].get_sp(j)!="o"and win[i+4].get_sp(j)!="x" :
                    win[i+4].set_contr(j)
                    self.mov[0]=i+4
                    self.mov[1]=j
                    oturn=False
                    break
                elif i<n-3 and win[i].get_sp(j)=="o" and win[i+1].get_sp(j)=="o" and win[i+2].get_sp(j)=="o" and win[i+3].get_sp(j)=="o" and i-1>=0 and win[i-1].get_sp(j)!="o"and win[i-1].get_sp(j)!="x":
                    win[i-1].set_contr(j)
                    self.mov[0]=i-1
                    self.mov[1]=j
                    oturn=False
                    break
                if j<n-3 and win[i].get_sp(j)=="o" and win[i].get_sp(j+1)=="o" and win[i].get_sp(j+2)=="o" and win[i].get_sp(j+3)=="o" and j+4<n and win[i].get_sp(j+4)!="o"and win[i].get_sp(j+4)!="x":
                    win[i].set_contr(j+4)
                    self.mov[0]=i
                    self.mov[1]=j+4
                    oturn=False
                    break
                elif j<n-3 and win[i].get_sp(j)=="o" and win[i].get_sp(j+1)=="o" and win[i].get_sp(j+2)=="o" and win[i].get_sp(j+3)=="o" and j-1>=0 and win[i].get_sp(j-1)!="o"and win[i].get_sp(j-1)!="x":
                    win[i].set_contr(j-1)
                    self.mov[0]=i
                    self.mov[1]=j-1
                    oturn=False
                    break
                if j<n-3 and i<n-3 and win[i].get_sp(j)=="o" and win[i+1].get_sp(j+1)=="o" and win[i+2].get_sp(j+2)=="o" and win[i+3].get_sp(j+3)=="o" and i+4<n and j+4<n and win[i+4].get_sp(j+4)!="o"and win[i+4].get_sp(j+4)!="x":
                    win[i+4].set_contr(j+4)
                    self.mov[0]=i+4
                    self.mov[1]=j+4
                    oturn=False
                    break
                elif j<n-3 and i<n-3 and win[i].get_sp(j)=="o" and win[i+1].get_sp(j+1)=="o" and win[i+2].get_sp(j+2)=="o" and win[i+3].get_sp(j+3)=="o" and i-1>=0 and j-1>=0 and win[i-1].get_sp(j-1)!="o"and win[i-1].get_sp(j-1)!="x":
                    win[i-1].set_contr(j-1)
                    self.mov[0]=i-1
                    self.mov[1]=j-1
                    oturn=False
                    break
                if j>=3 and i<n-3 and win[i].get_sp(j)=="o" and win[i+1].get_sp(j-1)=="o" and win[i+2].get_sp(j-2)=="o" and win[i+3].get_sp(j-3)=="o" and i+4<n and j-4>=0 and win[i+4].get_sp(j-4)!="o"and win[i+4].get_sp(j-4)!="x":
                    win[i+4].set_contr(j-4)
                    self.mov[0]=i+4
                    self.mov[1]=j-4
                    oturn=False
                    break
                elif j>=3 and i<n-3 and win[i].get_sp(j)=="o" and win[i+1].get_sp(j-1)=="o" and win[i+2].get_sp(j-2)=="o" and win[i+3].get_sp(j-3)=="o" and i-1>=0 and j+1<n and win[i-1].get_sp(j+1)!="o"and win[i-1].get_sp(j+1)!="x":
                    win[i-1].set_contr(j+1)
                    self.mov[0]=i-1
                    self.mov[1]=j+1
                    oturn=False
                    break
        #checking 4 x in a row defense
        # (same scan for the opponent; a block here is a forced move, so
        # pressure is set and the move is not learned from on a win)
        for i in range(n):
            if oturn==False:
                break
            for j in range(n):
                if i<n-3 and win[i].get_sp(j)=="x" and win[i+1].get_sp(j)=="x" and win[i+2].get_sp(j)=="x" and win[i+3].get_sp(j)=="x" and i+4<n and win[i+4].get_sp(j)!="o"and win[i+4].get_sp(j)!="x" :
                    win[i+4].set_contr(j)
                    self.mov[0]=i+4
                    self.mov[1]=j
                    oturn=False
                    pressure=True
                    break
                elif i<n-3 and win[i].get_sp(j)=="x" and win[i+1].get_sp(j)=="x" and win[i+2].get_sp(j)=="x" and win[i+3].get_sp(j)=="x" and i-1>=0 and win[i-1].get_sp(j)!="o"and win[i-1].get_sp(j)!="x":
                    win[i-1].set_contr(j)
                    self.mov[0]=i-1
                    self.mov[1]=j
                    oturn=False
                    pressure=True
                    break
                if j<n-3 and win[i].get_sp(j)=="x" and win[i].get_sp(j+1)=="x" and win[i].get_sp(j+2)=="x" and win[i].get_sp(j+3)=="x" and j+4<n and win[i].get_sp(j+4)!="o"and win[i].get_sp(j+4)!="x":
                    win[i].set_contr(j+4)
                    self.mov[0]=i
                    self.mov[1]=j+4
                    oturn=False
                    pressure=True
                    break
                elif j<n-3 and win[i].get_sp(j)=="x" and win[i].get_sp(j+1)=="x" and win[i].get_sp(j+2)=="x" and win[i].get_sp(j+3)=="x" and j-1>=0 and win[i].get_sp(j-1)!="o"and win[i].get_sp(j-1)!="x":
                    win[i].set_contr(j-1)
                    self.mov[0]=i
                    self.mov[1]=j-1
                    oturn=False
                    pressure=True
                    break
                if j<n-3 and i<n-3 and win[i].get_sp(j)=="x" and win[i+1].get_sp(j+1)=="x" and win[i+2].get_sp(j+2)=="x" and win[i+3].get_sp(j+3)=="x" and i+4<n and j+4<n and win[i+4].get_sp(j+4)!="o"and win[i+4].get_sp(j+4)!="x":
                    win[i+4].set_contr(j+4)
                    self.mov[0]=i+4
                    self.mov[1]=j+4
                    oturn=False
                    pressure=True
                    break
                elif j<n-3 and i<n-3 and win[i].get_sp(j)=="x" and win[i+1].get_sp(j+1)=="x" and win[i+2].get_sp(j+2)=="x" and win[i+3].get_sp(j+3)=="x" and i-1>=0 and j-1>=0 and win[i-1].get_sp(j-1)!="o"and win[i-1].get_sp(j-1)!="x":
                    win[i-1].set_contr(j-1)
                    self.mov[0]=i-1
                    self.mov[1]=j-1
                    oturn=False
                    pressure=True
                    break
                if j>=3 and i<n-3 and win[i].get_sp(j)=="x" and win[i+1].get_sp(j-1)=="x" and win[i+2].get_sp(j-2)=="x" and win[i+3].get_sp(j-3)=="x" and i+4<n and j-4>=0 and win[i+4].get_sp(j-4)!="o"and win[i+4].get_sp(j-4)!="x":
                    win[i+4].set_contr(j-4)
                    self.mov[0]=i+4
                    self.mov[1]=j-4
                    oturn=False
                    pressure=True
                    break
                elif j>=3 and i<n-3 and win[i].get_sp(j)=="x" and win[i+1].get_sp(j-1)=="x" and win[i+2].get_sp(j-2)=="x" and win[i+3].get_sp(j-3)=="x" and i-1>=0 and j+1<n and win[i-1].get_sp(j+1)!="o"and win[i-1].get_sp(j+1)!="x":
                    win[i-1].set_contr(j+1)
                    self.mov[0]=i-1
                    self.mov[1]=j+1
                    oturn=False
                    pressure=True
                    break
        #agressive defense
        # Slide each learned pattern over the occupied region; pattern cells:
        # "x" must match an opponent mark, "+" must not be blocked by "o",
        # "*" must be empty and is the square played (as a block).
        o=False
        b=False
        indexi=0
        indexj=0
        for matrix in self.mem:
            if oturn==False:
                break
            #checking bounds in matrix
            for i in range(len(matrix)):
                for j in range(len(matrix[i])):
                    if matrix[i][j]=="*":
                        stari=i
                        starj=j
            o=False
            lbound=0
            rbound=0
            dbound=0
            ubound=0
            for i in range(len(matrix[0])):
                if o:
                    break
                for j in range(len(matrix)):
                    if matrix[j][i]!=" " and matrix[j][i]!="+" and matrix[j][i]!="*":
                        lbound=i
                        o=True
                        break
            o=False
            for i in range(len(matrix[0])-1,-1,-1):
                if o:
                    break
                for j in range(len(matrix)):
                    if matrix[j][i]!=" " and matrix[j][i]!="+" and matrix[j][i]!="*":
                        rbound=i
                        o=True
                        break
            o=False
            for i in range(len(matrix)):
                if o:
                    break
                for j in range(len(matrix[0])):
                    if matrix[i][j]!=" " and matrix[i][j]!="+" and matrix[i][j]!="*":
                        ubound=i
                        o=True
                        break
            o=False
            for i in range(len(matrix)-1,-1,-1):
                if o:
                    break
                for j in range(len(matrix[0])):
                    if matrix[i][j]!=" " and matrix[i][j]!="+" and matrix[i][j]!="*":
                        dbound=i
                        o=True
                        break
            if dbound-ubound>len(temp) or rbound-lbound>len(temp[0]):
                continue
            for istart in range(len(temp)):
                if b:
                    break
                mini=0
                maxi=0
                if istart>=ubound:
                    mini=ubound
                else:
                    mini=istart
                if len(temp)-1-istart-(dbound-ubound)>=len(matrix)-1-dbound:
                    maxi=len(matrix)-1-dbound
                else:
                    maxi=len(temp)-1-istart-(dbound-ubound)
                if maxi<0:
                    break
                for jstart in range(len(temp[0])):
                    o=False
                    minj=0
                    maxj=0
                    if jstart>=lbound:
                        minj=lbound
                    else:
                        minj=jstart
                    if len(temp[0])-1-jstart-(rbound-lbound)>=len(matrix[0])-1-rbound:
                        maxj=len(matrix[0])-1-rbound
                    else:
                        maxj=len(temp[0])-1-jstart-(rbound-lbound)
                    if maxj<0:
                        break
                    for i in range(ubound-mini,dbound+maxi+1):
                        if o:
                            break
                        for j in range(lbound-minj,rbound+maxj+1):
                            #if self.ubound+istart+i-2>=0 and self.ubound+istart+i-2<n and self.lbound+jstart+j-2>=0 and self.lbound+jstart+j-2<n:
                            if matrix[i][j]=="x" and win[self.ubound+istart+i-ubound].sp[self.lbound+jstart+j-lbound]!="x" or matrix[i][j]=="*" and win[self.ubound+istart+i-ubound].sp[self.lbound+jstart+j-lbound]!=" " or matrix[i][j]=="+" and win[self.ubound+istart+i-ubound].sp[self.lbound+jstart+j-lbound]=="o":
                                #print(win[self.ubound+istart+i-1].sp[self.lbound+jstart+j-1],matrix[i][j],self.ubound+istart+i-1,self.lbound+jstart+j-1,i,j)
                                o=True
                                break
                            """if matrix[i][j]=="*" :
                                indexi=self.ubound+istart+i-ubound+mini
                                indexj=self.lbound+jstart+j-lbound+minj
                            else:
                                if matrix[i][j]=="x" or matrix[i][j]=="*":
                                    o=True
                                    break
                            """
                    #print(o)
                    if o==False:
                        b=True
                        indexi=self.ubound+istart+stari-ubound
                        indexj=self.lbound+jstart+starj-lbound
                        win[indexi].set_contr(indexj)
                        self.mov[0]=indexi
                        self.mov[1]=indexj
                        #print(indexi,indexj)
                        oturn=False
                        pressure=True
                        break
        #agressive respond
        # Same pattern scan, but matched against our own "o" marks ("x"
        # pattern cells must hold "o"); playing "*" advances a known win.
        o=False
        b=False
        indexi=0
        indexj=0
        for matrix in self.mem:
            if oturn==False:
                break
            #checking bounds in matrix
            for i in range(len(matrix)):
                for j in range(len(matrix[i])):
                    if matrix[i][j]=="*":
                        stari=i
                        starj=j
            o=False
            lbound=0
            rbound=0
            dbound=0
            ubound=0
            for i in range(len(matrix[0])):
                if o:
                    break
                for j in range(len(matrix)):
                    if matrix[j][i]!=" " and matrix[j][i]!="+" and matrix[j][i]!="*":
                        lbound=i
                        o=True
                        break
            o=False
            for i in range(len(matrix[0])-1,-1,-1):
                if o:
                    break
                for j in range(len(matrix)):
                    if matrix[j][i]!=" " and matrix[j][i]!="+" and matrix[j][i]!="*":
                        rbound=i
                        o=True
                        break
            o=False
            for i in range(len(matrix)):
                if o:
                    break
                for j in range(len(matrix[0])):
                    if matrix[i][j]!=" " and matrix[i][j]!="+" and matrix[i][j]!="*":
                        ubound=i
                        o=True
                        break
            o=False
            for i in range(len(matrix)-1,-1,-1):
                if o:
                    break
                for j in range(len(matrix[0])):
                    if matrix[i][j]!=" " and matrix[i][j]!="+" and matrix[i][j]!="*":
                        dbound=i
                        o=True
                        break
            if dbound-ubound>len(temp) or rbound-lbound>len(temp[0]):
                continue
            for istart in range(len(temp)):
                if b:
                    break
                mini=0
                maxi=0
                if istart>=ubound:
                    mini=ubound
                else:
                    mini=istart
                if len(temp)-1-istart-(dbound-ubound)>=len(matrix)-1-dbound:
                    maxi=len(matrix)-1-dbound
                else:
                    maxi=len(temp)-1-istart-(dbound-ubound)
                if maxi<0:
                    break
                for jstart in range(len(temp[0])):
                    o=False
                    minj=0
                    maxj=0
                    if jstart>=lbound:
                        minj=lbound
                    else:
                        minj=jstart
                    if len(temp[0])-1-jstart-(rbound-lbound)>=len(matrix[0])-1-rbound:
                        maxj=len(matrix[0])-1-rbound
                    else:
                        maxj=len(temp[0])-1-jstart-(rbound-lbound)
                    if maxj<0:
                        break
                    for i in range(ubound-mini,dbound+maxi+1):
                        if o:
                            break
                        for j in range(lbound-minj,rbound+maxj+1):
                            #if self.ubound+istart+i-2>=0 and self.ubound+istart+i-2<n and self.lbound+jstart+j-2>=0 and self.lbound+jstart+j-2<n:
                            if matrix[i][j]=="x" and win[self.ubound+istart+i-ubound].sp[self.lbound+jstart+j-lbound]!="o" or matrix[i][j]=="*" and win[self.ubound+istart+i-ubound].sp[self.lbound+jstart+j-lbound]!=" " or matrix[i][j]=="+" and win[self.ubound+istart+i-ubound].sp[self.lbound+jstart+j-lbound]=="x":
                                #print(win[self.ubound+istart+i-1].sp[self.lbound+jstart+j-1],matrix[i][j],self.ubound+istart+i-1,self.lbound+jstart+j-1,i,j)
                                o=True
                                break
                            """if matrix[i][j]=="*" :
                                indexi=self.ubound+istart+i-ubound+mini
                                indexj=self.lbound+jstart+j-lbound+minj
                            else:
                                if matrix[i][j]=="x" or matrix[i][j]=="*":
                                    o=True
                                    break
                            """
                    #print(o)
                    if o==False:
                        b=True
                        indexi=self.ubound+istart+stari-ubound
                        indexj=self.lbound+jstart+starj-lbound
                        win[indexi].set_contr(indexj)
                        self.mov[0]=indexi
                        self.mov[1]=indexj
                        #print(indexi,indexj)
                        oturn=False
                        break
        # checking 1 x
        # Fallback: play in a random empty cell next to a random "o" that
        # still has free neighbours; if no "o" exists yet, play beside an "x".
        if oturn:
            ocords=[]
            for i in range(n):
                for j in range(n):
                    if win[i].get_sp(j)=="o" and (win[i+1].get_sp(j)==" " or win[i-1].get_sp(j)==" " or win[i].get_sp(j+1)==" " or win[i].get_sp(j-1)==" " or win[i-1].get_sp(j+1)==" " or win[i-1].get_sp(j-1)==" " or win[i+1].get_sp(j+1)==" " or win[i+1].get_sp(j-1)==" "):
                        ocords.append([i,j])
            if len(ocords)!=0:
                ocord=ocords[random.randrange(len(ocords))]
                xcords=[]
                if win[ocord[0]+1].get_sp(ocord[1])==" ":
                    xcords.append([ocord[0]+1,ocord[1]])
                if win[ocord[0]-1].get_sp(ocord[1])==" ":
                    xcords.append([ocord[0]-1,ocord[1]])
                if win[ocord[0]].get_sp(ocord[1]+1)==" ":
                    xcords.append([ocord[0],ocord[1]+1])
                if win[ocord[0]].get_sp(ocord[1]-1)==" ":
                    xcords.append([ocord[0],ocord[1]-1])
                if win[ocord[0]+1].get_sp(ocord[1]+1)==" ":
                    xcords.append([ocord[0]+1,ocord[1]+1])
                if win[ocord[0]+1].get_sp(ocord[1]-1)==" ":
                    xcords.append([ocord[0]+1,ocord[1]-1])
                if win[ocord[0]-1].get_sp(ocord[1]+1)==" ":
                    xcords.append([ocord[0]-1,ocord[1]+1])
                if win[ocord[0]-1].get_sp(ocord[1]-1)==" ":
                    xcords.append([ocord[0]-1,ocord[1]-1])
                xcord=xcords[random.randrange(len(xcords))]
                #print(xcord,"XCOORD")
                win[xcord[0]].set_contr(xcord[1])
                self.mov[0]=xcord[0]
                self.mov[1]=xcord[1]
                oturn=False
            else:
                for i in range(n):
                    if oturn==False:
                        break
                    for j in range(n):
                        if win[i].get_sp(j)=="x" and j+1<n and win[i].get_sp(j+1)!="o" and win[i].get_sp(j+1)!="x":
                            win[i].set_contr(j+1)
                            self.mov[0]=i
                            self.mov[1]=j+1
                            oturn=False
                            break
                        elif win[i].get_sp(j)=="x" and win[i].get_sp(j-1)!="o" and win[i].get_sp(j-1)!="x":
                            win[i].set_contr(j-1)
                            self.mov[0]=i
                            self.mov[1]=j-1
                            oturn=False
                            break
        # first move
        if oturn:
            win[1].set_contr(1)
            self.mov[0]=1
            self.mov[1]=1
            oturn=False
        #checking if win (five "o" in a row in any direction)
        for i in range(n):
            if gameWin==True:
                break
            for j in range(n):
                if i<n-4 and win[i].get_sp(j)=="o" and win[i+1].get_sp(j)=="o" and win[i+2].get_sp(j)=="o" and win[i+3].get_sp(j)=="o" and win[i+4].get_sp(j)=="o":
                    gameWin=True
                    break
                if j<n-4 and win[i].get_sp(j)=="o" and win[i].get_sp(j+1)=="o" and win[i].get_sp(j+2)=="o" and win[i].get_sp(j+3)=="o" and win[i].get_sp(j+4)=="o":
                    gameWin=True
                    break
                if j<n-4 and i<n-4 and win[i].get_sp(j)=="o" and win[i+1].get_sp(j+1)=="o" and win[i+2].get_sp(j+2)=="o" and win[i+3].get_sp(j+3)=="o" and win[i+4].get_sp(j+4)=="o":
                    gameWin=True
                    break
                if j>=4 and i<n-4 and win[i].get_sp(j)=="o" and win[i+1].get_sp(j-1)=="o" and win[i+2].get_sp(j-2)=="o" and win[i+3].get_sp(j-3)=="o" and win[i+4].get_sp(j-4)=="o":
                    gameWin=True
                    break
        self.win=win
        self.gameWin=gameWin
        self.oturn=oturn
        expand=False
        # Expand the board when any mark touches (near) an edge.
        for i in range(self.n):
            if self.win[0].get_sp(i)!=" ":
                self.expand()
                expand=True
                break
            if self.win[self.n-2].get_sp(i)!=" ":
                self.expand()
                expand=True
                break
            if self.win[i].get_sp(0)!=" ":
                self.expand()
                expand=True
                break
            if self.win[i].get_sp(self.n-2)!=" ":
                self.expand()
                expand=True
                break
        #checking bounds (again, after the move and any expansion)
        o=False
        for i in range(n):
            if o:
                break
            for j in range(n):
                if self.win[j].sp[i]!=" ":
                    self.lbound=i
                    o=True
                    break
        o=False
        for i in range(n-1,-1,-1):
            if o:
                break
            for j in range(n):
                if self.win[j].sp[i]!=" ":
                    self.rbound=i
                    o=True
                    break
        o=False
        for i in range(n):
            if o:
                break
            for j in range(n):
                if self.win[i].sp[j]!=" ":
                    self.ubound=i
                    o=True
                    break
        o=False
        for i in range(n-1,-1,-1):
            if o:
                break
            for j in range(n):
                if self.win[i].sp[j]!=" ":
                    self.dbound=i
                    o=True
                    break
        #if self.gameWin:
            #pressure=True
        # Snapshot of the bounded region with a one-cell empty margin.
        temp=[]
        for i in range(self.ubound-1,self.dbound+2):
            temp.append([])
            for j in range(self.lbound-1,self.rbound+2):
                if i>=0 and i<n and j>=0 and j<n:
                    temp[i-self.ubound+1].append(self.win[i].sp[j])
                else:
                    temp[i-self.ubound+1].append(" ")
        # Re-express the move coordinates relative to the (possibly shifted)
        # bounding box so the history entries line up with the snapshot.
        expands=0
        if expand:
            expands=1
        if self.boundwidth>self.lbound-expands:
            self.omov[1]+=1
        if self.boundheight>self.ubound-expands:
            self.omov[0]+=1
        self.boundheight=self.ubound
        self.boundwidth=self.lbound
        self.mov[0]-=self.ubound-1
        self.mov[1]-=self.lbound-1
        #self.omov[0]-=self.ubound-1
        #self.omov[1]-=self.lbound-1
        if expand:
            self.mov[0]+=1
            self.mov[1]+=1
            #self.omov[0]+=1
            #self.omov[1]+=1
        # History record: [board snapshot, o move, x move, forced?, side].
        self.movm.append([temp,[self.mov[0],self.mov[1]],[self.omov[0],self.omov[1]],pressure,"o"])
        #print(self.movm[-1])
        for i in self.movm[-1][0]:
            for j in i:
                print(j,end="")
            print("")
        if pressure:
            print("FORCED")
        print("O-Turn")
        print("O-Move:",self.movm[-1][1])
        print("X-Move:",self.movm[-1][2])
        if self.gameWin==True:
            # Learn from the win: find the last unforced "x" record, swap the
            # marks so the pattern is from the winner's perspective, then
            # carve out the winning line ("x" kept, "*" = move played,
            # "+" = required-empty cell) and append it to mem.txt.
            index=0
            for i in range(len(self.movm)-1,0,-1):
                if not self.movm[i][3] and self.movm[i][4]=="x":
                    index=i
                    break
            for i in range(len(self.movm[index+1][0])):
                for j in range(len(self.movm[index+1][0][i])):
                    if self.movm[index+1][0][i][j]=="x":
                        self.movm[index+1][0][i][j]="o"
                    elif self.movm[index+1][0][i][j]=="o":
                        self.movm[index+1][0][i][j]="x"
            print("Laimejo-O:")
            for i in self.movm[index+1][0]:
                for j in i:
                    print(j,end="")
                print("")
            print(self.movm[index+1][1],self.movm[index+1][2])
            temp=[]
            for i in range(len(self.movm[index+1][0])):
                temp.append([])
                for j in range(len(self.movm[index+1][0][0])):
                    if self.movm[index+1][0][i][j]!="o" and (i==self.movm[index+1][1][0]+1 and j==self.movm[index+1][1][1]+1 or i==self.movm[index+1][1][0]+2 and j==self.movm[index+1][1][1]+2 or i==self.movm[index+1][1][0]+3 and j==self.movm[index+1][1][1]+3 or i==self.movm[index+1][1][0]+4 and j==self.movm[index+1][1][1]+4 or i==self.movm[index+1][1][0]+1 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]+2 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]+3 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]+4 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]+1 and j==self.movm[index+1][1][1]-1 or i==self.movm[index+1][1][0]+2 and j==self.movm[index+1][1][1]-2 or i==self.movm[index+1][1][0]+3 and j==self.movm[index+1][1][1]-3 or i==self.movm[index+1][1][0]+4 and j==self.movm[index+1][1][1]-4 or i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]-1 or i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]-2 or i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]-3 or i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]-4 or i==self.movm[index+1][1][0]-1 and j==self.movm[index+1][1][1]-1 or i==self.movm[index+1][1][0]-2 and j==self.movm[index+1][1][1]-2 or i==self.movm[index+1][1][0]-3 and j==self.movm[index+1][1][1]-3 or i==self.movm[index+1][1][0]-4 and j==self.movm[index+1][1][1]-4 or i==self.movm[index+1][1][0]-1 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]-2 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]-3 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]-4 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]-1 and j==self.movm[index+1][1][1]+1 or i==self.movm[index+1][1][0]-2 and j==self.movm[index+1][1][1]+2 or i==self.movm[index+1][1][0]-3 and j==self.movm[index+1][1][1]+3 or i==self.movm[index+1][1][0]-4 and j==self.movm[index+1][1][1]+4 or i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]+1 or i==self.movm[index+1][1][0] and 
j==self.movm[index+1][1][1]+2 or i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]+3 or i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]+4 or i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]):
                        if i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]:
                            #print("Sutampa:",self.movm[index+1][1][0],self.movm[index+1][1][1])
                            temp[i].append("*")
                        elif self.movm[index+1][0][i][j]==" ":
                            temp[i].append("+")
                        else:
                            temp[i].append(self.movm[index+1][0][i][j])
                    else:
                        if i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1] and (i==self.movm[index+1][1][0]+1 and j==self.movm[index+1][1][1]+1 or i==self.movm[index+1][1][0]+2 and j==self.movm[index+1][1][1]+2 or i==self.movm[index+1][1][0]+3 and j==self.movm[index+1][1][1]+3 or i==self.movm[index+1][1][0]+4 and j==self.movm[index+1][1][1]+4 or i==self.movm[index+1][1][0]+1 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]+2 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]+3 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]+4 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]+1 and j==self.movm[index+1][1][1]-1 or i==self.movm[index+1][1][0]+2 and j==self.movm[index+1][1][1]-2 or i==self.movm[index+1][1][0]+3 and j==self.movm[index+1][1][1]-3 or i==self.movm[index+1][1][0]+4 and j==self.movm[index+1][1][1]-4 or i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]-1 or i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]-2 or i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]-3 or i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]-4 or i==self.movm[index+1][1][0]-1 and j==self.movm[index+1][1][1]-1 or i==self.movm[index+1][1][0]-2 and j==self.movm[index+1][1][1]-2 or i==self.movm[index+1][1][0]-3 and j==self.movm[index+1][1][1]-3 or i==self.movm[index+1][1][0]-4 and j==self.movm[index+1][1][1]-4 or i==self.movm[index+1][1][0]-1 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]-2 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]-3 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]-4 and j==self.movm[index+1][1][1] or i==self.movm[index+1][1][0]-1 and j==self.movm[index+1][1][1]+1 or i==self.movm[index+1][1][0]-2 and j==self.movm[index+1][1][1]+2 or i==self.movm[index+1][1][0]-3 and j==self.movm[index+1][1][1]+3 or i==self.movm[index+1][1][0]-4 and j==self.movm[index+1][1][1]+4 or i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]+1 or 
i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]+2 or i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]+3 or i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1]+4 ):
                            temp[i].append("+")
                        else:
                            temp[i].append(" ")
            #print("")
            lbound=0
            rbound=0
            ubound=0
            dbound=0
            #print(temp)
            o=False
            for i in range(len(temp[0])):
                if o:
                    break
                for j in range(len(temp)):
                    if temp[j][i]!=" ":
                        lbound=i
                        o=True
                        break
            o=False
            for i in range(len(temp[0])-1,-1,-1):
                if o:
                    break
                for j in range(len(temp)):
                    if temp[j][i]!=" ":
                        rbound=i
                        o=True
                        break
            o=False
            for i in range(len(temp)):
                if o:
                    break
                for j in range(len(temp[0])):
                    if temp[i][j]!=" ":
                        ubound=i
                        o=True
                        break
            o=False
            for i in range(len(temp)-1,-1,-1):
                if o:
                    break
                for j in range(len(temp[0])):
                    if temp[i][j]!=" ":
                        dbound=i
                        o=True
                        break
            #print(temp)
            #print(dbound,rbound,lbound,ubound)
            # Crop the pattern to its occupied bounds before saving.
            temp1=[]
            for i in range(ubound,dbound+1):
                temp1.append([])
                for j in range(lbound,rbound+1):
                    temp1[i-ubound].append(temp[i][j])
            #print(temp1)
            f=open("mem.txt","wb")
            pickle.dump(self.mem+[temp1],f)
            f.close()
            memedit.clear()
            self.end=True
        self.xturn=True
def x_turn(self):
    """Play one move for X (the engine drives both sides of the game).

    Move selection, in priority order:
      1. complete an existing X four-in-a-row (aggressive);
      2. block an O four-in-a-row (defensive, sets the `pressure` flag);
      3. match a remembered pattern from self.mem defensively, then offensively
         (patterns are presumably positions saved from earlier finished games
         via mem.txt — TODO confirm);
      4. extend a random X cluster by one adjacent empty cell;
      5. fallback: place next to an existing O stone.
    Afterwards it detects an X five-in-a-row (gameLost), expands the board if
    the move reached an edge, records the move into self.movm, and on game end
    extracts and pickles the decisive pattern to mem.txt.
    """
    win=self.win
    gameLost=self.gameLost
    xturn=self.xturn
    n=self.n
    pressure=False
    #checking bounds (lbound/rbound/ubound/dbound = bounding box of all stones)
    o=False
    for i in range(n):
        if o:
            break
        for j in range(n):
            if self.win[j].sp[i]!=" ":
                self.lbound=i
                o=True
                break
    o=False
    for i in range(n-1,-1,-1):
        if o:
            break
        for j in range(n):
            if self.win[j].sp[i]!=" ":
                self.rbound=i
                o=True
                break
    o=False
    for i in range(n):
        if o:
            break
        for j in range(n):
            if self.win[i].sp[j]!=" ":
                self.ubound=i
                o=True
                break
    o=False
    for i in range(n-1,-1,-1):
        if o:
            break
        for j in range(n):
            if self.win[i].sp[j]!=" ":
                self.dbound=i
                o=True
                break
    # temp = snapshot of the occupied bounding box (used for pattern matching)
    temp=[]
    for i in range(self.ubound,self.dbound+1):
        temp.append([])
        for j in range(self.lbound,self.rbound+1):
            temp[i-self.ubound].append(self.win[i].sp[j])
    #checking 4 x in a row agressive
    # Four directions (vertical, horizontal, both diagonals); for each, try to
    # extend past either end of the run.  NOTE(review): in this method the
    # chosen X move is stored in self.omov.
    for i in range(n):
        if xturn==False:
            break
        for j in range(n):
            if i<n-3 and win[i].get_sp(j)=="x" and win[i+1].get_sp(j)=="x" and win[i+2].get_sp(j)=="x" and win[i+3].get_sp(j)=="x" and i+4<n and win[i+4].get_sp(j)!="o" and win[i+4].get_sp(j)!="x":
                win[i+4].set_sp(j)
                self.omov[0]=i+4
                self.omov[1]=j
                xturn=False
                break
            elif i<n-3 and win[i].get_sp(j)=="x" and win[i+1].get_sp(j)=="x" and win[i+2].get_sp(j)=="x" and win[i+3].get_sp(j)=="x" and i-1>=0 and win[i-1].get_sp(j)!="o" and win[i-1].get_sp(j)!="x":
                win[i-1].set_sp(j)
                self.omov[0]=i-1
                self.omov[1]=j
                xturn=False
                break
            if j<n-3 and win[i].get_sp(j)=="x" and win[i].get_sp(j+1)=="x" and win[i].get_sp(j+2)=="x" and win[i].get_sp(j+3)=="x" and j+4<n and win[i].get_sp(j+4)!="o" and win[i].get_sp(j+4)!="x":
                win[i].set_sp(j+4)
                self.omov[0]=i
                self.omov[1]=j+4
                xturn=False
                break
            elif j<n-3 and win[i].get_sp(j)=="x" and win[i].get_sp(j+1)=="x" and win[i].get_sp(j+2)=="x" and win[i].get_sp(j+3)=="x" and j-1>=0 and win[i].get_sp(j-1)!="o" and win[i].get_sp(j-1)!="x":
                win[i].set_sp(j-1)
                self.omov[0]=i
                self.omov[1]=j-1
                xturn=False
                break
            if j<n-3 and i<n-3 and win[i].get_sp(j)=="x" and win[i+1].get_sp(j+1)=="x" and win[i+2].get_sp(j+2)=="x" and win[i+3].get_sp(j+3)=="x" and i+4<n and j+4<n and win[i+4].get_sp(j+4)!="o" and win[i+4].get_sp(j+4)!="x":
                win[i+4].set_sp(j+4)
                self.omov[0]=i+4
                self.omov[1]=j+4
                xturn=False
                break
            elif j<n-3 and i<n-3 and win[i].get_sp(j)=="x" and win[i+1].get_sp(j+1)=="x" and win[i+2].get_sp(j+2)=="x" and win[i+3].get_sp(j+3)=="x" and i-1>=0 and j-1>=0 and win[i-1].get_sp(j-1)!="o" and win[i-1].get_sp(j-1)!="x":
                win[i-1].set_sp(j-1)
                self.omov[0]=i-1
                self.omov[1]=j-1
                xturn=False
                break
            if j>=3 and i<n-3 and win[i].get_sp(j)=="x" and win[i+1].get_sp(j-1)=="x" and win[i+2].get_sp(j-2)=="x" and win[i+3].get_sp(j-3)=="x" and i+4<n and j-4>=0 and win[i+4].get_sp(j-4)!="o" and win[i+4].get_sp(j-4)!="x":
                win[i+4].set_sp(j-4)
                self.omov[0]=i+4
                self.omov[1]=j-4
                xturn=False
                break
            elif j>=3 and i<n-3 and win[i].get_sp(j)=="x" and win[i+1].get_sp(j-1)=="x" and win[i+2].get_sp(j-2)=="x" and win[i+3].get_sp(j-3)=="x" and i-1>=0 and j+1<n and win[i-1].get_sp(j+1)!="o" and win[i-1].get_sp(j+1)!="x":
                win[i-1].set_sp(j+1)
                self.omov[0]=i-1
                self.omov[1]=j+1
                xturn=False
                break
    #checking 4 o in a row defensive
    # Same scan as above but blocking O runs; `pressure` marks a forced move.
    for i in range(n):
        if xturn==False:
            break
        for j in range(n):
            if i<n-3 and win[i].get_sp(j)=="o" and win[i+1].get_sp(j)=="o" and win[i+2].get_sp(j)=="o" and win[i+3].get_sp(j)=="o" and i+4<n and win[i+4].get_sp(j)!="o" and win[i+4].get_sp(j)!="x":
                win[i+4].set_sp(j)
                self.omov[0]=i+4
                self.omov[1]=j
                pressure=True
                xturn=False
                break
            elif i<n-3 and win[i].get_sp(j)=="o" and win[i+1].get_sp(j)=="o" and win[i+2].get_sp(j)=="o" and win[i+3].get_sp(j)=="o" and i-1>=0 and win[i-1].get_sp(j)!="o" and win[i-1].get_sp(j)!="x":
                win[i-1].set_sp(j)
                self.omov[0]=i-1
                self.omov[1]=j
                pressure=True
                xturn=False
                break
            if j<n-3 and win[i].get_sp(j)=="o" and win[i].get_sp(j+1)=="o" and win[i].get_sp(j+2)=="o" and win[i].get_sp(j+3)=="o" and j+4<n and win[i].get_sp(j+4)!="o" and win[i].get_sp(j+4)!="x":
                win[i].set_sp(j+4)
                self.omov[0]=i
                self.omov[1]=j+4
                xturn=False
                pressure=True
                break
            elif j<n-3 and win[i].get_sp(j)=="o" and win[i].get_sp(j+1)=="o" and win[i].get_sp(j+2)=="o" and win[i].get_sp(j+3)=="o" and j-1>=0 and win[i].get_sp(j-1)!="o" and win[i].get_sp(j-1)!="x":
                win[i].set_sp(j-1)
                self.omov[0]=i
                self.omov[1]=j-1
                xturn=False
                pressure=True
                break
            if j<n-3 and i<n-3 and win[i].get_sp(j)=="o" and win[i+1].get_sp(j+1)=="o" and win[i+2].get_sp(j+2)=="o" and win[i+3].get_sp(j+3)=="o" and i+4<n and j+4<n and win[i+4].get_sp(j+4)!="o" and win[i+4].get_sp(j+4)!="x":
                win[i+4].set_sp(j+4)
                self.omov[0]=i+4
                self.omov[1]=j+4
                xturn=False
                pressure=True
                break
            elif j<n-3 and i<n-3 and win[i].get_sp(j)=="o" and win[i+1].get_sp(j+1)=="o" and win[i+2].get_sp(j+2)=="o" and win[i+3].get_sp(j+3)=="o" and i-1>=0 and j-1>=0 and win[i-1].get_sp(j-1)!="o" and win[i-1].get_sp(j-1)!="x":
                win[i-1].set_sp(j-1)
                self.omov[0]=i-1
                self.omov[1]=j-1
                xturn=False
                pressure=True
                break
            if j>=3 and i<n-3 and win[i].get_sp(j)=="o" and win[i+1].get_sp(j-1)=="o" and win[i+2].get_sp(j-2)=="o" and win[i+3].get_sp(j-3)=="o" and i+4<n and j-4>=0 and win[i+4].get_sp(j-4)!="o" and win[i+4].get_sp(j-4)!="x":
                win[i+4].set_sp(j-4)
                self.omov[0]=i+4
                self.omov[1]=j-4
                xturn=False
                pressure=True
                break
            elif j>=3 and i<n-3 and win[i].get_sp(j)=="o" and win[i+1].get_sp(j-1)=="o" and win[i+2].get_sp(j-2)=="o" and win[i+3].get_sp(j-3)=="o" and i-1>=0 and j+1<n and win[i-1].get_sp(j+1)!="o" and win[i-1].get_sp(j+1)!="x":
                win[i-1].set_sp(j+1)
                self.omov[0]=i-1
                self.omov[1]=j+1
                xturn=False
                pressure=True
                break
    #agressive defense
    # Slide every remembered pattern in self.mem over every offset of the
    # current position; if a pattern matches ("x"/"+"/"*" cells checked against
    # the board), play its "*" cell.  This pass treats a match as a forced
    # (pressure) defensive move.
    o=False
    b=False
    indexi=0
    indexj=0
    for matrix in self.mem:
        if xturn==False:
            break
        #checking bounds in matrix
        # stari/starj = position of the pattern's "*" (the cell to play)
        for i in range(len(matrix)):
            for j in range(len(matrix[i])):
                if matrix[i][j]=="*":
                    stari=i
                    starj=j
        o=False
        lbound=0
        rbound=0
        dbound=0
        ubound=0
        for i in range(len(matrix[0])):
            if o:
                break
            for j in range(len(matrix)):
                if matrix[j][i]!=" " and matrix[j][i]!="+" and matrix[j][i]!="*":
                    lbound=i
                    o=True
                    break
        o=False
        for i in range(len(matrix[0])-1,-1,-1):
            if o:
                break
            for j in range(len(matrix)):
                if matrix[j][i]!=" " and matrix[j][i]!="+" and matrix[j][i]!="*":
                    rbound=i
                    o=True
                    break
        o=False
        for i in range(len(matrix)):
            if o:
                break
            for j in range(len(matrix[0])):
                if matrix[i][j]!=" " and matrix[i][j]!="+" and matrix[i][j]!="*":
                    ubound=i
                    o=True
                    break
        o=False
        for i in range(len(matrix)-1,-1,-1):
            if o:
                break
            for j in range(len(matrix[0])):
                if matrix[i][j]!=" " and matrix[i][j]!="+" and matrix[i][j]!="*":
                    dbound=i
                    o=True
                    break
        # pattern larger than the current position: cannot match, skip it
        if dbound-ubound>len(temp) or rbound-lbound>len(temp[0]):
            continue
        for istart in range(len(temp)):
            if b:
                break
            mini=0
            maxi=0
            if istart>=ubound:
                mini=ubound
            else:
                mini=istart
            if len(temp)-1-istart-(dbound-ubound)>=len(matrix)-1-dbound:
                maxi=len(matrix)-1-dbound
            else:
                maxi=len(temp)-1-istart-(dbound-ubound)
            if maxi<0:
                break
            for jstart in range(len(temp[0])):
                o=False
                minj=0
                maxj=0
                if jstart>=lbound:
                    minj=lbound
                else:
                    minj=jstart
                if len(temp[0])-1-jstart-(rbound-lbound)>=len(matrix[0])-1-rbound:
                    maxj=len(matrix[0])-1-rbound
                else:
                    maxj=len(temp[0])-1-jstart-(rbound-lbound)
                if maxj<0:
                    break
                # o becomes True on the first cell that contradicts the pattern
                for i in range(ubound-mini,dbound+maxi+1):
                    if o:
                        break
                    for j in range(lbound-minj,rbound+maxj+1):
                        #if self.ubound+istart+i-2>=0 and self.ubound+istart+i-2<n and self.lbound+jstart+j-2>=0 and self.lbound+jstart+j-2<n:
                        if matrix[i][j]=="x" and win[self.ubound+istart+i-ubound].sp[self.lbound+jstart+j-lbound]!="o" or matrix[i][j]=="*" and win[self.ubound+istart+i-ubound].sp[self.lbound+jstart+j-lbound]!=" " or matrix[i][j]=="+" and win[self.ubound+istart+i-ubound].sp[self.lbound+jstart+j-lbound]=="x":
                            #print(win[self.ubound+istart+i-1].sp[self.lbound+jstart+j-1],matrix[i][j],self.ubound+istart+i-1,self.lbound+jstart+j-1,i,j)
                            o=True
                            break
                        """if matrix[i][j]=="*" :
                        indexi=self.ubound+istart+i-ubound+mini
                        indexj=self.lbound+jstart+j-lbound+minj
                        else:
                        if matrix[i][j]=="x" or matrix[i][j]=="*":
                        o=True
                        break
                        """
                #print(o)
                if o==False:
                    b=True
                    indexi=self.ubound+istart+stari-ubound
                    indexj=self.lbound+jstart+starj-lbound
                    win[indexi].set_sp(indexj)
                    self.omov[0]=indexi
                    self.omov[1]=indexj
                    #print(indexi,indexj)
                    xturn=False
                    pressure=True
                    break
    #agressive respond
    # Same sliding-pattern scan with the colours swapped (pattern "x" cells
    # must match X stones, "+" cells must not be O) — an offensive use of the
    # same memory; no pressure flag this time.
    o=False
    b=False
    indexi=0
    indexj=0
    for matrix in self.mem:
        if xturn==False:
            break
        #checking bounds in matrix
        for i in range(len(matrix)):
            for j in range(len(matrix[i])):
                if matrix[i][j]=="*":
                    stari=i
                    starj=j
        o=False
        lbound=0
        rbound=0
        dbound=0
        ubound=0
        for i in range(len(matrix[0])):
            if o:
                break
            for j in range(len(matrix)):
                if matrix[j][i]!=" " and matrix[j][i]!="+" and matrix[j][i]!="*":
                    lbound=i
                    o=True
                    break
        o=False
        for i in range(len(matrix[0])-1,-1,-1):
            if o:
                break
            for j in range(len(matrix)):
                if matrix[j][i]!=" " and matrix[j][i]!="+" and matrix[j][i]!="*":
                    rbound=i
                    o=True
                    break
        o=False
        for i in range(len(matrix)):
            if o:
                break
            for j in range(len(matrix[0])):
                if matrix[i][j]!=" " and matrix[i][j]!="+" and matrix[i][j]!="*":
                    ubound=i
                    o=True
                    break
        o=False
        for i in range(len(matrix)-1,-1,-1):
            if o:
                break
            for j in range(len(matrix[0])):
                if matrix[i][j]!=" " and matrix[i][j]!="+" and matrix[i][j]!="*":
                    dbound=i
                    o=True
                    break
        if dbound-ubound>len(temp) or rbound-lbound>len(temp[0]):
            continue
        for istart in range(len(temp)):
            if b:
                break
            mini=0
            maxi=0
            if istart>=ubound:
                mini=ubound
            else:
                mini=istart
            if len(temp)-1-istart-(dbound-ubound)>=len(matrix)-1-dbound:
                maxi=len(matrix)-1-dbound
            else:
                maxi=len(temp)-1-istart-(dbound-ubound)
            if maxi<0:
                break
            for jstart in range(len(temp[0])):
                o=False
                minj=0
                maxj=0
                if jstart>=lbound:
                    minj=lbound
                else:
                    minj=jstart
                if len(temp[0])-1-jstart-(rbound-lbound)>=len(matrix[0])-1-rbound:
                    maxj=len(matrix[0])-1-rbound
                else:
                    maxj=len(temp[0])-1-jstart-(rbound-lbound)
                if maxj<0:
                    break
                for i in range(ubound-mini,dbound+maxi+1):
                    if o:
                        break
                    for j in range(lbound-minj,rbound+maxj+1):
                        #if self.ubound+istart+i-2>=0 and self.ubound+istart+i-2<n and self.lbound+jstart+j-2>=0 and self.lbound+jstart+j-2<n:
                        if matrix[i][j]=="x" and win[self.ubound+istart+i-ubound].sp[self.lbound+jstart+j-lbound]!="x" or matrix[i][j]=="*" and win[self.ubound+istart+i-ubound].sp[self.lbound+jstart+j-lbound]!=" " or matrix[i][j]=="+" and win[self.ubound+istart+i-ubound].sp[self.lbound+jstart+j-lbound]=="o":
                            #print(win[self.ubound+istart+i-1].sp[self.lbound+jstart+j-1],matrix[i][j],self.ubound+istart+i-1,self.lbound+jstart+j-1,i,j)
                            o=True
                            break
                        """if matrix[i][j]=="*" :
                        indexi=self.ubound+istart+i-ubound+mini
                        indexj=self.lbound+jstart+j-lbound+minj
                        else:
                        if matrix[i][j]=="x" or matrix[i][j]=="*":
                        o=True
                        break
                        """
                #print(o)
                if o==False:
                    b=True
                    indexi=self.ubound+istart+stari-ubound
                    indexj=self.lbound+jstart+starj-lbound
                    win[indexi].set_sp(indexj)
                    self.omov[0]=indexi
                    self.omov[1]=indexj
                    #print(indexi,indexj)
                    xturn=False
                    break
    # checking 1 x
    # No tactical move found: pick a random X stone that has an empty
    # neighbour and extend it into a random adjacent empty cell.
    if xturn:
        ocords=[]
        for i in range(n):
            for j in range(n):
                if win[i].get_sp(j)=="x" and (win[i+1].get_sp(j)==" " or win[i-1].get_sp(j)==" " or win[i].get_sp(j+1)==" " or win[i].get_sp(j-1)==" " or win[i-1].get_sp(j+1)==" " or win[i-1].get_sp(j-1)==" " or win[i+1].get_sp(j+1)==" " or win[i+1].get_sp(j-1)==" "):
                    ocords.append([i,j])
        if len(ocords)!=0:
            ocord=ocords[random.randrange(len(ocords))]
            xcords=[]
            if win[ocord[0]+1].get_sp(ocord[1])==" ":
                xcords.append([ocord[0]+1,ocord[1]])
            if win[ocord[0]-1].get_sp(ocord[1])==" ":
                xcords.append([ocord[0]-1,ocord[1]])
            if win[ocord[0]].get_sp(ocord[1]+1)==" ":
                xcords.append([ocord[0],ocord[1]+1])
            if win[ocord[0]].get_sp(ocord[1]-1)==" ":
                xcords.append([ocord[0],ocord[1]-1])
            if win[ocord[0]+1].get_sp(ocord[1]+1)==" ":
                xcords.append([ocord[0]+1,ocord[1]+1])
            if win[ocord[0]+1].get_sp(ocord[1]-1)==" ":
                xcords.append([ocord[0]+1,ocord[1]-1])
            if win[ocord[0]-1].get_sp(ocord[1]+1)==" ":
                xcords.append([ocord[0]-1,ocord[1]+1])
            if win[ocord[0]-1].get_sp(ocord[1]-1)==" ":
                xcords.append([ocord[0]-1,ocord[1]-1])
            xcord=xcords[random.randrange(len(xcords))]
            #print(xcord,"XCOORD")
            win[xcord[0]].set_sp(xcord[1])
            self.omov[0]=xcord[0]
            self.omov[1]=xcord[1]
            xturn=False
        else:
            # NOTE(review): this last-resort branch places X next to an O
            # stone (it scans for "o") — confirm that is intended.
            for i in range(n):
                if xturn==False:
                    break
                for j in range(n):
                    if win[i].get_sp(j)=="o" and j+1<n and win[i].get_sp(j+1)!="o" and win[i].get_sp(j+1)!="x":
                        win[i].set_sp(j+1)
                        self.omov[0]=i
                        self.omov[1]=j+1
                        xturn=False
                        break
                    elif win[i].get_sp(j)=="o" and win[i].get_sp(j-1)!="o" and win[i].get_sp(j-1)!="x":
                        win[i].set_sp(j-1)
                        self.omov[0]=i
                        self.omov[1]=j-1
                        xturn=False
                        break
    # Detect an X five-in-a-row in any of the four directions.
    for i in range(n):
        if gameLost==True:
            break
        for j in range(n):
            if i<n-4 and win[i].get_sp(j)=="x" and win[i+1].get_sp(j)=="x" and win[i+2].get_sp(j)=="x" and win[i+3].get_sp(j)=="x" and win[i+4].get_sp(j)=="x":
                gameLost=True
                break
            if j<n-4 and win[i].get_sp(j)=="x" and win[i].get_sp(j+1)=="x" and win[i].get_sp(j+2)=="x" and win[i].get_sp(j+3)=="x" and win[i].get_sp(j+4)=="x":
                gameLost=True
                break
            if j<n-4 and i<n-4 and win[i].get_sp(j)=="x" and win[i+1].get_sp(j+1)=="x" and win[i+2].get_sp(j+2)=="x" and win[i+3].get_sp(j+3)=="x" and win[i+4].get_sp(j+4)=="x":
                gameLost=True
                break
            if j>=4 and i<n-4 and win[i].get_sp(j)=="x" and win[i+1].get_sp(j-1)=="x" and win[i+2].get_sp(j-2)=="x" and win[i+3].get_sp(j-3)=="x" and win[i+4].get_sp(j-4)=="x":
                gameLost=True
                break
    #print("X-MoveReal:",self.omov)
    self.win=win
    self.gameLost=gameLost
    self.xturn=xturn
    # Expand the board once if any stone touches an outer edge.
    expand=False
    for i in range(self.n):
        if self.win[0].get_sp(i)!=" ":
            self.expand()
            expand=True
            break
        if self.win[self.n-2].get_sp(i)!=" ":
            self.expand()
            expand=True
            break
        if self.win[i].get_sp(0)!=" ":
            self.expand()
            expand=True
            break
        if self.win[i].get_sp(self.n-2)!=" ":
            self.expand()
            expand=True
            break
    #checking bounds
    # NOTE(review): these scans still use the pre-expand value of n captured
    # at the top of the method; self.n may have grown in expand() — confirm.
    o=False
    for i in range(n):
        if o:
            break
        for j in range(n):
            if self.win[j].sp[i]!=" ":
                self.lbound=i
                o=True
                break
    o=False
    for i in range(n-1,-1,-1):
        if o:
            break
        for j in range(n):
            if self.win[j].sp[i]!=" ":
                self.rbound=i
                o=True
                break
    o=False
    for i in range(n):
        if o:
            break
        for j in range(n):
            if self.win[i].sp[j]!=" ":
                self.ubound=i
                o=True
                break
    o=False
    for i in range(n-1,-1,-1):
        if o:
            break
        for j in range(n):
            if self.win[i].sp[j]!=" ":
                self.dbound=i
                o=True
                break
    #if self.gameLost:
    #pressure=True
    # Snapshot the bounding box with a one-cell empty margin.
    temp=[]
    for i in range(self.ubound-1,self.dbound+2):
        temp.append([])
        for j in range(self.lbound-1,self.rbound+2):
            if i<n and i>=0 and j<n and j>=0:
                temp[i-self.ubound+1].append(self.win[i].sp[j])
            else:
                temp[i-self.ubound+1].append(" ")
    # Re-base the stored move coordinates onto the snapshot's frame.
    expands=0
    if expand:
        expands=1
    if self.boundwidth>self.lbound-expands:
        self.mov[1]+=1
    if self.boundheight>self.ubound-expands:
        self.mov[0]+=1
    self.boundheight=self.ubound
    self.boundwidth=self.lbound
    #self.mov[0]-=self.ubound-1
    #self.mov[1]-=self.lbound-1
    self.omov[0]-=self.ubound-1
    self.omov[1]-=self.lbound-1
    if expand:
        #self.mov[0]+=1
        #self.mov[1]+=1
        self.omov[0]+=1
        self.omov[1]+=1
    # movm entry layout: [board snapshot, O move, X move, forced?, mover];
    # indices 3 and 4 are read back in the endgame pattern extraction below.
    self.movm.append([temp,[self.mov[0],self.mov[1]],[self.omov[0],self.omov[1]],pressure,"x"])
    #print(self.movm)
    for i in self.movm[-1][0]:
        for j in i:
            print(j,end="")
        print("")
    if pressure:
        print("FORCED")
    print("X-Turn")
    print("O-Move:",self.movm[-1][1])
    print("X-Move:",self.movm[-1][2])
    if self.gameLost==True:
        # X has won: find the last unforced O move, extract the pattern of
        # cells within distance 4 of the decisive X move, and append it to
        # the persistent memory (mem.txt).
        index=0
        for i in range(len(self.movm)-1,0,-1):
            if not self.movm[i][3] and self.movm[i][4]=="o":
                index=i
                break
        print("Laimejo-X:")
        for i in self.movm[index+1][0]:
            for j in i:
                print(j,end="")
            print("")
        print(self.movm[index+1][1],self.movm[index+1][2])
        # Keep cells on the 8 rays (length 4) around the X move: the X move
        # itself becomes "*", empty cells become "+", stones are copied.
        temp=[]
        for i in range(len(self.movm[index+1][0])):
            temp.append([])
            for j in range(len(self.movm[index+1][0][0])):
                if self.movm[index+1][0][i][j]!="o" and (i==self.movm[index+1][2][0]+1 and j==self.movm[index+1][2][1]+1 or i==self.movm[index+1][2][0]+2 and j==self.movm[index+1][2][1]+2 or i==self.movm[index+1][2][0]+3 and j==self.movm[index+1][2][1]+3 or i==self.movm[index+1][2][0]+4 and j==self.movm[index+1][2][1]+4 or i==self.movm[index+1][2][0]+1 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]+2 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]+3 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]+4 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]+1 and j==self.movm[index+1][2][1]-1 or i==self.movm[index+1][2][0]+2 and j==self.movm[index+1][2][1]-2 or i==self.movm[index+1][2][0]+3 and j==self.movm[index+1][2][1]-3 or i==self.movm[index+1][2][0]+4 and j==self.movm[index+1][2][1]-4 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]-1 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]-2 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]-3 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]-4 or i==self.movm[index+1][2][0]-1 and j==self.movm[index+1][2][1]-1 or i==self.movm[index+1][2][0]-2 and j==self.movm[index+1][2][1]-2 or i==self.movm[index+1][2][0]-3 and j==self.movm[index+1][2][1]-3 or i==self.movm[index+1][2][0]-4 and j==self.movm[index+1][2][1]-4 or i==self.movm[index+1][2][0]-1 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]-2 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]-3 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]-4 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]-1 and j==self.movm[index+1][2][1]+1 or i==self.movm[index+1][2][0]-2 and j==self.movm[index+1][2][1]+2 or i==self.movm[index+1][2][0]-3 and j==self.movm[index+1][2][1]+3 or i==self.movm[index+1][2][0]-4 and j==self.movm[index+1][2][1]+4 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]+1 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]+2 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]+3 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]+4 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]):
                    if i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]:
                        temp[i].append("*")
                    elif self.movm[index+1][0][i][j]==" ":
                        temp[i].append("+")
                    else:
                        temp[i].append(self.movm[index+1][0][i][j])
                else:
                    # The losing O move is kept as "+" when it lies on one of
                    # the rays around the X move.
                    if i==self.movm[index+1][1][0] and j==self.movm[index+1][1][1] and (i==self.movm[index+1][2][0]+1 and j==self.movm[index+1][2][1]+1 or i==self.movm[index+1][2][0]+2 and j==self.movm[index+1][2][1]+2 or i==self.movm[index+1][2][0]+3 and j==self.movm[index+1][2][1]+3 or i==self.movm[index+1][2][0]+4 and j==self.movm[index+1][2][1]+4 or i==self.movm[index+1][2][0]+1 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]+2 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]+3 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]+4 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]+1 and j==self.movm[index+1][2][1]-1 or i==self.movm[index+1][2][0]+2 and j==self.movm[index+1][2][1]-2 or i==self.movm[index+1][2][0]+3 and j==self.movm[index+1][2][1]-3 or i==self.movm[index+1][2][0]+4 and j==self.movm[index+1][2][1]-4 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]-1 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]-2 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]-3 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]-4 or i==self.movm[index+1][2][0]-1 and j==self.movm[index+1][2][1]-1 or i==self.movm[index+1][2][0]-2 and j==self.movm[index+1][2][1]-2 or i==self.movm[index+1][2][0]-3 and j==self.movm[index+1][2][1]-3 or i==self.movm[index+1][2][0]-4 and j==self.movm[index+1][2][1]-4 or i==self.movm[index+1][2][0]-1 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]-2 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]-3 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]-4 and j==self.movm[index+1][2][1] or i==self.movm[index+1][2][0]-1 and j==self.movm[index+1][2][1]+1 or i==self.movm[index+1][2][0]-2 and j==self.movm[index+1][2][1]+2 or i==self.movm[index+1][2][0]-3 and j==self.movm[index+1][2][1]+3 or i==self.movm[index+1][2][0]-4 and j==self.movm[index+1][2][1]+4 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]+1 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]+2 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]+3 or i==self.movm[index+1][2][0] and j==self.movm[index+1][2][1]+4):
                        temp[i].append("+")
                    else:
                        temp[i].append(" ")
        #print("")
        # Trim the extracted pattern to its own bounding box.
        lbound=0
        rbound=0
        ubound=0
        dbound=0
        #print(temp)
        o=False
        for i in range(len(temp[0])):
            if o:
                break
            for j in range(len(temp)):
                if temp[j][i]!=" ":
                    lbound=i
                    o=True
                    break
        o=False
        for i in range(len(temp[0])-1,-1,-1):
            if o:
                break
            for j in range(len(temp)):
                if temp[j][i]!=" ":
                    rbound=i
                    o=True
                    break
        o=False
        for i in range(len(temp)):
            if o:
                break
            for j in range(len(temp[0])):
                if temp[i][j]!=" ":
                    ubound=i
                    o=True
                    break
        o=False
        for i in range(len(temp)-1,-1,-1):
            if o:
                break
            for j in range(len(temp[0])):
                if temp[i][j]!=" ":
                    dbound=i
                    o=True
                    break
        #print(temp)
        #print(dbound,rbound,lbound,ubound)
        temp1=[]
        for i in range(ubound,dbound+1):
            temp1.append([])
            for j in range(lbound,rbound+1):
                temp1[i-ubound].append(temp[i][j])
        # Persist the grown pattern memory; memedit presumably resets the
        # editor state (defined elsewhere — TODO confirm).
        f=open("mem.txt","wb")
        pickle.dump(self.mem+[temp1],f)
        f.close()
        memedit.clear()
        self.end=True
        self.oturn=True
# Top-level driver: play the requested number of fully automated games,
# alternating O and X moves until one side's turn handler sets game.end.
n=int(input("How many PC vs PC games? "))
for i in range(n):
    game=Game()
    while True:
        game.o_turn()
        #time.sleep(2)
        if game.end:
            print("pabaiga")  # Lithuanian: "the end"
            break
        game.x_turn()
        if game.end:
            print("pabaiga")
            break
| [
"kuolius@gmail.com"
] | kuolius@gmail.com |
af1d1f6800491cc4d0c4dd91c57f770c204a9250 | 70f21e5fc61def7bf780602f633646ec32f09f74 | /examples/Hello_World.py | b39e51dbb21816e24f42ba8143ea070c87c20a59 | [] | no_license | QilinGu/xls-compare | 8675bf89efcf7a569878abf7dc7cb15bdea2efa5 | fcb7a751060586a1d728f672125f84881ed74532 | refs/heads/master | 2021-01-20T13:38:29.756960 | 2015-07-16T15:13:16 | 2015-07-16T15:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | #!/usr/bin/python
#coding:utf-8
import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import *
# 一个最简单的Hello World程序
app = QApplication(sys.argv)
b = QPushButton('Hello World!')
b.show()
app.connect(b, SIGNAL('clicked()'), app, SLOT('quit()'))
app.exec_() | [
"493722771@qq.com"
] | 493722771@qq.com |
eb2c8258f0156a186c1b5525851bf8627d0ebad7 | d7f43ee7b91c216b1740dead4cc348f3704d2f5a | /src/beginner_tutorials/scripts/add_two_ints_server.py~ | ef69b404916f90b0f5cf43bc27b89200b6fda426 | [] | no_license | capslockqq/catkin_ws | 26f734cf45cb5fe15301f5448a6005f2b21073b5 | a0989427e42988f36ae9e4d83ba7eb871a56b64e | refs/heads/master | 2021-08-24T07:04:07.551220 | 2017-12-08T14:42:19 | 2017-12-08T14:42:19 | 113,569,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | #!/usr/bin/env python
from beginner_tutorials.srv import
import rospy
def handle_add_two_ints(req):
print "Returning [%s + %s = %s]"%(req.a, req.b, (req.a + req.b))
return AddTwoIntsResponse(req.a + req.b)
def add_two_ints_server():
rospy.init_node('add_two_ints_server')
s = rospy.Service('add_two_ints', AddTwoInts, handle_add_two_ints)
print "Ready to add two ints."
rospy.spin()
if __name__ == "__main__":
add_two_ints_server()
| [
"ubuntu@ubuntu.(none)"
] | ubuntu@ubuntu.(none) | |
70fdca472a5e38ee8bfc23e6806ddea345662d96 | 5b224b970e412183f4d2a75ad5c8bc7a1a4ce649 | /multipoll/electoralsystems/utils/ranking.py | e5db63597cc608683f91549ae42a749f70add406 | [] | no_license | ruler501/multipoll | 25adbc7ef7ded4abe829891f10231b47201a2bf7 | 830be489deda31c5116afc66038f30d7fe5c820a | refs/heads/master | 2023-05-26T02:46:08.613426 | 2021-10-05T18:37:09 | 2021-10-05T18:37:09 | 209,223,032 | 1 | 0 | null | 2023-05-22T23:19:56 | 2019-09-18T05:13:46 | Python | UTF-8 | Python | false | false | 3,915 | py | from __future__ import annotations # noqa: T484
from dataclasses import dataclass
from functools import total_ordering
from typing import Iterable, List, Optional, Tuple
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import multipoll.models
class Ranking:
    """Normalized ranking view of a single vote's option weights.

    weights[i] is the rank-derived score for option i (None for unranked
    options); indexes lists option indices in ranking order, with unranked
    options appended at the end.
    """

    weights: List[Optional[float]]
    indexes: List[int]

    def __init__(self, vote: multipoll.models.FullVoteBase, collapse_ties: bool = True):
        options_count = len(vote.poll.options)
        # Keep only options the voter actually weighted, with their indices.
        enumerated_weights: List[Tuple[int, int]] = \
            [(i, w) for i, w in enumerate(vote.weights[:options_count]) if w is not None]
        if len(enumerated_weights) == 0:
            # No weights at all: everything unranked, natural index order.
            self.weights = [None for _ in range(options_count)]
            self.indexes = list(range(options_count))
            return
        # collapse_ties sorts ascending so equal weights share a rank;
        # otherwise descending so later positions get strictly lower scores.
        prelims = sorted(enumerated_weights, key=lambda x: x[1], reverse=not collapse_ties)
        weights: List[Optional[float]] = [None for _ in range(options_count)]
        cur: Optional[int] = prelims[0][1]
        score: float = 1
        if collapse_ties:
            # Dense ranking: score increments only when the weight changes.
            for i, w in prelims:
                if w != cur:
                    score += 1
                    cur = w
                weights[i] = score
            # Shift so the top rank scores len(prelims).
            weights = [None if x is None else (x + len(prelims) - score)  # noqa: IF100
                       for x in weights]
        else:
            # NOTE(review): `score = i + 1` uses the option index i, not the
            # position within prelims — looks suspicious; confirm intended.
            for i, w in prelims:
                if w != cur:
                    score = i + 1
                    cur = w
                weights[i] = len(prelims) - score + 1
        self.weights = weights
        none_indexes = [i for i, w in enumerate(vote.weights[:options_count])
                        if w is None]
        self.indexes = [i for i, _ in prelims] + none_indexes
@total_ordering
@dataclass(order=False, frozen=False)
class Majority:
    """One decisive pairwise contest between two options.

    Ordering (via __lt__ plus total_ordering): a majority ranks higher when
    it has more votes in favour, then fewer votes against, then more overall
    wins, and finally lower option / opposing-option indices.
    """

    votes_for: int
    votes_against: int
    wins: int
    option: int
    opposing_option: int

    def _key(self) -> Tuple[int, int, int, int, int]:
        # Tuple whose lexicographic order reproduces the comparison chain:
        # negated fields sort "fewer against / lower index" as greater.
        return (self.votes_for, -self.votes_against, self.wins,
                -self.option, -self.opposing_option)

    def __lt__(self, other: object) -> bool:
        # Comparisons with non-Majority values are defined to be False
        # (not NotImplemented), matching the class's existing contract.
        if not isinstance(other, self.__class__):
            return False
        return self._key() < other._key()

    @property
    def margin(self) -> int:
        """Vote surplus of the winning side."""
        return self.votes_for - self.votes_against

    @classmethod
    def create(cls, votes_for: int, votes_against: int, wins_for: int, wins_against: int,
               option: int, opposing_option: int) -> Optional[Majority]:
        """Build a Majority oriented towards the winning side, or None on a tie."""
        if votes_for == votes_against:
            return None
        if votes_for > votes_against:
            return Majority(votes_for, votes_against, wins_for, option, opposing_option)
        return Majority(votes_against, votes_for, wins_against, opposing_option, option)

    @classmethod
    def populate_majorities(cls, comparisons: List[List[int]]) -> Iterable[Majority]:
        """Yield a Majority for every decisive unordered pair in *comparisons*.

        comparisons[i][j] is the number of ballots preferring option i over
        option j; each option's total win count is its row sum.
        """
        wins = [sum(row) for row in comparisons]
        options_count = len(comparisons)
        for first in range(options_count):
            for second in range(first + 1, options_count):
                majority = Majority.create(comparisons[first][second],
                                           comparisons[second][first],
                                           wins[first], wins[second],
                                           first, second)
                if majority is not None:
                    yield majority
| [
"friends.devon@gmail.com"
] | friends.devon@gmail.com |
58d0a8905b5a6546432140bf05e9ab8f06dfb857 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/77/usersdata/216/42255/submittedfiles/exercicio24.py | f70202f0601ef08a2d7723413c6c64658abd3963 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | # -*- coding: utf-8 -*-
import math
a=int(input('Digite um número:'))
b=int(input('Digite um número:'))
i=0
for i in range(1,a,1):
if a%i==0 and b%i==0:
print(i)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
79b1f02be4171c517d1ac21c107213d28eee37dc | 46476e394ff60c7d683e510337f1783e633d47c6 | /db-tools/db_tools/commands/load/load_encounter.py | 5bacadf0a9d3e7a6777e17bb55132c237383aa4a | [] | no_license | mister-vio/db-tool | 2ac0a6202898e9aaec148561edf6f625adce5d7c | e150f9292a6dd0071d3a2a0094aff2d570ce6686 | refs/heads/master | 2020-11-23T18:42:29.442081 | 2019-12-13T06:40:55 | 2019-12-13T06:40:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,412 | py | import calendar
from typing import Dict
from collections import namedtuple
from db_tools.database import Encounter
from .tools import safe_get_value, safe_get_date, BaseProcess, ConvertException
day_stat = namedtuple('day_stat', ['day', 'visit_amount'])
class EncounterProcess(BaseProcess):
    """Loader for FHIR Encounter entries that also tracks per-weekday visits."""

    _model = Encounter
    # Visit counters keyed by weekday (Monday=0 .. Sunday=6); merged into the
    # shared per-run stats by BaseProcess.
    _result_template = {
        'days_visit': {i: 0 for i in range(7)}
    }

    def process_stat(self, entity: Dict) -> None:
        """Accumulate base stats, then count each weekday the encounter spans.

        NOTE(review): uses weekday() of both endpoints, so an encounter that
        crosses a week boundary (e.g. Sat -> Mon) contributes nothing because
        the range is empty — confirm this is intended.
        """
        super().process_stat(entity)
        for day in range(entity['start_date'].weekday(), entity['end_date'].weekday() + 1):
            self._result['days_visit'][day] += 1

    def print_result(self) -> None:
        """Print base stats plus the most and least popular visit weekdays."""
        super().print_result()
        # Fix: these start as None, so the annotation must be Optional.
        most_popular: Optional[day_stat] = None
        least_popular: Optional[day_stat] = None
        for day, visit_amount in self._result['days_visit'].items():
            if most_popular is None or most_popular.visit_amount < visit_amount:
                most_popular = day_stat(day, visit_amount)
            if least_popular is None or least_popular.visit_amount > visit_amount:
                least_popular = day_stat(day, visit_amount)
        print(f"The most popular day is {calendar.day_name[most_popular.day]}")
        print(f"The least popular day is {calendar.day_name[least_popular.day]}")

    def entry_convert(self, raw_entry: Dict) -> Iterator[Dict]:
        """Validate one raw Encounter entry and yield a row dict for loading.

        Fix: this is a generator (it yields), so the return annotation is
        Iterator[Dict], not Dict.

        Raises:
            ConvertException: on a missing id, unknown patient, or invalid
                start/end dates.
        """
        if 'id' not in raw_entry or not raw_entry['id']:
            raise ConvertException("There is no required 'id' field in provided entry.")
        # Strip the "Patient/" prefix (8 chars) from the subject reference —
        # assumes references are always of that form; TODO confirm.
        patient_source_id = safe_get_value(raw_entry, 'subject', 'reference')[8:]
        if patient_source_id not in self._context['patient_mapping']:
            raise ConvertException(f"The patient with source_id={patient_source_id} does not exist.")
        start_date = safe_get_date(raw_entry, 'period', 'start')
        if start_date is None:
            raise ConvertException("Not valid start_date")
        end_date = safe_get_date(raw_entry, 'period', 'end')
        if end_date is None:
            raise ConvertException("Not valid end_date")
        yield dict(
            source_id=raw_entry['id'],
            patient_id=self._context['patient_mapping'][patient_source_id],
            start_date=start_date,
            end_date=end_date,
            type_code=safe_get_value(raw_entry, 'type', 0, 'coding', 0, 'code'),
            type_code_system=safe_get_value(raw_entry, 'type', 0, 'coding', 0, 'system')
        )
| [
"easylovv@gmail.com"
] | easylovv@gmail.com |
9aab50959e6376757d51b3fef3e88483eb1d7494 | 07c3124153a6909f19a21c3c664d8e3f8e0481d0 | /fractals/sierpinski_triangle/sierpinski_triangle.py | aae6e3da8f1aaeec51acdaeab10b98c9d1557216 | [] | no_license | gridl/art-of-turtle-programming | 94ed422a4e75f83e4c3abf7910ed9e5ed8a40aa9 | db6b2c1059bffc9df468691c6ecf1c110b38aafd | refs/heads/master | 2020-03-19T16:20:48.680667 | 2015-12-15T05:46:03 | 2015-12-15T05:46:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | from turtle import *
import math
tracer(1, 0)
setworldcoordinates(0, 0, 960, 810)
bgcolor(0.1, 0.1, 0.1)
BASE_SIZE = 13
BASE_HEIGHT = BASE_SIZE * math.sin(60 * (math.pi / 180))
START_X = 50
START_Y = 20
def draw_triangle(x, y, color):
    """Draw one upward-pointing equilateral triangle of side BASE_SIZE.

    (x, y) is the bottom-left corner; color is any pencolor value — here an
    (r, g, b) tuple.
    """
    penup()
    pencolor(color)
    goto(x, y) # go to bottom-left corner
    pendown()
    setheading(60)  # aim along the left edge
    forward(BASE_SIZE) # draw first side
    right(120)
    forward(BASE_SIZE) # draw second side
    right(120)
    forward(BASE_SIZE) # draw third side
def draw_sierpinski(x, y, level, color):
    """Recursively draw a Sierpinski triangle with bottom-left corner (x, y).

    Each level arranges three copies of the previous level in the Sierpinski
    motif (bottom-left, top-middle, bottom-right); offsets scale by 2**level.
    """
    if level == 0:
        # Base case: three smallest triangles in the motif.
        draw_triangle(x, y, color)
        draw_triangle(x + (BASE_SIZE * 0.5), y + BASE_HEIGHT, color)
        draw_triangle(x + BASE_SIZE, y, color)
    else:
        # Recursive case: same motif with copies 2**level base-widths apart.
        draw_sierpinski(x, y, level - 1, color)
        draw_sierpinski(x + (BASE_SIZE * 0.5 * (2 ** level)), y + (BASE_HEIGHT * (2 ** level)), level - 1, color)
        draw_sierpinski(x + (BASE_SIZE * (2 ** level)), y, level - 1, color)
# Loop from 5 down to 0, drawing 6 sets of sierpinski triangles, each with a
# different color (dark at i=5 fading to bright red at i=0).
for i in range(5, -1, -1):
    red = 1 - (0.2 * i)
    green = 0.1 * i
    blue = 0.1 * i
    draw_sierpinski(START_X, START_Y, i, (red, green, blue))

hideturtle()
update()  # flush everything drawn while animation tracing was off
exitonclick()  # keep the window open until the user clicks
| [
"asweigart@gmail.com"
] | asweigart@gmail.com |
08588b8d20b297324f3860f7152fb0dc94d827eb | 32937a8134261736fc75c9309923c1a5ec6f44c6 | /venv/bin/django-admin | b68b22bd5029218b2866b69da5411a5d79051a1d | [] | no_license | hideonbush21/ITTeamProject | e521f232dfb25cf9a6627eec608382ef86c09cba | e3d924d9a7d31389c11328b1ab65446576695089 | refs/heads/master | 2023-07-02T09:00:05.426646 | 2021-08-06T22:40:25 | 2021-08-06T22:40:25 | 392,419,544 | 1 | 1 | null | 2021-08-04T21:03:33 | 2021-08-03T18:40:10 | null | UTF-8 | Python | false | false | 318 | #!/Users/wentao/PycharmProjects/tango_with_django_project/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"wentao@pc-14-223.customer.ask4.lan"
] | wentao@pc-14-223.customer.ask4.lan | |
8ca9fad8cd78573c8d3ca2e9a76b0d607134371b | ce214c2cbecb3591665b2748c1c777dd83625f96 | /lesson_13/api/routers.py | ff242ade61e4fcebb7697a8a760da6bb173b9707 | [] | no_license | antonplkv/itea_advanced_june | e35af2f10d93d8ffb43664cd0cf7dfd46b969aef | c20e81167bfd87b7e16f340210b246a4cbc1751e | refs/heads/master | 2022-12-04T20:27:21.908624 | 2020-08-19T18:19:49 | 2020-08-19T18:19:49 | 272,512,423 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | from flask import Flask
from flask_restful import Api
from .resources import AuthorResource
# Create the Flask application and wrap it in a Flask-RESTful Api object.
app = Flask(__name__)
api = Api(app)
# Route both the collection (/authors) and a single item (/authors/<author_id>)
# to the same resource class; author_id is passed as a view argument.
api.add_resource(AuthorResource, '/authors', '/authors/<author_id>')
"polyakov9.anton9@gmail.com"
] | polyakov9.anton9@gmail.com |
f92485c0021ef91b6877e779c12d2cedf0f5d894 | 957287b35e49685bbbda8d08eb9b8023eefe0f90 | /ink/cli_opts/logs.py | 61dd6879321620a343d244eccd85b1830035cd7f | [
"Apache-2.0"
] | permissive | cglewis/ink | 379f5a51faec3a56b1a7c79c9b6a8c73a0e5d772 | d97041480ceaff4a234ba574cb0e2f10d78dbb63 | refs/heads/master | 2020-04-09T04:53:25.745153 | 2014-06-14T03:51:58 | 2014-06-14T03:51:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | """
This module is the logs command of ink.
Created on 16 February 2014
@author: Charlie Lewis
"""
class logs(object):
    """
    This class is responsible for the logs command of the cli.
    """
    @classmethod
    def main(cls, args):
        """Entry point for the ``logs`` command.

        Args:
            args: parsed command-line arguments for the command.

        Fixed: the original used the Python 2 ``print args`` statement,
        which is a SyntaxError on Python 3; print() works on both.
        """
        # !! TODO: actually fetch and display logs instead of echoing args.
        print(args)
| [
"charliel@lab41.org"
] | charliel@lab41.org |
220c8655a593b2f802ce974cd1199cd28f8e6f71 | 7f5ab405ed6e4014bf1e98941449d90e9e351cb0 | /inheritance-and-creating-sub-classes.py | e8aa1e82b503d357c7a52db100046002475753fc | [] | no_license | shuvabiswas12/OOP-Python | 435f1e74ee37e0a2b8e9959a473819eb7beb11b5 | c17b317dc99298d452c1e34c6754d4e1bbbe6125 | refs/heads/master | 2020-05-18T19:32:14.864280 | 2019-05-02T15:57:32 | 2019-05-02T15:57:32 | 184,610,209 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py |
class A:
    """Simple record holding a pay value and an amount."""
    def __init__(self, pay, amount):
        self.pay = pay        # pay value
        self.amount = amount  # amount value

    def view_all(self):
        """Return a readable one-line summary of this record.

        Fixed: the original expression
        ``return 'pay = ', self.pay + 'amount = ', self.amount``
        tried to add a number to a string and raised TypeError for
        numeric ``pay``; it now formats both fields into one string.
        """
        return 'pay = {}, amount = {}'.format(self.pay, self.amount)
class B(A):
    """Subclass of A that adds an extra ``data`` attribute."""
    def __init__(self, pay, amount, data):
        # Fixed: the original called the parent constructor twice
        # (once as A.__init__(self, ...) and again via super());
        # a single super() call is the idiomatic way to delegate and
        # avoids redundant double initialization.
        super().__init__(pay, amount)
        self.data = data
b = B(1.04, 20000, True)
print(help(B))  # help() writes the class documentation to stdout and returns None
#
# isinstance() method ...
if isinstance(b, B):
    print("b is an instance of B()")
else:
    print("b is not an instance of B()")
#
# issubclass() method ...
if issubclass(B, A):
    print("B is a sub class of A")
else:
    print("B is not a subclass of A")
| [
"shuvabiswas12@gmail.com"
] | shuvabiswas12@gmail.com |
0d4217ba1b325e87c690927e48f1717142aec8e0 | 46f043d557eba57da5b8c3e9937e4dc84556ae65 | /UDPserver.py | 116b0fbf6c3d8c523d4d06c586ac5bf27a68ad5a | [] | no_license | fengrenxiaoli/Mypython | 822f397d89db1e511ba6785a404efea99dd8600b | 3cb08e0b9e760f44068d31c151afacef21e099f8 | refs/heads/master | 2021-01-10T12:12:15.338696 | 2015-11-17T15:11:19 | 2015-11-17T15:11:19 | 44,948,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | import socket
# Minimal UDP server: replies "Hello,<payload>" to every datagram received.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # IPv4, datagram (UDP)
s.bind(('127.0.0.1', 9999))
print('Bind UDP on 9999...')
while True:
    # recvfrom() returns (payload bytes, (sender_host, sender_port)).
    data, addr = s.recvfrom(1024)
    print('Received from %s:%s' % addr)  # fixed typo: was 'Redeived'
    s.sendto(b'Hello,%s' % data, addr)
| [
"fengrenxiaoli@gmail.com"
] | fengrenxiaoli@gmail.com |
f82d1bfc18cf23dccc01d4ee011811e1f567837a | 0092041336a420af59b73e2ab1bf6e7077b11f6e | /autoeq/constants.py | 9e3aa99e634a4cadadc3b973ff61a777af07f613 | [
"MIT"
] | permissive | jaakkopasanen/AutoEq | e10280a5413a406623ddbc8b87ddf7953ffd020c | ab5869c8f4996f8eea88abca50a41510263ed098 | refs/heads/master | 2023-08-22T22:43:51.969927 | 2023-08-09T11:13:24 | 2023-08-09T11:13:24 | 123,807,729 | 11,367 | 2,940 | MIT | 2023-08-11T08:23:26 | 2018-03-04T16:37:35 | Python | UTF-8 | Python | false | false | 9,711 | py | # -*- coding: utf-8 -*
import os
import math
DEFAULT_F_MIN = 20.0
DEFAULT_F_MAX = 20000.0
DEFAULT_STEP = 1.01
DEFAULT_MAX_GAIN = 6.0
DEFAULT_TREBLE_F_LOWER = 6000.0
DEFAULT_TREBLE_F_UPPER = 8000.0
DEFAULT_TREBLE_MAX_GAIN = 6.0
DEFAULT_TREBLE_GAIN_K = 1.0
DEFAULT_SMOOTHING_WINDOW_SIZE = 1 / 12
DEFAULT_SMOOTHING_ITERATIONS = 1
DEFAULT_TREBLE_SMOOTHING_F_LOWER = 6000.0
DEFAULT_TREBLE_SMOOTHING_F_UPPER = 8000.0
DEFAULT_TREBLE_SMOOTHING_WINDOW_SIZE = 2.0
DEFAULT_TREBLE_SMOOTHING_ITERATIONS = 1
DEFAULT_SOUND_SIGNATURE_SMOOTHING_WINDOW_SIZE = None
DEFAULT_FS = 44100
DEFAULT_BIT_DEPTH = 16
DEFAULT_PHASE = 'minimum'
DEFAULT_F_RES = 10.0
DEFAULT_TILT = 0.0
DEFAULT_BASS_BOOST_GAIN = 0.0
DEFAULT_BASS_BOOST_FC = 105.0
DEFAULT_BASS_BOOST_Q = 0.7
DEFAULT_TREBLE_BOOST_GAIN = 0.0
DEFAULT_TREBLE_BOOST_FC = 10000.0
DEFAULT_TREBLE_BOOST_Q = 0.7
DEFAULT_PEQ_OPTIMIZER_MIN_F = 20.0
DEFAULT_PEQ_OPTIMIZER_MAX_F = 20000.0
DEFAULT_PEQ_OPTIMIZER_MAX_TIME = None
DEFAULT_PEQ_OPTIMIZER_TARGET_LOSS = None
DEFAULT_PEQ_OPTIMIZER_MIN_CHANGE_RATE = None
DEFAULT_PEQ_OPTIMIZER_MIN_STD = 0.002
DEFAULT_FIXED_BAND_FILTER_MIN_GAIN = -12.0
DEFAULT_FIXED_BAND_FILTER_MAX_GAIN = 12.0
DEFAULT_PEAKING_FILTER_MIN_FC = 20.0
DEFAULT_PEAKING_FILTER_MAX_FC = 10000.0
DEFAULT_PEAKING_FILTER_MIN_Q = 0.18248 # AUNBandEq has maximum bandwidth of 5 octaves which is Q of 0.182479
DEFAULT_PEAKING_FILTER_MAX_Q = 6.0
DEFAULT_PEAKING_FILTER_MIN_GAIN = -20.0
DEFAULT_PEAKING_FILTER_MAX_GAIN = 20.0
DEFAULT_SHELF_FILTER_MIN_FC = 20.0
DEFAULT_SHELF_FILTER_MAX_FC = 10000.0
DEFAULT_SHELF_FILTER_MIN_Q = 0.4 # Shelf filters start to overshoot below 0.4
DEFAULT_SHELF_FILTER_MAX_Q = 0.7 # Shelf filters start to overshoot above 0.7
DEFAULT_SHELF_FILTER_MIN_GAIN = -20.0
DEFAULT_SHELF_FILTER_MAX_GAIN = 20.0
DEFAULT_BIQUAD_OPTIMIZATION_F_STEP = 1.02
DEFAULT_MAX_SLOPE = 18.0
DEFAULT_PREAMP = 0.0
DEFAULT_GRAPHIC_EQ_STEP = 1.0563 # Produces 127 samples with greatest frequency of 19871
ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
MOD_REGEX = r' \((sample|serial number) [a-zA-Z0-9\-]+\)$'
DBS = ['crinacle', 'headphonecom', 'innerfidelity', 'oratory1990', 'rtings']
HARMAN_OVEREAR_PREFERENCE_FREQUENCIES = [20.0, 21.0, 22.0, 24.0, 25.0, 27.0, 28.0, 30.0, 32.0, 34.0, 36.0, 38.0, 40.0, 43.0, 45.0, 48.0, 50.0, 53.0, 56.0, 60.0, 63.0, 67.0, 71.0, 75.0, 80.0, 85.0, 90.0, 95.0, 100.0, 106.0, 112.0, 118.0, 125.0, 132.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 212.0, 224.0, 236.0, 250.0, 265.0, 280.0, 300.0, 315.0, 335.0, 355.0, 375.0, 400.0, 425.0, 450.0, 475.0, 500.0, 530.0, 560.0, 600.0, 630.0, 670.0, 710.0, 750.0, 800.0, 850.0, 900.0, 950.0, 1000.0, 1060.0, 1120.0, 1180.0, 1250.0, 1320.0, 1400.0, 1500.0, 1600.0, 1700.0, 1800.0, 1900.0, 2000.0, 2120.0, 2240.0, 2360.0, 2500.0, 2650.0, 2800.0, 3000.0, 3150.0, 3350.0, 3550.0, 3750.0, 4000.0, 4250.0, 4500.0, 4750.0, 5000.0, 5300.0, 5600.0, 6000.0, 6300.0, 6700.0, 7100.0, 7500.0, 8000.0, 8500.0, 9000.0, 9500.0, 10000.0, 10600.0, 11200.0, 11800.0, 12500.0, 13200.0, 14000.0, 15000.0, 16000.0, 17000.0, 18000.0, 19000.0, 20000.0]
HARMAN_INEAR_PREFENCE_FREQUENCIES = [20.0, 21.2, 22.4, 23.6, 25.0, 26.5, 28.0, 30.0, 31.5, 33.5, 35.5, 37.5, 40.0, 42.5, 45.0, 47.5, 50.0, 53.0, 56.0, 60.0, 63.0, 67.0, 71.0, 75.0, 80.0, 85.0, 90.0, 95.0, 100.0, 106.0, 112.0, 118.0, 125.0, 132.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 212.0, 224.0, 236.0, 250.0, 265.0, 280.0, 300.0, 315.0, 335.0, 355.0, 375.0, 400.0, 425.0, 450.0, 475.0, 500.0, 530.0, 560.0, 600.0, 630.0, 670.0, 710.0, 750.0, 800.0, 850.0, 900.0, 950.0, 1000.0, 1060.0, 1120.0, 1180.0, 1250.0, 1320.0, 1400.0, 1500.0, 1600.0, 1700.0, 1800.0, 1900.0, 2000.0, 2120.0, 2240.0, 2360.0, 2500.0, 2650.0, 2800.0, 3000.0, 3150.0, 3350.0, 3550.0, 3750.0, 4000.0, 4250.0, 4500.0, 4750.0, 5000.0, 5300.0, 5600.0, 6000.0, 6300.0, 6700.0, 7100.0, 7500.0, 8000.0, 8500.0, 9000.0, 9500.0, 10000.0, 10600.0, 11200.0, 11800.0, 12500.0, 13200.0, 14000.0, 15000.0, 16000.0, 17000.0, 18000.0, 19000.0, 20000.0]
PREAMP_HEADROOM = 0.2
PEQ_CONFIGS = {
'10_BAND_GRAPHIC_EQ': {
'optimizer': {'min_std': 0.01},
'filter_defaults': {'q': math.sqrt(2), 'min_gain': -12.0, 'max_gain': 12.0, 'type': 'PEAKING'},
'filters': [{'fc': 31.25 * 2 ** i} for i in range(10)]
},
'31_BAND_GRAPHIC_EQ': {
'optimizer': {'min_std': 0.01},
'filter_defaults': {'q': 4.318473, 'min_gain': -12.0, 'max_gain': 12.0, 'type': 'PEAKING'},
'filters': [{'fc': 20 * 2 ** (i / 3), 'type': 'PEAKING'} for i in range(31)]
},
'10_PEAKING': {
'filters': [{'type': 'PEAKING'}] * 10
},
'8_PEAKING_WITH_SHELVES': {
'optimizer': {
'min_std': 0.008
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{'type': 'PEAKING'}] * 8
},
'4_PEAKING_WITH_LOW_SHELF': {
'optimizer': {
'max_f': 10000.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}] + [{'type': 'PEAKING'}] * 4
},
'4_PEAKING_WITH_HIGH_SHELF': {
'filters': [{
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{'type': 'PEAKING'}] * 4
},
'AUNBANDEQ': {
'optimizer': {
'min_std': 0.008
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_fc': 20.0, # Can go to 16 Hz
'max_fc': 10000.0, # Can go to 20 kHz
'min_q': 0.182479, # Max bw of 5.0
'max_q': 10.0 # Min bw of 0.01 = 144.27 Q
}] * 8
},
'MINIDSP_2X4HD': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -16.0,
'max_gain': 16.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.5,
'max_q': 6.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
'MINIDSP_IL_DSP': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -16.0,
'max_gain': 16.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.5,
'max_q': 6.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
'NEUTRON_MUSIC_PLAYER': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -12.0,
'max_gain': 12.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.1,
'max_q': 5.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
'POWERAMP_EQUALIZER': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -15.0,
'max_gain': 15.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10e3,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.1,
'max_q': 12.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
'QUDELIX_5K': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -12.0,
'max_gain': 12.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10e3,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.1,
'max_q': 7.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
'SPOTIFY': {
'optimizer': {'min_std': 0.01},
'filters': [
{'fc': 60.0, 'q': 1.0, 'type': 'PEAKING'},
{'fc': 150.0, 'q': 1.0, 'type': 'PEAKING'},
{'fc': 400.0, 'q': 1.0, 'type': 'PEAKING'},
{'fc': 2400.0, 'q': 1.0, 'type': 'PEAKING'},
{'fc': 15000.0, 'q': 1.0, 'type': 'PEAKING'},
]
},
'USB_AUDIO_PLAYER_PRO': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -20.0,
'max_gain': 20.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.1,
'max_q': 10.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
}
| [
"jaakko.o.pasanen@gmail.com"
] | jaakko.o.pasanen@gmail.com |
fa2debd4b7df01163deb530cc13213e4631ef425 | 67281f76d77308756c2530517e302475f596a834 | /pythonscripts/set.py | ab49e035441d467e50e19aebfab1c956fde0f2dc | [] | no_license | Surajprasanna/epsilon-python | 5edac9a186e6298e8209f60bbe0ed24dffa68e2f | 879063774e3d4bfa8d713ba26857f881e39aaa44 | refs/heads/master | 2020-04-05T20:28:05.402489 | 2018-11-14T11:14:34 | 2018-11-14T11:14:34 | 157,181,489 | 0 | 0 | null | 2018-11-12T08:37:54 | 2018-11-12T08:37:53 | null | UTF-8 | Python | false | false | 177 | py | #/bin/python3
# Demonstrates Python set literals and common set operations.
setA = {2,4,5,7,78,34,56,3}
setB = {2,4,3,9,10}
#print(setA)
#for i in setA:
#    print(i)
print(dir(setA))  # list every attribute/method available on a set object
#print(setA.intersection(setB))
print(setA.union(setB))  # all elements present in either set
| [
"root@ip-172-31-22-38.ap-south-1.compute.internal"
] | root@ip-172-31-22-38.ap-south-1.compute.internal |
dd2581b2b922761111f73de6a66b37bef9ca71ad | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/servicebus/latest/list_disaster_recovery_config_keys.py | 25a135b1c7de1f742920f2d68de3190e3c721078 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 6,888 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListDisasterRecoveryConfigKeysResult',
'AwaitableListDisasterRecoveryConfigKeysResult',
'list_disaster_recovery_config_keys',
]
# NOTE: auto-generated Pulumi output type (see the file header warning);
# only comments should be edited by hand.
@pulumi.output_type
class ListDisasterRecoveryConfigKeysResult:
    """
    Namespace/ServiceBus Connection String
    """
    # Each keyword argument is validated to be a str (when provided) and
    # stored via pulumi.set so the matching @pulumi.getter property below
    # can retrieve it.
    def __init__(__self__, alias_primary_connection_string=None, alias_secondary_connection_string=None, key_name=None, primary_connection_string=None, primary_key=None, secondary_connection_string=None, secondary_key=None):
        if alias_primary_connection_string and not isinstance(alias_primary_connection_string, str):
            raise TypeError("Expected argument 'alias_primary_connection_string' to be a str")
        pulumi.set(__self__, "alias_primary_connection_string", alias_primary_connection_string)
        if alias_secondary_connection_string and not isinstance(alias_secondary_connection_string, str):
            raise TypeError("Expected argument 'alias_secondary_connection_string' to be a str")
        pulumi.set(__self__, "alias_secondary_connection_string", alias_secondary_connection_string)
        if key_name and not isinstance(key_name, str):
            raise TypeError("Expected argument 'key_name' to be a str")
        pulumi.set(__self__, "key_name", key_name)
        if primary_connection_string and not isinstance(primary_connection_string, str):
            raise TypeError("Expected argument 'primary_connection_string' to be a str")
        pulumi.set(__self__, "primary_connection_string", primary_connection_string)
        if primary_key and not isinstance(primary_key, str):
            raise TypeError("Expected argument 'primary_key' to be a str")
        pulumi.set(__self__, "primary_key", primary_key)
        if secondary_connection_string and not isinstance(secondary_connection_string, str):
            raise TypeError("Expected argument 'secondary_connection_string' to be a str")
        pulumi.set(__self__, "secondary_connection_string", secondary_connection_string)
        if secondary_key and not isinstance(secondary_key, str):
            raise TypeError("Expected argument 'secondary_key' to be a str")
        pulumi.set(__self__, "secondary_key", secondary_key)
    @property
    @pulumi.getter(name="aliasPrimaryConnectionString")
    def alias_primary_connection_string(self) -> str:
        """
        Primary connection string of the alias if GEO DR is enabled
        """
        return pulumi.get(self, "alias_primary_connection_string")
    @property
    @pulumi.getter(name="aliasSecondaryConnectionString")
    def alias_secondary_connection_string(self) -> str:
        """
        Secondary connection string of the alias if GEO DR is enabled
        """
        return pulumi.get(self, "alias_secondary_connection_string")
    @property
    @pulumi.getter(name="keyName")
    def key_name(self) -> str:
        """
        A string that describes the authorization rule.
        """
        return pulumi.get(self, "key_name")
    @property
    @pulumi.getter(name="primaryConnectionString")
    def primary_connection_string(self) -> str:
        """
        Primary connection string of the created namespace authorization rule.
        """
        return pulumi.get(self, "primary_connection_string")
    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> str:
        """
        A base64-encoded 256-bit primary key for signing and validating the SAS token.
        """
        return pulumi.get(self, "primary_key")
    @property
    @pulumi.getter(name="secondaryConnectionString")
    def secondary_connection_string(self) -> str:
        """
        Secondary connection string of the created namespace authorization rule.
        """
        return pulumi.get(self, "secondary_connection_string")
    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> str:
        """
        A base64-encoded 256-bit primary key for signing and validating the SAS token.
        """
        return pulumi.get(self, "secondary_key")
# Awaitable wrapper so the result can be used with ``await`` in async code.
class AwaitableListDisasterRecoveryConfigKeysResult(ListDisasterRecoveryConfigKeysResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` makes this method a generator, which is
        # what __await__ must return; awaiting therefore completes
        # immediately and produces a plain result object.
        if False:
            yield self
        return ListDisasterRecoveryConfigKeysResult(
            alias_primary_connection_string=self.alias_primary_connection_string,
            alias_secondary_connection_string=self.alias_secondary_connection_string,
            key_name=self.key_name,
            primary_connection_string=self.primary_connection_string,
            primary_key=self.primary_key,
            secondary_connection_string=self.secondary_connection_string,
            secondary_key=self.secondary_key)
def list_disaster_recovery_config_keys(alias: Optional[str] = None,
                                       authorization_rule_name: Optional[str] = None,
                                       namespace_name: Optional[str] = None,
                                       resource_group_name: Optional[str] = None,
                                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDisasterRecoveryConfigKeysResult:
    """
    List the keys and connection strings of the given authorization rule for a
    Service Bus Disaster Recovery configuration (alias).

    :param str alias: The Disaster Recovery configuration name
    :param str authorization_rule_name: The authorization rule name.
    :param str namespace_name: The namespace name
    :param str resource_group_name: Name of the Resource group within the Azure subscription.
    """
    __args__ = dict()
    __args__['alias'] = alias
    __args__['authorizationRuleName'] = authorization_rule_name
    __args__['namespaceName'] = namespace_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider-side function and coerce the raw result into the
    # typed output class declared above.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:servicebus/latest:listDisasterRecoveryConfigKeys', __args__, opts=opts, typ=ListDisasterRecoveryConfigKeysResult).value
    return AwaitableListDisasterRecoveryConfigKeysResult(
        alias_primary_connection_string=__ret__.alias_primary_connection_string,
        alias_secondary_connection_string=__ret__.alias_secondary_connection_string,
        key_name=__ret__.key_name,
        primary_connection_string=__ret__.primary_connection_string,
        primary_key=__ret__.primary_key,
        secondary_connection_string=__ret__.secondary_connection_string,
        secondary_key=__ret__.secondary_key)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
edcddd8e2f551a6693766bb167fef95132f4a54d | 0e0c67d6eabd63653c02121d83ac1de863231cb6 | /myblog/blog/migrations/0001_initial.py | 297902279ecc0337ff08c570e48986f2864a62a8 | [] | no_license | ragyrad/DjangoLearn | cb22fee4a1f97ccf67421c97f5857fef7d3f1e95 | 0577a0488d7339d7a1a15e79bc331dc5869c06a3 | refs/heads/master | 2023-03-22T23:30:51.276045 | 2021-03-09T09:16:46 | 2021-03-09T09:16:46 | 332,638,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,365 | py | # Generated by Django 3.1.6 on 2021-02-11 05:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
# NOTE: auto-generated Django migration (initial schema for the blog app);
# do not edit by hand once it has been applied to any database.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('slug', models.SlugField(max_length=250, unique_for_date='publish')),
                ('body', models.TextField()),
                ('publish', models.DateTimeField(default=django.utils.timezone.now)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('publish',),
            },
        ),
    ]
| [
"urban.oleg1@gmail.com"
] | urban.oleg1@gmail.com |
22d7e44524dc9cd48166afdf000431fc3f606e9a | 6ca0d0be3f59b14e36a7262fdb6da929597dbcfc | /lorawan/user_agent/logger/log_main.py | e618b6941eec08bd11b4104ce6a739e16f3b15b0 | [
"MIT"
] | permissive | pablomodernell/lorawan_conformance_testing | 79f12845840ef8b0f427743d760de9495ab36a9a | 3e6b9028ee7a6a614e52bac684e396ecd04fd10c | refs/heads/master | 2023-05-13T12:59:04.908279 | 2020-08-23T16:45:26 | 2020-08-23T16:45:26 | 280,359,564 | 1 | 0 | MIT | 2023-05-01T20:42:47 | 2020-07-17T07:39:34 | HTML | UTF-8 | Python | false | false | 2,255 | py | """
Auxiliary functions for accessing the logging information generated by the
Test Application Server (TAS).
"""
#################################################################################
# MIT License
#
# Copyright (c) 2018, Pablo D. Modernell, Universitat Oberta de Catalunya (UOC),
# Universidad de la Republica Oriental del Uruguay (UdelaR).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#################################################################################
import click
import lorawan.user_agent.logger.loggers as loggers
def log_all():
    """Launch a logger that subscribes to messages from every TAS service."""
    aggregated_logger = loggers.LoggerAll()
    print("Starting log.")
    aggregated_logger.start_logging()
def log_nwk_forwarder():
    """Launch a logger that subscribes to the Payload Forwarder service messages."""
    forwarder_logger = loggers.PayloadForwarderLog()
    print("Starting Payload Forwarder Service log.")
    forwarder_logger.start_logging()
def log_test_session_coordinator():
    """Launch a logger that subscribes to the Test Session Coordinator service messages."""
    coordinator_logger = loggers.TestServerLog()
    print("Starting Test Server log.")
    coordinator_logger.start_logging()
| [
"pmodernell@worldsensing.com"
] | pmodernell@worldsensing.com |
3fe664179b58c7117696dab5877afa4fd95563ad | 6bb9dbc6e05739c9b519812ac8bf2f5b7fd11e2d | /RL_understanding/reinforcement-learning-an-introduction-master/chapter05/blackjack.py | 30fb5cb31b29a175c09b015477d0afe5d72398a8 | [
"Apache-2.0"
] | permissive | eddgeag/LearninrRL | 5a029c7e2491a0517c847810f39ee1e518367619 | a8c84d9c545332a80eb900a6535ce1be44beb152 | refs/heads/master | 2020-07-02T21:44:07.419583 | 2020-01-07T11:28:39 | 2020-01-07T11:28:39 | 201,676,207 | 0 | 0 | null | 2019-08-13T21:00:37 | 2019-08-10T19:51:34 | null | UTF-8 | Python | false | false | 13,070 | py | #######################################################################
# Copyright (C) #
# 2016-2018 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# 2016 Kenta Shimada(hyperkentakun@gmail.com) #
# 2017 Nicky van Foreest(vanforeest@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
# actions: hit or stand
ACTION_HIT = 0
ACTION_STAND = 1  # "strike" in the book
ACTIONS = [ACTION_HIT, ACTION_STAND]
# Target policy for the player, indexed by the player's current sum.
# Entries default to 0 (= ACTION_HIT), so only the explicit stand decisions
# at 20 and 21 need to be written; sums below 12 never reach the policy.
POLICY_PLAYER = np.zeros(22)
for i in range(12, 20):
    POLICY_PLAYER[i] = ACTION_HIT
POLICY_PLAYER[20] = ACTION_STAND
POLICY_PLAYER[21] = ACTION_STAND
# function form of target policy of player
def target_policy_player(usable_ace_player, player_sum, dealer_card):
    """Target policy: look up the action for the current sum in POLICY_PLAYER.

    The ace/dealer-card arguments are unused here but kept so that every
    policy shares the same (usable_ace, player_sum, dealer_card) signature.
    """
    return POLICY_PLAYER[player_sum]
# function form of behavior policy of player (used for off-policy sampling)
def behavior_policy_player(usable_ace_player, player_sum, dealer_card):
    """Behavior policy: hit or stand with equal probability, ignoring the state."""
    coin_flip = np.random.binomial(1, 0.5)
    return ACTION_STAND if coin_flip == 1 else ACTION_HIT
# Fixed dealer policy, indexed by the dealer's sum: hit below 17,
# stand on 17 and above (standard casino rule).
POLICY_DEALER = np.zeros(22)
for i in range(12, 17):
    POLICY_DEALER[i] = ACTION_HIT
for i in range(17, 22):
    POLICY_DEALER[i] = ACTION_STAND
# deal a new card from an infinite deck
def get_card():
    """Deal one card: 1 (ace) through 9 at face value; 10, J, Q, K all count as 10."""
    return min(np.random.randint(1, 14), 10)
# play a game
# @policy_player: specify policy for player
# @initial_state: [whether player has a usable Ace, sum of player's cards, one card of dealer]
# @initial_action: the initial action
def play(policy_player, initial_state=None, initial_action=None):
    """Simulate one complete hand of blackjack.

    Args:
        policy_player: callable (usable_ace, player_sum, dealer_card) -> action
            used to choose each of the player's actions.
        initial_state: optional [usable_ace, player_sum, dealer_card] to start
            from; a random start is generated when None.
        initial_action: optional forced first action (for exploring starts).

    Returns:
        (initial state, reward in {-1, 0, 1} from the player's point of view,
         player trajectory as a list of [(usable_ace, player_sum, dealer_card),
         action] pairs).
    """
    # player status
    # sum of player
    player_sum = 0
    # trajectory of player
    player_trajectory = []
    # whether player uses Ace as 11
    usable_ace_player = False
    # dealer status
    dealer_card1 = 0
    dealer_card2 = 0
    usable_ace_dealer = False
    if initial_state is None:
        # generate a random initial state
        num_of_ace = 0
        # initialize cards of player
        while player_sum < 12:
            # if sum of player is less than 12, always hit
            card = get_card()
            # if get an Ace, use it as 11
            if card == 1:
                num_of_ace += 1
                card = 11
                usable_ace_player = True
            player_sum += card
        # if player's sum is larger than 21, he must hold at least one Ace, two Aces are possible
        if player_sum > 21:
            # use the Ace as 1 rather than 11
            player_sum -= 10
            # if the player only has one Ace, then he doesn't have usable Ace any more
            if num_of_ace == 1:
                usable_ace_player = False
        # initialize cards of dealer, suppose dealer will show the first card he gets
        dealer_card1 = get_card()
        dealer_card2 = get_card()
    else:
        # use specified initial state
        usable_ace_player, player_sum, dealer_card1 = initial_state
        dealer_card2 = get_card()
    # initial state of the game
    state = [usable_ace_player, player_sum, dealer_card1]
    # initialize dealer's sum (an ace in either hole card is counted as 11)
    dealer_sum = 0
    if dealer_card1 == 1 and dealer_card2 != 1:
        dealer_sum += 11 + dealer_card2
        usable_ace_dealer = True
    elif dealer_card1 != 1 and dealer_card2 == 1:
        dealer_sum += dealer_card1 + 11
        usable_ace_dealer = True
    elif dealer_card1 == 1 and dealer_card2 == 1:
        dealer_sum += 1 + 11
        usable_ace_dealer = True
    else:
        dealer_sum += dealer_card1 + dealer_card2
    # game starts!
    # player's turn
    while True:
        if initial_action is not None:
            action = initial_action
            initial_action = None
        else:
            # get action based on current sum
            action = policy_player(usable_ace_player, player_sum, dealer_card1)
        # track player's trajectory for importance sampling
        player_trajectory.append([(usable_ace_player, player_sum, dealer_card1), action])
        if action == ACTION_STAND:
            break
        # if hit, get new card
        player_sum += get_card()
        # player busts
        if player_sum > 21:
            # if player has a usable Ace, use it as 1 to avoid busting and continue
            if usable_ace_player == True:
                player_sum -= 10
                usable_ace_player = False
            else:
                # otherwise player loses
                return state, -1, player_trajectory
    # dealer's turn
    while True:
        # get action based on current sum
        action = POLICY_DEALER[dealer_sum]
        if action == ACTION_STAND:
            break
        # if hit, get a new card
        new_card = get_card()
        if new_card == 1 and dealer_sum + 11 < 21:
            dealer_sum += 11
            usable_ace_dealer = True
        else:
            dealer_sum += new_card
        # dealer busts
        if dealer_sum > 21:
            if usable_ace_dealer == True:
                # if dealer has a usable Ace, use it as 1 to avoid busting and continue
                dealer_sum -= 10
                usable_ace_dealer = False
            else:
                # otherwise dealer loses
                return state, 1, player_trajectory
    # compare the sum between player and dealer
    if player_sum > dealer_sum:
        return state, 1, player_trajectory
    elif player_sum == dealer_sum:
        return state, 0, player_trajectory
    else:
        return state, -1, player_trajectory
# Monte Carlo Sample with On-Policy
def monte_carlo_on_policy(episodes):
    """Evaluate the target policy by sampling complete games and averaging
    returns for every visited state. Returns two 10x10 value grids
    (with usable ace, without usable ace), indexed by
    (player_sum - 12, dealer_card - 1)."""
    returns_with_ace = np.zeros((10, 10))
    visits_with_ace = np.ones((10, 10))        # start at 1 so division is safe
    returns_without_ace = np.zeros((10, 10))
    visits_without_ace = np.ones((10, 10))     # start at 1 so division is safe
    for _ in tqdm(range(episodes)):
        _, reward, trajectory = play(target_policy_player)
        for (usable_ace, player_sum, dealer_card), _action in trajectory:
            row = player_sum - 12
            col = dealer_card - 1
            if usable_ace:
                visits_with_ace[row, col] += 1
                returns_with_ace[row, col] += reward
            else:
                visits_without_ace[row, col] += 1
                returns_without_ace[row, col] += reward
    return returns_with_ace / visits_with_ace, returns_without_ace / visits_without_ace
# Monte Carlo with Exploring Starts
def monte_carlo_es(episodes):
    """Monte Carlo ES: estimate action values with exploring starts and a
    greedy behavior policy (after the first episode). Returns the array of
    average returns indexed by (player_sum - 12, dealer_card - 1,
    usable_ace, action)."""
    # (playerSum, dealerCard, usableAce, action)
    state_action_values = np.zeros((10, 10, 2, 2))
    # initialize counts to 1 to avoid division by 0
    state_action_pair_count = np.ones((10, 10, 2, 2))
    # behavior policy is greedy w.r.t. the current average returns,
    # breaking ties uniformly at random
    def behavior_policy(usable_ace, player_sum, dealer_card):
        usable_ace = int(usable_ace)
        player_sum -= 12
        dealer_card -= 1
        # get argmax of the average returns(s, a)
        values_ = state_action_values[player_sum, dealer_card, usable_ace, :] / \
                  state_action_pair_count[player_sum, dealer_card, usable_ace, :]
        return np.random.choice([action_ for action_, value_ in enumerate(values_) if value_ == np.max(values_)])
    # play for several episodes
    for episode in tqdm(range(episodes)):
        # for each episode, use a randomly initialized state and action
        initial_state = [bool(np.random.choice([0, 1])),
                         np.random.choice(range(12, 22)),
                         np.random.choice(range(1, 11))]
        initial_action = np.random.choice(ACTIONS)
        current_policy = behavior_policy if episode else target_policy_player
        _, reward, trajectory = play(current_policy, initial_state, initial_action)
        for (usable_ace, player_sum, dealer_card), action in trajectory:
            usable_ace = int(usable_ace)
            player_sum -= 12
            dealer_card -= 1
            # update values of state-action pairs
            state_action_values[player_sum, dealer_card, usable_ace, action] += reward
            state_action_pair_count[player_sum, dealer_card, usable_ace, action] += 1
    return state_action_values / state_action_pair_count
# Monte Carlo Sample with Off-Policy
def monte_carlo_off_policy(episodes):
    """Off-policy evaluation of the fixed start state [usable ace, sum 13,
    dealer shows 2] under the random behavior policy. Returns per-episode
    running value estimates from ordinary and weighted importance sampling."""
    initial_state = [True, 13, 2]
    rhos = []
    returns = []
    for i in range(0, episodes):
        _, reward, player_trajectory = play(behavior_policy_player, initial_state=initial_state)
        # get the importance ratio: the behavior policy picks each action with
        # probability 0.5; the ratio is zero as soon as the trajectory deviates
        # from the (deterministic) target policy
        numerator = 1.0
        denominator = 1.0
        for (usable_ace, player_sum, dealer_card), action in player_trajectory:
            if action == target_policy_player(usable_ace, player_sum, dealer_card):
                denominator *= 0.5
            else:
                numerator = 0.0
                break
        rho = numerator / denominator
        rhos.append(rho)
        returns.append(reward)
    rhos = np.asarray(rhos)
    returns = np.asarray(returns)
    weighted_returns = rhos * returns
    # cumulative sums give the running estimates after each episode
    weighted_returns = np.add.accumulate(weighted_returns)
    rhos = np.add.accumulate(rhos)
    ordinary_sampling = weighted_returns / np.arange(1, episodes + 1)
    with np.errstate(divide='ignore',invalid='ignore'):
        weighted_sampling = np.where(rhos != 0, weighted_returns / rhos, 0)
    return ordinary_sampling, weighted_sampling
def figure_5_1():
    """Reproduce figure 5.1: heatmaps of on-policy Monte Carlo state values
    after 10,000 and 500,000 episodes, with and without a usable ace.

    Saves the plot to ../images/figure_5_1.png.
    """
    usable_10k, no_usable_10k = monte_carlo_on_policy(10000)
    usable_500k, no_usable_500k = monte_carlo_on_policy(500000)

    # Panel order matches the book: usable-ace row on top.
    panels = [
        (usable_10k, 'Usable Ace, 10000 Episodes'),
        (usable_500k, 'Usable Ace, 500000 Episodes'),
        (no_usable_10k, 'No Usable Ace, 10000 Episodes'),
        (no_usable_500k, 'No Usable Ace, 500000 Episodes'),
    ]

    _, axes = plt.subplots(2, 2, figsize=(40, 30))
    plt.subplots_adjust(wspace=0.1, hspace=0.2)

    for axis, (state, title) in zip(axes.flatten(), panels):
        # Flip vertically so the player sum increases upward on the y axis.
        fig = sns.heatmap(np.flipud(state), cmap="YlGnBu", ax=axis,
                          xticklabels=range(1, 11),
                          yticklabels=list(reversed(range(12, 22))))
        fig.set_ylabel('player sum', fontsize=30)
        fig.set_xlabel('dealer showing', fontsize=30)
        fig.set_title(title, fontsize=30)

    plt.savefig('../images/figure_5_1.png')
    plt.close()
def figure_5_2():
    """Reproduce figure 5.2: optimal policy and optimal state values found by
    Monte Carlo Exploring Starts (500,000 episodes), with/without usable ace.

    Saves the plot to ../images/figure_5_2.png.
    """
    state_action_values = monte_carlo_es(500000)

    # State value is the max over actions; policy is the argmax.
    state_value_no_usable_ace = np.max(state_action_values[:, :, 0, :], axis=-1)
    state_value_usable_ace = np.max(state_action_values[:, :, 1, :], axis=-1)
    action_no_usable_ace = np.argmax(state_action_values[:, :, 0, :], axis=-1)
    action_usable_ace = np.argmax(state_action_values[:, :, 1, :], axis=-1)

    images = [action_usable_ace,
              state_value_usable_ace,
              action_no_usable_ace,
              state_value_no_usable_ace]
    titles = ['Optimal policy with usable Ace',
              'Optimal value with usable Ace',
              'Optimal policy without usable Ace',
              'Optimal value without usable Ace']

    _, axes = plt.subplots(2, 2, figsize=(40, 30))
    plt.subplots_adjust(wspace=0.1, hspace=0.2)
    axes = axes.flatten()

    for image, title, axis in zip(images, titles, axes):
        # Flip vertically so the player sum increases upward on the y axis.
        fig = sns.heatmap(np.flipud(image), cmap="YlGnBu", ax=axis, xticklabels=range(1, 11),
                          yticklabels=list(reversed(range(12, 22))))
        fig.set_ylabel('player sum', fontsize=30)
        fig.set_xlabel('dealer showing', fontsize=30)
        fig.set_title(title, fontsize=30)

    # Save and close like the sibling figure_* functions; previously these
    # lines were commented out, so the figure was neither saved nor released.
    plt.savefig('../images/figure_5_2.png')
    plt.close()
def figure_5_3():
    """Reproduce figure 5.3: mean squared error of ordinary vs. weighted
    importance sampling, averaged over independent runs.

    Saves the plot to ../images/figure_5_3.png.
    """
    # Ground-truth value of the initial state [usable ace, sum 13, dealer 2],
    # as given in the book.
    true_value = -0.27726
    episodes = 10000
    runs = 100

    error_ordinary = np.zeros(episodes)
    error_weighted = np.zeros(episodes)
    for _ in tqdm(range(runs)):
        ordinary_sampling_, weighted_sampling_ = monte_carlo_off_policy(episodes)
        # Accumulate the squared error of each running estimate.
        error_ordinary += np.power(ordinary_sampling_ - true_value, 2)
        error_weighted += np.power(weighted_sampling_ - true_value, 2)
    error_ordinary /= runs
    error_weighted /= runs

    plt.plot(error_ordinary, label='Ordinary Importance Sampling')
    plt.plot(error_weighted, label='Weighted Importance Sampling')
    plt.xlabel('Episodes (log scale)')
    plt.ylabel('Mean square error')
    plt.xscale('log')
    plt.legend()

    plt.savefig('../images/figure_5_3.png')
    # Release the figure so repeated calls don't accumulate open figures
    # (this line was previously commented out).
    plt.close()
if __name__ == '__main__':
    # Uncomment to regenerate the other figures; each saves to ../images/.
    # figure_5_1()
    # figure_5_2()
    figure_5_3()
| [
"eddgeag@gmail.com"
] | eddgeag@gmail.com |
a9ce27dab2091e921cd004331e4fd2bda5e1d9f0 | 913d05cc0c20b8c80b7fd1cd7a4da65a059a2f44 | /utils.py | f2e7c30a3a1e5de42ee6fbbe5237f0b6298f6835 | [] | no_license | paksu/MERCURYCLAVE | 6544fef4a1fedcf9bd121d577f813c83427ca6c8 | 2847ab8a749609261df4eccac6871faab8cd76d0 | refs/heads/master | 2021-07-12T02:28:04.718463 | 2017-05-21T11:06:34 | 2017-05-21T11:06:34 | 106,080,196 | 0 | 0 | null | 2017-10-07T07:43:34 | 2017-10-07T07:43:33 | null | UTF-8 | Python | false | false | 502 | py | from __future__ import print_function
import re
def print_error(err):
    """Write *err* to stdout prefixed with the [ERROR] tag."""
    print("[ERROR] " + str(err))
def print_info(inf):
    """Write *inf* to stdout prefixed with the [INFO] tag."""
    print("[INFO] " + str(inf))
def is_valid_b64(s):
    """Return True if *s* is a syntactically valid base64 string.

    Accepts zero or more 4-character groups of the base64 alphabet, with an
    optional correctly-padded final group ('xx==' or 'xxx='). The empty
    string is considered valid.
    """
    # Raw string for the regex; compare the match result with `is not None`
    # (PEP 8) instead of `!= None`, and return the boolean directly.
    validator = re.compile(
        r'^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$')
    return validator.match(s) is not None
def is_valid_ascii(s):
    """Return True when the byte string *s* decodes cleanly as ASCII."""
    # NOTE(review): expects an object with .decode (bytes on Python 3,
    # str on Python 2) — a Python 3 str input would raise AttributeError.
    try:
        s.decode('ascii')
        return True
    except UnicodeDecodeError:
        return False
| [
"dsouzadyn@gmail.com"
] | dsouzadyn@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.