| blob_id (string, length 40-40) | language (string, 1 class) | repo_name (string, length 5-133) | path (string, length 2-333) | src_encoding (string, 30 classes) | length_bytes (int64, 18-5.47M) | score (float64, 2.52-5.81) | int_score (int64, 3-5) | detected_licenses (list, length 0-67) | license_type (string, 2 classes) | text (string, length 12-5.47M) | download_success (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
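Each record below follows this schema: the metadata cells, then the `text` cell holding the full file contents, then the trailing `download_success` flag. As a minimal sketch of how rows with this schema could be consumed (the hub path `user/python-files-sample` is hypothetical; the actual dataset name does not appear in this dump):

```python
from datasets import load_dataset

# Hypothetical dataset path, for illustration only.
ds = load_dataset("user/python-files-sample", split="train")

# Keep successfully downloaded files with a decent quality score;
# the field names correspond one-to-one to the schema columns above.
good = ds.filter(lambda r: r["download_success"] and r["int_score"] >= 3)
print(good[0]["repo_name"], good[0]["path"], good[0]["length_bytes"])
```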
| ef374502289c5bfa390c868be416d362de0ab621 | Python | carlosdlfuente/PhraseDep | /python/encoder.py | UTF-8 | 7,264 | 2.90625 | 3 | [] | no_license |
"""
This file implements an Encoder for lexicalized parsing.
Its job is to map between lexicalized trees and
the parts representation as annotated spans.
"""
from collections import defaultdict
import pydecode
from pydecode.encoder import StructuredEncoder
from nltk import ImmutableTree as Tree
import numpy as np
from tree import *
from itertools import izip
class auto:
"""
A simple auto increment dictionary.
"""
def __init__(self):
self.id = 0
def __call__(self):
self.id += 1
return self.id
class SparseEncoder(StructuredEncoder):
"""
A sparse structured encoder. Gives a new id to each new key.
"""
def __init__(self):
self.encoder = defaultdict(auto())
def transform_labels(self, labels):
reverse = dict(zip(self.encoder.values(), self.encoder.keys()))
return np.array([reverse[label] for label in labels])
class LexicalizedCFGEncoder(SparseEncoder):
def save(self, file, graph):
s = set(graph.labeling)
out = [(k, v) for k, v in izip(self.encoder.keys(), self.encoder.values())
if v in s]
a1 = np.array([a for (a, b) in out])
b1 = np.array([b for (a, b) in out])
with open(file, "wb") as o:
np.save(file + ".keys", a1)
np.save(file + ".vals", b1)
def load(self, file, graph):
with open(file, "rb") as i:
keys = np.load(file + ".keys.npy")
vals = np.load(file + ".vals.npy")
self.encoder = dict([(tuple(keys[i]), vals[i]) for i in range(len(keys))])
def __init__(self, sentence, tags, grammar):
self.grammar = grammar
self.sentence = sentence
self.tags = tags
super(LexicalizedCFGEncoder, self).__init__()
def transform_structure(self, parse):
r"""
Decompose a parse structure to parts.
Parameters
----------
parse : nltk.Tree
A lexicalized parse tree with annotated nonterminals of the
form X^i where X is a non-terminal symbol in the grammar and
:math:`i \in \{0 \ldots n-1\}` is the index of the head word.
Returns
-------
parts : int ndarray
Each row is a part of the form (i, j, k, h, m, r) where
i < j < k is the span and the split point, h is the head
index, m is the modifier index, and r is the index of
the rule used.
"""
stack = [(parse, 0, len(parse.leaves())-1)]
parts = []
while stack:
(node, i, k) = stack.pop()
cur = i
X, h = annotated_label(node)
if len(node) == 2:
Y, h_1 = annotated_label(node[0])
Z, h_2 = annotated_label(node[1])
other = h_1 if h == h_2 else h_2
assert h == h_1 or h == h_2, "%s %s %s\n%s"%(h, h_1, h_2, parse)
r = self.grammar.rule_index(X, Y, Z)
if not terminal(node[0]):
j = i + len(node[0].leaves()) - 1
else:
j = i  # a terminal covers exactly one token
assert(i <= h <= k), "%s %s %s %s %s %s\n%s"%(h, h_1, h_2, i, j, k, parse)
parts.append((i, j, k, h, other, r))
if len(node) == 1 and not terminal(node[0]):
Y, h_1 = annotated_label(node[0])
r = self.grammar.rule_index(X, Y)
parts.append((i, k, k, h, h, r))
for n in node:
if terminal(n):
cur += 1
continue
stack.append((n, cur, cur + len(n.leaves())-1))
cur += len(n.leaves())
parts.reverse()
return np.array(parts)
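    # Illustration (hypothetical indices, not from the source): for a binary rule
    # covering words 0..1 with head word 1, a part could be (i=0, j=0, k=1, h=1,
    # m=0, r), i.e. span [0, 1] split after word 0, head 1, modifier 0, rule id r.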
def from_parts(self, parts):
r"""
Compose a set of parts into a parse structure.
Parameters
----------
parts : int ndarray
Each row is a part of the form (i, j, k, h, m, r) where
i < j < k is the span and the split point, h is the head
index, m is the modifier index, and r is the index of
the rule used.
Returns
-------
parse : nltk.Tree
A lexicalized parse tree with annotated nonterminals of the
form X^i where X is a non-terminal symbol in the grammar and
:math:`i \in \{0 \ldots n-1\}` is the index of the head word.
"""
parse = {}
for i in range(len(self.sentence)):
X = self.sentence[i]
parse[i, i] = Tree(annotate_label(self.tags[i], i),
(annotate_label(self.sentence[i], i),))
for part in parts:
i, j, k, h, _, r = part
if j != k:
X, _, __ = self.grammar.rule_nonterms(r)
parse[i, k] = Tree(annotate_label(X, h),
(parse[i,j], parse[j+1, k]))
else:
X, _ = self.grammar.rule_nonterms(r)
parse[i, k] = Tree(annotate_label(X, h),
(parse[i, k],))
parse = parse[0, len(self.sentence)-1]
return parse
def structure_path(self, graph, parse):
"""
Helper method for debugging. Checks that a graph contains parse.
Parameters
-----------
graph : hypergraph
parse : nltk.Tree
"""
parts = self.transform_structure(parse)
#labels = [self.encoder[part]
#print parts
label_weights = np.zeros(len(self.encoder)+20, dtype=np.int8)
for part in parts:
#assert
if not (tuple(part) in self.encoder): print part
# print part[-1], [self.grammar.nonterms[nt] for nt in self.grammar.rule_nonterms(part[-1])]
label_weights[self.encoder[tuple(part)]] = 1
weights = pydecode.transform(graph, label_weights)
part_set = set([self.encoder[tuple(part)] for part in parts])
for edge in graph.edges:
if edge.label == -1:
weights[edge.id] = 1
else:
if edge.label in part_set:
part_set.remove(edge.label)
bad_parts = self.transform_labels([part for part in part_set])
# print part_set, bad_parts, [self.grammar.rule_nonterms(p[-1]) for p in bad_parts]
#assert not part_set, [self.transform_labels([part for part in part_set])]
chart = pydecode.inside(graph, weights, weight_type=pydecode.Boolean)
for edge in graph.edges:
if edge.label != -1 and weights[edge.id] == 1 or edge.head.id == graph.root.id:
# print len(edge.tail), edge.label
for node in edge.tail:
# print node.id
if chart[node.id] != 1:
pass
# print self.transform_labels([edge.label])
# assert(False)
# assert chart[edge.head.id] == 1
# print chart
if not chart[graph.root.id]:
print "fail"
else:
print "good"
return
| true
| 9a6653c0c1e5d45956ead7d54da5fd2a77c7f0fb | Python | sam-rossin/auction-simulator | /auction_simulator.py | UTF-8 | 7,011 | 2.625 | 3 | [] | no_license |
#an auction simulator
#Sam Rossin
#fall 2015
import bidding_agent
import user_interface
import card
import random
import string
import os
import inspect
import importlib
import signal
from contextlib import contextmanager
#game constants
NUM_ROUNDS = 10
STARTING_BUDGET = 1000
CARDS_PER_AGENT = 10
#scoring constants
DOMINATE_POINTS = 1
HIGHEST_POINTS = 1
#this is useful for printing stuff
CATAGORIES = ["Science", "Ecology", "Culture", "Commerce", "Industry"]
#timeout exception: an exception for the purpose of cutting functions off
#if they run too long
class TimeoutException(Exception): pass
#signal handler to handle sigalrm
def signal_handler(signum, frame):
raise TimeoutException()
#function to use to limit the time of other functions
#must be used in a with clause
@contextmanager
def time_limit(seconds, agent_id, function):
signal.setitimer(signal.ITIMER_REAL, seconds)
try:
yield
except TimeoutException:
UI.on_function_timed_out(agent_id, function)
finally:
signal.setitimer(signal.ITIMER_REAL, 0)
#finds all classes in the given directory that are subclasses
#of BiddingAgent
def get_agent_classes(directory = "agents"):
modules = [file_name[:-3] for file_name in os.listdir(directory) if file_name.endswith('.py')]
agent_classes = []
for module_name in modules:
module = importlib.import_module(directory + "." + module_name)
for name, agent in inspect.getmembers(module, inspect.isclass):
if agent not in agent_classes and issubclass(agent, bidding_agent.BiddingAgent):
if agent != bidding_agent.BiddingAgent:
agent_classes.append(agent)
return agent_classes
#creates a bidding agent for each bidding agent class
#
#returns a list of these agents
#
#params: a list of classes, a list of cards, and a budget
#the id of an agent will be its position in the list
def make_bidding_agents(agent_classes, cards, budget):
agents = [None]*len(agent_classes)
for i in range(len(agent_classes)):
with time_limit(5, i, "__init__"):
agents[i] = agent_classes[i](cards, i, len(agent_classes), budget)
return agents
#generates the cards for a round. this can be replaced with
#any scheme for generating cards, this randomizer is a place-holder
#it doesn't follow the required rules or anything
def generate_cards(num, round_number):
cards = []
for i in range(num):
name = "".join(random.choice(string.ascii_letters) for i in range(10))
cards.append(card.Card(name, random.randint(0, 8), random.randint(0, 8),
random.randint(0, 8), random.randint(0, 8),
random.randint(0, 8)))
return cards
#does a round of bids
#
#params: the round number, the card being bid on
def get_bids(card, index, agents, budgets):
UI.on_auction_started(card)
bids = []
for i in range(len(agents)):
bid = 0
if agents[i]:
with time_limit(.1,i, "getBid"):
bid = agents[i].getBid(card, index)
if type(bid) != int:
bid = 0
UI.on_illegal_bid_received(i, "type")
elif bid > budgets[i]:
bid = 0
UI.on_illegal_bid_received(i, "budget")
UI.on_bid_received(card, i, bid)
bids.append((i, bid))
return bids
#gives out the results of a round of bids
#bids is a list of tuples, (id, bid)
def give_results(bids, agents, card, budgets):
bids = sorted(bids, key = lambda x: x[1], reverse = True)
winner = bids[0][0]
price = bids[1][1]
#keep track of budgets
budgets[winner] -= price
#sort by agent
bids = sorted(bids, key = lambda x: x[0])
formatted_bids = [x[1] for x in bids]
for i in range(len(agents)):
if agents[i]:
with time_limit(.1, i, "seeResults"):
agents[i].seeResults(card, winner, price, formatted_bids)
UI.on_auction_finished(card, winner, price)
return winner
#calculates everyone's score
#params: dictionary of cards won, num agents
#return list of scores
#
#may need to reorganize this to work with visualizations
def calculate_scores(cards_won, num_agents):
#total each players scores
score = [0]*num_agents
totals = []
for i in range(num_agents):
total = [0]*len(CATAGORIES)
if i in cards_won:
for card in cards_won[i]:
for j in range(len(CATAGORIES)):
total[j] += card.getList()[j+1]
totals.append(total)
#calculate domination scores
dominations = []
for i in range(num_agents):
for j in range(i):
iWins = 0
jWins = 0
for k in range(5):
if totals[i][k] > totals[j][k]:
iWins += 1
elif totals[i][k]<totals[j][k]:
jWins += 1
if iWins > jWins:
score[i] += DOMINATE_POINTS
dominations.append((i, j))
elif jWins > iWins:
score[j] += DOMINATE_POINTS
dominations.append((j, i))
#calculate scores based on largest total
catagories = [0]*len(CATAGORIES)
for i in range(len(CATAGORIES)):
highscore = -1
high_id = -1
for j in range(len(totals)):
if (totals[j][i] > highscore):
highscore = totals[j][i]
high_id = j
score[high_id] += HIGHEST_POINTS
catagories[i] = high_id
UI.on_round_finished(score, dominations, catagories)
return score
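#worked example (hypothetical totals, not from the source): with two agents whose
#category totals are [5,5,5,0,0] and [1,1,1,9,9], agent 0 wins 3 of 5 categories
#head-to-head and gets DOMINATE_POINTS; agent 1 still earns HIGHEST_POINTS for
#Commerce and Industry, so the final scores are 4 and 2.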
def main():
#set signal handler so we can use SIGALRM to enforce time limits
signal.signal(signal.SIGALRM, signal_handler)
#build UI. It is global because I got sick of passing it into every function
global UI
UI = user_interface.UserInterface()
#set up game
agent_classes = get_agent_classes()
UI.on_game_started([x.__name__ for x in agent_classes])
num_agents = len(agent_classes)
for rnd in range(NUM_ROUNDS):
#set up round
cards = generate_cards(num_agents*CARDS_PER_AGENT, rnd)
UI.on_round_started(rnd, cards)
agents = make_bidding_agents(agent_classes, cards, STARTING_BUDGET)
budgets = [STARTING_BUDGET for agent in agents]
cards_won = {}
#do bidding
for i in range(len(cards)):
bids = get_bids(cards[i], i, agents, budgets)
winner = give_results(bids, agents, cards[i], budgets)
if winner in cards_won:
cards_won[winner].append(cards[i])
else:
cards_won[winner] = [cards[i]]
#do scoring
calculate_scores(cards_won, num_agents)
UI.on_game_finished()
if __name__ == '__main__':
main()
| true
| c55feceeb38e72e75a06e98ac0a51f3092e411e6 | Python | nasgoncalves/opentracing-tutorial | /lesson02/exercise02/__main__.py | UTF-8 | 1,490 | 2.625 | 3 | [] | no_license |
import logging
import sys
import time
from jaeger_client import Config
def init_tracer(service):
logging.getLogger('').handlers = []
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
config = Config(
config={
'sampler': {
'type': 'const',
'param': 1,
},
'logging': True,
},
service_name=service,
)
# this call also sets opentracing.tracer
return config.initialize_tracer()
tracer = init_tracer('hello-world')
def say_hello(hello_to):
with tracer.start_active_span('say-hello') as scope:
scope.span.set_tag('hello-to', hello_to)
hello_str = format_string(hello_to)
print_hello(hello_str)
def format_string(hello_to):
with tracer.start_active_span('format') as scope:
hello_str = 'Hello, %s!' % hello_to
scope.span.log_kv({'event': 'string-format', 'value': hello_str})
return hello_str
def print_hello(hello_str):
with tracer.start_active_span('println') as scope:
print(hello_str)
scope.span.log_kv({'event': 'println'})
assert len(sys.argv) == 2
hello_to = sys.argv[1]
say_hello(hello_to)
# Jaeger Tracer is primarily designed for long-running server processes,
# so it has an internal buffer of spans that is flushed by a background thread.
# Since our program exits immediately, it may not have time to flush the spans
# to Jaeger backend
time.sleep(2)
tracer.close()
| true
| 3c81f11042d68b9af9da5235784b0068c7b7dd83 | Python | heni-l/DenseNet121 | /data_load.py | UTF-8 | 2,728 | 2.609375 | 3 | [] | no_license |
import torch
from PIL import Image
import numpy as np
import pandas as pd
from torchvision import transforms
from torch.utils.data import Dataset
from sklearn.model_selection import train_test_split
torch.manual_seed(1)
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_valid = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
class MyDataset(Dataset):
def __init__(self,num_classes, train = True, test = False):
self.num_classes = num_classes
if test == True:
data = np.load('test.npy')
else:
data = np.load('train.npy')
# Restore each 3072-long row vector to a 32x32x3 image (first 1024 values = R, next 1024 = G, last 1024 = B)
pic = []
for temp in data:
temp = np.array(temp)
r = temp[:1024].reshape(32, 32, 1)
g = temp[1024:2048].reshape(32, 32, 1)
b = temp[2048:].reshape(32, 32, 1)
temp = np.concatenate((r, g, b), -1)
pic.append(temp)
if test == True:
self.pic = pic
self.labels = [0 for img in pic]
self.transforms = transform_valid
else:
if num_classes == 20:
df = pd.read_csv('train1.csv')
labels = df['coarse_label'].values
elif num_classes == 100:
df = pd.read_csv('train2.csv')
labels = df['fine_label'].values
# Split the data into training and validation sets at a 9:1 ratio
pic_train, pic_valid, label_train, label_valid\
= train_test_split(pic, labels, test_size=0.1, random_state=61)
if train:
self.pic = pic_train
self.labels = label_train
self.transforms = transform_train
else:
self.pic = pic_valid
self.labels = label_valid
self.transforms = transform_valid
def __getitem__(self, index):
img = self.pic[index]
img = Image.fromarray(np.uint8(img))
img = self.transforms(img)
label = self.labels[index]
return img, label
def __len__(self):
return len(self.labels)
# Get the training and validation set data
def GetTrainData(num_classes):
return MyDataset(num_classes), MyDataset(num_classes, train = False)
# Get the test set data
def GetTestData(num_classes):
return MyDataset(num_classes, train = False, test = True)
| true
| 5d99371d591ae83303ace38f35e875e779a17fa6 | Python | JAGANPS/ml-lab | /pgm2.py | UTF-8 | 903 | 2.96875 | 3 | [] | no_license |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 9 10:01:24 2019
@author: ADMIN
"""
from sklearn.cluster import KMeans
#from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
data=pd.read_csv("km1.csv")
df1=pd.DataFrame(data)
print(df1)
f1=df1['Distance_Feature'].values
f2=df1['Speeding_Feature'].values
x=np.matrix(list(zip(f1,f2)))
plt.plot()
plt.xlim([0,100])
plt.ylim([0,50])
plt.title('dataset')
plt.ylabel('Speeding_Feature')
plt.xlabel('Distance_Feature')
plt.scatter(f1,f2)
plt.show()
#create new plot and data
plt.plot()
colors=['b','g','r']
markers=['o','v','s']
#kmeans Algorithm
#k=3
KMeans_model=KMeans(n_clusters=3).fit(x)
plt.plot()
for i,l in enumerate(KMeans_model.labels_):
plt.plot(f1[i],f2[i],color=colors[l],marker=markers[l],ls='None')
plt.xlim([0,100])
plt.ylim([0,50])
plt.show()
| true
| 5daaeaf1144217886f8faca118a5ac38e3f1593d | Python | amitpanda93/MACBackup | /Data Science/Notebooks/pandas_e2.py | UTF-8 | 431 | 2.71875 | 3 | [] | no_license |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 21 14:41:22 2019
@author: amit.panda03
"""
import pandas as pd
import numpy as np
df = pd.DataFrame([[18,10,5,11,-2],
[2,-2,9,-11,3],
[-4,6,-19,2,1],
[3,-14,1,-2,8],
[-2,2,4,6,13]],
index = list('pqrst'),
columns = list('abcde'))
print(df)
| true
| 84d1922cfb697edfc5f3e2d770e9082d9c74f998 | Python | allen-studio/learn-python3-records | /ex38.py | UTF-8 | 1,194 | 4.125 | 4 | [] | no_license |
ten_things = "Apple Oranges Crows Telephone Light Sugar"
print("Wait there are not 10 things in that list. Let's fix that.")
stuff = ten_things.split(' ') # split the sentence on spaces to get a list
print(stuff) # print it to confirm it is now a list
more_stuff = ["Day", "Night", "Song", "Frisbee",
"corn", "Banana", "Girl", "Boy"]
while len(stuff) != 10: # stop once the stuff list holds 10 elements
    next_one = more_stuff.pop() # pop removes the last element of more_stuff and assigns it to next_one
    print("Adding", next_one)
    stuff.append(next_one) # append next_one to stuff; keep looping until it reaches 10 elements
    print(f"There are {len(stuff)} items now.")
print("There we go:", stuff)
print("Let's do some things with stuff.")
print(stuff[1]) # print the 2nd element of stuff (list indices start at [0])
print(stuff[-1]) # print the last element of stuff
print(stuff.pop()) # remove and print the last element of stuff
print(' '.join(stuff)) # join stuff back into a sentence with spaces
print('#'.join(stuff[3:5])) # join the 4th and 5th elements with '#' between them and print
| true
| 75f481715ea3e94879394c0818db06452b4d91df | Python | AllenMostafa/project-euler- | /p12_Highly_divisible_triangular_number.py | UTF-8 | 1,047 | 4.375 | 4 | [] | no_license |
# Project Euler --> https://projecteuler.net/problem=12
# Problem 12 : Highly divisible triangular number
#STEP 1: Create a function that calculates the nth triangular number
#STEP 2: Create a function that returns the number of divisors of a triangular number
#STEP 3: Count the divisors, and when a triangular number reaches 500 or more, print it
def triangular(n):
""" Calculate the nth triangular"""
triang = n * (n + 1) / 2
return triang
def factors(x):
"""Calculate the number of divisors and return the number if divisor"""
j = []
for i in range(1,int(x**0.5) + 1):
if x % i == 0:
if(i != x/i):
j.append(i)
j.append(x/i)
else:
j.append(i)
return len(j)
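# e.g. factors(28) counts 6 divisors (1, 2, 4, 7, 14, 28): pairing each i up to
# sqrt(x) with x/i visits every divisor exactly once.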
def result():
"""Calculates the first triangle number that has 500 or above divisors """
n = 500
while True:
x = triangular(n)
z = factors(int(x))
if z >= 500:
break
else:
n += 1
return x
m = result()
print(m)
| true
| 56a6d188fc7c2db25959d16922a44b300140b322 | Python | amadeus4dev/amadeus-python | /amadeus/client/response.py | UTF-8 | 1,253 | 3.03125 | 3 | ["MIT"] | permissive |
from amadeus.mixins.parser import Parser
class Response(Parser, object):
'''
The response object returned for every API call.
:var http_response: the raw http response
:var request: the original Request object used to make this call
:vartype request: amadeus.Request
:var result: the parsed JSON received from the API, if the result was JSON
:vartype result: dict
:var data: the data extracted from the JSON data, if the body contained
JSON
:vartype data: dict
:var body: the raw body received from the API
:vartype body: str
:var parsed: whether the raw body has been parsed into JSON
:vartype parsed: bool
:var status_code: The HTTP status code for the response, if any
:vartype status_code: int
'''
# Initialize the Response object with the
# HTTPResponse object to parse, the client that made the request
# and the original request made
def __init__(self, http_response, request):
self.http_response = http_response
self.request = request
# PROTECTED
# Parses the response, using the client to log any errors
def _parse(self, client):
self._parse_status_code()
self._parse_data(client)
return self
| true
| 2c64aa987fb60710b4c1f47d0837853f3dc232fa | Python | khlidar/Concrete_Beam_Program | /Shapes.py | UTF-8 | 5,457 | 3.46875 | 3 | [] | no_license |
'''
Description:
Definition of the Shapes class and its subclasses
Information on authors:
Name: Contribution:
--------- ------------------
Jacob Original code
Kristinn Hlidar Gretarsson Original code
Version history:
'''
# Import
from math import sqrt
# Definitions
class Shapes(object):
def __init__(self):
self.name = 'I\'m just a blob'
self.h = 0
self.b = 0
def changeShape(self, new_shape):
shapes = ['rectangle', 'triangle', 'circle']
if new_shape.lower() in shapes:
self.name = new_shape.lower()
def giveParameters(self):
return print(self.parameter())
def parameter(self):
return 'I\'m just a blob'
def changeParameter(self):
print('I don\'t know how to do that')
def width(self, location):
return 0
def getWidth(self, location):
return self.width(location)
def getHeight(self):
return self.h
def isValidShapeParameter(self, input):
    if type(input) == float:
        return True
    else:
        print(f'{input} is not valid input into {self}')
        return False
def __str__(self):
return self.name
class Rectangle(Shapes):
def __init__(self, breadth=0., height=0.):
self.name = 'rectangle'
self.b = breadth
self.h = height
def parameter(self):
return f'{self.name} width = {self.b} and height = {self.h}'
def width(self, location):
if location <= self.h:
return self.b
else:
print(f'location is outside of shape with height {self.h}')
def changeParameter(self, breadth=0., height=0.):
if breadth:
self.b = breadth
if height:
self.h = height
class Triangle(Shapes):
def __init__(self, breadth=0., height=0.):
self.name = 'triangle'
self.b = breadth
self.h = height
def parameter(self):
return f'{self.name} width = {self.b} and height = {self.h}'
def width(self, location):
if location <= self.h:
b = self.b * location / self.h
return b
else:
print(f'location is outside of shape with height {self.h}')
def changeParameter(self, breadth=0., height=0.):
if breadth:
self.b = breadth
if height:
self.h = height
class Circle(Shapes):
def __init__(self, diameter=0):
self.name = 'circle'
self.d = diameter
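    # Chord width of a circle at vertical depth y from the top edge:
    # w(y) = 2*sqrt(y*d - y^2), which is what width() below computes.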
def width(self, location):
if location <= self.d:
b = 2 * sqrt(2 * location * self.d / 2 - location * location)
return b
else:
print(f'location is outside of circle with diameter {self.d}')
def changeParameter(self, diameter=0):
if diameter:
self.d = diameter
def getHeight(self):
return self.d
class T_beam(Shapes):
def __init__(self, breadth=0, height=0, flange_breadth=0, flange_height=0):
self.name = 'T-beam'
self.b = breadth
self.h = height
self.f_b = flange_breadth
self.f_h = flange_height
def width(self, location):
if 0 <= location <= self.f_h:
b = self.f_b
return b
elif self.f_h < location <= self.h:
b = self.b
return b
else:
print(f'location {location} is outside of shape T-beam')
def changeParameter(self, breadth=0, height=0, flange_breadth=0, flange_height=0):
if breadth:
self.b = breadth
if height:
self.h = height
if flange_breadth:
self.f_b = flange_breadth
if flange_height:
self.f_h = flange_height
class I_beam(Shapes):
def __init__(self, breadth=0, height=0, flange_u_breadth=0, flange_u_height=0,
flange_l_breadth=0, flange_l_height=0):
self.name = 'I-beam'
self.b = breadth
self.h = height
self.fu_b = flange_u_breadth
self.fu_h = flange_u_height
self.fl_b = flange_l_breadth
self.fl_h = flange_l_height
def width(self, location):
if 0 <= location <= self.fu_h:
return self.fu_b
elif self.fu_h < location <= self.h - self.fl_h:
return self.b
elif self.h - self.fl_h < location <= self.h:
return self.fl_b
else:
print(f'Location {location} is outside of shape I-beam')
def changeParameter(self, breadth=0, height=0, flange_u_breadth=0, flange_u_height=0,
flange_l_breadth=0, flange_l_height=0):
if breadth:
self.b = breadth
if height:
self.h = height
if flange_u_breadth:
self.fu_b = flange_u_breadth
if flange_u_height:
self.fu_h = flange_u_height
if flange_l_breadth:
self.fl_b = flange_l_breadth
if flange_l_height:
self.fl_h = flange_l_height
# Run main program
if __name__ == '__main__':
a = Rectangle(16, 28)
print(a.getHeight())
test = isinstance(a, Shapes)
a.changeParameter(16., 28.)
a.giveParameters()
print(test)
| true
| 7222cb1908a1b5ef98ee5c827ad19151f97546fa | Python | tmoertel/practice | /misc/weighted_stream_selection.py | UTF-8 | 3,744 | 4.15625 | 4 | [] | no_license |
"""Write a function to generate a weighted random sample from a stream."""
import random
def select_weighted_value(weighted_values):
"""Return a randomly selected value from a stream of weighted values.
Args:
weighted_values: an iterable stream of (w, x) pairs, where w is a
non-negative integer weight and x is a value.
Returns:
A randomly selected value. Each value's chance of being selected
is proportional to its corresponding weight.
Raises:
ValueError if the stream has negative weights or lacks any
positive weights.
"""
no_selection = object() # Create distinct no-selection indicator.
selected_value = no_selection
total_weight = 0
for weight, value in weighted_values:
if weight < 0:
raise ValueError('received negative weight %r' % weight)
total_weight += weight
if 0 < total_weight and random.randint(1, total_weight) <= weight:
selected_value = value
if selected_value is no_selection:
raise ValueError('there were no positively weighted values')
return selected_value
"""Proof of correctness.
Suppose for input the function is given a series of non-negatively
weighted values (w_i, x_i) for i = 1..N. Let W = sum(w_i for all i)
be greater than 0. We want to show that, when the loop exits, each
value x_i will have had a w_i/W probability of having been selected.
Our proof will proceed by induction on N.
First, the base case of N = 1. In this case, there is only one
value to select and thus the only properly weighted selection is
that value (provided that w_1 > 0, which our assumption that W > 0
implies when N = 1). When the final if-statement within the loop
is evaluated, weight = total_weight = w_1, and x_1 will always be
selected since all randomly selected integers within the range
1..w_1 are less than or equal to w_1. Thus, when N = 1, the
algorithm always returns a properly weighted selection.
As our induction hypothesis, suppose that the algorithm works for
inputs of length N - 1. Now consider inputs of length N. When the
loop finally exits, it will have just considered the final input value
x_N. At that time, total_weight will have accumulated all of the
input weights and will equal W. Therefore, when x_N was considered,
it must have been given exactly the proper w_N/W probability of having
been selected. If x_N was selected, then, the algorithm returns a
properly weighted selection. Otherwise, x_N can be ruled out, and the
problem becomes equivalent to returning a properly weighted selection
from the remaining N - 1 inputs. In this case, the algorithm returns
whatever was already in selected_value. This value is the same as
what would have been returned by the algorithm had it been called on
just the first N - 1 inputs. By our induction hypothesis, this is
also a properly weighted selection. Q.E.D.
"""
def test_sampling_an_empty_set_must_raise_error():
from nose.tools import raises
raises(ValueError)(select_weighted_value)([])
def test_negative_weights_must_raise_error():
from nose.tools import raises
raises(ValueError)(select_weighted_value)([(-1, 'value')])
def test_zero_total_weight_must_raise_error():
from nose.tools import raises
raises(ValueError)(select_weighted_value)([(0, 'value')])
def test_singleton_value_must_always_be_selected():
for weight in xrange(1, 10):
assert select_weighted_value([(weight, 'value')]) == 'value'
def test_zero_weighted_value_must_never_be_selected():
for weight in xrange(1, 10):
assert select_weighted_value([(0, 'x'), (weight, 'value')]) == 'value'
assert select_weighted_value([(weight, 'value'), (0, 'x')]) == 'value'
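# A quick empirical sanity check (illustrative addition, not part of the original
# tests): with weights 1 and 3, 'b' should be selected about 75% of the time.
def demo_empirical_frequencies(trials=10000):
    hits = sum(1 for _ in range(trials)
               if select_weighted_value([(1, 'a'), (3, 'b')]) == 'b')
    print('b selected in %.1f%% of %d trials (expect ~75%%)'
          % (100.0 * hits / trials, trials))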
| true
| e90b9efe51b16d6ffe64f9337dc516f808511d52 | Python | ana-balica/nosnore | /nosnore/core/signal.py | UTF-8 | 1,237 | 2.90625 | 3 | [] | no_license |
import numpy as np
import pylab as pl
from scipy.io import wavfile
def getwavdata(filename):
return wavfile.read(filename)
def savewavdata(filename, rate, data):
wavfile.write(filename, rate, data)
class Signal(object):
def __init__(self, signal, time):
self._signal = signal
self._size = signal.size
self._time = time
if self._size != self._time.size:
raise ValueError("Signal dimentions should coincide")
@property
def signal(self):
return self._signal
@signal.setter
def signal(self, value):
self._signal = value
@property
def size(self):
return self._size
@size.setter
def size(self, value):
self._size = value
@property
def time(self):
return self._time
@time.setter
def time(self, value):
self._time = value
def split(self, window):
    # Slicing clamps at the sequence end, so the final partial chunk is kept
    # (the original try/except IndexError was unreachable: slicing never raises).
    start = 0
    end = window
    chunks = []
    while start < self.size:
        chunks.extend([self.signal[start:end]])
        start = end
        end += window
    return chunks
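# Example (hypothetical data, not from the source):
#   s = Signal(np.arange(10), np.arange(10))
#   [len(c) for c in s.split(4)]  ->  [4, 4, 2]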
| true
| db1f3e3e5a8e9a322e5513fac245fe40f08c86de | Python | green-fox-academy/FulmenMinis | /week-03/day-03/purple_steps.py | UTF-8 | 436 | 3.1875 | 3 | [] | no_license |
from tkinter import *
root = Tk()
canvas = Canvas(root, width='300', height='300')
canvas.pack()
# reproduce this:
# [https://github.com/greenfox-academy/teaching-materials/blob/master/workshop/drawing/purple-steps/r3.png]
#19 rectangles of 11x11 px each
def drawing_function(x, y):
canvas.create_rectangle(x, y, x+11, y+11, fill = 'medium orchid')
x=11
y=11
for i in range (0,19):
drawing_function(x,y)
x+=11
y+=11
root.mainloop()
| true
|
1bdf6aa2cba5b399f340fa47b2591d70b4452ec3
|
Python
|
Dnoniel-Ermolaev/python-deep-learning-test
|
/doge_class.py
|
UTF-8
| 4,960
| 2.578125
| 3
|
[] |
no_license
|
"""
Classification sample
Command line to run:
python ie_classification_sample.py -i image.jpg \
-m squeezenet1.1.xml -w squeezenet1.1.bin -c imagenet_synset_words.txt
"""
import os
os.add_dll_directory("C:\\Program Files (x86)\\Intel\\openvino_2021.4.689\\deployment_tools\\ngraph\\lib")
os.add_dll_directory("C:\\Program Files (x86)\\Intel\\openvino_2021.4.689\\deployment_tools\\inference_engine\\external\\tbb\\bin")
os.add_dll_directory("C:\\Program Files (x86)\\Intel\\openvino_2021.4.689\\deployment_tools\\inference_engine\\bin\\intel64\\Release")
os.add_dll_directory("C:\\Program Files (x86)\\Intel\\openvino_2021.4.689\\deployment_tools\\inference_engine\\external\\hddl\\bin")
os.add_dll_directory("C:\\Program Files (x86)\\Intel\\openvino_2021.4.689\\opencv\\bin")
os.add_dll_directory("C:\\Program Files (x86)\\Intel\\openvino_2021.4.689\\python\\python3.9\\openvino\\libs")
os.add_dll_directory("C:\Program Files (x86)\Intel\openvino_2021.4.689\deployment_tools\inference_engine\include")
import os
import cv2
import sys
import argparse
import numpy as np
import logging as log
from openvino.inference_engine import IECore
sys.path.append('../lib/')
#from ie_classifier import InferenceEngineClassifier
def Sum(A, B):
    return A + B  # was A+B+B+B, which fails test_Sum below
def test_Sum():
assert (5+4) == Sum(5,4)
class InferenceEngineClassifier:
def __init__(self, configPath = None, weightsPath = None, device = 'CPU', classesPath = None):
self.ie = IECore()
self.net = self.ie.read_network(model=configPath)
self.exec_net = self.ie.load_network(network=self.net, device_name=device)
# with open(args.labels, 'r') as f:
# labels = [line.split(',')[0].strip() for line in f]
return
def get_top(self, prob, topN = 1):
result =[]
result = np.squeeze(prob)
# Get an array of args.number_top class IDs in descending order of probability
result = np.argsort(result)[-topN :][::-1]
return result
def _prepare_image(self, image, h, w):
#image = image.transpose((2, 0, 1))
image = cv2.resize(image, (w, h))
image = image.transpose((2, 0, 1))
return image
def classify(self, image):
probabilites=None
input_blob = next(iter(self.net.inputs))
out_blob = next(iter(self.net.outputs))
n, c, h, w = self.net.inputs[input_blob].shape
image = self._prepare_image(image, h, w)
output = self.exec_net.infer(inputs = {input_blob: image})
output = output[out_blob]
return output
#def build_argparser():
# parser = argparse.ArgumentParser()
#
# parser.add_argument('-m', '--model', help='Path to an .xml file with a trained model.', required=True, type=str)
# parser.add_argument('-w', '--weights', help='Path to an .bin file with a trained weights.', required=True, type=str)
# parser.add_argument('-i', '--input', help='Path to image file', required=True, type=str)
# parser.add_argument('-d', '--device', help='Specify the target \
# device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. \
# Sample will look for a suitable plugin for device specified \
# (CPU by default)', default='CPU', type=str)
# parser.add_argument('-c', '--classes', help='File containing classes \
# names', type=str, default=None)
# return parser
def build_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', help='Path to an .xml file with a trained model.', default='C:\\Users\\ermol\\public\\squeezenet1.1\\FP16\\squeezenet1.1.xml', type=str)
parser.add_argument('-w', '--weights', help='Path to an .bin file with a trained weights.', default='C:\\Users\\ermol\\public\\squeezenet1.1\\FP16\\squeezenet1.1.bin', type=str)
parser.add_argument('-i', '--input', help='Path to image file', default='C:\\Users\\ermol\\PycharmProjects\\practice\\monkey.jpeg', type=str)
parser.add_argument('-d', '--device', help='Specify the target \
device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. \
Sample will look for a suitable plugin for device specified \
(CPU by default)', default='CPU', type=str)
parser.add_argument('-c', '--classes', help='File containing classes \
names', type=str, default=None)
return parser
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s",level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
log.info("Start IE classification sample")
ie_classifier = InferenceEngineClassifier(configPath=args.model,weightsPath=args.weights, device=args.device,classesPath=args.classes)
img = cv2.imread(args.input)
prob = ie_classifier.classify(img)
predictions = ie_classifier.get_top(prob, 5)
log.info("Predictions: " + str(predictions))
return
if __name__ == '__main__':
sys.exit(main())
| true
| c5179166b150a9223fb60df750fbf27360af60ca | Python | 3enoit3/tools | /gen/gen.py | UTF-8 | 1,051 | 2.96875 | 3 | [] | no_license |
# Input
def split_lines(iInput):
return [l.rstrip("\n") for l in iInput.split("\n") if l]
def merge_lines(iInput, iLines = 1):
l = split_lines(iInput)
o = []
while l:
o.append( ' '.join(l[:iLines]) )
del l[:iLines]
return o
# Output
def as_input():
return lambda s, c: s
def split_input(i = 0):
return lambda s, c: s.split()[i]
def optional_input(iVar, iIf, iElse = ""):
return lambda s, c: iIf.format(**c) if iVar in c else iElse
class LineGenerator:
def __init__(self, iFormat = "{}", iParams = []):
self._format = iFormat
self._params = iParams
def process(self, s, c):
aParams = []
if self._params:
aParams = [ f(s, c) for f in self._params ]
else:
aParams = [s]
return self._format.format(*aParams, **c)
# Apply
def generate_lines(iInputLines, iGenerators, iContext={}):
aGenerated = []
for s in iInputLines:
aGenerated += [ g.process(s, iContext) for g in iGenerators ]
return aGenerated
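# Hypothetical usage (the names and format below are illustrative, not from the source):
#   lines = split_lines("a 1\nb 2\n")
#   gens = [LineGenerator("#define {prefix}_{0} {1}", [split_input(0), split_input(1)])]
#   generate_lines(lines, gens, {"prefix": "CFG"})
#   -> ['#define CFG_a 1', '#define CFG_b 2']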
| true
| 69a0eee3e8f4444352d47cb1aba84b828e0eb622 | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2916/58575/304882.py | UTF-8 | 1,317 | 2.625 | 3 | [] | no_license |
n=int(input())
nums=list(map(int,input().split(" ")))
deleteNumber=0
i=len(nums)-1
while i>=5:
if nums[i]==42:
judge=False
j=i-1
while j>=4 and judge==False:
if nums[j]==23:
k=j-1
while k>=3 and judge==False:
if nums[k]==16:
l=k-1
while l>=2 and judge==False:
if nums[l]==15:
m=l-1
while m>=1 and judge==False:
if nums[m]==8:
t=m-1
while t>=0 and judge==False:
if nums[t]==4:
nums=nums[0:t]+nums[t+1:m]+nums[m+1:l]+nums[l+1:k]+nums[k+1:j]+nums[j+1:i]
judge=True
t-=1
m-=1
l-=1
k-=1
j-=1
if judge==False:
deleteNumber+=1
nums=nums[0:-1]
else:
deleteNumber+=1
nums=nums[0:-1]
i=len(nums)-1
deleteNumber+=len(nums)
print(deleteNumber)
| true
| 5c418bbcc2250748f3191141b20399318b86e8a9 | Python | jkuzm/2020repo | /pythonViaPycharm1/testDecorator.py | UTF-8 | 1,918 | 4.0625 | 4 | [] | no_license |
def fib_gen(limit):
i,a,b =0,0,1
while(i < limit):
yield a
a,b = b,a+b
i += 1
for i in fib_gen(10):
print(i, end= " ")
print()
#simplest decorator sample from https://medium.com/@dmi3coder/pythons-decorators-vs-java-s-annotations-same-thing-2b1ef12e4dc5
def as_html(func):
def wrapper():
result = func()
return f'<html>{result}</html>'
return wrapper
@as_html
def say_hello():
return 'Hello'
print(say_hello())
#now, from there, cached decorator to demonstrate function not called if cached,
#but I'll check what I can't get, if parameter matters, so it's different than sample
import time
cached_items = {}
def cached(func):
def wrapper(*args, **kwargs):
global cached_items
if func.__name__ not in cached_items:
cached_items[func.__name__] = func(*args,**kwargs)
return cached_items[func.__name__]
return wrapper
@cached
def my_task(n):
time.sleep(1.0)
return n*2
start_time = time.time()
res = my_task(10)
print("--- %.8f seconds first execution producing result %d" % ((time.time()-start_time), res))
start_time = time.time()
res = my_task(20) #yes, I was right, cached doesn't count for arg, so result is wrong
print("--- %.8f seconds first execution producing result %d" % ((time.time()-start_time), res))
start_time = time.time()
res = my_task(10)
print("--- %.8f seconds first execution producing result %d" % ((time.time()-start_time), res))
#now let's get clear on the usage of *args
#* means variable length of args, args becomes iterable
def print_all(*args):
for i in args:
print(i,end=' ')
print_all('We','will','survive!')
print()
#key-worded args: ** - a special sign to take them that way
def print_keys_vals(**kwargs):
for key,val in kwargs.items():
print("{}:{} ".format(key,val))
print_keys_vals(first = 'We',second = 'will',third = 'survive!')
print()
| true
| 2f3ce1a3c8eb8c836d969001bd89eb41e1e882e8 | Python | geverartsdev/TechnofuturTIC | /Flo et toto/Pong/components/score.py | UTF-8 | 1,019 | 2.921875 | 3 | ["MIT"] | permissive |
import pygame
from pygame.locals import Color
from Pong.constants import ECRAN
class Score(pygame.sprite.Sprite):
color = Color('green')
def __init__(self, left):
pygame.sprite.Sprite.__init__(self, self.containers)
self.font = pygame.font.Font(None, 30)
self.font.set_italic(1)
self.valeur = 0
self.modif = True
self.update()
if left:
self.rect = self.image.get_rect(right=ECRAN.width/2 - 40, top=10)
self.color = Color('red')
else:
self.rect = self.image.get_rect(left=ECRAN.width/2 + 40, top=10)
self.color = Color('blue')
self.modif = True
self.update()
def point(self):
self.valeur += 1
self.modif = True
def score(self):
return self.valeur
def update(self):
if self.modif:
msg = "Score: " + str(self.valeur)
self.image = self.font.render(msg, 0, self.color)
self.modif = False
| true
| d65157b5d744d19af3752806b7f1049f1deadab8 | Python | ahndroo/Udemy | /SupervisedML/util.py | UTF-8 | 1,335 | 3.171875 | 3 | [] | no_license |
import numpy as np
import pandas as pd
def get_train_data(limit = None):
print('Reading in and transforming data...')
df = pd.read_csv('MNISTtrain.csv')
data = df.as_matrix()
np.random.shuffle(data)
X = data[:,1:] / 255.0 # normalize data
Y = data[:,0]
if limit is not None:
X, Y = X[:limit], Y[:limit]
return X, Y
def get_xor():
X = np.zeros((200,2))
X[:50] = np.random.random((50,2)) / 2 + .5 # (0.5-1, 0.5-1)
X[50:100] = np.random.random((50,2)) / 2 # (0-0.5, 0-0.5)
X[100:150] = np.random.random((50,2)) /2 + np.array([[0, .5]]) # (0-0.5, 0.5-1)
X[150:] = np.random.random((50,2)) /2 + np.array([[.5, 0]]) # (0.5-1, 0-0.5)
Y = np.array([0]*100 + [1]*100)
return X, Y
def get_donut():
N = 200
R_inner = 5
R_outer = 10
div = int(N/2)
# distance from origin is the radius plus random normal noise
# angle theta is uniformly distributed between (0, 2*pi)
R1 = np.random.randn(div) + R_inner
theta = 2*np.pi*np.random.random(div)
X_inner = np.concatenate([[R1*np.cos(theta)], [R1*np.sin(theta)]]).T
R2 = np.random.randn(div) + R_outer
theta = 2*np.pi*np.random.random(div)
X_outer = np.concatenate([[R2*np.cos(theta)], [R2*np.sin(theta)]]).T
X = np.concatenate([X_inner, X_outer])
Y = np.array([0]*div + [1]*div)
return X, Y
| true
| 035d72045751f6f3bb66094db564b5b45431446f | Python | hehaiyang111/MLAlgorithm | /apriori/analysisByapriori.py | UTF-8 | 564 | 2.59375 | 3 | [] | no_license |
import pandas as pd
from apriori import *
# inputFile
inputFile = './menu_orders.xls'
outputFile = './apriori_rules.xls' # results
# read the data
data = pd.read_excel(inputFile,header=None)
# set entries that are not null to 1
ct = lambda x : pd.Series(1, index=x[pd.notnull(x)])
b = map(ct,data.as_matrix())
# convert to a matrix, filling empty values with 0
data = pd.DataFrame(list(b)).fillna(0)
del b # delete the intermediate variable
# set the minimum thresholds (support and confidence)
support = 0.2
confidence = 0.5
ms='---'
find_rule(data,support,confidence,ms).to_excel(outputFile)
| true
| 33d5c074d7b9d862bf0b524dde91030d8efc4697 | Python | rajlath/rkl_codes | /CodeForces/distance.py | UTF-8 | 325 | 3.515625 | 4 | [] | no_license |
a = 10
b = 20
weakness = 0
if abs(a - b) == 1: print(1)
elif abs(a - b) == 2: print(2)
elif a == b:print(0)
else:
weakness = 0
while True:
a+=1
b-=1
weakness += 2
if a == b:
break
if abs(a - b) == 1:
weakness += 2
break
print(weakness)
| true
| c82d9af22d63201f54608c413d335cc0b2997f90 | Python | ogosborne/fasta_alignment_filters | /cat_alns.py | UTF-8 | 2,499 | 3.15625 | 3 | [] | no_license |
#!/usr/bin/python2
from Bio import AlignIO
import glob
from Bio.Align import MultipleSeqAlignment
from Bio.SeqRecord import SeqRecord
import argparse
import sys
parser = argparse.ArgumentParser(usage = 'python2 cat_alns.py -o STR -t STR [-i STR -h]\n\nPython 2 only\n\nRequires: Bio, glob, argparse\n\nThis program concatenates multiple fasta alignments with missing sequences into a single fasta alignment. You must provide the taxa you want to keep in a comma-delimited list, if they are missing from input alignments they are replaced by "N"s. The script must be run from within the input fasta directory\n\nAuthor: Owen G Osborne\n\n')
parser.add_argument("-i", "--input_extension", help="input file extension, e.g fa. Files must be in current working directory", default="fas")
parser.add_argument("-o", "--outfile", required=True, help="path to output file")
parser.add_argument("-t", "--taxa", help="comma delimited taxon names", required=True, type=str)
args = parser.parse_args()
files = '{0}{1}'.format("*.",args.input_extension)
outfile = args.outfile
taxa = args.taxa
taxa = taxa.split(",")
numfiles = len(glob.glob(files))
numtaxa = len(taxa)
line1 = '{0} fasta files and {1} taxa found, alignments will be concatenated and written to {2}\n'.format(numfiles, numtaxa, outfile)
print(line1)
if numfiles > 0:
cataln = MultipleSeqAlignment([])
for taxon in taxa:
cataln.add_sequence(taxon, "") # make alignment with all required taxa
for fasta in glob.glob(files):
fastaname = fasta.split('/')[-1] # get fasta name without path
aln = AlignIO.read(fasta, "fasta") # extract alignment from fasta
seqLen = aln.get_alignment_length()
newaln = MultipleSeqAlignment([])
seq = "X"
for catrec in cataln: # for each taxon
catid = str(catrec.id)
for rec in aln:
if str(rec.id) == catid: # find sequence in fasta alignment if it's there
seq = str(rec.seq)
if seq == "X":
seq = ("N" * seqLen) # if not make a sequence of Ns of the correct length
catseq = str(catrec.seq)+seq # concatenate the previously concatenate sequences and the new sequence
newaln.add_sequence(catid,catseq) # add to a new alignment
seq = "X"
cataln = newaln # update the concatenated alignment
f = open (outfile,'w')
AlignIO.write(cataln,f,"fasta") # write the concatenated alignment to outfile
print "Finished"
sys.exit()
else:
print "\nNo fasta files found, check working directory contains fasta files and --input_extension is set correctly\n"
sys.exit()
| true
| 242a06cc55e47a16a02d345207eb79d4883539c5 | Python | andrejcermak/twitter_downloader | /dat.py | UTF-8 | 6,750 | 2.625 | 3 | [] | no_license |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb
import json
import oauth2 as oauth
import time
import re
import pandas as pd
from collections import defaultdict
'''
Script that downloads tweets.
user_timeline() downloads 200 tweets for given user
my_timeline() downloads 200 tweets from clients timeline
searchApi() downloads 200 tweets for given query
'''
def user_timeline(user_name, min_id):
timeline_endpoint = "https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=" + user_name + \
"&count=200&since_id=" + str(
min_id) + ""
return result(timeline_endpoint)
def my_timeline():
timeline_endpoint = "https://api.twitter.com/1.1/statuses/home_timeline.json?trim_user=false"
return result(timeline_endpoint)
def searchApi(query, min_id):
timeline_endpoint = "https://api.twitter.com/1.1/search/tweets.json?q=" + query + \
"&lang=en&result_type=recent&count=200&since_id=" + str(min_id) + "&lang=en"
return result(timeline_endpoint)
def result(timeline_endpoint):
global client
response, data = client.request(timeline_endpoint)
tweets = json.loads(data)
return tweets
def exclude_link(str):
i = str.find("http")
return str[:i]
def exclude_emoji(s):
l = list(s)
df = pd.DataFrame({'phrases': [s]})
for i in range(0, len(s)):
for emoji in re.findall(u'[\U0001f300-\U0001f650]|[\u2000-\u3000]', s[i]):
l[i] = " "
# print emoji.encode('unicode-escape')
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
return ''.join(l)
# format pre datetime
def formTime(t):
mon = t[4:7]
day = t[8:10]
tt = t[11:19]
time_zone = t[20:25]
year = t[26:30]
stime = time.strptime(mon, "%b")
mon = stime.tm_mon
mon = "0" + str(mon)
tt = year + "-" + mon[-2:] + "-" + day + " " + tt
return tt
# this search looks up the newest tweet_id in the database and fetches tweets after it
def search_by_user(db, cur, user):
cur.execute("SELECT user_name,last_tweet_id FROM search_by_user WHERE user_name LIKE '%" + user + "%'")
# selects name and last_tweet_id from the database, returned as a dict keyed by name with the id as value
a = dict(cur.fetchall())
min_id = 1
if a == {}:
cur.execute("INSERT INTO search_by_user(user_name, user_id, last_tweet_id) VALUES(%s,%s,%s)", (user, "0", "0"))
else:
min_id = a[user]
max_id = 0
query_id = -1
tweets = user_timeline(user, min_id)
tweet_list = []
for tweet in tweets:
tweet_list.append(tweet)
if not tweets == []:
# downloads the tweets and stores them in tab (the database table)
for tweet in reversed(tweet_list):
name = tweet['user']['name']
i = tweet['user']['id']
t = tweet['created_at']
retweet_count = tweet['retweet_count']
favorite_count = tweet['favorite_count']
text = exclude_link(tweet['text'])
name = exclude_emoji(name)
text = exclude_emoji(text)
print text
tweet_id = tweet['id']
t = formTime(t)
max_id = max(tweet_id, max_id)
min_id = min(tweet_id, min_id)
cur.execute(
"INSERT INTO tab (retweet_count, favorite_count,user_name, user_id,text, date,tweet_id,company_id) \
VALUES(%s,%s,%s,%s,%s,%s,%s,%s)",
(retweet_count, favorite_count, name, i, text, t, tweet_id, query_id))
db.commit()
# stores the newest last_tweet_id and user_id; the user_id is stored only because, if we did not
# have this user before this search, we do not have their id yet, so we need to store it
cur.execute("UPDATE search_by_user SET user_id = '" + str(i) + "', last_tweet_id = '" + str(
max_id) + "' WHERE user_name = '" + user + "'")
db.commit()
else:
cur.execute(
"DELETE FROM search_by_user WHERE user_name = '" + user + "' and user_id = 0 and last_tweet_id = 0 limit 1")
def search_by_query(db, cur, query):
cur.execute("SELECT * FROM search_by_query WHERE search_query LIKE '%" + query + "%'")
a = cur.fetchall()
min_id = 1
max_id = 0
if a == ():
cur.execute(
"SELECT `AUTO_INCREMENT` FROM INFORMATION_SCHEMA.TABLES \
WHERE TABLE_SCHEMA = 'tweetdata' and table_name \= 'search_by_query';")
db.commit()
temp = cur.fetchall()
query_id = temp[0][0]
cur.execute("INSERT INTO search_by_query(search_query, last_tweet_id) VALUES(%s,%s)", (query, "0"))
db.commit()
else:
print a
min_id = a[2]
query_id = a[0]
tweets = searchApi(query, min_id)
tweet_list = []
for tweet in tweets['statuses']:
tweet_list.append(tweet)
if not tweet_list == []:
for tweet in reversed(tweet_list):
name = tweet['user']['name']
i = tweet['user']['id']
t = tweet['created_at']
retweet_count = tweet['retweet_count']
favorite_count = tweet['favorite_count']
text = exclude_link(tweet['text'])
name = exclude_emoji(name)
print text, name
text = exclude_emoji(text)
tweet_id = tweet['id']
t = formTime(t)
max_id = max(tweet_id, max_id)
min_id = min(tweet_id, min_id)
command = u"INSERT INTO tab (retweet_count, favorite_count,user_name, user_id,text, date, tweet_id,company_id)\
VALUES(%s,%s,%s,%s,%s,%s,%s,%s)"
cur.execute(command, (retweet_count, favorite_count, name, i, text, t, tweet_id, query_id))
db.commit()
# stores the newest last_tweet_id and user_id; the user_id is stored only because, if we did not
# have this user before this search, we do not have their id yet, so we need to store it
cur.execute(
"UPDATE search_by_query SET last_tweet_id = '" + str(max_id) + "' WHERE search_query = '" + query + "'")
db.commit()
else:
cur.execute("DELETE FROM search_by_query WHERE search_query = '" + query + "' and last_tweet_id = 0 limit 1")
# access to twitter API
ckey = 'aw5e8E7vYZubrrn4Z3FxPgvwL'
csecret = 'sLPu7sykydvGnX1flxN27p26AMH5VhOALR6tqzAJ1kMfSPAntO'
atoken = '882898101740163076-8CSSz9TmOb0IpFQkoSrVlknUhRuGv8G'
asecret = '367tQAoU08JpWP3RFSxgyI6s6DDWV3C9msqRmbtQmKquI'
consumer = oauth.Consumer(key=ckey, secret=csecret)
access_token = oauth.Token(key=atoken, secret=asecret)
client = oauth.Client(consumer, access_token)
# tweets=user_timeline("Reuters", 1)
# access to database in MySql
db = MySQLdb.connect(host="localhost", user="andrej", passwd="password", db="tweetdata", use_unicode=True,
charset='utf8mb4')
cur = db.cursor()
search_by_user(db, cur, "@NASA")
db.close()
| true
| 85dfa01c7d7e90744f2d3807de29f1e91c5317eb | Python | searchlink/kr_data | /A่กๆๆฐๅๆตๅๆ็ณป็ป/H่ก_ๅค็.py | UTF-8 | 8,576 | 2.53125 | 3 | [] | no_license |
import time,datetime
import talib
# import numpy as np
import pandas as pd
from KRData import CNData ,HKData
# np.seterr(divide='ignore', invalid='ignore')
#
# pd_display_rows = 10
# pd_display_cols = 100
# pd_display_width = 1000
# pd.set_option('display.max_rows', 10000)
# pd.set_option('display.max_columns', 100)
# pd.set_option('display.width', 100)
# pd.set_option('display.max_colwidth', 120)
# pd.set_option('display.unicode.ambiguous_as_wide', True)
# pd.set_option('display.unicode.east_asian_width', True)
# pd.set_option('expand_frame_repr', False)
# pd.set_option('display.float_format', lambda x: '%.4f' % x)
def get_h_stock(s_time="20200125",e_time="20210126"):
# a = time.time_ns()
hks = HKData.HKStock(data_type='df', adjust='qfq')
df0 = hks[s_time:e_time]
# print(df0.tail())
# print(df0.shape)
# print((time.time_ns() - a) / 1000000000, "s")
df0 = df0.reset_index(drop=True)
return df0
def get_hk_stock_infos(path=r"Table.csv"):
df_hk_infos = pd.read_csv(path, encoding="GBK")
# print(df_hk_infos.keys())
# source columns (GBK): ['code', 'name', 'industry (level 1)']
df_hk_infos = df_hk_infos[["code" , "name" , "industry"]]
# df_hk_infos.columns = ['code', 'ๅ็งฐ', '่กไธๅ']
return df_hk_infos
def MA_Sys(df):
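    # For each MA window (120/60/20): flag close above (+1) / below (-1) the
    # moving average, mark where that state flips, group the rows of the current
    # (most recent) run, and write its signed streak length into the last row.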
v = df.copy()
v["MA120"] = talib.MA(v["close"], 120)
con120 = v["close"] > v["MA120"]
con120_2 = v["close"] < v["MA120"]
v.loc[v["MA120"].isnull(), "ๆฏๅฆๅคงไบ120ๅ็บฟ"] = 0
v.loc[con120, "ๆฏๅฆๅคงไบ120ๅ็บฟ"] = 1
v.loc[con120_2, "ๆฏๅฆๅคงไบ120ๅ็บฟ"] = -1
con120_3 = v["ๆฏๅฆๅคงไบ120ๅ็บฟ"] != v["ๆฏๅฆๅคงไบ120ๅ็บฟ"].shift()
v.loc[con120_3, "ๅคงไบ120ๅ็บฟๅผๅงๆถ้ด"] = v.loc[con120_3, "date"]
v["ๅคงไบ120ๅ็บฟๅผๅงๆถ้ด"].fillna(method="ffill", inplace=True)
g120 = v.groupby(["ๅคงไบ120ๅ็บฟๅผๅงๆถ้ด"]).get_group(v.iloc[-1]["ๅคงไบ120ๅ็บฟๅผๅงๆถ้ด"])
v.loc[v.index[-1], "ๆฏๅฆๅคงไบ120ๅ็บฟ"] = g120["ๆฏๅฆๅคงไบ120ๅ็บฟ"].sum()
v["MA60"] = talib.MA(v["close"], 60)
con60 = v["close"] > v["MA60"]
con60_2 = v["close"] < v["MA60"]
v.loc[v["MA60"].isnull(), "ๆฏๅฆๅคงไบ60ๅ็บฟ"] = 0
v.loc[con60, "ๆฏๅฆๅคงไบ60ๅ็บฟ"] = 1
v.loc[con60_2, "ๆฏๅฆๅคงไบ60ๅ็บฟ"] = -1
con60_3 = v["ๆฏๅฆๅคงไบ60ๅ็บฟ"] != v["ๆฏๅฆๅคงไบ60ๅ็บฟ"].shift()
v.loc[con60_3, "ๅคงไบ60ๅ็บฟๅผๅงๆถ้ด"] = v.loc[con60_3, "date"]
v["ๅคงไบ60ๅ็บฟๅผๅงๆถ้ด"].fillna(method="ffill", inplace=True)
g60 = v.groupby(["ๅคงไบ60ๅ็บฟๅผๅงๆถ้ด"]).get_group(v.iloc[-1]["ๅคงไบ60ๅ็บฟๅผๅงๆถ้ด"])
v.loc[v.index[-1], "ๆฏๅฆๅคงไบ60ๅ็บฟ"] = g60["ๆฏๅฆๅคงไบ60ๅ็บฟ"].sum()
v["MA20"] = talib.MA(v["close"], 20)
con20 = v["close"] > v["MA20"]
con20_2 = v["close"] < v["MA20"]
v.loc[v["MA20"].isnull(), "ๆฏๅฆๅคงไบ20ๅ็บฟ"] = 0
v.loc[con20, "ๆฏๅฆๅคงไบ20ๅ็บฟ"] = 1
v.loc[con20_2, "ๆฏๅฆๅคงไบ20ๅ็บฟ"] = -1
con20_3 = v["ๆฏๅฆๅคงไบ20ๅ็บฟ"] != v["ๆฏๅฆๅคงไบ20ๅ็บฟ"].shift()
v.loc[con20_3, "ๅคงไบ20ๅ็บฟๅผๅงๆถ้ด"] = v.loc[con20_3, "date"]
v["ๅคงไบ20ๅ็บฟๅผๅงๆถ้ด"].fillna(method="ffill", inplace=True)
g20 = v.groupby(["ๅคงไบ20ๅ็บฟๅผๅงๆถ้ด"]).get_group(v.iloc[-1]["ๅคงไบ20ๅ็บฟๅผๅงๆถ้ด"])
v.loc[v.index[-1], "ๆฏๅฆๅคงไบ20ๅ็บฟ"] = g20["ๆฏๅฆๅคงไบ20ๅ็บฟ"].sum()
return v
def update_h_stocks():
df_infos = get_hk_stock_infos(path=r"data/hk_info.csv")
# print(df_infos.tail())
s_time = (datetime.datetime.now()-datetime.timedelta(days=360)).strftime("%Y%m%d")
e_time = datetime.datetime.now().strftime("%Y%m%d")
df_hk = get_h_stock(s_time=s_time, e_time=e_time)
# print(df_hk.tail())
# df_hk = pd.read_csv(r"200_H่ก_kline.csv",encoding="utf-8")
df_hk = df_hk[['code', 'datetime', 'open', 'high', 'low', 'close', 'volume']]
df_hk.columns = ['code', 'date', 'open', 'high', 'low', 'close', 'volume']
df_hk.reset_index(drop=True, inplace=True)
# print(df_hk_zong.keys())
# print(df_hk_zong.tail(300))
#
# exit()
df_hk_zong = pd.DataFrame()
df_hk.fillna(0, inplace=True)
for k, v in df_hk.groupby("code"):
if v.iloc[-1]["close"] <= 0.1:
continue
# if int(k) > 100:break
df0 = MA_Sys(df=v)
# print("code:", k)
df0["date0"] = pd.to_datetime(df0["date"])
df0 = df0[df0["date0"] >= pd.to_datetime(e_time) - datetime.timedelta(days=30)].reset_index()
df_hk_zong = df_hk_zong.append(df0, ignore_index=True)
# print(df0)
# exit()
df_hk_zong['code'] = df_hk_zong['code'].astype(str)
df_infos['code'] = df_infos['code'].astype(str)
df_infos["code"] = df_infos["code"].str.zfill(5)
# print(df_infos.tail(20))
df_hk_zong = df_hk_zong.merge(df_infos, how="left", on="code")
df_hk_zong = df_hk_zong[['date', 'code', 'name', 'industry', 'MA120', 'MA20', 'MA60',
'close', 'ๆฏๅฆๅคงไบ20ๅ็บฟ', 'ๆฏๅฆๅคงไบ60ๅ็บฟ', 'ๆฏๅฆๅคงไบ120ๅ็บฟ']]
df_hk_zong = df_hk_zong.round(3)
df_hk_zong.to_csv(r"data/H่ก_show_table.csv", mode="w")
# print(df_hk_zong.keys())
print(df_hk_zong.tail(10))
print(df_hk_zong.shape)
return "ok"
#
if __name__ =='__main__':
import pandas as pd
hks = CNData.CNStock(data_type='df', adjust='qfq')
# print(hks.all_codes)
# exit()
df0 = hks["20050101":"20210301"]
df0 = pd.DataFrame(df0)
print(df0.head(100))
print(df0.tail(100))
df0.to_pickle("2005-2021-ASTOCKS.pkl")
# update_h_stocks()
| true
|
c7c49f527ad1093e5b1010060e97f6c9de63fe47
|
Python
|
cchaisson/Python-Challenge
|
/PyPoll/PyPoll_Hmwk.py
|
UTF-8
| 1,338
| 3.4375
| 3
|
[] |
no_license
|
#Import the os module
import os
#Module for reading CSV files
import csv
csvpath = os.path.join('..','election_data.csv')
#Lists to store data
candidate=[]
total=0
khan=0
correy=0
li=0
otooley=0
with open(csvpath,newline="") as csvfile:
csvreader = csv.reader(csvfile, delimiter=",")
    csvheader = next(csvreader)  # skip the header row
#Read through each row of data after the header
for row in csvreader:
total+=1
if row[2]=="Khan":
khan+=1
elif row[2]=="Correy":
correy+=1
        elif row[2]=="Li":
li+=1
elif row[2]=="O'Tooley":
otooley+=1
khanprop = round(khan/total*100, 0)
correyprop = round(correy/total*100, 0)
liprop = round(li/total*100, 0)
otooleyprop = round(otooley/total*100, 0)
candidates = ["Khan", "Correy", "Li", "O'Tooley"]
cand_votes=[khan,correy,li,otooley]
zipper=[(k,r) for k, r in zip(candidates, cand_votes)]
winner=[k for k,r in zipper if r==max(cand_votes)]
# Print the results (a sketch of the promised text-file export follows below)
print("Election Results")
print("-------------------")
print("Total Votes: "+str(total))
print("Khan: "+str(khanprop)+"%"+" "+str(khan))
print("Correy: "+str(correyprop)+"%"+" "+str(correy))
print("Li: "+str(liprop)+"%"+" "+str(li))
print("O'Tooley: "+str(otooleyprop)+"%"+" "+str(otooley))
print("Winner: "+", ".join(winner))
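# A minimal sketch of the text-file export promised above; the output filename
# "election_results.txt" is an assumption, not part of the assignment spec.
with open("election_results.txt", "w") as outfile:
    outfile.write("Election Results\n-------------------\n")
    outfile.write("Total Votes: " + str(total) + "\n")
    outfile.write("Khan: " + str(khanprop) + "% " + str(khan) + "\n")
    outfile.write("Correy: " + str(correyprop) + "% " + str(correy) + "\n")
    outfile.write("Li: " + str(liprop) + "% " + str(li) + "\n")
    outfile.write("O'Tooley: " + str(otooleyprop) + "% " + str(otooley) + "\n")
    outfile.write("Winner: " + ", ".join(winner) + "\n")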
| true
|
83917516318f1633f3cba4f3bbad639b77bc6d02
|
Python
|
gabalese/tweet-a-book
|
/test/test_pyepub.py
|
UTF-8
| 3,633
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
import unittest
import urllib2
from tempfile import NamedTemporaryFile
from StringIO import StringIO
from src.epub import EPUB
class EpubNewTests(unittest.TestCase):
def setUp(self):
remotefile = urllib2.urlopen('http://dev.alese.it/book/urn:uuid:c72fb312-f83e-11e2-82c4-001cc0a62c0b/download')
testfile = NamedTemporaryFile(delete=True)
testfile.write(remotefile.read())
testfile.seek(0)
self.file = EPUB(testfile)
def test_metadata(self):
self.assertEqual(len(self.file.info.manifest), 31)
self.assertGreaterEqual(len(self.file.info), 3)
if len(self.file.info) > 3:
self.assertIsInstance(self.file.info.spine, list)
def test_writetodisk(self):
tmp = NamedTemporaryFile(delete=True)
self.file.writetodisk(tmp)
self.assertIsNot(tmp.name, None)
class EpubTests(unittest.TestCase):
def setUp(self):
# get a small epub test file as a file-like object
self.epub2file = NamedTemporaryFile(delete=True)
test_file_content = urllib2.urlopen(
'http://dev.alese.it/book/urn:uuid:d928ac1a-f3c3-11e2-94df-001cc0a62c0b/download')
self.epub2file.write(test_file_content.read())
self.epub2file.seek(0)
# get an epub with no guide element
self.epub2file2 = NamedTemporaryFile(delete=True)
test_file_content2 = urllib2.urlopen('http://dev.alese.it/book/EO_EB_00001/download')
self.epub2file2.write(test_file_content2.read())
self.epub2file2.seek(0)
def test_instantiation(self):
epub = EPUB(self.epub2file)
self.assertNotEqual(epub.filename, None)
self.assertEqual(len(epub.opf), 4)
self.assertEqual(len(epub.opf[0]), 15) # metadata items
self.assertEqual(len(epub.opf[1]), 45) # manifest items
self.assertEqual(len(epub.opf[2]), 35) # spine items
self.assertEqual(len(epub.opf[3]), 35) # guide items
def test_addpart(self):
epub = EPUB(self.epub2file, mode='a')
self.assertNotEqual(epub.filename, None)
part = StringIO('<?xml version="1.0" encoding="utf-8" standalone="yes"?>')
epub.addpart(part, "testpart.xhtml", "application/xhtml+xml", 2)
self.assertEqual(len(epub.opf[2]), 36) # spine items
def test_addpart_noguide(self):
epub2 = EPUB(self.epub2file2, mode='a')
self.assertEqual(len(epub2.opf), 3)
self.assertEqual(epub2.info['guide'], None)
num_spine_items = len(epub2.opf[2])
part = StringIO('<?xml version="1.0" encoding="utf-8" standalone="yes"?>')
epub2.addpart(part, "testpart.xhtml", "application/xhtml+xml", 2)
self.assertEqual(len(epub2.opf[2]), num_spine_items + 1) # spine items
def test_addmetadata(self):
epub = EPUB(self.epub2file, mode='a')
epub.info["metadata"]["dc:test"] = "GOOD"
epub.info["metadata"]['dc:prova'] = {"token": "token_content"}
epub.info["metadata"]['dc:prova'] = "contenuto"
self.assertTrue(epub.opf.find('.//{http://purl.org/dc/elements/1.1/}test') is not None)
self.assertEqual(epub.info.metadata['dc:test'].text, 'GOOD')
self.assertEqual(epub.info["metadata"]['dc:prova'].attrib, {"token": "token_content"})
self.assertEqual(epub.info["metadata"]['dc:prova'].text, "contenuto")
self.assertEqual(epub.opf.find(".//{http://purl.org/dc/elements/1.1/}prova").text, "contenuto")
self.assertEqual(epub.opf.find(".//{http://purl.org/dc/elements/1.1/}prova").attrib["token"], "token_content")
if __name__ == '__main__':
unittest.main()
| true
|
4239e00230c17a3d01c0adfc4057eac587c97da7
|
Python
|
muftring/iu-python
|
/module-07/Question2.py
|
UTF-8
| 1,509
| 4.40625
| 4
|
[] |
no_license
|
#!/usr/bin/env python3
#
# Michael Uftring, Indiana University
# I590 - Python, Summer 2017
#
# Assignment 7, Question 2
#
# A program which prompts the user for a list of numbers, then determines if
# the list is ordered (ascending or descending) and displays the result.
#
import math
#
""" isAscend(nums): check whether numbers in a list are sorted in ascending order, returns True/False """
#
def isAscend(nums):
result = True
prev = -1 * math.inf
for curr in nums:
if curr >= prev:
prev = curr
else:
result = False
break
return result
#
""" isDescend(nums): check whether numbers in a list are sorted in descending order, returns True/False """
#
def isDescend(nums):
result = True
prev = math.inf
for curr in nums:
if curr <= prev:
prev = curr
else:
result = False
break
return result
#
""" getNumbers(): prompt user to enter a list of numbers (separated by spaces), returns a list"""
#
def getNumbers():
nums = []
values = input("Enter numbers separated by a space: ")
for value in values.split():
number = int(value)
nums.append(number)
return nums
def main():
nums = getNumbers()
if isAscend(nums):
print("The numbers are in ascending order.")
elif isDescend(nums):
print("The numbers are in descending order.")
else:
print("The numbers are not sorted.")
if __name__ == '__main__':
main()
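# Illustrative sanity checks (the sample lists are made up, not part of the assignment):
if __name__ == '__main__':
    assert isAscend([1, 2, 2, 3]) and not isAscend([3, 1])
    assert isDescend([5, 4, 4]) and not isDescend([1, 2])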
| true
|
8ef5202999de5858fe7a60427571f2d6eb1e8038
|
Python
|
kamojiro/atcoderall
|
/grand/025/A.py
|
UTF-8
| 199
| 2.921875
| 3
|
[] |
no_license
|
def kakuwa(S):
    # digit sum of S
    return sum(int(x) for x in str(S))
N = int(input())
ans = 50
for i in range(1,N):
ans = min(ans, kakuwa(i) + kakuwa(N-i))
print(ans)
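# Worked example: for N = 15 the split 1 + 14 gives digit sums 1 + 5 = 6; since
# digit sums satisfy s(a) + s(b) >= s(a + b), no split of 15 can beat s(15) = 6.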
| true
|
8b80875e5f19bf27de6058bae5a936d35935918d
|
Python
|
Joselyn19/GUI-Software-Development
|
/main.py
|
UTF-8
| 18,885
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tkinter.messagebox import *
import tkinter.filedialog
from tkinter import*
import os
import socket
import webbrowser
from ftplib import FTP
import XPS_Q8_drivers
import sc10
import sys
import time
class IPSetupPage(object):  # IP address setup page
    def __init__(self, master=None):  # initialize the page
        self.window = master  # keep a reference to the window
        self.window.geometry('%dx%d' % (500, 300))  # set the window size
        self.ip = StringVar()
        self.createPage()
    def createPage(self):  # lay out the page
        self.page = Frame(self.window)  # create the Frame
        self.page.pack()
        Label(self.page, text='Change IP address', font=12, fg='red').grid(row=0, columnspan=3)
        Label(self.page, text='Change IP address:').grid(row=1, stick=E, pady=5)
        Button(self.page, text='Set static', command=self.IP_change_static).grid(row=1, column=1, padx=20)
        Button(self.page, text='Set dynamic', command=self.IP_change_dynamic).grid(row=1, column=2)
        Label(self.page, text='Current IP address:').grid(row=2, stick=E, pady=0)
        self.currentip = StringVar()  # hold the label content in a StringVar so it can be refreshed
        Label(self.page, textvariable=self.currentip).grid(row=2, column=1, stick=W)
        Button(self.page, text='Refresh IP address', command=self.IP_get).grid(row=2, column=2, stick=E, pady=10)
        Button(self.page, text='Next', command=self.IP_check).grid(row=4, column=2, stick=E)
        Label(self.page, text='Note: static IP address 192.168.0.100, subnet mask 255.255.255.0').grid(row=5, columnspan=3, pady=20)
    def IP_change_static(self):  # switch the IP address to static
        os.system("C:\\Users\95184\Desktop\IPstatic.bat")  # location of the .bat file
    def IP_change_dynamic(self):  # switch the IP address to dynamic
        os.system("C:\\Users\95184\Desktop\IPdynamic.bat ")  # location of the .bat file
    def IP_get(self):  # read the current IP address
        # get the host name, then resolve it to the local IP
        hostname = socket.gethostname()
        self.ip = socket.gethostbyname(hostname)
        self.currentip.set(self.ip)
    def IP_check(self):  # go to the next page
        # if socket.gethostbyname(socket.gethostname()) == '192.168.0.100':
        self.page.pack_forget()
        SetControllerPage(self.window)
        # else:
        #     showinfo(title='Error', message='Current IP address is not 192.168.0.100')
class SetControllerPage(object):  # motion controller configuration page
    def __init__(self, master=None):  # initialize the page
        self.window = master  # keep a reference to the window
        self.window.geometry('%dx%d' % (500, 300))  # set the window size
        self.createPage()
    def createPage(self):  # lay out the page
        self.page = Frame(self.window)  # create the Frame
        self.page.pack()
        Label(self.page, text='Configure controller', font=12, fg='red').grid(row=0, columnspan=2)
        Label(self.page, text='Please log in to http://192.198.0.254 to configure the controller').grid(row=2, columnspan=2, pady=5)
        Button(self.page, text='Open browser', command=self.open_website).grid(row=3, columnspan=2, pady=5)
        Button(self.page, text='Back', command=self.last_page).grid(row=4, stick=W, pady=5)
        Button(self.page, text='Next', command=self.next_page).grid(row=4, column=1, stick=E)
    def open_website(self):  # open http://192.198.0.254
        webbrowser.open("http://192.198.0.254")
    def last_page(self):  # return to the previous page
        self.page.destroy()
        IPSetupPage(self.window)
    def next_page(self):  # go to the next page
        self.page.destroy()
        FileLoadPage(self.window)
class FileLoadPage(object):  # trajectory file upload page
    def __init__(self, master=None):  # initialize the page
        self.window = master  # keep a reference to the window
        self.window.geometry('%dx%d' % (500, 300))  # set the window size
        self.Flag = False
        self.filelocalpath = ''
        self.createPage()
        self.ftp = None
    def createPage(self):  # lay out the page
        self.page = Frame(self.window)  # create the Frame
        self.page.pack()
        Label(self.page, text='Upload trajectory file to the FTP server', font=12, fg='red').grid(row=0, columnspan=2)
        Label(self.page, text='Username:').grid(row=1, stick=E, pady=5)
        self.username = Entry(self.page)
        self.username.grid(row=1, column=1)
        Label(self.page, text='Password:').grid(row=2, stick=E, pady=5)
        self.password = Entry(self.page, show='*')
        self.password.grid(row=2, column=1)
        Button(self.page, text='Connect to FTP server', command=self.ftp_connect).grid(row=3, pady=5)
        Button(self.page, text='Disconnect from FTP server', command=self.ftp_disconnect).grid(row=3, column=1)
        Label(self.page, text='FTP server connection status:').grid(row=4, stick=E, pady=5)
        self.LinkeState = StringVar()
        self.LinkeState.set('Disconnected')
        Label(self.page, textvariable=self.LinkeState).grid(row=4, column=1, stick=W)
        Button(self.page, text='Choose file to upload', command=self.file_choose).grid(row=5, stick=W, pady=5)
        self.var1 = StringVar()
        self.var1.set('No file selected')
        Label(self.page, textvariable=self.var1).grid(row=5, column=1, stick=E)
        Button(self.page, text='Upload file to FTP server', command=self.file_upload).grid(row=6, stick=W, pady=5)
        Button(self.page, text='Back', command=self.last_page).grid(row=7, stick=W, pady=5)
        Button(self.page, text='Next', command=self.next_page).grid(row=7, column=1, stick=E)
    def file_choose(self):  # choose a file
        self.filelocalpath = tkinter.filedialog.askopenfilename()
        if self.filelocalpath != '':
            self.var1.set("Selected file: " + self.filelocalpath)
        else:
            self.var1.set("No file selected")
    def ftp_connect(self):  # connect to the FTP server
        if self.Flag == False:
            self.ftp = FTP()
            try:
                self.ftp.connect('192.168.0.254')  # connect
            except:
                showinfo(title='Error', message='FTP connection failed; please check the IP settings')
            else:
                try:
                    self.ftp.login(self.username.get(), self.password.get())
                except:
                    showinfo(title='Error', message='Wrong username or password')
                else:
                    self.LinkeState.set('Connected')
                    self.Flag = True
        else:
            showinfo(title='Info', message='FTP server already connected')
    def ftp_disconnect(self):  # disconnect from the FTP server
        if self.Flag == True:
            self.ftp.quit()
            self.LinkeState.set('Disconnected')
            self.Flag = False
        else:
            showinfo(title='Info', message='FTP server connection is already closed')
    def file_upload(self):  # upload the trajectory file
        if self.Flag == False:
            showinfo(title='Info', message='Please connect to the FTP server first')
        elif self.filelocalpath != '':
            self.bufsize = 1024  # buffer size
            self.fp = open(self.filelocalpath, 'rb')  # open the local file for reading
            self.ftp.storbinary('STOR ' + '/Public/Trajactories', self.fp, self.bufsize)  # upload the file
            self.ftp.set_debuglevel(0)  # turn off debugging
            self.fp.close()  # close the file
        else:
            showinfo(title='Info', message='No file selected')
    def last_page(self):  # return to the previous page
        if self.Flag == False:
            self.page.destroy()
            SetControllerPage(self.window)
        else:
            showinfo(title='Error', message='Please disconnect from the FTP server first')
    def next_page(self):  # go to the next page
        if self.Flag == False:
            self.page.destroy()
            LinkPage(self.window)
        else:
            showinfo(title='Error', message='Please disconnect from the FTP server first')
class LinkPage(object):  # controller connection page
    def __init__(self, master=None, socketId=-1, myxps=None, sc=None):  # initialize the page
        self.window = master  # keep a reference to the window
        self.window.geometry('%dx%d' % (500, 300))  # set the window size
        self.socketId = socketId
        self.myxps = myxps
        self.sc = sc
        self.SC10Flag = False
        self.createPage()
    def createPage(self):  # lay out the page
        self.page = Frame(self.window)  # create the Frame
        self.page.pack()
        Label(self.page, text='Establish control connections', font=12, fg='red').grid(row=0, columnspan=3)
        Label(self.page, text='SC10 shutter controller:').grid(row=1, stick=E, pady=5)
        Button(self.page, text='Test', command=self.SC10_link).grid(row=1, column=1, padx=20)
        self.LinkStateSC10 = StringVar()
        self.LinkStateSC10.set('SC10 connection not tested')
        Label(self.page, textvariable=self.LinkStateSC10).grid(row=2, columnspan=3)
        Label(self.page, text='XPS-Q8 motion controller:').grid(row=3, stick=E, pady=5)
        Button(self.page, text='Connect', command=self.XPS_link).grid(row=3, column=1, padx=20)
        Button(self.page, text='Disconnect', command=self.XPS_link_break).grid(row=3, column=2)
        self.LinkStateXPS = StringVar()
        self.LinkStateXPS.set('XPS-Q8 status: disconnected')
        Label(self.page, textvariable=self.LinkStateXPS).grid(row=4, columnspan=3)
        Button(self.page, text='Back', command=self.last_page).grid(row=5, stick=W, pady=10)
        Button(self.page, text='Next', command=self.next_page).grid(row=5, column=2, stick=E)
    def SC10_link(self):  # test the SC10 shutter controller connection
        try:
            self.sc = sc10.SC10()
        except:
            showinfo(title='Error', message='Connecting to the SC10 shutter controller failed; please check that the device is on')
            self.LinkStateSC10.set('SC10 connection test failed')
        else:
            self.SC10Flag = True
            self.LinkStateSC10.set('SC10 connection test passed')
    def XPS_link(self):  # connect to the XPS motion controller
        if self.socketId != -1:
            showinfo(title='Info', message='XPS-Q8 motion controller already connected')
        else:
            self.myxps = XPS_Q8_drivers.XPS()  # create an XPS instance
            self.socketId = self.myxps.TCP_ConnectToServer('192.168.0.254', 5001, 30)
            if self.socketId == -1:
                showinfo(title='Error',
                         message='Connecting to the XPS-Q8 motion controller failed; please check the IP address and port')
            else:
                showinfo(title='Success', message='Connected to the XPS-Q8 motion controller')
                self.LinkStateXPS.set('XPS status: connected')
    def XPS_link_break(self):  # disconnect the XPS motion controller
        if self.socketId == -1:
            showinfo(title='Error', message='XPS-Q8 motion controller is not connected')
        else:
            self.myxps.TCP_CloseSocket(self.socketId)
            self.socketId = -1
            self.LinkStateXPS.set('XPS status: disconnected')
    def last_page(self):  # return to the previous page
        if self.socketId != -1:
            showinfo(title='Info', message='Please disconnect XPS-Q8 first')
        else:
            self.page.destroy()
            FileLoadPage(self.window)
    def next_page(self):  # go to the next page
        # if self.socketId != -1 and self.SC10Flag == True:
        self.page.destroy()
        ControlPage(self.window, self.socketId, self.myxps, self.sc)
        # else:
        #     showinfo(title='Error', message='SC10 not tested or XPS-Q8 not connected')
class ControlPage(object):  # motion control page
    # initialize the page
    def __init__(self, master=None, socketId=-1, myxps=None, sc=None, group='XY'):
        self.window = master  # keep a reference to the window
        self.window.geometry('%dx%d' % (500, 300))  # set the window size
        self.socketId = socketId
        self.myxps = myxps
        self.sc = sc
        self.group = group
        self.shutterstate = False
        self.createPage()
        self.errorCode = None
    def createPage(self):  # lay out the page
        self.page = Frame(self.window)  # create the Frame
        self.page.pack()
        Label(self.page, text='Motion control', font=12, fg='red').grid(row=0, columnspan=3)
        Label(self.page, text='Motion type:').grid(row=1, stick=E, pady=5)
        self.g = StringVar()
        self.g.set('XY')
        self.group_XY()  # select the default group (also sets the positioner names)
        Radiobutton(self.page, text='XY group', variable=self.g, value='XY', command=self.group_XY).grid(row=1, column=1)
        Radiobutton(self.page, text='XYZ group', variable=self.g, value='XYZ', command=self.group_XYZ).grid(row=1, column=2)
        # Button(self.page, text='test', command=self.test).grid(row=2, stick=W)
        Label(self.page, text='Shutter state:').grid(row=3, stick=E, pady=5)
        self.shutter = StringVar()
        self.shutter.set('Closed')
        Radiobutton(self.page, text='Closed', variable=self.shutter, value='Closed', command=self.shutter_close).grid(row=3, column=1)
        Radiobutton(self.page, text='Open', variable=self.shutter, value='Open', command=self.shutter_open).grid(row=3, column=2)
        Label(self.page, text='Trajectory file:').grid(row=4, stick=E, pady=5)
        self.file = Entry(self.page)
        self.file.grid(row=4, column=1, stick=E)
        Label(self.page, text='Velocity (mm/s):').grid(row=5, stick=E, pady=5)
        self.velocity = Entry(self.page)
        self.velocity.grid(row=5, column=1, stick=W)
        Label(self.page, text='Acceleration (mm/s^2):').grid(row=6, stick=E, pady=5)
        self.accelerate = Entry(self.page)
        self.accelerate.grid(row=6, column=1, stick=W)
        Label(self.page, text='Current position:').grid(row=7, stick=E, pady=5)
        self.position = StringVar()
        self.position.set('')
        Label(self.page, textvariable=self.position).grid(row=7, stick=W)
        Button(self.page, text='Refresh', command=self.position_find).grid(row=7, column=2, stick=W)
        Button(self.page, text='Back', command=self.last_page).grid(row=8, stick=W)
        Button(self.page, text='Run', command=self.move).grid(row=8, column=2, stick=E, pady=5)
    def group_XY(self):  # select the XY group
        self.group = 'XY'
        self.positioner1 = self.group + '.X'
        self.positioner2 = self.group + '.Y'
    def group_XYZ(self):  # select the XYZ group
        self.group = 'XYZ'
        self.positioner1 = self.group + '.X'
        self.positioner2 = self.group + '.Y'
        self.positioner3 = 'S' + '.Pos'
    def shutter_open(self):  # open the shutter
        self.shutterstate = True
    def shutter_close(self):  # close the shutter
        self.shutterstate = False
    def move(self):  # run the motion
        if self.sc == None:
            showinfo(title='Error', message='Shutter not connected; please go back and connect the shutter')
            return
        else:
            time.sleep(3)  # wait 3 s
            if self.shutterstate:
                self.sc.openShutter()  # open the shutter
            else:
                self.sc.closeShutter()
            time.sleep(1)  # wait 1 s
        if self.myxps == None:
            showinfo(title='Error', message='XPS-Q8 not connected; please go back and connect XPS-Q8')
            return
        else:
            # run the trajectory file (e.g. Arc1.trj.txt) with the entered velocity
            # and acceleration, repeated 5 times
            [self.errorCode, returnString] = self.myxps.XYLineArcExecution(self.socketId, self.group, self.file.get(), self.velocity.get(), self.accelerate.get(), 5)  # run the trajectory file
            if self.errorCode != 0:
                self.ErrorAndClose(self.socketId, self.errorCode, 'XYLineArcExecution')
    # map XPS communication error codes to user-facing messages
    def ErrorAndClose(self, socketId, errorCode, APIName):
        if (errorCode != -2) and (errorCode != -108):
            [errorCode2, errorString] = self.myxps.ErrorStringGet(socketId, errorCode)
            if errorCode2 != 0:
                showinfo(title='Error', message=APIName + ': ERROR ' + str(errorCode))
            else:
                showinfo(title='Error', message=APIName + ': ' + errorString)
        else:
            if errorCode == -2:  # error code -2
                showinfo(title='Error', message=APIName + ': TCP timeout')
            if errorCode == -108:  # error code -108
                showinfo(title='Error', message=APIName + ': TCP/IP connection closed by the administrator')
        self.myxps.TCP_CloseSocket(socketId)
        # on error the XPS connection is closed; go back one step to reconnect
        return
    def position_find(self):  # show the current coordinates
        if self.myxps == None:
            showinfo(title='Error', message='XPS-Q8 not connected; please go back and connect XPS-Q8')
            return
        else:
            [errorCode1, currentPosition1] = self.myxps.GroupPositionCurrentGet(self.socketId, self.positioner1, 1)
            if errorCode1 != 0:
                self.ErrorAndClose(self.socketId, errorCode1, 'GroupPositionCurrentGet')
                showinfo(title='Error', message='Failed to read the X axis coordinate')
                return
            else:
                [errorCode2, currentPosition2] = self.myxps.GroupPositionCurrentGet(self.socketId, self.positioner2, 1)
                if errorCode2 != 0:
                    self.ErrorAndClose(self.socketId, errorCode2, 'GroupPositionCurrentGet')
                    showinfo(title='Error', message='Failed to read the Y axis coordinate')
                    return
                if self.group == 'XY':
                    self.position.set('X:' + str(currentPosition1) + 'Y:' + str(currentPosition2))
                    return
                else:
                    [errorCode3, currentPosition3] = self.myxps.GroupPositionCurrentGet(self.socketId, self.positioner3, 1)
                    if errorCode3 != 0:
                        self.ErrorAndClose(self.socketId, errorCode3, 'GroupPositionCurrentGet')
                        showinfo(title='Error', message='Failed to read the Z axis coordinate')
                        return
                    else:
                        self.position.set('X:' + str(currentPosition1) + 'Y:' + str(currentPosition2) + 'Z:' + str(currentPosition3))
                        return
    def last_page(self):  # return to the previous page
        self.page.destroy()
        LinkPage(self.window, self.socketId, self.myxps, self.sc)
window = tkinter.Tk()
window.title('Femtosecond laser machining platform control software')
IPSetupPage(window)
window.mainloop()
| true
|
bc09d897c4a3111e76d9f69ff5a88cfea126b670
|
Python
|
sidazhong/leetcode
|
/leetcode/easy/155_minStack.py
|
UTF-8
| 2,070
| 4.09375
| 4
|
[] |
no_license
|
'''
# tuple approach: on each push compare with the current minimum and store the running min alongside the value
class MinStack(object):
def __init__(self):
self.stack=[]
def push(self, val):
if(not self.stack):
item=(val,val)
else:
item=(val,min(val,self.stack[-1][1]))
self.stack.append(item)
def pop(self):
return self.stack.pop()
def top(self):
return self.stack[-1][0]
def getMin(self):
return self.stack[-1][1]
'''
'''
# two-stack approach: a second stack keeps the minimums and is popped in sync
class MinStack(object):
def __init__(self):
self.stack=[]
self.minStack=[]
def push(self, val):
if(not self.stack):
self.minStack.append(val)
if(val<=self.minStack[-1]):
self.minStack.append(val)
self.stack.append(val)
def pop(self):
pop=self.stack.pop()
if(pop==self.minStack[-1]):
self.minStack.pop()
def top(self):
return self.stack[-1]
def getMin(self):
return self.minStack[-1]
'''
# counting variant: the auxiliary stack stores [min, count] pairs so duplicate minimums are handled
class MinStack(object):
def __init__(self):
self.stack=[]
self.minStack=[]
    def push(self, val):
        # elif chain prevents double-counting the very first push
        if not self.stack:
            self.minStack.append([val, 1])
        elif val == self.minStack[-1][0]:
            self.minStack[-1][1] += 1
        elif val < self.minStack[-1][0]:
            self.minStack.append([val, 1])
        self.stack.append(val)
def pop(self):
pop=self.stack.pop()
if(pop==self.minStack[-1][0]):
if(self.minStack[-1][1]==1):
self.minStack.pop()
else:
self.minStack[-1][1]-=1
def top(self):
return self.stack[-1]
def getMin(self):
return self.minStack[-1][0]
# Your MinStack object will be instantiated and called as such:
obj = MinStack()
obj.push(-2)
obj.push(0)
obj.push(-3)
param_4 = obj.getMin()
obj.pop()
param_3 = obj.top()
param_4 = obj.getMin()
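# With duplicated minimums the counter prevents premature removal:
obj2 = MinStack()
obj2.push(1); obj2.push(1); obj2.push(2)
obj2.pop()   # removes 2
obj2.pop()   # removes one of the 1s; the remaining 1 is still the minimum
assert obj2.getMin() == 1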
| true
|
25ed58147387a6215a5fbf22085ecf41888a36c3
|
Python
|
rv-kesnyder/ita-challenges-2020
|
/python-week-1/07-for-loops.py
|
UTF-8
| 168
| 2.71875
| 3
|
[] |
no_license
|
def even_nums(numbers, even_numbers):
    # numbers is the list of all numbers
    # even_numbers is the empty list, that holds only the even numbers.
    # One straightforward way to complete the challenge:
    for n in numbers:
        if n % 2 == 0:
            even_numbers.append(n)
    return even_numbers
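# Illustrative call (the input list is made up for this example):
if __name__ == '__main__':
    print(even_nums([1, 2, 3, 4, 5, 6], []))  # [2, 4, 6]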
| true
|
f24e496bf3636dfa141543035c2963402d87b0f0
|
Python
|
Cjkendel/Lower-Bound-Estimation
|
/main.py
|
UTF-8
| 601
| 2.59375
| 3
|
[] |
no_license
|
from LocalVariationalBoundsVectorized import EstimateLowerBound
from LogisticRegression import LogisticReg
if __name__ == "__main__":
#JJ Bound Estimation
lower = EstimateLowerBound(batch_size=1, full_batch=False, n_batch=False)
# lower.call_and_write_results(10000)
# Torch Logistic Regression, get point estimates
# model = LogisticReg(batch_size=16, full_batch=False, n_batch=False)
# model.call_and_write_results(5000)
# Graph means over time
for i in range(1, 5):
lower.generate_plots([1, 2, 4, 8, 16, 32, lower.trainloader.total_data_len], epochs=i)
| true
|
4acf52e6238fc0108481b32f6ac9b8b6d9c84206
|
Python
|
zgmartin/minset-cover
|
/min_inventory_checks.py
|
UTF-8
| 627
| 3.234375
| 3
|
[] |
no_license
|
from objects import Data
from algorithms import greedy
import sys
def min_inventory_check(file_name):
"""Returns a list of the minimum number of inventory checks for an input file."""
#generates usable data from JSON file
input_data = Data()
input_data.extract_data(file_name)
#runs greedy algorithm on data
min_num = greedy(input_data.all_stores, input_data.zips)
return min_num
if __name__ == '__main__':
args = sys.argv
print 'min inventory checks: \n'
for x in range(1 , len(args)):
file_name = args[x]
print 'store', x, ':', min_inventory_check(file_name)
| true
|
82601c806636d0bce3741ddffcc1094b0579dfe2
|
Python
|
kalloc/insanities-testing
|
/tests/utils/odict.py
|
UTF-8
| 616
| 3.109375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import unittest
from insanities.utils.odict import OrderedDict
class OrderedDictTests(unittest.TestCase):
def test_pop_with_default(self):
d = OrderedDict([('a', 'a'), ('b', 'b')])
self.assertEqual(d.pop('a', ('c', 'c')), ('a', 'a'))
self.assertEqual(len(d.items()), 1)
self.assertEqual(d.pop('c', ('c', 'c')), ('c', 'c'))
self.assertEqual(len(d.items()), 1)
def test_pop_without_default(self):
d = OrderedDict([('a', 'a'), ('b', 'b')])
self.assertEqual(d.pop('a'), ('a', 'a'))
self.assertEqual(len(d.items()), 1)
| true
|
3a9d17d0d764972501c6303a86f648ddae245496
|
Python
|
littlemesie/recommend-learning
|
/src/dssm/data_process.py
|
UTF-8
| 2,819
| 2.859375
| 3
|
[] |
no_license
|
import json
UNK = '[UNK]'
PAD = '[PAD]'
MAX_SEQ_LEN = 10
class Processor:
    """Data processing helper."""
def __init__(self, vocab_path):
self.vocab_path = vocab_path
self.vocab_map = self.load_vocab()
self.nwords = len(self.vocab_map)
    def load_vocab(self):
        """Load the vocab file into a word -> id dict."""
word_dict = {}
with open(self.vocab_path, encoding='utf8') as f:
for idx, word in enumerate(f.readlines()):
word = word.strip()
word_dict[word] = idx
return word_dict
    def convert_word2id(self, query):
        """Map the characters of query to vocab ids, padded/truncated to MAX_SEQ_LEN."""
ids = []
for w in query:
if w in self.vocab_map:
ids.append(self.vocab_map[w])
else:
ids.append(self.vocab_map[UNK])
        # pad up to MAX_SEQ_LEN
while len(ids) < MAX_SEQ_LEN:
ids.append(self.vocab_map[PAD])
return ids[:MAX_SEQ_LEN]
def get_data(self, file_path):
"""
gen datasets, convert word into word ids.
:param file_path:
:return: [[query, pos sample, 4 neg sample]], shape = [n, 7]
"""
data_map = {'query': [], 'query_len': [], 'doc_pos': [], 'doc_pos_len': [], 'doc_neg': [], 'doc_neg_len': [], 'label': []}
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
                # expect exactly 5 tab-separated fields (unpacked just below)
                if len(spline) < 5:
                    continue
prefix, query_pred, title, tag, label = spline
if label == '0':
continue
label = int(label)
cur_arr, cur_len = [], []
query_pred = json.loads(query_pred)
                # keep at most 4 negative samples (filtered below)
for each in query_pred:
if each == title:
continue
cur_arr.append(self.convert_word2id(each))
each_len = len(each) if len(each) < MAX_SEQ_LEN else MAX_SEQ_LEN
cur_len.append(each_len)
if len(cur_arr) >= 4:
data_map['query'].append(self.convert_word2id(prefix))
data_map['query_len'].append(len(prefix) if len(prefix) < MAX_SEQ_LEN else MAX_SEQ_LEN)
data_map['doc_pos'].append(self.convert_word2id(title))
data_map['doc_pos_len'].append(len(title) if len(title) < MAX_SEQ_LEN else MAX_SEQ_LEN)
data_map['doc_neg'].extend(cur_arr[:4])
data_map['doc_neg_len'].extend(cur_len[:4])
data_map['label'].append(label)
# data_map['doc_neg'].extend(cur_arr)
# data_map['doc_neg_len'].extend(cur_len)
return data_map
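# Minimal usage sketch (both file paths are hypothetical placeholders):
if __name__ == '__main__':
    processor = Processor(vocab_path='vocab.txt')
    ids = processor.convert_word2id('query text')
    print(len(ids))  # always MAX_SEQ_LEN after padding/truncation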
| true
|
bc48b3c5035e9b2273aefbbf5457139e43b70128
|
Python
|
chrisjluc/algorithms
|
/graphs/tests/test_topsort.py
|
UTF-8
| 867
| 3.203125
| 3
|
[] |
no_license
|
from graphs.topological_sort import *
from graphs import util
from graphs.graph import Graph
import unittest
graph1 = Graph()
for v in [0, 1, 2, 3, 4, 5]:
graph1.add_node(v)
graph1.add_edge(5, 2)
graph1.add_edge(5, 0)
graph1.add_edge(4, 0)
graph1.add_edge(4, 1)
graph1.add_edge(2, 3)
graph1.add_edge(3, 1)
graph2 = Graph()
for v in [5, 7, 3, 11, 8, 2, 9, 10]:
graph2.add_node(v)
graph2.add_edge(3, 8)
graph2.add_edge(3, 10)
graph2.add_edge(5, 11)
graph2.add_edge(7, 8)
graph2.add_edge(7, 11)
graph2.add_edge(8, 9)
graph2.add_edge(11, 2)
graph2.add_edge(11, 9)
graph2.add_edge(11, 10)
class TestTopsort(unittest.TestCase):
def test_dfs_topsort(self):
expected1 = [5, 4, 2, 3, 1, 0]
expected2 = [7, 5, 11, 3, 10, 8, 9, 2]
self.assertEqual(expected1, topsort_dfs(graph1))
self.assertEqual(expected2, topsort_dfs(graph2))
| true
|
881bbf4985be61764786f67b1cd7490d724e8305
|
Python
|
ananxuan/web_crawler
|
/็พๅบฆ่ดดๅง_spider.py
|
UTF-8
| 2,562
| 2.6875
| 3
|
[] |
no_license
|
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re
# Baidu Tieba spider class
class BDTB:
    # initialize with the base post URL and whether to show only the original poster's posts
    # URL example: http://tieba.baidu.com/p/3138733512?see_lz=1&pn=1
    def __init__(self, baseURL, seeLZ):
        self.baseURL = baseURL
        self.seeLZ = '?see_lz=' + str(seeLZ)

    # fetch the HTML source of the given page of the post
    def getPage(self, pageNum):
        try:
            url = self.baseURL + self.seeLZ + '&pn=' + str(pageNum)
            request = urllib2.Request(url)
            response = urllib2.urlopen(request)
            page = response.read()
            print page
            return page
        except urllib2.URLError, e:
            if hasattr(e, "reason"):
                print u"failed, reason:", e.reason
            return None

    # extract the post title
    # HTML sample: <h1 class="core_title_txt  " title="...">...</h1>
    def getTitle(self):
        page = self.getPage(1)
        pattern = re.compile('<h1 class="core_title_txt.*?>(.*?)</h1>', re.S)
        result = re.search(pattern, page)
        if result:
            return result.group(1).strip()
        else:
            return None

    # extract the total number of pages of the post
    def getPageNum(self):
        page = self.getPage(1)
        pattern = re.compile('<li class="l_reply_num.*?</span>.*?<span.*?>(.*?)</span>', re.S)
        result = re.search(pattern, page)
        if result:
            return result.group(1).strip()
        else:
            return None

    # extract the post body content
    def getContent(self, page):
        pattern = re.compile('<div id="post_content_.*?>(.*?)</div>', re.S)
        items = re.findall(pattern, page)
        for item in items:
            print item

# clean up extracted HTML fragments
class Tool:
    removeImg = re.compile('<img.*?>| {7}|')  # strip img tags and 7-space runs
    removeAddr = re.compile('<a.*?>|</a>')  # strip hyperlink tags
    removeLine = re.compile('tr>|<div>|</div>|</p>')  # strip row/div/paragraph-close tags
    replaceTD = re.compile('<td>')  # table cells become \t
    replacePara = re.compile('<p.*?>')  # paragraph openings become \n plus two spaces
    replaceBR = re.compile('<br><br>|<br>')  # line breaks become \n
    removeExtraTag = re.compile('<.*?>')  # strip any remaining tags

    def replace(self, x):
        x = re.sub(self.removeImg, "", x)
        x = re.sub(self.removeAddr, "", x)
        x = re.sub(self.removeLine, "", x)
        x = re.sub(self.replaceTD, "\t", x)
        x = re.sub(self.replacePara, "\n  ", x)
        x = re.sub(self.replaceBR, "\n", x)
        x = re.sub(self.removeExtraTag, "", x)
        return x.strip()  # drop leading/trailing whitespace

baseURL = 'http://tieba.baidu.com/p/3138733512'
bdtb = BDTB(baseURL, 1)
bdtb.getPage(1)
| true
|
33a695384b9ae8f9db030ba9f993681856d81d0b
|
Python
|
bruzecruise/Intro_Biocom_ND_319_Tutorial10
|
/exercise_10_B.py
|
UTF-8
| 4,031
| 2.703125
| 3
|
[] |
no_license
|
#Load packages
import pandas
import scipy
import scipy.integrate as si
from plotnine import *
# function
def SIR (y,t0,beta,gamma):
S = y[0]
I = y[1]
R = y[2]
dS = -1*(beta*I*S)
dI = (beta*I*S)-(gamma*I)
dR = (gamma*I)
return dS, dI, dR
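# Note: scipy's odeint calls this as SIR(y, t, beta, gamma); y0 below supplies
# [S0, I0, R0] and args=(beta, gamma) fills in the rate parameters.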
# initial conditions
times = range(0,500)
NO = [999, 1, 0]
# dataframe of gamma and beta values
data = [{'beta' : .0005, 'gamma' : .05},
{'beta': .005, 'gamma': .5},
{'beta': .0001, 'gamma': .1},
{'beta': .00005, 'gamma': .1},
{'beta': .0001, 'gamma': .05},
{'beta': .0002, 'gamma': .05},
{'beta': .0001, 'gamma': .06},
{'beta': .0001, 'gamma': .9},
{'beta': .0001, 'gamma': .5},
{'beta': .0001, 'gamma': .25},
{'beta': .0001, 'gamma': .1},
{'beta': .0001, 'gamma': .05},
{'beta': .0001, 'gamma': .01},
{'beta': .0001, 'gamma': .001},
{'beta': .0001, 'gamma': .0001},
{'beta': .9, 'gamma': .0001},
{'beta': .5, 'gamma': .0001},
{'beta': .25, 'gamma': .0001},
{'beta': .1, 'gamma': .0001},
{'beta': .05, 'gamma': .0001},
{'beta': .01, 'gamma': .0001},
{'beta': .001, 'gamma': .0001},
{'beta': .0001, 'gamma': .0001},
{'beta': .00001, 'gamma': .0001},
]
my_data = pandas.DataFrame(data)
# make lists to hold the results
mdi = []
mdp = []
pa = []
ro = []
b = []
g = []
# start big loop here
for line in range(0,len(my_data),):
q = my_data.iloc[line]['beta']
p = my_data.iloc[line]['gamma']
params = (q, p) # make tuple
b.append(params[0]) # append list
g.append(params[1])
# make dataframe
infection = pandas.DataFrame({"time":times,"S":0,"I":0,"R":0})
# sim shite
sim = si.odeint(func=SIR, y0=NO, t=times, args=params)
# fill dataframe
infection.iloc[:,2]=sim[:,0]
infection.iloc[:,0]=sim[:,1]
infection.iloc[:,1]=sim[:,2]
# calc max daily incidence
daily_incidence = []
for i in range(0,len(infection),):
if infection.time[i]==0:
continue
else:
I = infection.iloc[i]['I']
Iold = infection.iloc[i-1]['I']
incidence = I-Iold
daily_incidence.append(incidence)
max_daily_incidence = max(daily_incidence)
mdi.append(max_daily_incidence)
# calc max daily prevalence
daily_prev = []
for i in range(0,len(infection),):
I = infection.iloc[i]['I']
R = infection.iloc[i]['R']
S = infection.iloc[i]['S']
prev = I/(S+I+R)
daily_prev.append(prev)
max_daily_prev = max(daily_prev)
mdp.append(max_daily_prev)
#calc percent affected over simulation- use last time step (499)
I= infection.iloc[499]['I']
R= infection.iloc[499]['R']
S= infection.iloc[499]['S']
percent_affected = (I+R)/(S+I+R)
pa.append(percent_affected)
# basic reproduction number initial SIR
beta = params[0]
gamma = params[1]
I= infection.iloc[0]['I']
R= infection.iloc[0]['R']
S= infection.iloc[0]['S']
repo_number = (beta*(S+I+R))/gamma
ro.append(repo_number)
# make a dataframe for results from all the lists
results = pandas.DataFrame(
{'beta' : b,
'gamma' : g,
'max_daily_incide' : mdi,
'max_daily_prev' : mdp,
'percent_affect' : pa,
'repo_num' : ro})
print(results)
'''
* observations *
We noticed that if beta is held constant while gamma gets smaller, the max incidence, daily
prevalence, percent affected, and reproductive number all rise; that is, a bigger gamma makes
them smaller. If we hold gamma constant, then as beta gets bigger the max incidence, daily
prevalence, percent affected, and reproductive number all rise; that is, a smaller beta makes
them smaller. To summarize, a high beta and a small gamma give the disease a higher rate and
spread of infection in the population.
'''
| true
|
3399ccffb3fa8f15d63fe0b50b6398472d9112e5
|
Python
|
ywzyl/algorithm013
|
/Week_01/twoSum.py
|
UTF-8
| 990
| 3.78125
| 4
|
[] |
no_license
|
class Solution:
    def twoSum(self, nums, target):
        # approach: nested loops; the inner loop starts after the outer index
        # to avoid duplicate pairs; time complexity O(n^2)
for i in range(len(nums)):
for j in range(i+1, len(nums)):
if nums[i] + nums[j] == target:
return [i, j]
return []
    def twoSumT(self, nums, target):
        # approach: sort a copy first, then move two pointers inward from both
        # ends of the sorted list
temp = nums.copy()
temp.sort()
i = 0
j = len(temp) - 1
while i<j:
if (temp[i] + temp[j]) > target:
j = j - 1
elif (temp[i] + temp[j]) < target:
i = i + 1
else:
break
p = nums.index(temp[i])
        nums.pop(p)  # pop so index() below finds the second occurrence when temp[i] == temp[j], e.g. 7+7=14
k = nums.index(temp[j])
if k >= p:
k = k+1
return [p, k]
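# Quick demonstration of both methods:
if __name__ == '__main__':
    s = Solution()
    print(s.twoSum([2, 7, 11, 15], 9))   # [0, 1]
    print(s.twoSumT([3, 3], 6))          # [0, 1]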
| true
|
aa24e792ae5d84148feb9fba91257416d5aedd39
|
Python
|
shen-huang/selfteaching-python-camp
|
/19100402/Autumn0808/1001S01E05_array.py
|
UTF-8
| 642
| 4.125
| 4
|
[] |
no_license
|
mylist = [0,1,2,3,4,5,6,7,8,9]
print(mylist)
# reverse the list
list1 = mylist[::-1]
print(list1)
# join the reversed list into a string
## map() converts each element of list1 to str, then join builds a new string
str1 = "".join(map(str,list1))
print(str1)
# slice out the 3rd through 7th characters (indices 2..6)
str2 = str1[2:7]
print(str2)
# reverse the resulting string
str3 = str2[::-1]
print(str3)
# convert the result to an int
str4 = int(str3)
print(str4)
# convert the result to binary, octal and hexadecimal
str5 = bin(str4)
str6 = oct(str4)
str7 = hex(str4)
# print the results
print(str5)
print(str6)
print(str7)
| true
|
1663fb886939d93213ea426d65f00d9740476bc9
|
Python
|
sungguenja/studying
|
/sort/ํต์ ๋ ฌ.py
|
UTF-8
| 710
| 3.59375
| 4
|
[] |
no_license
|
def quick_sort(a,low,high):
if low < high:
pivot = partition(a,low,high)
quick_sort(a,low,pivot-1)
quick_sort(a,pivot+1,high)
def partition(a,pivot,high):  # 'pivot' is the index of the pivot element (the low end of the range)
print("start partition",a,pivot,high)
i = pivot + 1
j = high
while True:
while i<high and a[i] < a[pivot]:
i += 1
while j > pivot and a[j] > a[pivot]:
j -= 1
if j <= i:
break
a[i],a[j] = a[j],a[i]
i += 1
j -= 1
print("i,j change",i,j,pivot,a)
a[pivot],a[j] = a[j],a[pivot]
print("pivot,j change",i,j,pivot,a)
return j
a = [53,88,77,26,93,17,49,10,17,77,11,31,22,44,17,20]
quick_sort(a,0,len(a)-1)
print(a)
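# sanity check: the list should now be fully sorted
assert a == sorted(a)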
| true
|
26a6060ac8f32b1277ac8bf5cf2b3ba31d6ff938
|
Python
|
Atsuhiko/Web-App
|
/facial-expression/second Iisan/app/app.py
|
UTF-8
| 4,245
| 2.53125
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import random
from flask import Flask, make_response,render_template, request, redirect, url_for, send_from_directory, g, flash, jsonify
from keras.preprocessing.image import load_img
import numpy as np
import cv2
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.models import load_model
from datetime import datetime  # added by Ii-san: used to timestamp the saved filepath
app = Flask(__name__)  # rewritten by Ii-san
app.config.from_object(__name__)
jinja_options = app.jinja_options.copy()
jinja_options.update({
'block_start_string': '<%',
'block_end_string': '%>',
'variable_start_string': '<<',
'variable_end_string': '>>',
'comment_start_string': '<#',
'comment_end_string': '#>'
})
app.jinja_options = jinja_options
SAVE_DIR = "images"  # rewritten by Ii-san
if not os.path.isdir(SAVE_DIR):
os.mkdir(SAVE_DIR)
#@app.route('/<path:filepath>')
@app.route('/images/<path:filepath>')
def send_js(filepath):
return send_from_directory(SAVE_DIR, filepath)
@app.route('/start-camera')
def startCamera():
cap = cv2.VideoCapture(0)
if cap.isOpened() is False:
print("IO Error")
else:
ret, frame = cap.read()
# image_path = "images"
        image_path = os.path.join(SAVE_DIR, "image.png")  # rewritten by Ii-san
if (ret):
# cv2.imwrite(image_path + "image.png", frame)
cv2.imwrite(image_path, frame)
else:
print("Read Error")
    # keep the capture open (do not release it here)
    # cap.release()
# response = {
# "message": "ok"
# }
# return jsonify(response)
    return render_template("index.html")  # send the filepath
@app.route("/")
def route():
return render_template("index.html")
@app.route("/pred-emotion")
def pred_emotion():
    # filenames = os.listdir("../images")
    filenames = os.listdir(SAVE_DIR)  # rewritten by Ii-san; list the saved images
IMAGE_SIZE = (48, 48)
    # load the image
# sample = random.choice(filenames)
# img = load_img("./images/"+sample, target_size=IMAGE_SIZE)
# img = load_img("/images/image.png", target_size=IMAGE_SIZE)
    save_path = os.path.join(SAVE_DIR, "image.png")  # rewritten by Ii-san
    img = load_img(save_path, target_size=IMAGE_SIZE)  # rewritten by Ii-san
img_org = img
# model = load_model('../2020-09-27-epoch50.h5')
model = load_model('2020-09-29.h5')
    # convert the image to a numpy array
    img = np.asarray(img)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_gray = img_gray.reshape(-1, 48, 48, 1) / 255.0
    # run the prediction
pred = model.predict(img_gray)
    # display the results (a prediction for each facial expression)
print("Angry: ", pred[:, 0])
print("Disgust: ", pred[:, 1])
print("Fear: ", pred[:, 2])
print("Happy :", pred[:, 3])
print("Sad: ", pred[:, 4])
print("Surprise: ", pred[:, 5])
print("Neutral: ", pred[:, 6])
angry = pred[:, 0][0].astype(np.float64)
disgust = pred[:, 1][0].astype(np.float64)
fear = pred[:, 2][0].astype(np.float64)
happy = pred[:, 3][0].astype(np.float64)
sad = pred[:, 4][0].astype(np.float64)
surprise = pred[:, 5][0].astype(np.float64)
neutral = pred[:, 6][0].astype(np.float64)
print(type(angry))
    # plt.imshow(img_org)  # show the original input image
"""
return render_template(
"index.html",
angry=angry,
disgust=disgust,
fear=fear,
happy=happy,
sad=sad,
surprise=surprise,
neutral=neutral
)
"""
response = {
"angry": angry,
"disgust": disgust,
"fear": fear,
"happy": happy,
"sad": sad,
"surprise": surprise,
"neutral": neutral,
}
"""
response = [angry,
disgust,
fear,
happy,
sad,
surprise,
neutral]
"""
# return response
# response = make_response(response_body)
return render_template(
"index.html",
predict = response
)
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=3032)  # changed the port
| true
|
c85796971dddb19c55f4a959bf8a4ea62d15067a
|
Python
|
yearfunla/ML5155
|
/sampling.py
|
UTF-8
| 6,091
| 2.703125
| 3
|
[] |
no_license
|
"""
Course: CSI5155
Tiffany Nien Fang Cheng
Group 33
Student ID: 300146741
"""
import json
import numpy as np
import pandas as pd
from scipy.io import arff as af
import arff
from sklearn.impute import SimpleImputer
from imblearn.over_sampling import SMOTE, ADASYN
from sklearn import svm, preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imblearn.under_sampling import ClusterCentroids, EditedNearestNeighbours
from imblearn.combine import SMOTEENN
from sklearn.feature_selection import VarianceThreshold, SelectFromModel
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
from util import *
# grouping scenario for different cases
group_classes=[1,2] # considering only injury cases
# group_classes = [1, (2, 3)] # considering fatal injury vs the rest
# group_classes = [(1, 2), 3] # considering injury vs property damage
# group_classes=[(1,2,3),0] # considering multiclass
with open("traffic_{}_{}.csv".format(group_classes[0], group_classes[1]), encoding="utf8") as f:
cvs_data = pd.read_csv(f, sep=',')
# seperate the input and the classification result
Y = cvs_data['class']
X = cvs_data.copy(deep=True)
# X.drop(['class', "TIME", "DATE"], axis=1, inplace=True)
X.drop(['class'], axis=1, inplace=True)
# convert pandas to numpy
features = X.to_records(index=False).dtype
X = X.to_numpy()
Y = np.array(Y)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=42)
print(len(X_train))
print('Training Features Shape:', X_train.shape)
print('Training Labels Shape:', Y_train.shape)
print('Testing Features Shape:', X_test.shape)
print('Testing Labels Shape:', Y_test.shape)
# replacing missing data nan to 0
imp = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0)
imp = imp.fit(X)
X = imp.transform(X)
def sampling(X, Y, sample_type="over"):
"""
This is to pick the sampling technique and output the data after sampled
:param X: input data
:param Y: classification data
:param sample_type: can take a list or str of sampling technique
default is oversampling. options: over, under, combine
:return: cascade data of X and Y
"""
if "over" in sample_type:
# using SMOTE for over sampling
X_oversampled, y_oversampled = SMOTE(sampling_strategy="minority", random_state=42).fit_resample(X, Y)
if "under" in sample_type:
# using ENN for under sampling, since centroid has memory issues
# centroid undersample
# X_under, y_under = ClusterCentroids(random_state=42).fit_resample(X,Y)
X_under, y_under = EditedNearestNeighbours(random_state=42).fit_resample(X, Y)
if "combine" in sample_type:
# using sklearn built-in SMOTEENN for comebined sampling
# because centroids has memory issue
X_comb, y_comb = SMOTEENN(random_state=42).fit_resample(X, Y)
# X_oversampled, y_oversampled = SMOTE(sampling_strategy="minority", random_state=42).fit_resample(X, Y)
# X_comb, y_comb = ClusterCentroids(random_state=42).fit_resample(X_oversampled,y_oversampled)
X_Y_under = list()
X_Y_over = list()
X_Y_comb = list()
X_Y = dict()
# append the data back for return
if 'under' in sample_type:
X_Y_under = np.append(X_under, y_under.reshape(len(y_under), 1), axis=1)
if 'over' in sample_type:
X_Y_over = np.append(X_oversampled, y_oversampled.reshape(len(y_oversampled), 1), axis=1)
if 'combine' in sample_type:
X_Y_comb = np.append(X_comb, y_comb.reshape(len(y_comb), 1), axis=1)
X_Y.setdefault("under", X_Y_under)
X_Y.setdefault("over", X_Y_over)
X_Y.setdefault("combine", X_Y_comb)
return X_Y
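# Example use (a hypothetical call): resampled = sampling(X, Y, sample_type="combine")["combine"]
# returns the SMOTEENN-resampled data with the class label appended as the last column.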
# X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=42)
# get the name and data type of features
features_variance = list()
tmp = list(features.descr)
print(tmp)
for i in range(len(tmp)):
if tmp[i]:
features_variance.append(tmp[i])
print(features_variance)
# features
feature_list = get_features(features_variance)
def feature_selection(X, Y, FV, estimators=100):
"""
Using tree as feature selection instead of variance
because number of pedestrian is very import feature but there are a lot of zeros in the feature
:param X: Input data
:param Y: Label
:param FV: list of features
:param estimators: estimator passing to the tree algo
:return: reduced data and feature list
"""
# using ExtraTreesClassifier for getting the important features
clf = ExtraTreesClassifier(n_estimators=estimators)
clf = clf.fit(X, Y)
# feature selection
clf.feature_importances_
model = SelectFromModel(clf, prefit=True)
features_lsv = model.get_support()
# get the name of the selected features
feature_list = list()
for i in range(len(features_lsv)):
if features_lsv[i]:
feature_list.append(FV[i])
# debug
# print the feature selected out
# [print(FV[i]) for i in range(len(features_lsv)) if features_lsv[i]]
# to reduce the column of X
X_new = model.transform(X)
# X_new.shape
# append class label back to data
feature_list.append(FV[-1])
X_Y = np.append(X_new, Y.reshape(len(X_new), 1), axis=1)
return X_Y, feature_list
# file control
select_feature = False
SAMPLE = "over"
if select_feature:
X_Y, feature_list = feature_selection(X, Y, feature_list)
if SAMPLE:
X_Y = sampling(X, Y, SAMPLE)[SAMPLE]
# get the arff obj
arff_dump_v = get_arff_dump(X_Y, feature_list, "selection")
# append the file name properly
file_name_list=[group_classes[0], group_classes[1]]
if select_feature:
file_name_list.append("selection")
elif SAMPLE:
file_name_list.append(SAMPLE)
file_name='traffic'
for it in file_name_list:
file_name = "{}_{}".format(file_name,it)
# generate feature selectiong arff or sampling arff
with open("{}.arff".format(file_name), "w", encoding="utf8") as f:
arff.dump(fp=f, obj=arff_dump_v)
| true
|
967b60ef865868e98c9e1257b114f023bba8fea5
|
Python
|
ZhengJiaCode/Selection_strength_evolvability
|
/2020sel_Scripts/8_frequencyOfU.py
|
UTF-8
| 2,519
| 2.625
| 3
|
[] |
no_license
|
#This program is used for calculating the frequency of sequences carrying both G66S and Y204 during phase II evolution
import os
import csv
import sys
import re
GList=['II-1','II-2','II-3','II-4']
GSea=['phase-II_1st','phase-II_2nd','phase-II_3rd','phase-II_4th']
#the following code is used for grouping sequences from each generation of phase II evolution
def repSeq_File(filepath):
f1 = open(filepath, "r")
SNPlines = f1.readlines()
Numseq=[]
Seqlist=[]
for i in range (len(GList)):
Numseq.append(0)
Seqlist.append([])
for line in SNPlines:
if not line.strip(): continue
if re.search(">",line):
seqName=line
else:
for gs in range (len(GSea)):
if re.search(GSea[gs],seqName):
Seqlist[gs].append(line.strip())
Numseq[gs]=Numseq[gs]+1
return Seqlist
#ref indicates YFP (ancestor) protein sequence
ref="MVSKGEELFTGVVPILVELDGDVNGHKFSVSGEGEGDATYGKLTLKFICTTGKLPVPWPTLVTTFGYGLQCFARYPDHMKLHDFFKSAMPEGYVQERTIFFKDDGNYKTRAEVKFEGDTLVNRIELKGIDFKEDGNILGHKLEYNYNSHNVYIMADKQKNGIKVNFKIRHNIEDGSVQLADHYQQNTPIGDGPVLLPDNHYLSYQSALSKDPNEKRDHMVLLEFVTAAGITLGMDELYK*"
#the following code is used for calculating the frequency of YFP molecules which carry G66S and Y204 in each replicate population during each generation of phase II evolution
def SNP_list(seqlist):
numSeq=len(seqlist)
freList=[]
if numSeq>0:
n=0
for line in seqlist:
if line[65]=="S" and line[203]=='C':
n=n+1
fre="%.4f" %(float(100.0*n)/numSeq)
else:
fre=''
return fre
SNP_all=[]
def allSNP_File(filepath):
nowDir = os.path.split(filepath)[0]
fileName = os.path.split(filepath)[1]
Seqlist_each=repSeq_File(filepath)
SNP_each=[fileName[5:-7],fileName[5:-6],'U']
for g in range (len(GList)):
SNP_each.append(SNP_list(Seqlist_each[g]))
SNP_all.append(SNP_each)
#the following code is used for reading all input files (in the folder "~/ProSeq") which contain protein sequences of each evolving population sequenced by SMRT sequencing
def eachFile(filepath):
os.chdir(filepath)
pathDir = os.listdir(filepath)
for s in pathDir:
newDir=os.path.join(filepath,s)
if os.path.isfile(newDir) :
if os.path.splitext(newDir)[1]==".fasta":
allSNP_File(newDir)
eachFile("~/ProSeq")
#the following code is used for writing the result into a csv file
with open("~/7_frequency_of_U.csv", 'wb') as csvfile:
Wri = csv.writer(csvfile)
Wri.writerow(['Population','Replicate','Mutation']+GList)
for each in SNP_all:
Wri.writerow(each)
| true
|
0375f0a058ff4d9ab7ce7ff2653f1e5e13d0548f
|
Python
|
MIKOLAJW197/cityParking
|
/prog.py
|
UTF-8
| 3,276
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import thread  # unused (Python 2 only); the threaded server start below is commented out
import time
import Adafruit_BBIO.GPIO as GPIO
import Adafruit_BBIO.PWM as PWM
from flask import Flask, jsonify
from flask import request
from flask_cors import CORS, cross_origin
# Distance sensor configuration
GPIO.setup("P9_12", GPIO.OUT, initial=GPIO.LOW) # Trigger
GPIO.setup("P9_15", GPIO.IN, pull_up_down=GPIO.PUD_UP) # Echo
# SERVO config
servoPin = "P9_14"
PWM.start(servoPin, 5, 50)  # NOTE: or without the initial duty cycle of 5
# led config
greenLed = "P8_10"
redLed = "P8_9"
GPIO.setup("P8_10", GPIO.OUT) # green
GPIO.setup("P8_9", GPIO.OUT) # red
# config of restful-api
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
# note: 1 - free/unblocked, 2 - occupied by a car, 3 - reserved
parking = [
{"id": 0,
"type": "1"},
{"id": 1,
"type": "1"}
]
@app.route('/', methods=['GET'])
@cross_origin()
def api_root():
if request.method == 'GET':
return jsonify(parking)
#
#
@app.route('/block/<id>', methods=['GET'])
@cross_origin()
def api_block(id):
for spot in parking:
if (int(id) == spot['id']):
spot['type'] = 3
closeServo()
ledOff(greenLed)
ledOn(redLed)
return jsonify(parking)
#
#
@app.route('/unblock/<id>', methods=['GET'])
@cross_origin()
def api_unblock(id):
for spot in parking:
if (int(id) == spot['id']):
spot['type'] = 1
openServo()
ledOff(redLed)
ledOn(greenLed)
return jsonify(parking)
# def serverStart():
# app.run()
def distanceMeasurement(TRIG, ECHO):
# Measure the distance between HC-SR04 and nearest wall or solid object.
GPIO.output(TRIG, GPIO.HIGH)
time.sleep(0.00001)
GPIO.output(TRIG, GPIO.LOW)
pulseStart = time.time()
while GPIO.input(ECHO) == 0:
pulseStart = time.time()
while GPIO.input(ECHO) == 1:
pulseEnd = time.time()
pulseDuration = pulseEnd - pulseStart
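    # speed of sound is about 34300 cm/s; halved for the round trip -> 17150 cm/s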
distance = pulseDuration * 17150
distance = round(distance, 2)
return distance
def openServo():
dutyCycle = 12
PWM.set_duty_cycle(servoPin, dutyCycle)
def closeServo():
dutyCycle = 3
PWM.set_duty_cycle(servoPin, dutyCycle)
def ledOn(ledPin):
GPIO.output(ledPin, GPIO.HIGH)
def ledOff(ledPin):
GPIO.output(ledPin, GPIO.LOW)
def measure_average():
d1 = distanceMeasurement("P9_12", "P9_15")
time.sleep(1)
d2 = distanceMeasurement("P9_12", "P9_15")
time.sleep(1)
d3 = distanceMeasurement("P9_12", "P9_15")
time.sleep(1)
d4 = distanceMeasurement("P9_12", "P9_15")
distance = (d1 + d2 + d3 + d4) / 4
return distance
# main Loop
if __name__ == '__main__':
# _thread.start_new_thread(serverStart,())
ledOn(greenLed)
ledOff(redLed)
while(1):
if (parking[0]['type'] != 3):
recoveredDistance = measure_average()
if (recoveredDistance < 10):
ledOff(greenLed)
ledOn(redLed)
parking[0]['type'] = 2
time.sleep(10)
else:
ledOff(redLed)
ledOn(greenLed)
parking[0]['type'] = 1
else:
time.sleep(10)
| true
|
e2c38b8d182f49f96e020ceb3d3f7a00c3a84d24
|
Python
|
1cg2cg3cg/BOJ
|
/1065_ํ์.py
|
UTF-8
| 418
| 3.234375
| 3
|
[] |
no_license
|
import sys
N = int(sys.stdin.readline().strip())
number = 0
if N < 100 :
    number = N
else :
    number = 99
    for i in range(100, N+1) :
        A = str(i)
        Dif = []
        for j in range(1, len(A)) :
            Dif.append(int(A[j]) - int(A[j-1]))
        Dif = set(Dif)
        if len(Dif) == 1 :
            number += 1
print(number)
| true
|
96418dc8f0152c90c5bb19c04f27eff92d251e83
|
Python
|
Isen-kun/Python-Programming
|
/Sem_3_Lab/05.01.21/asign.py
|
UTF-8
| 893
| 4.59375
| 5
|
[] |
no_license
|
def rect_area():
l = int(input("Enter rectangle's length: "))
b = int(input("Enter rectangle's breadth: "))
ar = l * b
print("The area of rectangle is", ar)
def tri_area():
    h = int(input("Enter triangle's height: "))
    b = int(input("Enter triangle's base: "))
    ar = 0.5 * b * h
    print("The area of triangle is", ar)
def cir_area():
    r = int(input("Enter circle's radius: "))
    pi = 3.14
    ar = pi * r * r
    print("The area of circle is", ar)
while(1):
    choice = int(input(
        "Select the shape whose area you want to find: \n 1. Rectangle \n 2. Triangle \n 3. Circle \n 4. Exit \n"))
rect_area()
elif choice == 2:
tri_area()
elif choice == 3:
cir_area()
elif choice == 4:
break
else:
print("Sorry! This shape is not available")
| true
|
72bd593fc69430dbae7ec98dbfc64e5cacc253bc
|
Python
|
sindish/unitedwelearn
|
/object_detection/detectron2/datasets/prepare_bdd100k.py
|
UTF-8
| 3,208
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
import cv2
import json
from pathlib import Path
def convert_bdd_coco_format(split) -> None:
data_folder = Path("/media/deepstorage01/datasets_external/BDD100K")
json_file = data_folder / "bdd100k_labels_release" / f"bdd100k_labels_images_{split}.json"
image_root = Path("/media/deepstorage01/datasets_external/bdd100k/bdd100k/images/100k") / f"{split}"
with open(json_file) as file:
results = json.load(file)
# all_categories = {'traffic sign': 1, 'traffic light': 2, 'car': 3, 'rider': 4, 'motorcycle': 5,
# 'pedestrian': 6, 'bus': 7, 'truck': 8, 'bicycle': 9, 'other vehicle': 10,
# 'train': 11, 'trailer': 12, 'other person': 13}
old_categories = {'traffic sign': 9, 'traffic light': 8, 'car': 3, 'rider': 6, 'motor': 4,
'person': 7, 'bus': 1, 'truck': 2, 'bike': 5, 'train': 10}
"""
Group 1: 1-Bus, 2-Truck, 3-Car
Group 2: 4-Motor, 5-Bike
Group 3: 6-Rider, 7-Person
Group 4: 8-Traffic light, 9-Traffic sign
Group 5: 10-Train
"""
list_categories = []
for name, index in old_categories.items():
single_cat = {'id': index, 'name': name, 'supercategory': name}
list_categories.append(single_cat)
images = []
annotations = []
for image_id, img_gt in enumerate(results):
image_name = img_gt["name"]
imagepath = image_root / image_name
img = cv2.imread(str(imagepath))
height, width, c = img.shape
        img_dict = {
            "license": 0,
            "file_name": image_name,
            "coco_url": "",
            "height": height,
            "width": width,
            "date_captured": "",
            "id": image_id,
        }
images.append(img_dict)
detection_gt = img_gt["labels"] if img_gt["labels"] is not None else []
for bbox_i in detection_gt:
label_id = old_categories.get(bbox_i["category"], -1)
if label_id > 0:
coord = bbox_i["box2d"]
# coco format: [x1, y1, width, height] in pixel
coco_box = [coord["x1"], coord["y1"], coord["x2"] - coord["x1"], coord["y2"] - coord["y1"]]
box_area = coco_box[2] * coco_box[3]
# ignore the box which is not included in the categories defined
annotations_dict = {
"segmentation": {"counts": None}, # to support usage of tide analysis
"area": box_area,
"iscrowd": 0,
"image_id": image_id,
"bbox": coco_box,
"category_id": label_id,
"id": bbox_i["id"],
}
annotations.append(annotations_dict)
json_file = data_folder / "bdd100k_labels_detection20" / f"det_v1_{split}_detectron2_format.json"
with json_file.open("w") as file:
instances = {
"annotations": annotations,
"images": images,
"categories": list_categories,
}
json.dump(instances, file, indent=2)
if __name__ == "__main__":
convert_bdd_coco_format(split="val")
convert_bdd_coco_format(split="train")
| true
|
58b367f5a0259a9ac38fde53d66fe137503aa409
|
Python
|
HardikSingh97/hebi-python-examples
|
/kits/arm/ex_teach_repeat_armApi_w_gripper.py
|
UTF-8
| 4,370
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import arm
import hebi
from hebi.util import create_mobile_io
from time import sleep
# Set up arm
family_name = "Arm"
module_names = ["J1_base", "J2_shoulder", "J3_elbow", "J4_wrist1", "J5_wrist2", "J6_wrist3"]
hrdf = "hrdf/A-2085-06G.hrdf"
gripper_name = "gripperSpool"
p = arm.ArmParams(family_name, module_names, hrdf, hasGripper=True, gripperName=gripper_name)
a = arm.Arm(p)
a.loadGains("gains/A-2085-06.xml")
a.gripper.loadGains("gains/gripper_spool_gains.xml")
a.gripper.open()
# Mobile device setup
phone_family = 'Arm'
phone_name = "mobileIO"
lookup = hebi.Lookup()
sleep(2)
print('Waiting for Mobile IO device to come online...')
m = create_mobile_io(lookup, phone_family, phone_name)
if m is None:
raise RuntimeError("Could not find Mobile IO device")
m.update()
abort_flag = False
waypoints = []
grip_states = []
flow = []
durations = []
run_mode = "training"
print("")
print("B1 - Add waypoint (stop)")
print("B2 - Add waypoint (stop) and toggle the gripper")
print("B3 - Add waypoint (flow)")
print("B5 - Toggle training/playback")
print("B6 - Clear waypoints")
print("A3 - Up/down for longer/shorter time to waypoint")
print("B8 - Quit")
print("")
while not abort_flag:
# Update arm and mobile io
a.update()
# Update button states
if not m.update():
print("Failed to get feedback from MobileIO")
continue
slider3 = m.get_axis_state(3)
# B8 Check for quit
if m.get_button_diff(8) == 3: # "ToOn"
abort_flag = True
break
if run_mode == "training":
# B1 add waypoint (stop)
if m.get_button_diff(1) == 3: # "ToOn"
print("Stop waypoint added")
waypoints.append(a.fbk.position)
flow.append(False)
durations.append(slider3 + 4)
grip_states.append(a.gripper.state)
# B2 add waypoint (stop) and toggle the gripper
if m.get_button_diff(2) == 3:
# Add 2 waypoints to allow the gripperr to open or close
print("Stop waypoint added and gripper toggled")
# first waypoint
waypoints.append(a.fbk.position)
flow.append(False)
grip_states.append(a.gripper.state)
durations.append(slider3 + 4)
# toggle the gripper
a.gripper.toggle()
# second waypoint
waypoints.append(a.fbk.position)
flow.append(False)
grip_states.append(a.gripper.state) # this will now be the toggled state
durations.append(2) # time given to gripper for closing
# B3 add waypoint (flow)
if m.get_button_diff(3) == 3: # "ToOn"
print("Flow waypoint added")
waypoints.append(a.fbk.position)
flow.append(True)
durations.append(slider3 + 4)
grip_states.append(a.gripper.state)
# B5 toggle training/playback
if m.get_button_diff(5) == 3: # "ToOn"
# Check for more than 2 waypoints
if len(waypoints) > 1:
print("Starting playback of waypoints")
run_mode = "playback"
playback_waypoint = 0 # reset playback_waypoint before starting playback
a.gripper.open()
a.send()
continue
else:
print("At least two waypoints are needed")
# B6 clear waypoints
if m.get_button_diff(6) == 3: # "ToOn"
print("Waypoints cleared")
waypoints = []
flow = []
durations = []
grip_states = []
if run_mode == "playback":
# B5 toggle training/playback, leave playback
if m.get_button_diff(5) == 3: # "ToOn"
print("Returned to training mode")
run_mode = "training"
a.cancelGoal()
if a.at_goal:
print("Reached waypoint number:", playback_waypoint) # waypoint 0 is position from where you start the playback
if playback_waypoint == len(waypoints): # finished playback, reset counter and restart
playback_waypoint = 0
# Set up next waypoint according to waypoint counter
next_waypoint = waypoints[playback_waypoint]
next_flow = flow[playback_waypoint]
next_duration = durations[playback_waypoint]
# Send the new commands
a.createGoal([next_waypoint], flow=[next_flow], duration=[next_duration])
a.setGoal()
a.gripper.setState(grip_states[playback_waypoint])
# Iterate waypoint counter
playback_waypoint += 1
a.send()
| true
|
0f6dc96354cfa59ccadadf8b57986175224132b2
|
Python
|
Menci/TuringAdvancedProgramming19A
|
/Task 2/evaluator-checker/expression.py
|
UTF-8
| 1,623
| 3.25
| 3
|
[] |
no_license
|
from math import copysign
from utils import float_to_string
class Expression:
def __init__(self, type, rng, value=None, operator=None, left_operand=None, right_operand=None):
self.type = type
self.value = value
self.operator = operator
self.left_operand = left_operand
self.right_operand = right_operand
self.rng = rng
def evaluate(self):
if self.type == "number":
return self.value
elif self.operator == "+":
return self.left_operand.evaluate() + self.right_operand.evaluate()
elif self.operator == "-":
return self.left_operand.evaluate() - self.right_operand.evaluate()
elif self.operator == "*":
return self.left_operand.evaluate() * self.right_operand.evaluate()
else:
left_value = self.left_operand.evaluate()
right_value = self.right_operand.evaluate()
try:
return left_value / right_value
except ZeroDivisionError:
if left_value == 0:
return float("nan")
else:
return copysign(float("inf"), copysign(1, left_value) * copysign(1, right_value))
def to_string(self):
if self.type == "number":
return float_to_string(self.value)
format_string = [
"(%s) %s (%s)",
"(%s)%s (%s)",
"(%s) %s(%s)",
"(%s)%s(%s)",
][self.rng.randint(0, 3)]
return format_string % (self.left_operand.to_string(), self.operator, self.right_operand.to_string())
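

if __name__ == "__main__":
    # Minimal self-check sketch; the type string "binary" is an assumption,
    # since evaluate() only distinguishes "number" from everything else.
    import random
    rng = random.Random(0)
    one = Expression("number", rng, value=1.0)
    zero = Expression("number", rng, value=0.0)
    quotient = Expression("binary", rng, operator="/", left_operand=one, right_operand=zero)
    print(quotient.evaluate())  # inf, via the ZeroDivisionError branch above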
| true
|
f8dd9b0b915846d859f03adea7c4b10b6856b361
|
Python
|
vikpe/pydynamiccalc
|
/custom/calculators.py
|
UTF-8
| 368
| 2.953125
| 3
|
[] |
no_license
|
from pykm.calculators import AbstractPriceCalculator
class DiscountForGoblinsCalculator(AbstractPriceCalculator):
GOBLIN_FACTOR: float = 0.8
@classmethod
def calculate_price(cls, card_info: dict) -> float:
price = card_info["price"]
if "goblin" in card_info["name"].lower():
price *= cls.GOBLIN_FACTOR
return price
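
# Usage sketch (the card_info keys match calculate_price above; the values are made up):
#   DiscountForGoblinsCalculator.calculate_price({"name": "Goblin King", "price": 10.0})
#   returns 8.0, i.e. the 20% goblin discount.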
| true
|
471df3e26d733174abf4948c3a990e62fe86ac66
|
Python
|
ECE-492-W2020-Group-6/smart-blinds-rpi
|
/scripts/check_motor_interactive.py
|
UTF-8
| 1,712
| 3
| 3
|
[
"MIT"
] |
permissive
|
"""
Date: Feb 26, 2020
Author: Ishaat Chowdhury
Contents: Motor test script
"""
from easydriver.easydriver import EasyDriver, PowerState, MicroStepResolution, StepDirection
from gpiozero.pins.rpigpio import RPiGPIOFactory
from gpiozero import Device
import time
import RPi.GPIO as rpigpio
if __name__ == "__main__":
STEP_PIN = 20
DIR_PIN = 21
ENABLE_PIN = 25
MS1_PIN = 24
MS2_PIN = 23
rpigpio.setmode(rpigpio.BCM)
rpigpio.setwarnings(False)
Device.pin_factory = RPiGPIOFactory()
driver = EasyDriver(step_pin=STEP_PIN,
dir_pin=DIR_PIN,
ms1_pin=MS1_PIN,
ms2_pin=MS2_PIN,
enable_pin=ENABLE_PIN)
print("Starting script...")
while True:
step_input = input("Number of steps: ")
steps = int(step_input)
dir_input = input("Direction [fwd or rev]: ")
direction = StepDirection.FORWARD if dir_input.lower() in \
["fwd", "forward", "f"] else StepDirection.REVERSE
ms_res_input = input("Microstep Resolution: [full or half or quarter or eighth]: ")
d = {
"full": MicroStepResolution.FULL_STEP,
"1": MicroStepResolution.FULL_STEP,
"half": MicroStepResolution.HALF_STEP,
"1/2": MicroStepResolution.HALF_STEP,
"quarter": MicroStepResolution.QUARTER_STEP,
"1/4": MicroStepResolution.QUARTER_STEP,
"eigth": MicroStepResolution.EIGHTH_STEP,
"1/8": MicroStepResolution.EIGHTH_STEP,
}
microstep_resolution = d[ms_res_input]
driver.microstep_resolution = microstep_resolution
driver.step(steps=steps, direction=direction)
| true
|
de79984049b64957e0f971ba0b116b5573913eac
|
Python
|
Hyunwoo29/keras01
|
/ml/m21_pickle.py
|
UTF-8
| 1,529
| 2.640625
| 3
|
[] |
no_license
|
from os import scandir
from xgboost import XGBRegressor
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.metrics import r2_score
from sklearn.preprocessing import MinMaxScaler, StandardScaler
#1. data
datasets = load_boston()
x = datasets["data"]
y = datasets["target"]
# print(x.shape, y.shape) (506, 13) (506,)
x_train, x_test, y_train, y_test = train_test_split(x, y,
shuffle=True, random_state=66, train_size=0.8)
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
model = XGBRegressor(n_estimators=1000, learning_rate=0.1, n_jobs=1)
model.fit(x_train,y_train, verbose=1, eval_metric=['rmse', 'mae', 'logloss'],
eval_set=[(x_train, y_train), (x_test, y_test)],
early_stopping_rounds=10
)
results = model.score(x_test, y_test)
print("results = ", results)
y_predict = model.predict(x_test)
r2 = r2_score(y_test, y_predict)
print("r2 = ", r2)
print("=============================================")
hist = model.evals_result()
print(hist)
import matplotlib.pyplot as plt
epochs = len(hist['validation_0']['logloss'])
x_axis = range(0,epochs)
plt.subplots()
plt.plot(x_axis, hist['validation_0']['logloss'], label='Train')
plt.plot(x_axis, hist['validation_1']['logloss'], label='Test')
plt.legend()
plt.ylabel('Log loss')
plt.title('XGBoost Log Loss')
plt.show()
import pickle
pickle.dump(model, open('./_data/xgb_save/m21_pickle.dat', 'wb'))
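
# Round-trip sketch: loading the dumped model back and re-scoring it
# (the path matches the dump above):
#   loaded = pickle.load(open('./_data/xgb_save/m21_pickle.dat', 'rb'))
#   print("loaded r2 = ", r2_score(y_test, loaded.predict(x_test)))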
| true
|
39d76b252e7c8486f008f9c9ffbe420803aa73dd
|
Python
|
fanyuguang/lstm-text-classification
|
/tfrecords_utils.py
|
UTF-8
| 4,770
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import data_utils
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('tfrecords_path', 'data/tfrecords/', 'tfrecords directory')
tf.app.flags.DEFINE_integer('batch_size', 1024, 'words batch size')
tf.app.flags.DEFINE_integer('min_after_dequeue', 10000, 'min after dequeue')
tf.app.flags.DEFINE_integer('num_threads', 1, 'read batch num threads')
tf.app.flags.DEFINE_integer('num_steps', 50, 'num steps, equals the length of words')
def create_record(word_datasets, label_datasets, tfrecords_path):
    print('Create record to ' + tfrecords_path)
writer = tf.python_io.TFRecordWriter(tfrecords_path)
for (word_ids, label_ids) in zip(word_datasets, label_datasets):
word_list = [int(word) for word in word_ids.strip().split() if word]
label = [int(label_ids)]
example = tf.train.Example(features=tf.train.Features(feature={
'words': tf.train.Feature(int64_list=tf.train.Int64List(value=word_list)),
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=label)),
}))
writer.write(example.SerializeToString())
writer.close()
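
# Each serialized Example written above carries two int64 features: a
# variable-length "words" id sequence and a single-element "label".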
def read_and_decode(tfrecords_path):
    print('Read record from ' + tfrecords_path)
filename_queue = tf.train.string_input_producer([tfrecords_path], num_epochs=None)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(serialized_example, features={
# 'words': tf.FixedLenFeature([50], tf.int64),
'words': tf.VarLenFeature(tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
})
num_steps = FLAGS.num_steps
words = features['words']
words = tf.sparse_to_dense(sparse_indices=words.indices[:num_steps], output_shape=[num_steps],
sparse_values=words.values[:num_steps], default_value=0)
label = features['label']
batch_size = FLAGS.batch_size
min_after_dequeue = FLAGS.min_after_dequeue
capacity = min_after_dequeue + 3 * batch_size
num_threads = FLAGS.num_threads
words_batch, label_batch = tf.train.shuffle_batch([words, label], batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue, num_threads=num_threads)
return words_batch, label_batch
def print_all(tfrecords_path):
number = 1
for serialized_example in tf.python_io.tf_record_iterator(tfrecords_path):
example = tf.train.Example()
example.ParseFromString(serialized_example)
words = example.features.feature['words'].int64_list.value
labels = example.features.feature['label'].int64_list.value
word_list = [word for word in words]
labels = [label for label in labels]
print('Number:{}, label: {}, features: {}'.format(number, labels, word_list))
number += 1
def print_shuffle(tfrecords_path):
words_batch, label_batch = read_and_decode(tfrecords_path)
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
while not coord.should_stop():
batch_words_r, batch_label_r = sess.run([words_batch, label_batch])
                print('batch_words_r : ', batch_words_r.shape)
                print(batch_words_r)
                print('batch_label_r : ', batch_label_r.shape)
                print(batch_label_r)
except tf.errors.OutOfRangeError:
            print('Done reading')
finally:
coord.request_stop()
coord.join(threads)
def main(_):
train_path = FLAGS.train_path
ids_path = FLAGS.ids_path
vocab_path = FLAGS.vocab_path
vocab_size = FLAGS.vocab_size
tfrecords_path = FLAGS.tfrecords_path
train_percent = FLAGS.train_percent
val_percent = FLAGS.val_percent
words_vocab = data_utils.create_vocabulary(train_path, os.path.join(vocab_path, 'words_vocab.txt'), vocab_size)
datasets = data_utils.prepare_datasets(train_path, ids_path, vocab_path, words_vocab, train_percent, val_percent)
train_word_ids_list, train_label_ids_list, validation_word_ids_list, validation_label_ids_list, \
test_word_ids_list, test_label_ids_list = datasets
create_record(train_word_ids_list, train_label_ids_list, os.path.join(tfrecords_path, 'train.tfrecords'))
create_record(validation_word_ids_list, validation_label_ids_list, os.path.join(tfrecords_path, 'validate.tfrecords'))
create_record(test_word_ids_list, test_label_ids_list, os.path.join(tfrecords_path, 'test.tfrecords'))
print_all(os.path.join(tfrecords_path, 'test.tfrecords'))
# print_shuffle(os.path.join(tfrecords_path, 'test.tfrecords'))
if __name__ == '__main__':
tf.app.run()
| true
|
6d89d0e082692b421ad1e8c244ab8d8c3326798f
|
Python
|
kjans123/CPS_testing
|
/maxDifference02.py
|
UTF-8
| 271
| 2.953125
| 3
|
[] |
no_license
|
def maxFindDiff(inputList):
diffList = []
    for i in range(len(inputList) - 1):
        oneDiff = abs(inputList[i] - inputList[i+1])
        diffList.append(oneDiff)
maxxVal = round(max(diffList), 5)
return maxxVal
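
# Example: maxFindDiff([1, 4, 2, 9]) returns 7, the largest neighbour gap |2 - 9|.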
| true
|
444e53533bed6a74c96970699edd277373b72272
|
Python
|
croptek/sensors
|
/test/RHT03-tmp&hum/RHT03-tmp&hum.py
|
UTF-8
| 1,348
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import sys
#import redis
import Adafruit_DHT
import time
confDoc = open('config.txt','r')
sysName = confDoc.readline().rstrip()
tmpSensorName = confDoc.readline().rstrip()
humSensorName = confDoc.readline().rstrip()
pin = int(confDoc.readline())
updateRate = int(confDoc.readline())
redisName = 'localhost'
#print 'Server: '+redisName+', system: '+sysName+', tmp: '+tmpSensorName+', hum: '+humSensorName+', pin: '+str(pin)+', updateRate: '+str(updateRate)
#DB = redis.StrictRedis(host=redisName, port=6379, db=0)
if updateRate < 2:
    updateRate = 2
while True:
    # Try to grab a sensor reading. Use the read_retry method, which will retry up
    # to 15 times to get a sensor reading (waiting 2 seconds between each retry).
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, pin)
# Note that sometimes you won't get a reading and
# the results will be null (because Linux can't
# guarantee the timing of calls to read the sensor).
# If this happens try again!
if humidity is not None and temperature is not None:
#DB.set(sysName+'.Sensors.'+tmpSensorName+'.val', str(temperature))
#DB.set(sysName+'.Sensors.'+humSensorName+'.val', str(humidity))
        print('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity))
else:
        print('Failed to get reading. Try again!')
time.sleep(updateRate)
| true
|
ef8e69ab6081c4db76f0af1a89b42c6f76b01673
|
Python
|
SamProkopchuk/maps20
|
/D.py
|
UTF-8
| 301
| 3.171875
| 3
|
[] |
no_license
|
import math as m
for i in range(int(input())):
n, l, d, g = list(map(int, input().split()))
# n = sides
# l = side length
# d = expansion distance
# g = land grabs
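    # apothem of a regular n-gon with side l: tan(pi*(n-2)/(2n)) equals cot(pi/n),
    # so the next line computes l / (2 * tan(pi/n))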
    apothem = l / 2 * m.tan(m.pi * (n - 2) / (2 * n))
    area = 0.5 * n * l * apothem
ds = g*d
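    # growing the polygon outward by ds is a Minkowski sum with a disk of radius ds:
    # the area gains pi*ds^2 from the rounded corners plus perimeter (n*l) * ds
    # from the strips along the sides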
area += (ds)**2*m.pi
area += n * l * ds
print(area)
| true
|
2174fc702399b702d9324ce564fc2d461bf1e208
|
Python
|
rushkock/project
|
/project/data/scripts/convertCSV2JSONus.py
|
UTF-8
| 1,042
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
# Name: Ruchella Kock
# Student number: 12460796
"""
This script transforms the us csv datafiles to a JSON file
"""
import pandas as pd
def main():
columns = ["FIPS", "State", "Substate Region", "Small Area Estimate",
"95% CI (Lower)", "95% CI (Upper)"]
# choose column names
names_columns = ["FIPS", "state", "substate", "percentage", "lower_CI",
"upper_CI"]
    # read csv into dataframe
df = pd.read_csv("../csv/depression.csv", usecols=columns)
df.columns = names_columns
df["percentage"] = df["percentage"].map(lambda x: x.rstrip("%"))
df["percentage"] = pd.to_numeric(df["percentage"])
df["lower_CI"] = df["lower_CI"].map(lambda x: x.rstrip("%"))
df["lower_CI"] = pd.to_numeric(df["lower_CI"])
df["upper_CI"] = df["upper_CI"].map(lambda x: x.rstrip("%"))
df["upper_CI"] = pd.to_numeric(df["upper_CI"])
# set to json file
df.to_json(path_or_buf="json/depression.json", orient="records")
if __name__ == '__main__':
main()
| true
|
4072f3124a82c0a1d17fe3c9fbeeedd0babd7eef
|
Python
|
Manon-des-sources/C003-python
|
/NoteBook/3001-lesson_codes/23-002-dice.py
|
UTF-8
| 942
| 3.859375
| 4
|
[] |
no_license
|
# coding=utf-8
#!python3
# =======================================================================
"""
ๆฅๆบ๏ผ
้ฎ้ข๏ผ
ๆฅๅฃ๏ผ
่ฏดๆ๏ผ
"""
# =======================================================================
# modules section
import random
# Use one 11-sided die in place of two 6-sided dice to produce totals from 2 to 12
totals = [0,0,0,0, 0,0,0,0, 0,0,0,0, 0]
for i in range(1000):
    dice_total = random.randint(2, 12)
    totals[dice_total] += 1
print('one die 11:')
print('index\t', 'times')
for i in range(2, 13):
print(i, '\t', totals[i])
# Use two 6-sided dice and add their rolls; the total lands in 2-12
totals_2 = [0,0,0,0, 0,0,0,0, 0,0,0,0, 0]
for i in range(1000):
die_1 = random.randint(1, 6)
die_2 = random.randint(1, 6)
    dice_total = die_1 + die_2
    totals_2[dice_total] += 1
print('two die 6:')
print('index\t', 'times')
for i in range(2, 13):
print(i, '\t', totals_2[i])
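
# Note the contrast the script demonstrates: the single 11-sided die is uniform
# over 2-12 (about 91 hits per value in 1000 rolls), while the sum of two
# 6-sided dice is triangular, peaking at 7.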
| true
|
cc083918fe768c6d0a4193a3db884590eadfc7ef
|
Python
|
drestion/leetcode
|
/python/ContainsDuplicate.py
|
UTF-8
| 670
| 3.421875
| 3
|
[] |
no_license
|
from typing import List


class Solution:
# def containsDuplicate(self, nums: List[int]) -> bool:
# # brutal force is O(n2) as it needs to check at least two elements
# # to speed up, needs memory, with hash
# num_dict = {}
# for n in nums:
# if n in num_dict.keys():
# return True
# else:
# num_dict[n] = 1
# return False
def containsDuplicate(self, nums: List[int]) -> bool:
# can also sort
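        # Sorting costs O(n log n) time with O(1) extra space, while the hash-set
        # version sketched above is O(n) time at the cost of O(n) extra memory.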
nums = sorted(nums)
for i in range(len(nums)-1):
if nums[i] == nums[i+1]:
return True
return False
| true
|
5fab2aa8384c6585fe746d079a7524cf449530a1
|
Python
|
daniellealll/rootcow
|
/admin/services/instituicao_service.py
|
UTF-8
| 1,350
| 2.578125
| 3
|
[] |
no_license
|
from admin.models.instituicao import Instituicao
from admin.dao import instituicao_dao
def listar():
instituicoes = []
instituicoes_bd = instituicao_dao.listar()
for instituicao_bd in instituicoes_bd:
instituicoes.append(Instituicao(instituicao_bd['nome'], instituicao_bd['telefone'], instituicao_bd['email'], instituicao_bd['endereco'],
instituicao_bd['id_instituicao']))
return instituicoes
def localizar(id_instituicao):
instituicao_bd = instituicao_dao.localizar(id_instituicao)
return Instituicao(instituicao_bd['nome'], instituicao_bd['telefone'], instituicao_bd['email'], instituicao_bd['endereco'],
instituicao_bd['id_instituicao'])
def criar(nome, telefone, email, endereco):
nova_instituicao = Instituicao(nome, telefone, email, endereco)
nova_instituicao.id_instituicao = instituicao_dao.criar(nova_instituicao)
return nova_instituicao
def atualizar(id_instituicao, nome, telefone, email, endereco):
instituicao_atualizada = Instituicao(nome, telefone, email, endereco, id_instituicao)
instituicao_dao.atualizar(instituicao_atualizada)
return instituicao_atualizada
def deletar(id_instituicao):
instituicao = localizar(id_instituicao)
instituicao_dao.deletar(id_instituicao)
return instituicao
| true
|
a3008a097955b83e56ca6b7ace08373da99fdb68
|
Python
|
Pavlenkovv/e-commerce
|
/HW5/Task_2/fraction.py
|
UTF-8
| 1,986
| 3.765625
| 4
|
[] |
no_license
|
"""ะกะพะทะดะฐะนัะต ะบะปะฐัั ยซะัะฐะฒะธะปัะฝะฐั ะดัะพะฑัยป ะธ ัะตะฐะปะธะทัะนัะต ะผะตัะพะดั ััะฐะฒะฝะตะฝะธั,
ัะปะพะถะตะฝะธั, ะฒััะธัะฐะฝะธั ะธ ะฟัะพะธะทะฒะตะดะตะฝะธั ะดะปั ัะบะทะตะผะฟะปััะพะฒ ััะพะณะพ ะบะปะฐััะฐ."""
from math import gcd
class Fraction:
def __init__(self, a, b):
if not isinstance(a, int):
raise TypeError('a')
if not isinstance(b, int):
raise TypeError('b')
if b == 0:
raise ZeroDivisionError()
self.a, self.b = a, b
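
    def __eq__(self, other):
        # comparison method, as the docstring requests (a sketch: cross-multiplying
        # avoids reducing both fractions to lowest terms first)
        if not isinstance(other, Fraction):
            return NotImplemented
        return self.a * other.b == other.a * self.b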
def __add__(self, other):
        if not isinstance(other, Fraction):
return NotImplemented
a = self.a * other.b + other.a * self.b
b = self.b * other.b
return Fraction(a, b)
def __sub__(self, other):
        if not isinstance(other, Fraction):
return NotImplemented
a = self.a * other.b - other.a * self.b
b = self.b * other.b
return Fraction(a, b)
def __mul__(self, other):
        if not isinstance(other, Fraction):
return NotImplemented
a = self.a * other.a
b = self.b * other.b
return Fraction(a, b)
def __truediv__(self, other):
        if not isinstance(other, Fraction):
            return NotImplemented
        elif other.a == 0:
raise ZeroDivisionError()
a = self.a * other.b
b = self.b * other.a
return Fraction(a, b)
def __str__(self):
nod = gcd(self.a, self.b)
self.a, self.b = self.a // nod, self.b // nod
if abs(self.a) > abs(self.b):
part1 = self.a // self.b
part2 = self.a % self.b
return f'{part1}({part2}/{self.b})'
        elif abs(self.a) == abs(self.b):
            return str(self.a // self.b)
else:
return f'{self.a}/{self.b}'
# return f'{self.a}/{self.b}'
x1 = Fraction(1, 2)
x2 = Fraction(3, 4)
c = x1 + x2
print(x1, x2, c, sep='; ')
| true
|
15d06a5a751c33bb07ac2c767d280f2fd973bfce
|
Python
|
evershinemj/python
|
/songlist.py
|
UTF-8
| 592
| 3.703125
| 4
|
[] |
no_license
|
#!/usr/bin/env python3
# encoding=utf-8
# from collections import Iterable
# this form of import doesn't raise warning
from collections.abc import Iterable
class Songlist(Iterable):
    '''
    the advantage of inheriting collections.abc.Iterable
    is that if you do not implement __iter__, an
    exception is raised when the class is instantiated
    '''
def __init__(self, *args):
self.songs = args
def __iter__(self):
return iter(self.songs)
if __name__ == '__main__':
songlist = Songlist('ๅๅฒ้็', '่ฝจ่ฟน', 'ไธ้ฃ็ ด', 'ๅๅฆ้ช')
for s in songlist:
print(s)
| true
|
6ca7352f65a3849abdfb9bcdffb44b1f794629c0
|
Python
|
RawitSHIE/Algorithms-Training-Python
|
/python/Time Machine r.py
|
UTF-8
| 283
| 3.25
| 3
|
[] |
no_license
|
"""calendar"""
def main():
"""step"""
month = str(input())
step = int(input())%12
almonth = "JANFEBMARAPRMAYJUNJULAUGSEPOCTNOVDECJANFEBMARAPRMAYJUNJULAUGSEPOCTNOVDEC"
loca = almonth.find(month)
pos = (loca+2)+((step*3)-2)
print(almonth[pos:pos+3])
main()
| true
|
20eda7d2c70ff03c184743d576da4260f9aea285
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_206/1219.py
|
UTF-8
| 912
| 2.96875
| 3
|
[] |
no_license
|
import argparse
def solve():
pass
def main(f_in, f_out):
t = int(f_in.readline().strip())
for case in range(1, t+1):
d, n = f_in.readline().strip().split()
d = int(d)
n = int(n)
horses = []
for r in range(n):
k, s = f_in.readline().strip().split()
horses.append([int(k), int(s)])
sorted_horses = sorted(horses, key=lambda x: x[0], reverse=True)
print(sorted_horses)
max_time = 0
for h in sorted_horses:
time = (d - h[0] * 1.0)/h[1]
max_time = max(max_time, time)
solution = d * 1.0 / max_time
f_out.write('Case #{}: {:.8f}\n'.format(case, solution))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('infile')
opts = parser.parse_args()
infile = opts.infile
outfile = infile.split('.')[0]+'.out'
with open(infile, 'r') as f_in:
with open(outfile, 'w') as f_out:
main(f_in, f_out)
| true
|
47a3f84c74dfaa6c87e829e14b1fc0cd6a677f6a
|
Python
|
ess2/AlgoritmosBioinspirados
|
/Bioinspirada/MiniProjeto1/bin/8QueensAlgorithm.py
|
UTF-8
| 5,195
| 3.34375
| 3
|
[] |
no_license
|
import numpy
import random
import time
max_fitness = 28
qt_exec = 20
qt_population = 200
def main():
for f in range(qt_exec):
inicio = time.time()
population = list()
qtFitness = 0
        # Generate the population (resample whenever a duplicate permutation is drawn)
        i = 0
        while i < qt_population:
            a = random.sample(range(1, 9), 8)
            if a not in [indiv for indiv, _ in population]:
                fitn = fitness(a)
                qtFitness += 1
                individual = (a, fitn)
                population.append(individual)
                binaryArray = ['{0:03b}'.format(gene) for gene in a]
                # print("Genotype " + str(i) + ":" + str(binaryArray))
                i += 1
population.sort(key=takeSecond,reverse=True)
#(population[0])[1] < max_fitness
while (qtFitness < 10000):
probRecombNumber = random.randint(1,10)
probMutNumber = random.randint(1,10)
if (probRecombNumber > 0) and (probRecombNumber < 10):
firstEl = population[0]
secondEl = population[1]
childs = reproduce(firstEl[0], secondEl[0])
fitness1 = fitness(childs[0])
fitness2 = fitness(childs[1])
qtFitness += 2
tuple = (childs[0], fitness1)
tuple2 = (childs[1], fitness2)
population = excludeBad(population, [tuple, tuple2])
population.sort(key=takeSecond, reverse=True)
if(probMutNumber > 0) and (probMutNumber < 5):
firstEl = population[0]
child = mutate(firstEl[0])
fitn = fitness(child)
qtFitness += 1
tuple = (child, fitn)
population = excludeBad(population, [tuple])
population.sort(key=takeSecond, reverse=True)
        # print("Best solution:")
        # element = population[0]
        # binaryArray = ['{0:03b}'.format(gene) for gene in element[0]]
        # print(binaryArray)
        # print("Fitness:")
        # print(population[0][1])
        # print("Number of fitness evaluations: " + str(qtFitness))
qtMaxFit = 0
for i in range(len(population)):
if (population[i])[1] == max_fitness:
qtMaxFit += 1
else:
break
print "Quantidade de indรญviduos que atingiram o fitness mรกximo:" + str(qtMaxFit)
fim = time.time()
print "Tempo de Execuรงรฃo " + str(f) + ": " + str((fim - inicio))
print "################################################################"
def fitness(individual):
clashes = 0;
# calculate row and column clashes
# just subtract the unique length of array from total length of array
# [1,1,1,2,2,2] - [1,2] => 4 clashes
row_col_clashes = abs(len(individual) - len(numpy.unique(individual)))
clashes += row_col_clashes
# calculate diagonal clashes
for i in range(len(individual)):
for j in range(len(individual)):
if (i != j):
dx = abs(i - j)
dy = abs(individual[i] - individual[j])
if (dx == dy):
clashes += 1
#print max_fitness - clashes
return max_fitness - clashes
def mutate(x):
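    # Swap-based mutation: exchange the value at a random column c with the column
    # holding a random value m, so the individual remains a permutation of 1..8.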
n = len(x)
c = 0
a = 0
while(c==a):
c = random.randint(0, n - 1)
m = random.randint(1, n)
t = x[c]
a = x.index(m)
x[c] = m
x[a] = t
return x
def reproduce(x, y):
n = len(x)
c = random.randint(0, n - 1)
childX = x[0:(c+1)]
childY = y[0:(c+1)]
d = c + 1
if d==n or d==(n-1):
return (x, y)
else:
for k in range(d,n):
#Generate new elements for Child Y
count = k
for i in range(k,n):
tempX = x[i]
if tempX not in childY:
childY+=[tempX]
count += 1
break
count += 1
if(count == n):
for j in range(c+1):
tempX = x[j]
if tempX not in childY:
childY += [tempX]
break
# Generate new elements for Child X
count = k
for i in range(k,n):
tempY = y[i]
if tempY not in childX:
childX+=[tempY]
count += 1
break
count += 1
if(count == n):
for j in range(c+1):
tempY = y[j]
if tempY not in childX:
childX += [tempY]
break
return (childX, childY)
def excludeBad(finalList, elements):
for i in range(len(elements)):
finalList[len(finalList)-(i+1)] = elements[i]
return finalList
def takeSecond(elem):
return elem[1]
if __name__ == '__main__':
main()
| true
|
b448ace50aa4bda118e828de85f77c0b45ec2d52
|
Python
|
DveloperY0115/texture_fields
|
/mesh2tex/layers.py
|
UTF-8
| 6,396
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
class ResnetBlockFC(nn.Module):
def __init__(self, size_in, size_out=None, size_h=None):
super().__init__()
# Attributes
if size_out is None:
size_out = size_in
if size_h is None:
size_h = min(size_in, size_out)
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
self.fc_0 = nn.Linear(size_in, size_h)
self.fc_1 = nn.Linear(size_h, size_out)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Linear(size_in, size_out, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
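        # zero-initializing the last layer makes each block start out close to the
        # identity mapping (the shortcut dominates), which tends to stabilize training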
def forward(self, x):
net = self.fc_0(self.actvn(x))
dx = self.fc_1(self.actvn(net))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
# Resnet Blocks
class ResnetBlockConv1D(nn.Module):
def __init__(self, size_in, size_out=None, size_h=None):
super().__init__()
# Attributes
if size_out is None:
size_out = size_in
if size_h is None:
size_h = min(size_in, size_out)
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
self.fc_0 = nn.Conv1d(size_in, size_h, 1)
self.fc_1 = nn.Conv1d(size_h, size_out, 1)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x):
net = self.fc_0(self.actvn(x))
dx = self.fc_1(self.actvn(net))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
class ResnetBlockPointwise(nn.Module):
def __init__(self, f_in, f_out=None, f_hidden=None,
is_bias=True, actvn=F.relu, factor=1., eq_lr=False):
super().__init__()
# Filter dimensions
if f_out is None:
f_out = f_in
if f_hidden is None:
f_hidden = min(f_in, f_out)
self.f_in = f_in
self.f_hidden = f_hidden
self.f_out = f_out
self.factor = factor
self.eq_lr = eq_lr
# Activation function
self.actvn = actvn
# Submodules
self.conv_0 = nn.Conv1d(f_in, f_hidden, 1)
self.conv_1 = nn.Conv1d(f_hidden, f_out, 1, bias=is_bias)
if self.eq_lr:
self.conv_0 = EqualizedLR(self.conv_0)
self.conv_1 = EqualizedLR(self.conv_1)
if f_in == f_out:
self.shortcut = nn.Sequential()
else:
self.shortcut = nn.Conv1d(f_in, f_out, 1, bias=False)
if self.eq_lr:
self.shortcut = EqualizedLR(self.shortcut)
# Initialization
nn.init.zeros_(self.conv_1.weight)
def forward(self, x):
net = self.conv_0(self.actvn(x))
dx = self.conv_1(self.actvn(net))
x_s = self.shortcut(x)
return x_s + self.factor * dx
class ResnetBlockConv2d(nn.Module):
def __init__(self, f_in, f_out=None, f_hidden=None,
is_bias=True, actvn=F.relu, factor=1.,
eq_lr=False, pixel_norm=False):
super().__init__()
# Filter dimensions
if f_out is None:
f_out = f_in
if f_hidden is None:
f_hidden = min(f_in, f_out)
self.f_in = f_in
self.f_hidden = f_hidden
self.f_out = f_out
self.factor = factor
self.eq_lr = eq_lr
self.use_pixel_norm = pixel_norm
# Activation
self.actvn = actvn
# Submodules
self.conv_0 = nn.Conv2d(self.f_in, self.f_hidden, 3,
stride=1, padding=1)
self.conv_1 = nn.Conv2d(self.f_hidden, self.f_out, 3,
stride=1, padding=1, bias=is_bias)
if self.eq_lr:
self.conv_0 = EqualizedLR(self.conv_0)
self.conv_1 = EqualizedLR(self.conv_1)
if f_in == f_out:
self.shortcut = nn.Sequential()
else:
self.shortcut = nn.Conv2d(f_in, f_out, 1, bias=False)
if self.eq_lr:
self.shortcut = EqualizedLR(self.shortcut)
# Initialization
nn.init.zeros_(self.conv_1.weight)
def forward(self, x):
x_s = self.shortcut(x)
if self.use_pixel_norm:
x = pixel_norm(x)
dx = self.conv_0(self.actvn(x))
if self.use_pixel_norm:
dx = pixel_norm(dx)
dx = self.conv_1(self.actvn(dx))
out = x_s + self.factor * dx
return out
    def _shortcut(self, x):
        # route through the shortcut module configured in __init__
        return self.shortcut(x)
class EqualizedLR(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
self._make_params()
def _make_params(self):
weight = self.module.weight
height = weight.data.shape[0]
width = weight.view(height, -1).data.shape[1]
# Delete parameters in child
del self.module._parameters['weight']
self.module.weight = None
# Add parameters to myself
self.weight = nn.Parameter(weight.data)
# Inherit parameters
self.factor = np.sqrt(2 / width)
# Initialize
nn.init.normal_(self.weight)
# Inherit bias if available
self.bias = self.module.bias
self.module.bias = None
if self.bias is not None:
del self.module._parameters['bias']
nn.init.zeros_(self.bias)
def forward(self, *args, **kwargs):
self.module.weight = self.factor * self.weight
if self.bias is not None:
self.module.bias = 1. * self.bias
out = self.module.forward(*args, **kwargs)
self.module.weight = None
self.module.bias = None
return out
def pixel_norm(x):
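    # normalize each feature vector across the channel dimension (dim=1) to
    # roughly unit length, with a small epsilon for numerical stability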
sigma = x.norm(dim=1, keepdim=True)
out = x / (sigma + 1e-5)
return out
| true
|
dd6b08775ffd8e91771148e3d8732a04cf6b10a5
|
Python
|
kmader/qbi-2019-py
|
/Exercises/06-AdvShape.py
|
UTF-8
| 4,955
| 3.234375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import skimage.transform
import scipy
from scipy import ndimage
import matplotlib.pyplot as plt
from skimage.morphology import medial_axis, watershed
create_dist_map = lambda img, mask=None: medial_axis(img, mask, return_distance=True)[1]
import os
from skimage.measure import block_reduce
plt_settings = {"interpolation": "none"}
# # Distance Maps
# Here we calculate distance maps with the ```ndimage.distance_transform_``` family of functions. Initially we focus on test images since it is easier to see what is happening with these images.
# In[2]:
def generate_dot_image(size=100, cutoff=0.15):
"""
Create a simple synthetic image with a repeating pattern
Keyword arguments:
size -- the size of the image on one size, final size is size x size (default 100)
imag -- the cutoff between 0 and 1, higher means less connected objects (default 0.15)
"""
xx, yy = np.meshgrid(range(size), range(size))
return (
np.sin(6 * np.pi * xx / (100) - 1) + 1.25 * np.cos(5 * np.pi * yy / (100) - 2)
> cutoff
)
# In[3]:
img_bw = generate_dot_image(28, 0.50)
plt.imshow(img_bw, cmap="gray", **plt_settings)
# In[4]:
img_dist = ndimage.distance_transform_edt(img_bw)
plt.imshow(img_dist, **plt_settings)
# ## Comparing
# There are a number of different methods in the ```distance_transform``` family inside the ```ndimage``` package of ```scipy```; compare the results of the different approaches for this and other images.
# - What are the main differences?
# - Quantitatively (histogram) show what situations each one might be best suited for?
# In[5]:
# calculate new distance transforms
img_dist = ndimage.distance_transform_edt(img_bw)
img_dist_cityblock = ndimage.distance_transform_cdt(img_bw, metric="taxicab")
img_dist_chess = ndimage.distance_transform_cdt(img_bw, metric="chessboard")
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(30, 10))
ax1.imshow(img_bw, cmap="gray", **plt_settings)
ax1.set_title("Mask Image")
dmap_im = ax2.imshow(img_dist, vmax=img_dist.max(), **plt_settings)
ax2.set_title("Euclidean")
ax3.imshow(img_dist_cityblock, vmax=img_dist.max(), **plt_settings)
ax3.set_title("Cityblock")
ax4.imshow(img_dist_chess, vmax=img_dist.max(), **plt_settings)
ax4.set_title("Chess")
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
cbar = fig.colorbar(dmap_im, cax=cbar_ax)
cbar_ax.set_title("Distance\n(px)")
# ## More Complicated Objects
# We now make the image bigger (changing the ```size``` parameter) and connect them together (the ```cutoff``` parameter)
# In[6]:
# use a bigger base image
img_bw = generate_dot_image(100, 0.15)
img_dist = ndimage.distance_transform_edt(img_bw)
img_dist_cityblock = ndimage.distance_transform_cdt(img_bw, metric="taxicab")
img_dist_chess = ndimage.distance_transform_cdt(img_bw, metric="chessboard")
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(30, 10))
ax1.imshow(img_bw, cmap="gray", **plt_settings)
ax1.set_title("Mask Image")
dmap_im = ax2.imshow(img_dist, vmax=img_dist.max(), **plt_settings)
ax2.set_title("Euclidean")
ax3.imshow(img_dist_cityblock, vmax=img_dist.max(), **plt_settings)
ax3.set_title("Cityblock")
ax4.imshow(img_dist_chess, vmax=img_dist.max(), **plt_settings)
ax4.set_title("Chess")
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
cbar = fig.colorbar(dmap_im, cax=cbar_ax)
cbar_ax.set_title("Distance\n(px)")
# # Watershed
# We can use the watershed transform to segment closely connected objects. We see in the first image that the standard connected component labeling ```ndimage.label``` finds only 3 components where we can see 9.
# In[7]:
cc_img = ndimage.label(img_bw)[0]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(30, 10))
ax1.imshow(img_bw, cmap="gray", **plt_settings)
ax1.set_title("Mask Image")
dmap_im = ax2.imshow(cc_img, **plt_settings)
ax2.set_title("Connected Component Analysis")
# In[8]:
from skimage.feature import peak_local_max
def simple_watershed(img_dist, img_bw):
"""
Calculate the watershed transform on an image and its distance map
by finding the troughs and expanding from these points
"""
local_maxi = peak_local_max(
img_dist, labels=img_bw, footprint=np.ones((3, 3)), indices=False
)
markers = ndimage.label(local_maxi)[0]
return watershed(-img_dist, markers, mask=img_bw)
# ## Applying Watershed
# We can apply watershed to the following image.
# - Why do the bottom row of objects not show up?
# - How can the results be improved
# In[9]:
ws_img = simple_watershed(img_dist, img_bw)
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(30, 10))
ax1.imshow(img_bw, cmap="gray", **plt_settings)
ax1.set_title("Mask Image")
ax2.imshow(cc_img, **plt_settings)
ax2.set_title("Connected Component Analysis")
ax3.imshow(ws_img, **plt_settings)
ax3.set_title("Watershed Analysis")
# In[ ]:
| true
|
b7651d001bb3086119389b656717604e1145ef66
|
Python
|
daydreamerli/python_study
|
/Dp_coinchange_error.py
|
UTF-8
| 1,984
| 3.53125
| 4
|
[] |
no_license
|
import timeit
# Function to create the matrix we'll use for the optimization
def _change_matrix(coin_set, change_amount):
matrix = [[0 for m in range(change_amount + 1)] for m in range(len(coin_set) + 1)]
for i in range(change_amount + 1):
matrix[0][i] = i
return matrix
# Function we'll use to optimize the default above matrix
def change_making(coins, change):
matrix = _change_matrix(coins, change)
for c in range(1, len(coins) + 1):
for r in range(1, change + 1):
if coins[c-1] == r:
matrix[c][r] = 1
elif coins[c-1] > r:
matrix[c][r] = matrix[c-1][r]
else:
matrix[c][r] = min(matrix[c - 1][r], 1 + matrix[c][r - coins[c - 1]])
return matrix[-1][-1]
# print(change_making([1,5,10,25], 56)) # List contains arbitrary coins
# The second value contains the sum you're trying to get to
print(change_making([2,5], 3)) # problemk
# return num_of_change
# print( min_coins(31))
#
# Function to create the matrix we'll use for the optimization
def _change_matrix(coin_set, change_amount):
matrix = [[0 for m in range(change_amount + 1)] for m in range(len(coin_set) + 1)]
for i in range(change_amount + 1):
matrix[0][i] = i
return matrix
# Function we'll use to optimize the default above matrix
def change_making(coins, change):
matrix = _change_matrix(coins, change)
for c in range(1, len(coins) + 1):
for r in range(1, change + 1):
if coins[c-1] == r:
matrix[c][r] = 1
elif coins[c-1] > r:
matrix[c][r] = matrix[c-1][r]
else:
matrix[c][r] = min(matrix[c - 1][r], 1 + matrix[c][r - coins[c - 1]])
return matrix[-1][-1]
# print(change_making([1,5,10,25], 56)) # List contains arbitrary coins
# The second value contains the sum you're trying to get to
print(change_making([2,5], 3)) # problem this returns :2
| true
|
5cd4fa29bef4f15a41945a739f1efe0134264549
|
Python
|
kq-li/stuy
|
/pclassic/2016s/PClassic2016Stubs/stubs/JediAcademy.py
|
UTF-8
| 409
| 2.875
| 3
|
[] |
no_license
|
# Change the body of this method
def best_grouping(scores, G):
return 0
if __name__ == "__main__":
with open("JediAcademyIN.txt", "r") as f:
while True:
s = f.readline()
if s == "":
break
data = s.split("--")
scores = [int(x) for x in data[0].split(",")]
G = int(data[1])
print(best_grouping(scores, G))
| true
|
57559136d818776996ca53830a22f476507126d6
|
Python
|
maratserik/NN-clothes-classification
|
/learning.py
|
UTF-8
| 1,512
| 3.140625
| 3
|
[] |
no_license
|
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = np.array(['T-shirt/top', 'Trouser', 'Pullover', 'Dress',
'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'])
# print(len(test_labels)): 10000 images in the test set
# Scaling data
train_images = train_images / 255
test_images = test_images / 255
# Building the model
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
    # 128 hidden units is an arbitrary but common choice of hidden-layer width
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10)
])
# Compiling the model
model.compile(optimizer='adam',  # Adam, an adaptive variant of stochastic gradient descent
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
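# from_logits=True because the final Dense(10) layer emits raw scores; the
# separate Softmax layer below turns them into probabilities at prediction time.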
# Train the model
model.fit(train_images, train_labels, epochs=10)
# Evaluating accuracy using test set
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('Test accuracy:', test_acc)
# Making predictions
probability_model = keras.Sequential([model, tf.keras.layers.Softmax()])
# Here, the model has predicted the label for each image in the testing set.
predictions = probability_model.predict(test_images)
print(f"Prediction: {class_names[np.argmax(predictions[0])]}, Actual: {class_names[test_labels[0]]}")
| true
|
df5f8fbe064ca1e2aa4ce17a18e0e79361c39a12
|
Python
|
ultrabug/py3status
|
/py3status/output.py
|
UTF-8
| 6,764
| 3.265625
| 3
|
[] |
permissive
|
import sys
from json import dumps
class OutputFormat:
"""
A base class for formatting the output of py3status for various
different consumers
"""
@classmethod
def instance_for(cls, output_format):
"""
A factory for OutputFormat objects
"""
supported_output_formats = {
"dzen2": Dzen2OutputFormat,
"i3bar": I3barOutputFormat,
"lemonbar": LemonbarOutputFormat,
"none": NoneOutputFormat,
"term": TermOutputFormat,
"tmux": TmuxOutputFormat,
"xmobar": XmobarOutputFormat,
}
if output_format in supported_output_formats:
return supported_output_formats[output_format]()
raise ValueError(
f"Invalid `output_format` attribute, should be one of `{'`, `'.join(supported_output_formats.keys())}`. Got `{output_format}`."
)
def __init__(self):
"""
Constructor
"""
self.separator = None
def format_separator(self, separator, color):
"""
Produce a formatted and colorized separator for the output format,
if the output_format requires it, and None otherwise.
"""
pass
def format(self, outputs):
"""
Produce a line of output from a list of module output dictionaries
"""
raise NotImplementedError()
def write_header(self, header):
"""
Write the header to output, if supported by the output_format
"""
raise NotImplementedError()
def write_line(self, output):
"""
Write a line of py3status containing the given module output
"""
raise NotImplementedError()
class I3barOutputFormat(OutputFormat):
"""
Format the output for consumption by i3bar
"""
def format(self, outputs):
"""
Produce a line of output from a list of module outputs for
consumption by i3bar. separator is ignored.
"""
return ",".join(dumps(x) for x in outputs)
def write_header(self, header):
"""
Write the i3bar header to output
"""
write = sys.__stdout__.write
flush = sys.__stdout__.flush
write(dumps(header))
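        # i3bar protocol: after the JSON header comes an endless JSON array of
        # status lines; "\n[[]\n" opens that array with an empty first entry.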
write("\n[[]\n")
flush()
def write_line(self, output):
"""
Write a line of py3status output for consumption by i3bar
"""
write = sys.__stdout__.write
flush = sys.__stdout__.flush
out = ",".join(x for x in output if x)
write(f",[{out}]\n")
flush()
class SeparatedOutputFormat(OutputFormat):
"""
Base class for formatting output as an enriched string containing
separators
"""
def begin_color(self, color):
"""
Produce a format string for a colorized output for the output format
"""
raise NotImplementedError()
def end_color(self):
"""
Produce a format string for ending a colorized output for the output format
"""
raise NotImplementedError()
def end_color_quick(self):
"""
Produce a format string for ending a colorized output, but only
if it is syntactically required. (for example because a new color
declaration immediately follows)
"""
return self.end_color()
def get_default_separator(self):
"""
Produce the default separator for the output format
"""
return " | "
def format_separator(self, separator, color):
"""
Format the given separator with the given color
"""
if separator is None:
separator = self.get_default_separator()
if color is not None:
separator = self.begin_color(color) + separator + self.end_color()
self.separator = separator
def format_color(self, block):
"""
Format the given block of module output
"""
full_text = block["full_text"]
if "color" in block:
full_text = self.begin_color(block["color"]) + full_text + self.end_color_quick()
return full_text
def format(self, outputs):
"""
Produce a line of output from a list of module outputs by
concatenating individual blocks of formatted output
"""
return "".join(self.format_color(x) for x in outputs)
def write_header(self, header):
"""
Not supported in separated output formats
"""
pass
def write_line(self, output):
"""
Write a line of py3status output separated by the formatted separator
"""
write = sys.__stdout__.write
flush = sys.__stdout__.flush
out = self.separator.join(x for x in output if x)
write(f"{out}\n")
flush()
class Dzen2OutputFormat(SeparatedOutputFormat):
"""
Format the output for consumption by dzen2
"""
def begin_color(self, color):
return f"^fg({color})"
def end_color(self):
return "^fg()"
def end_color_quick(self):
return ""
def get_default_separator(self):
"""
Produce the default separator for the output format
"""
return "^p(5;-2)^ro(2)^p()^p(5)"
class XmobarOutputFormat(SeparatedOutputFormat):
"""
Format the output for consumption by xmobar
"""
def begin_color(self, color):
return f"<fc={color}>"
def end_color(self):
return "</fc>"
class LemonbarOutputFormat(SeparatedOutputFormat):
"""
Format the output for consumption by lemonbar
"""
def begin_color(self, color):
return f"%{{F{color}}}"
def end_color(self):
return "%{F-}"
def end_color_quick(self):
return ""
class TmuxOutputFormat(SeparatedOutputFormat):
"""
Format the output for consumption by tmux
"""
def begin_color(self, color):
return f"#[fg={color.lower()}]"
def end_color(self):
return "#[default]"
def end_color_quick(self):
return ""
class TermOutputFormat(SeparatedOutputFormat):
"""
Format the output using terminal escapes
"""
def begin_color(self, color):
        # map "#rrggbb" to one of the 8 basic ANSI colors by taking the high bit
        # of each channel and packing them as blue<<2 | green<<1 | red
        col = int(color[1:], 16)
        red = (col & 0xFF0000) // 0x800000
        green = (col & 0x00FF00) // 0x008000
        blue = (col & 0x0000FF) // 0x000080
        col = (blue << 2) | (green << 1) | red
return f"\033[3{col};1m"
def end_color(self):
return "\033[0m"
def end_color_quick(self):
return ""
class NoneOutputFormat(SeparatedOutputFormat):
"""
Format the output without colors
"""
def begin_color(self, color):
return ""
def end_color(self):
return ""
| true
|
98c0fde4c69912b7882e5cacb02a8c2cd95d4795
|
Python
|
fyeee/reproducing-machine-learning-algorithms
|
/DecisionTree/DecisionTree.py
|
UTF-8
| 3,830
| 3.1875
| 3
|
[] |
no_license
|
import math
# for testing the algo
from sklearn import datasets
from sklearn.model_selection import train_test_split
class DecisionTreeClassifier:
def __init__(self, max_depth=None):
self.max_depth = max_depth
self.root = None
    def fit(self, X, y, node=None, depth=0):
        # None instead of a mutable default dict, so repeated fits do not share state
        if node is None:
            node = {}
        if len(y) == 0:
            return None
        elif all(x == y[0] for x in y):
            return {"val": y[0]}
        elif self.max_depth is not None and depth > self.max_depth:
            return None
else:
node["entropy"] = entropy_multi(y)
col, cutoff = best_split(X, y)
node["feature"] = col
node["cutoff"] = cutoff
X_left, X_right = split_data_on_val(X, X, col, cutoff)
y_left, y_right = split_data_on_val(y, X, col, cutoff)
node["left"] = self.fit(X_left, y_left, {}, depth + 1)
node["right"] = self.fit(X_right, y_right, {}, depth + 1)
self.root = node
return node
def predict(self, X):
pred = []
for row in X:
node = self.root
while node:
if len(node.keys()) == 1:
pred.append(node["val"])
break
if row[node["feature"]] < node["cutoff"]:
node = node["left"]
else:
node = node["right"]
return pred
def split_data_on_val(data, ref_data, col, val):
result_left = []
result_right = []
for i, row in enumerate(ref_data):
if row[col] < val:
result_left.append(data[i])
elif row[col] >= val:
result_right.append(data[i])
return result_left, result_right
def entropy(count, n):
return -1 * math.log(count / n, 2) * (count / n)
def entropy_multi(array):
"""
H(Y)
"""
s = set(array)
n = len(array)
total_entropy = 0
for item in s:
count = 0
for element in array:
if element == item:
count += 1
total_entropy += entropy(count, n)
return total_entropy
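
# Example: entropy_multi([1, 1, 2, 2]) == 1.0: two equally likely classes
# give exactly one bit of entropy.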
def best_split(X, y):
max_info_gain = float("-inf")
best_split_feature = None
best_cutoff = None
for i in range(len(X[0])):
info_gain, cutoff = information_gain(X, y, i)
if info_gain > max_info_gain:
max_info_gain = info_gain
best_split_feature = i
best_cutoff = cutoff
return best_split_feature, best_cutoff
def information_gain(X, y, element_index):
"""
IG(Y|element)
"""
s = set([row[element_index] for row in X])
entropy_y = entropy_multi(y)
entropy_condition = float("inf")
best_split = None
for item in s:
array_left = []
array_right = []
for i in range(len(X)):
if X[i][element_index] < item:
array_left.append(y[i])
else:
array_right.append(y[i])
curr_entropy = len(array_left) / len(X) * entropy_multi(array_left) + len(array_right) / len(X) * \
entropy_multi(array_right)
if curr_entropy < entropy_condition:
entropy_condition = curr_entropy
best_split = item
return entropy_y - entropy_condition, best_split
def accuracy(prediction, actual):
count = 0
for i in range(len(prediction)):
if prediction[i] == actual[i]:
count += 1
return count / len(prediction)
if __name__ == "__main__":
data = datasets.load_iris()
X = data["data"].tolist()
y = data["target"].tolist()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
clf = DecisionTreeClassifier(max_depth=6)
print(clf.fit(X_train, y_train))
pred = clf.predict(X_test)
print(accuracy(pred, y_test))
| true
|
3036fd76a93061a8cbcadf7a6afa2f5d450725ea
|
Python
|
anshsaikia/GSSDeliverables-YesProject
|
/VE-Tests/tests_framework/ui_building_blocks/KSTB/fullcontent.py
|
UTF-8
| 22,773
| 2.546875
| 3
|
[] |
no_license
|
from tests_framework.ui_building_blocks.screen import Screen
import logging
import tests_framework.ui_building_blocks.KSTB.constants as CONSTANTS
from math import ceil
from time import sleep
class Fullcontent(Screen):
def __init__(self, test):
Screen.__init__(self, test, "full_content")
def navigate(self, filter_path = ('GENRES', 'ACTION')):
logging.info("Navigate to fullcontent")
elements = self.test.milestones.getElements()
screen = self.test.milestones.get_current_screen(elements)
if screen == "full_content":
return True
if screen == "action_menu":
'''
Dismiss the action menu by the Back key
:return:
'''
self.test.appium.key_event("KEYCODE_BACK")
status = self.test.wait_for_screen(CONSTANTS.WAIT_TIMEOUT, "full_content")
self.verify_active()
return status
if screen == "filter":
"""
In filter screen, go the the full content of a category (or leaf item)
:param filter_path: path to go through in the tree to reach the category. This is the ID of the items to go through. In dummy, id == title
:return: True when in category fullcontent
:return: Classification id from the ctap for the category (or leaf item)
"""
status, classification_id = self.screens.filter.focus_filter_category(filter_path=filter_path)
logging.info("Going into Fullcontent")
self.test.appium.key_event("KEYCODE_DPAD_LEFT")
self.test.wait(CONSTANTS.GENERIC_WAIT)
self.test.appium.key_event("KEYCODE_DPAD_CENTER")
self.test.wait(CONSTANTS.GENERIC_WAIT)
status = self.test.wait_for_screen(CONSTANTS.WAIT_TIMEOUT, "full_content")
if not status:
logging.error("wait for full_content timed out")
return False, classification_id
self.verify_active()
return True, classification_id
self.verify_active()
assert True, "Navigation not implementted in this screen : " + screen
def focus_sort_in_fullcontent(self):
"""
focus sort options in fullcontent
:return: True when sort is focused
"""
key_event = "KEYCODE_DPAD_UP"
isSortFocused = self.test.milestones.get_value_by_key(self.test.milestones.getElements(), "isSortFocused")
stop = False
count_actions = 0
while (not isSortFocused) and (not stop):
self.test.appium.key_event(key_event)
sleep(0.5)
count_actions = count_actions + 1
isSortFocused = self.test.milestones.get_value_by_key(self.test.milestones.getElements(), "isSortFocused")
if count_actions == CONSTANTS.MAX_ACTIONS:
if key_event != "KEYCODE_DPAD_DOWN" :
key_event = "KEYCODE_DPAD_DOWN"
count_actions = 0
else:
stop = True
if stop:
return False
return True
def fullcontent_check_sorting_categories_ux(self):
"""
Check sorting items order
:return: True/False
"""
if not self.focus_sorting_item_in_fullcontent():
return False
milestone = self.test.milestones.getElements()
sortingList = self.fullcontent_get_sorting_list(milestone)
for sorting in sortingList:
if not sorting == self.test.milestones.get_value_by_key(milestone, "selected_sort"):
logging.error("" + self.test.milestones.get_value_by_key(milestone, "selected_sort") + " is focused instead of " + sorting)
return False
else :
                logging.info(self.test.milestones.get_value_by_key(milestone, "selected_sort") + " is focused")
self.test.appium.key_event("KEYCODE_DPAD_RIGHT")
return True
def fullcontent_select_alphabetical_order(self):
return self.fullcontent_select_item("ALPHABETICAL")
def fullcontent_select_item(self, item):
"""
Select an item from the sorting menu
:param item: item to be selected
:return: True/False
"""
status = self.fullcontent_focus_item(item)
if not status:
return False
# logging.info(self.test.milestones.getElements())
self.test.appium.key_event("KEYCODE_DPAD_CENTER")
sleep(0.5)
# logging.info(self.test.milestones.getElements())
return True
def focus_sorting_item_in_fullcontent(self):
"""
Focus sorting menu in fullcontent
:return: True/False
"""
milestone = self.test.milestones.getElements()
if not isinstance(self.fullcontent_get_asset_list(milestone), list):
return False
if self.test.milestones.get_value_by_key(milestone, "selected_type") == "2_by_3":
max_number_of_lines = ceil(float(len(self.fullcontent_get_asset_list(milestone)))/6)
else :
max_number_of_lines = ceil(float(len(self.fullcontent_get_asset_list(milestone)))/3)
index = 0
while self.test.milestones.get_value_by_key(milestone, "selected_type") != "category" and index < max_number_of_lines:
self.test.appium.key_event("KEYCODE_DPAD_UP")
index += 1
sleep(0.5)
milestone = self.test.milestones.getElements()
if self.test.milestones.get_value_by_key(milestone, "selected_type") != "category":
logging.error("Could not focus sorting list: %s " % self.test.milestones.get_value_by_key(milestone, "selected_type") )
return False
logging.info("Focus is now on sorting items")
return True
def fullcontent_focus_item(self, item):
"""
Focus an item from the sorting menu
:param item: item to be focused
:return: True/False
"""
# Set the focus in the Category List part
milestone = self.test.milestones.getElements()
if self.test.milestones.get_value_by_key(milestone, "selected_type") != "category":
logging.info("Move to Sorting Category")
if not self.focus_sorting_item_in_fullcontent():
return False
# Select the wanted Category
milestone = self.test.milestones.getElements()
current_sort = self.test.milestones.get_value_by_key(milestone, "current_category")
sorting_list = self.fullcontent_get_sorting_list(milestone)
current_sort_location = sorting_list.index(current_sort)
go_to_sort_location = sorting_list.index(item)
logging.info("Going to sort: " + item)
if current_sort_location > go_to_sort_location:
for i in range(go_to_sort_location, current_sort_location):
self.test.appium.key_event("KEYCODE_DPAD_LEFT")
sleep(0.5)
else:
for i in range(current_sort_location, go_to_sort_location):
self.test.appium.key_event("KEYCODE_DPAD_RIGHT")
sleep(0.5)
milestone = self.test.milestones.getElements()
if self.test.milestones.get_value_by_key(milestone, "selected_item") == item:
return True
logging.error("Could not focus item " + item + " in fullcontent")
return False
def fullcontent_check_sort_source(self, assets_arr, source_sort_order):
"""
Checking the sort order according to assets type
:param assets_arr: the full list of results assets
:param source_sort_order: the expected order of assets. options: 'linear', 'pvr', 'vod'
:returns: True if order as expected
"""
logging.info("Expected sort order: %s" % source_sort_order)
for asset in assets_arr:
logging.info("--> asset: %s " % asset)
linear_assets = [asset for asset in assets_arr if asset['source'] == 'broadcastTv']
pvr_assets = [asset for asset in assets_arr if asset['source'] == 'recording']
vod_assets = [asset for asset in assets_arr if
asset['source'] == 'vodUnEntitled' or asset['source'] == 'vodEntitled']
linear_num = len(linear_assets)
pvr_num = len(pvr_assets)
vod_num = len(vod_assets)
logging.info("Num of linear assets:" + str(linear_num))
logging.info("Num of pvr assets:" + str(pvr_num))
logging.info("Num of vod assets:" + str(vod_num))
sorting_order_dict = {}
internal_dict = {}
i = 0
for sort in source_sort_order:
if sort == "linear":
internal_dict["name"] = "LINEAR"
internal_dict["num"] = linear_num
internal_dict["assets"] = linear_assets
if sort == "pvr":
internal_dict["name"] = "PVR"
internal_dict["num"] = pvr_num
internal_dict["assets"] = pvr_assets
if sort == "vod":
internal_dict["name"] = "VOD"
internal_dict["num"] = vod_num
internal_dict["assets"] = vod_assets
sorting_order_dict[i] = internal_dict
i = i + 1
internal_dict = {}
sort_num1 = sorting_order_dict.get(0).get('num')
sort_num2 = sorting_order_dict.get(1).get('num')
sort_num3 = sorting_order_dict.get(2).get('num')
if assets_arr[0:sort_num1] != sorting_order_dict.get(0).get('assets'):
logging.info(
"Sort by Source failed: " + sorting_order_dict.get(0).get('name') + " assets are not in the expected order!")
return False
if assets_arr[sort_num1:sort_num1 + sort_num2] != sorting_order_dict.get(1).get('assets'):
logging.info(
"Sort by Source failed: " + sorting_order_dict.get(1).get('name') + " assets are not in the expected order!")
return False
if assets_arr[sort_num1 + sort_num2:sort_num1 + sort_num2 + sort_num3] != sorting_order_dict.get(2).get(
'assets'):
logging.info(
"Sort by Source failed: " + sorting_order_dict.get(2).get('name') + " assets are not in the expected order!")
return False
return True
def fullcontent_get_asset_list(self, milestone=None):
"""
Get the list of assets in fullcontent
:param milestone: milestone used. If None, a request to the app will be made to get one
:return: the asset list
"""
if milestone is None:
return self.test.milestones.get_value_by_key(self.test.milestones.getElements(), "asset_list")
return self.test.milestones.get_value_by_key(milestone, "asset_list")
def fullcontent_get_sorting_list(self, milestone=None):
"""
Get the list of sorting options in fullcontent
:param milestone: milestone used. If None, a request to the app will be made to get one
:return: the sorting category list
"""
if milestone is None:
return self.test.milestones.get_value_by_key(self.test.milestones.getElements(), "sorting_category_list")
return self.test.milestones.get_value_by_key(milestone, "sorting_category_list")
def fullcontent_check_assets_are_focused(self, milestone=None):
"""
Check that the asset part in fullcontent is focused
:param milestone: milestone used. If None, a request to the app will be made to get one
:return: True/False
"""
if milestone is None:
milestone = self.test.milestones.getElements()
selected_type = self.test.milestones.get_value_by_key(milestone, "selected_type")
# logging.info("selected_type: %s" %selected_type)
if selected_type == "category":
logging.error("Focus is not (or not only) on assets")
return False
else:
logging.info("Focus is on Assets part")
return True
def fullcontent_check_focused_asset(self, asset, milestone=None):
"""
Check that [asset] in fullcontent is focused
:param asset: asset to be focused
:param milestone: milestone used. If None, a request to the app will be made to get one
:return: True/False
"""
if milestone is None:
milestone = self.test.milestones.getElements()
if not self.fullcontent_check_assets_are_focused(milestone):
return False
focused_asset = self.test.milestones.get_value_by_key(milestone, "selected_item")
# logging.info("focused_asset: %s asset['title']: %s" % (focused_asset, asset['title']))
if focused_asset != asset['title']:
logging.error("focus is on asset " + focused_asset + " instead of " + asset['title'])
return False
return True
def fullcontent_check_asset_n_is_focused(self, n, milestone=None):
"""
Check that the focused asset is the nth one of the list
:param n: position of the focused asset
:param milestone: milestone used. If None, a request to the app will be made to get one
:return: True/False
"""
if milestone is None:
milestone = self.test.milestones.getElements()
expected_asset_list = self.test.milestones.get_value_by_key(milestone, "asset_list")
if expected_asset_list is False:
logging.error("No asset list")
return False
self.test.log_assert(len(expected_asset_list) > n, "There are not enough assets in the asset list: " + str(expected_asset_list))
expected_asset = expected_asset_list[n]
if expected_asset is None:
logging.error("There is no asset number %s defined in the model" % n)
return False
return self.fullcontent_check_focused_asset(expected_asset, milestone)
def fullcontent_check_assets_displayed_respect_model(self, fullcontent_type):
"""
Check that all assets displayed are from the same type
and that the number of asset per line is respected
:param fullcontent_type: Type of the fullcontent : '2_by_3', '16_by_9', 'mixed'
:return: True/False
"""
if fullcontent_type not in ('2_by_3', '16_by_9', 'mixed') :
logging.error("Invalid argument. Fullcontent type must be either 2_by_3, 16_by_9 or mixed")
return False
self.test.wait(2)
milestone = self.test.milestones.getElements()
asset_list = self.fullcontent_get_asset_list(milestone)
selected_asset = self.test.milestones.get_value_by_key(milestone,"selected_item")
selected_type = self.test.milestones.get_value_by_key(milestone,"selected_type")
if not self.fullcontent_check_asset_n_is_focused(0, milestone):
logging.error("Focus is not on the first asset")
return False
# Navigate through all the asset in order to check their type
assets_by_line = 0 # nb of asset or sum of the weight of each asset
tab_assets_byline=[]
logging.info("going to verify %d assets"%(len(asset_list)))
wait_time = 4 # time to wait before going to the next asset (longer for the 1st line)
MAX_LINES = 5 # max lines to verify; must be limited, or the test can take hours with many assets
for i in range(0, len(asset_list)):
# verify the current asset is ok in the list
current_asset = None
if asset_list[i]['title'] == selected_asset: # must be in the same order (list / screen )
current_asset = asset_list[i]
if fullcontent_type in ('2_by_3', '16_by_9'):
assets_by_line += 1 # num of assets by line
else: # in mixed mode it's the weight of assets
if selected_type == 'mixed_16_by_9' :
assets_by_line += 16 # weight of this type of asset
else:
assets_by_line += 6 # weight of mixed 2_3 type
#logging.info("%d {%d} '%s': %s"%(i,assets_by_line,selected_asset,selected_type))
else: # may be at the end of the line
logging.info("new line: %d '%s' != '%s'"%(i,asset_list[i]['title'] , selected_asset))
tab_assets_byline.append(assets_by_line) # collect all the lines sizes
if len(tab_assets_byline) == MAX_LINES: # not too much tests, limit is x lines
logging.info("max nb lines to test reached: end")
break
elif len(tab_assets_byline) == 1:
wait_time -= 2 # after the first line, it can be faster (why is the 1st line so slow?)
self.test.appium.key_event("KEYCODE_DPAD_DOWN") # move to the beginning of the line below
self.test.wait(2)
milestone = self.test.milestones.getElements()
selected_asset = self.test.milestones.get_value_by_key(milestone,"selected_item")
if asset_list[i]['title'] == selected_asset: # must be in the same order
current_asset = asset_list[i]
assets_by_line = 1 # first of the line
logging.info("%d {%d} '%s': %s"%(i,assets_by_line,selected_asset,current_asset['type']))
# Check that the asset type is the expected one
if current_asset != None:
if current_asset['type'] != fullcontent_type: # the type in the list
logging.error("Asset type in list is not correct: %s" %(current_asset['type']))
return False
if selected_type != fullcontent_type: # the type of the asset on the screen
logging.error("Asset type focused is not correct: %s" %(selected_type))
return False
else:
logging.error("Asset '%s' is not in the asset list" % selected_asset)
return False
self.test.appium.key_event("KEYCODE_DPAD_RIGHT") # next asset is on the right
self.test.wait(wait_time)
milestone = self.test.milestones.getElements()
selected_asset = self.test.milestones.get_value_by_key(milestone,"selected_item")
selected_type = self.test.milestones.get_value_by_key(milestone,"selected_type")
# verify that the number/weight of assets per line is correct
if fullcontent_type in ('2_by_3', '16_by_9'):
ref = -1
for nb in tab_assets_byline:
logging.info("verifying nb assets/line: %d"%(nb))
if ref == -1:
ref = nb # the nb of assets on the first line is the reference
elif nb != ref:
logging.error("wrong number of assets on a line: %d / %d" %(nb,ref))
return False
else:
for nb in tab_assets_byline:
logging.info("verifying weight of assets: %d"%(nb))
if nb > CONSTANTS.FULLCONTENT_MIXED_ASSETS_LINE_WIDTH: # exceeds the max weight per line
logging.error("the assets overload the line %d / %d" %(nb,CONSTANTS.FULLCONTENT_MIXED_ASSETS_LINE_WIDTH))
return False
return True
def is_in_full_content(self):
"""
Test whether we are in fullcontent
:return: True/False
"""
sleep(1)
milestone = self.test.milestones.getElements()
return self.test.milestones.get_value_by_key(milestone, "screen") == "full_content"
def fullcontent_assets_are_in_alphabetical_order(self):
"""
Test whether the assets are in alphabetical order
:return: True/False
"""
asset_list = self.fullcontent_get_asset_list()
asset_list_title = []
for asset in asset_list:
asset_list_title.append(asset['title'])
asset_list_title_sort_alpha = sorted(asset_list_title, key=lambda s: s.lower())
if asset_list_title_sort_alpha != asset_list_title:
logging.error("Assets in datamodel are not in alphabetical order:\n python sort: %s"
% asset_list_title_sort_alpha + "\n asset_list: %s " % asset_list_title)
return False
return True
def fullcontent_check_poster_format_by_source(self, assets_arr):
"""
Checking the poster format associated to each source
:param assets_arr: the full list of results assets
:returns: True if format as expected
"""
logging.info("fullcontent_check_poster_format_by_source")
for asset in assets_arr:
logging.info("--> asset: %s " % asset)
linear_assets = [asset for asset in assets_arr if asset['source'] == 'broadcastTv']
pvr_assets = [asset for asset in assets_arr if asset['source'] == 'recording']
vod_assets = [asset for asset in assets_arr if
asset['source'] == 'vodUnEntitled' or asset['source'] == 'vodEntitled']
linear_num = len(linear_assets)
pvr_num = len(pvr_assets)
vod_num = len(vod_assets)
logging.info("Num of linear assets:" + str(linear_num))
logging.info("Num of pvr assets:" + str(pvr_num))
logging.info("Num of vod assets:" + str(vod_num))
for asset in linear_assets:
if asset['type'] == "16_by_9" or asset['type'] == "mixed_16_by_9":
logging.info("Success on Linear poster format for asset: %s" % asset)
else:
logging.error("Failure on Linear poster format displayed: %s" % asset['type'])
return False
for asset in pvr_assets:
if asset['type'] == "16_by_9" or asset['type'] == "mixed_16_by_9":
logging.info("Success on PVR poster format for asset: %s" % asset)
else:
logging.error("Failure on PVR poster format displayed: %s" % asset['type'])
return False
for asset in vod_assets:
if asset['type'] == "2_by_3" or asset['type'] == "mixed_2_by_3":
logging.info("Success on VOD poster format for asset: %s" % asset)
else:
logging.error("Failure on VOD poster format displayed: %s" % asset['type'])
return False
return True
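# A minimal usage sketch (hypothetical, not part of the original suite): how
# these fullcontent helpers might be chained inside a test case. The sorting
# item name "A-Z" is illustrative only.
#
#     def test_fullcontent_alphabetical_sort(self):
#         assert self.is_in_full_content()
#         assert self.fullcontent_focus_item("A-Z")
#         assert self.fullcontent_assets_are_in_alphabetical_order()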
| true | aeb8cf183547020684e968d00a677f5cda3d1c8a | Python | PeterZhouSZ/feasible-form-parameter-design | /relational_lsplines/test_idea.py | UTF-8 | 4,057 | 2.796875 | 3 | [] | no_license |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 3 13:41:20 2017
@author: luke
Testing examples from
Interval Constraint Logic Programming
by Frederic Benhamou
"""
#import numpy as np
from extended_interval_arithmetic import ia
#from hull_inference_ob_graph import Hull as hullclp
#import uKanren as lp #original
#import eKanren as lp#nicer! NOT backwards compatible
import sqKanren as lp
if __name__ == "__main__":
x = lp.Variable('x')
y = lp.Variable('y')
z = lp.Variable('z')
a = lp.Variable('a')
b = lp.Variable('b')
c = lp.Variable('c')
d = lp.Variable('d')
s = lp.State(values={x:None,y:None,z:None,
a:None,b:None,c:None,d:None})
st = lp.States(s)
#st1 = (st == (y,ia(1.,3.)))
#st1 = (st == (ia(-100.,100.),x))
#st1 = (st1 * (x,ia(2.,2.),y))
#st1 = (st1 ** (x,2.,y))
#st1 = (st1 ** (y,.5,x))
#st1 = (st1 ** (y,-1.,x))
# finding exponents requires positive y
#st1 = (st == (y,ia(1.,3.)))
#st1 = (st1 ** (ia(1.73205080757,1.73205080757) , x, y ) )
st1 = (st == (y,ia(-1.,3.)))
#st1 = (st1 ** (y,-2.,x))
st1 = (st1 ** (y,-1.,x))
print 'Query: y in {}, x = y**-1'.format(st1(y))
print 'answer:'
print 'y = {}'.format(st1(y))
print 'x = {}'.format(st1(x))
print 'NOTE: this does not support either '
print 'integer reasoning of CLP(BNR) Prolog'
print 'nor Boolean reasoning of CLP(BNR)'
print 'at the moment'
#E=0.
#st2 = (st + (x,1.,a))
#st2 = (st2 ** (x,19.,b))
#st2 = (st2 * (E,b,c))
#st2 = (st2 + (a,c,d))
x = lp.Variable('x')
y = lp.Variable('y')
c = lp.Variable('c')
d = lp.Variable('d')
s = lp.State(values={x:None,y:None,c:None,d:None})
st6 = lp.States(s)
#st7 = st6.eq((d,c))
#st7 = st7.eq( (ia(.9,1.5), x) )
#st7 = st7.eq( (ia(.5,1.2), y) )
st7 = (st6 == (d,c))
#st7 = (st6 == (ia(.9,1.5), x) )
st7 = (st7 == (ia(.9,1.5), x) )
st7 = (st7 == (ia(.5,1.2), y) )
st7 = (st7 == (ia(-1.,1.9),c) )
print '\nst7\n',st7
st8 = (st7 / (x,y,c) )
print 'st9 is a list of states resulting from interval split'
st9 = (st7 / (y,c,d) )
##
##
##
lp.reify(x,{x:'Hello Reify'})
print 'reification allows us to compute things and return values'
st10 = (st6 * ( lp.reify(x,{x:ia(1.,2.)}), y, ia(5.,10) ) )
dummy = lp.Variable()
#lp.reify(dummy,{dummy:st10(x)/st1})
st10 = (st10 * (x,y,dummy) )
print '\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'
"""
###
#############
###
Extended Division
###
#############
###
"""
X = ia(2.,4.)
Y = ia(-2.,2.)
Z = ia(-10.,10)
def printst():
print 'x = ', state1(x)
print 'y = ', state1(y)
print 'z = ', state1(z)
return
x = lp.Variable('x')
y = lp.Variable('y')
z = lp.Variable('z')
s = lp.State(values={x:None,y:None,z:None})
state1 = lp.States(s)
print '\n\n----------------------------------------------------'
print '----------------------------------------------------'
print '\n'
print 'Start the example for the talk'
print '\n'
#st7 = (st6 == (ia(.9,1.5), x) )
state1 = (state1 == (ia(2.,4.), x) )
state1 = (state1 == (ia(-2.,2.), y) )
state1 = (state1 == (ia(-10.,10),z) )
print '\n\n----------------------------------------------------'
print '----------------------------------------------------'
print 'initial x y z values:\n'
printst()
print '\n y contains 0!'
print ''
#print '\nstate1\n',state1
print 'lets compute '
print '(state1 / (x,y,z))'
print '\n In words, x over y is z.'
print ''
state1 = (state1 / (x,y,z) )
print '----------------------------------------------------'
print '----------------------------------------------------'
| true | 183278c0f49c3a3bf5446875008eb1e4359343dc | Python | ZoneTsuyoshi/Data_Assimilation | /phase2/source/kalman.py | UTF-8 | 39,204 | 2.609375 | 3 | [] | no_license |
'''
Kalman Filter class.
Written with pykalman as a reference.
pykalman did not appear to handle missing observations, so the main goal was
a class that also supports missing data
-> or so I thought; it actually handles them through mask processing
-> still, code that automatically masks missing values (NaN) would make the
extension worthwhile
18.03.13
modified the EM algorithm so that specific matrix elements can be optimized
18.03.17
memory optimizations:
- avoid storing arrays that are not needed
- pykalman keeps the Kalman gain and similar arrays small; save memory here
in the same way
- use np.float32 by default to save memory
- whether this slows things down has not been measured
- dicts allocate hash tables, so they may use more memory
- would like to reduce the use of dicts and lists, but unsure how
18.05.12
changed the EM algorithm interface:
- elements used to be specified with em_dics; updates are now driven by the
covariance structure
- all : optimize all elements
- triD1 : diagonal elements sigma and tridiagonal elements rho; everything
else is fixed to 0
- triD2 : for a 2d lattice process, covariance rho with adjacent cells and
0 with non-adjacent cells; the vertical/horizontal scale of the space
(vertical_length x horizontal_length) must be supplied via
transition_vh_length / observation_vh_length. The matrix consists of
vertical x vertical blocks tiled horizontal x horizontal times.
'''
# import packages
import math
import numpy as np
# pykalman
import warnings
from scipy import linalg
from utils import array1d, array2d, check_random_state, \
get_params, log_multivariate_normal_density, preprocess_arguments
from utils_filter import _parse_observations, _last_dims, \
_determine_dimensionality
# Dimensionality of each Kalman Filter parameter for a single time step
# parameters that the EM algorithm may optimize are listed in DIM
DIM = {
'transition_matrices': 2,
'transition_offsets': 1,
'observation_matrices': 2,
'observation_offsets': 1,
'transition_covariance': 2,
'observation_covariance': 2,
'initial_mean': 1,
'initial_covariance': 2,
}
class Kalman_Filter(object):
'''
In this code, pred and filt are indexed over 0:T, where index t corresponds
to time t; smooth is indexed over 0:T-1, where index t corresponds to t-1.
<Input Variables>
observation [n_time, n_dim_obs] {numpy-array, float}
: observation y
initial_mean [n_dim_sys] {float}
: initial state mean
initial_covariance [n_dim_sys, n_dim_sys] {numpy-array, float}
: initial state covariance
transition_matrices [n_time - 1, n_dim_sys, n_dim_sys]
or [n_dim_sys, n_dim_sys] {numpy-array, float}
: transition matrix from x_{t-1} to x_t
(time-invariant if the time axis is omitted)
transition_noise_matrices [n_time - 1, n_dim_sys, n_dim_noise]
or [n_dim_sys, n_dim_noise] {numpy-array, float}
: transition noise matrix
observation_matrices [n_time, n_dim_sys, n_dim_obs] or [n_dim_sys, n_dim_obs]
{numpy-array, float}
: observation matrix
transition_covariance [n_time - 1, n_dim_noise, n_dim_noise]
or [n_dim_noise, n_dim_noise] {numpy-array, float}
: covariance of the system noise
observation_covariance [n_time, n_dim_obs, n_dim_obs] {numpy-array, float}
: covariance of the observation noise
transition_offsets [n_time - 1, n_dim_sys] or [n_dim_sys] {numpy-array, float}
: offsets of the system transition model
observation_offsets [n_time, n_dim_obs] or [n_dim_obs] {numpy-array, float}
: offsets of the observation model
transition_observation_covariance [n_time, n_dim_obs, n_dim_sys]
or [n_dim_obs, n_dim_sys] {numpy-array, float}
: covariance between transition noise and observation noise
em_vars {list, string} : variable name list for the EM algorithm
transition_covariance_structure, transition_cs {str} :
covariance structure for the transition
observation_covariance_structure, observation_cs {str} :
covariance structure for the observation
transition_vh_length, transition_v {list or numpy-array, int} :
for a 2d space, the vertical and horizontal lengths of the
transition space
observation_vh_length, observation_v {list or numpy-array, int} :
for a 2d space, the vertical and horizontal lengths of the
observation space
n_dim_sys {int} : dimension of the system variable
n_dim_obs {int} : dimension of the observation variable
dtype {type} : numpy-array data type
<Variables>
y : observation
F : transition_matrices
Q : transition_covariance, transition_noise_matrices
b : transition_offsets
H : observation_matrices
R : observation_covariance
d : observation_offsets
S : transition_observation_covariance
x_pred [n_time+1, n_dim_sys] {numpy-array, float}
: mean of the prediction distribution
V_pred [n_time+1, n_dim_sys, n_dim_sys] {numpy-array, float}
: covariance of the prediction distribution
x_filt [n_time+1, n_dim_sys] {numpy-array, float}
: mean of the filtering distribution
V_filt [n_time+1, n_dim_sys, n_dim_sys] {numpy-array, float}
: covariance of the filtering distribution
x_smooth [n_time, n_dim_sys] {numpy-array, float}
: mean of the RTS smoothing distribution
V_smooth [n_time, n_dim_sys, n_dim_sys] {numpy-array, float}
: covariance of the RTS smoothing distribution
filter_update {function}
: update function from x_t to x_{t+1}
state space model (state equations):
x[t] = F[t-1] x[t-1] + b[t-1] + G[t-1] v[t-1]
y[t] = H[t] x[t] + d[t] + w[t]
v[t] ~ N(0, Q[t])
w[t] ~ N(0, R[t])
'''
def __init__(self, observation = None,
initial_mean = None, initial_covariance = None,
transition_matrices = None, observation_matrices = None,
transition_covariance = None, observation_covariance = None,
transition_noise_matrices = None,
transition_offsets = None, observation_offsets = None,
transition_observation_covariance = None,
em_vars = ['transition_covariance', 'observation_covariance',
'initial_mean', 'initial_covariance'],
transition_covariance_structure = 'all',
observation_covariance_structure = 'all',
transition_vh_length = None,
observation_vh_length = None,
n_dim_sys = None, n_dim_obs = None, dtype = np.float32) :
# determine dimensionality
self.n_dim_sys = _determine_dimensionality(
[(transition_matrices, array2d, -2),
(transition_offsets, array1d, -1),
(transition_noise_matrices, array2d, -2),
(initial_mean, array1d, -1),
(initial_covariance, array2d, -2),
(observation_matrices, array2d, -1),
(transition_observation_covariance, array2d, -2)],
n_dim_sys
)
self.n_dim_obs = _determine_dimensionality(
[(observation_matrices, array2d, -2),
(observation_offsets, array1d, -1),
(observation_covariance, array2d, -2),
(transition_observation_covariance, array2d, -1)],
n_dim_obs
)
# if transition_noise_matrices is not given, match the noise dimension to the system
if transition_noise_matrices is None :
self.n_dim_noise = _determine_dimensionality(
[(transition_covariance, array2d, -2)],
self.n_dim_sys
)
transition_noise_matrices = np.eye(self.n_dim_noise, dtype = dtype)
else :
self.n_dim_noise = _determine_dimensionality(
[(transition_noise_matrices, array2d, -1),
(transition_covariance, array2d, -2)]
)
# check dimensions and mask missing observations
self.y = _parse_observations(observation)
# if initial_mean is not given, use a zero vector
if initial_mean is None:
self.initial_mean = np.zeros(self.n_dim_sys, dtype = dtype)
else:
self.initial_mean = initial_mean.astype(dtype)
# if initial_covariance is not given, use the identity matrix
if initial_covariance is None:
self.initial_covariance = np.eye(self.n_dim_sys, dtype = dtype)
else:
self.initial_covariance = initial_covariance.astype(dtype)
# if transition_matrices is not given, use the identity matrix
if transition_matrices is None:
self.F = np.eye(self.n_dim_sys, dtype = dtype)
else:
self.F = transition_matrices.astype(dtype)
# if transition_covariance is given, apply the noise transform G Q G^T when
# noise matrices were supplied; otherwise default to the identity matrix
if transition_covariance is not None:
if transition_noise_matrices is not None:
self.Q = self._calc_transition_covariance(
transition_noise_matrices,
transition_covariance
).astype(dtype)
else:
self.Q = transition_covariance.astype(dtype)
else:
self.Q = np.eye(self.n_dim_sys, dtype = dtype)
# if transition_offsets is not given, use a zero vector
if transition_offsets is None:
self.b = np.zeros(self.n_dim_sys, dtype = dtype)
else:
self.b = transition_offsets.astype(dtype)
# if observation_matrices is not given, use the identity matrix
if observation_matrices is None:
self.H = np.eye(self.n_dim_obs, self.n_dim_sys, dtype = dtype)
else:
self.H = observation_matrices.astype(dtype)
# if observation_covariance is not given, use the identity matrix
if observation_covariance is None:
self.R = np.eye(self.n_dim_obs, dtype = dtype)
else:
self.R = observation_covariance.astype(dtype)
# if observation_offsets is not given, use a zero vector
if observation_offsets is None:
self.d = np.zeros(self.n_dim_obs, dtype = dtype)
else:
self.d = observation_offsets.astype(dtype)
# if transition_observation_covariance is not given, assume the transition
# and observation noises are uncorrelated
if transition_observation_covariance is None:
self.predict_update = self._predict_update_no_noise
else:
self.S = transition_observation_covariance
self.predict_update = self._predict_update_noise
## parameters to optimize with the EM algorithm
self.em_vars = em_vars
if transition_covariance_structure == 'triD2':
if transition_vh_length is None:
raise ValueError('you should input transition_vh_length.')
elif transition_vh_length[0] * transition_vh_length[1] != self.n_dim_sys:
raise ValueError('you should confirm transition_vh_length.')
else:
self.transition_v = transition_vh_length[0]
self.transition_cs = transition_covariance_structure
elif transition_covariance_structure in ['all', 'triD1']:
self.transition_cs = transition_covariance_structure
else:
raise ValueError('you should confirm transition_covariance_structure.')
if observation_covariance_structure == 'triD2':
if observation_vh_length is None:
raise ValueError('you should input observation_vh_length.')
elif observation_vh_length[0]*observation_vh_length[1] != self.n_dim_obs:
raise ValueError('you should confirm observation_vh_length.')
else:
self.observation_v = observation_vh_length[0]
self.observation_cs = observation_covariance_structure
elif observation_covariance_structure in ['all', 'triD1']:
self.observation_cs = observation_covariance_structure
else:
raise ValueError('you should confirm observation_covariance_structure.')
# dtype
self.dtype = dtype
# filter function (computes the filtered estimates)
def filter(self):
'''
T {int} : length of data y
x_pred [n_time, n_dim_sys] {numpy-array, float}
: mean of hidden state at time t given observations
from times [0...t-1]
V_pred [n_time, n_dim_sys, n_dim_sys] {numpy-array, float}
: covariance of hidden state at time t given observations
from times [0...t-1]
x_filt [n_time, n_dim_sys] {numpy-array, float}
: mean of hidden state at time t given observations from times [0...t]
V_filt [n_time, n_dim_sys, n_dim_sys] {numpy-array, float}
: covariance of hidden state at time t given observations
from times [0...t]
K [n_dim_sys, n_dim_obs] {numpy-array, float}
: Kalman gain matrix for time t
'''
T = self.y.shape[0]
self.x_pred = np.zeros((T, self.n_dim_sys), dtype = self.dtype)
self.V_pred = np.zeros((T, self.n_dim_sys, self.n_dim_sys),
dtype = self.dtype)
self.x_filt = np.zeros((T, self.n_dim_sys), dtype = self.dtype)
self.V_filt = np.zeros((T, self.n_dim_sys, self.n_dim_sys),
dtype = self.dtype)
K = np.zeros((self.n_dim_sys, self.n_dim_obs), dtype = self.dtype)
# compute prediction and filtering at each time step
for t in range(T):
# show progress
print("\r filter calculating... t={}".format(t) + "/" + str(T), end="")
if t == 0:
# initial setting (initial distribution)
self.x_pred[0] = self.initial_mean
self.V_pred[0] = self.initial_covariance
else:
self.predict_update(t)
# if any element of y[t] is masked, skip the filtering update
if np.any(np.ma.getmask(self.y[t])) :
self.x_filt[t] = self.x_pred[t]
self.V_filt[t] = self.V_pred[t]
else :
# extract parameters for time t
H = _last_dims(self.H, t, 2)
R = _last_dims(self.R, t, 2)
d = _last_dims(self.d, t, 1)
# filtering (compute the filtering distribution)
K = self.V_pred[t] @ (
H.T @ linalg.pinv(H @ (self.V_pred[t] @ H.T + R))
)
self.x_filt[t] = self.x_pred[t] + K @ (
self.y[t] - (H @ self.x_pred[t] + d)
)
self.V_filt[t] = self.V_pred[t] - K @ (H @ self.V_pred[t])
# get predicted value (one-step-ahead prediction; call after filter())
def get_predicted_value(self, dim = None) :
# run filter() if it has not been run yet
try :
self.x_pred[0]
except :
self.filter()
if dim is None:
return self.x_pred
elif dim <= self.x_pred.shape[1]:
return self.x_pred[:, int(dim)]
else:
raise ValueError('The dim must be less than '
+ str(self.x_pred.shape[1]) + '.')
# get filtered value (call after filter())
def get_filtered_value(self, dim = None) :
# run filter() if it has not been run yet
try :
self.x_filt[0]
except :
self.filter()
if dim is None:
return self.x_filt
elif dim <= self.x_filt.shape[1]:
return self.x_filt[:, int(dim)]
else:
raise ValueError('The dim must be less than '
+ str(self.x_filt.shape[1]) + '.')
# RTS smoothing function
def smooth(self):
'''
T : length of data y
x_smooth [n_time, n_dim_sys] {numpy-array, float}
: mean of hidden state distributions for times
[0...n_timesteps-1] given all observations
V_smooth [n_time, n_dim_sys, n_dim_sys] {numpy-array, float}
: covariances of hidden state distributions for times
[0...n_timesteps-1] given all observations
A [n_dim_sys, n_dim_sys] {numpy-array, float}
: fixed-interval smoothing gain
'''
# run filter() if it has not been run yet
try :
self.x_pred[0]
except :
self.filter()
T = self.y.shape[0]
self.x_smooth = np.zeros((T, self.n_dim_sys), dtype = self.dtype)
self.V_smooth = np.zeros((T, self.n_dim_sys, self.n_dim_sys),
dtype = self.dtype)
A = np.zeros((self.n_dim_sys, self.n_dim_sys), dtype = self.dtype)
self.x_smooth[-1] = self.x_filt[-1]
self.V_smooth[-1] = self.V_filt[-1]
# t in [0, T-2] (note that t runs backwards from T-2 to 0)
for t in reversed(range(T - 1)) :
# show progress
print("\r smooth calculating... t={}".format(T - t)
+ "/" + str(T), end="")
# extract parameters for time t
F = _last_dims(self.F, t, 2)
# compute the fixed-interval smoothing gain
A = np.dot(self.V_filt[t], np.dot(F.T, linalg.pinv(self.V_pred[t + 1])))
# fixed-interval smoothing update
self.x_smooth[t] = self.x_filt[t] \
+ np.dot(A, self.x_smooth[t + 1] - self.x_pred[t + 1])
self.V_smooth[t] = self.V_filt[t] \
+ np.dot(A, np.dot(self.V_smooth[t + 1] - self.V_pred[t + 1], A.T))
# get RTS smoothed value (call after smooth())
def get_smoothed_value(self, dim = None) :
# run smooth() if it has not been run yet
try :
self.x_smooth[0]
except :
self.smooth()
if dim is None:
return self.x_smooth
elif dim <= self.x_smooth.shape[1]:
return self.x_smooth[:, int(dim)]
else:
raise ValueError('The dim must be less than '
+ str(self.x_smooth.shape[1]) + '.')
# em algorithm
def em(self, n_iter = 10, em_vars = None):
"""Apply the EM algorithm
Apply the EM algorithm to estimate all parameters specified by `em_vars`.
em_vars ใซๅ
ฅใใใใฆใใใใฉใกใผใฟ้ๅใซใคใใฆ EM algorithm ใ็จใใฆๆ้ฉๅใใ๏ผ
ใใ ใ๏ผๅ้ท็งปใใฉใกใผใฟใฏๆไธๅคใงใใใจใใ๏ผ
Parameters
----------
n_iter : int, optional
number of EM iterations to perform
EM algorithm ใซใใใใคใใฌใผใทใงใณๅๆฐ
em_vars : iterable of strings or 'all'
variables to perform EM over. Any variable not appearing here is
left untouched.
EM algorithm ใงๆ้ฉๅใใใใฉใกใผใฟ็พค
"""
# Create dictionary of variables not to perform EM on
# if em_vars is not given, use the em_vars passed at construction time
if em_vars is None:
em_vars = self.em_vars
# set up the fixed parameters according to em_vars
if em_vars == 'all':
# with 'all', no existing values are held fixed
given = {}
else:
given = {
'transition_matrices': self.F,
'observation_matrices': self.H,
'transition_offsets': self.b,
'observation_offsets': self.d,
'transition_covariance': self.Q,
'observation_covariance': self.R,
'initial_mean': self.initial_mean,
'initial_covariance': self.initial_covariance
}
# remove parameters listed in em_vars from the given dictionary
em_vars = set(em_vars)
for k in list(given.keys()):
if k in em_vars:
given.pop(k)
# If a parameter is time varying, print a warning
# consider only the parameters listed in DIM, fetched via get_params
# get_params returns the input values of the __init__ function
# warn if a parameter not contained in `given` is not time-invariant
'''
for (k, v) in get_params(self).items():
if k in DIM and (not k in given) and len(v.shape) != DIM[k]:
warn_str = (
'{0} has {1} dimensions now; after fitting, '
+ 'it will have dimension {2}'
).format(k, len(v.shape), DIM[k])
warnings.warn(warn_str)
'''
# Actual EM iterations
for i in range(n_iter):
print("EM calculating... i={}".format(i+1) + "/" + str(n_iter), end="")
# E step
self.filter()
# sigma pair smooth
# covariance between the system states at times t and t-1
self._sigma_pair_smooth()
# M step
self._calc_em(given = given)
return self
# calculate transition covariance (computes Q_new = G Q G^T)
def _calc_transition_covariance(self, G, Q) :
if G.ndim == 2:
GT = G.T
elif G.ndim == 3:
GT = G.transpose(0,2,1)
else:
raise ValueError('The ndim of transition_noise_matrices'
+ ' should be 2 or 3,' + ' but your input is ' + str(G.ndim) + '.')
if Q.ndim == 2 or Q.ndim == 3:
return np.matmul(G, np.matmul(Q, GT))
else:
raise ValueError('The ndim of transition_covariance should be 2 or 3,'
+ ' but your input is ' + str(Q.ndim) + '.')
# prediction update when transition and observation noise are uncorrelated
def _predict_update_no_noise(self, t):
# extract parameters for time t-1
F = _last_dims(self.F, t - 1, 2)
Q = _last_dims(self.Q, t - 1, 2)
b = _last_dims(self.b, t - 1, 1)
# compute the prediction distribution at time t
self.x_pred[t] = F @ self.x_filt[t-1] + b
self.V_pred[t] = F @ self.V_filt[t-1] @ F.T + Q
# prediction update with correlated transition and observation noise
def _predict_update_noise(self, t):
if np.any(np.ma.getmask(self.y[t-1])) :
self._predict_update_no_noise(t)
else:
# extract parameters for time t-1
F = _last_dims(self.F, t - 1, 2)
Q = _last_dims(self.Q, t - 1, 2)
b = _last_dims(self.b, t - 1, 1)
H = _last_dims(self.H, t - 1, 2)
d = _last_dims(self.d, t - 1, 1)
S = _last_dims(self.S, t - 1, 2)
R = _last_dims(self.R, t - 1, 2)
# compute the prediction distribution at time t
SR = S @ linalg.pinv(R)
F_SRH = F - SR @ H
self.x_pred[t] = F_SRH @ self.x_filt[t-1] + b + SR @ (self.y[t-1] - d)
self.V_pred[t] = F_SRH @ self.V_filt[t-1] @ F_SRH.T + Q - SR @ S.T
# sigma pair smooth
# smoothing is folded in here to save memory during EM
def _sigma_pair_smooth(self):
'''
T {int} : length of y
V_pair [n_time, n_dim_sys, n_dim_sys] {numpy-array, float}
: Covariance between hidden states at times t and t-1
for t = [1...n_timesteps-1]. Time 0 is ignored.
'''
# length of the time series
T = self.y.shape[0]
self.x_smooth = np.zeros((T, self.n_dim_sys), dtype = self.dtype)
self.V_smooth = np.zeros((T, self.n_dim_sys, self.n_dim_sys),
dtype = self.dtype)
# pairwise covariance
self.V_pair = np.zeros((T, self.n_dim_sys, self.n_dim_sys),
dtype = self.dtype)
A = np.zeros((self.n_dim_sys, self.n_dim_sys), dtype = self.dtype)
self.x_smooth[-1] = self.x_filt[-1]
self.V_smooth[-1] = self.V_filt[-1]
# t in [0, T-2] (note that t runs backwards from T-2 to 0)
for t in reversed(range(T - 1)) :
# show progress
print("\r expectation step calculating... t={}".format(T - t)
+ "/" + str(T), end="")
# extract parameters for time t
F = _last_dims(self.F, t, 2)
# compute the fixed-interval smoothing gain
A = np.dot(self.V_filt[t], np.dot(F.T, linalg.pinv(self.V_pred[t + 1])))
# fixed-interval smoothing update
self.x_smooth[t] = self.x_filt[t] \
+ np.dot(A, self.x_smooth[t + 1] - self.x_pred[t + 1])
self.V_smooth[t] = self.V_filt[t] \
+ np.dot(A, np.dot(self.V_smooth[t + 1] - self.V_pred[t + 1], A.T))
# cross-time covariance matrix
self.V_pair[t + 1] = np.dot(self.V_smooth[t], A.T)
# calculate parameters by the EM algorithm
def _calc_em(self, given = {}):
'''
T {int} : length of observation y
'''
# length of y
T = self.y.shape[0]
# update observation_matrices first
if 'observation_matrices' not in given:
'''math
y_t : observation, d_t : observation_offsets
x_t : system, H : observation_matrices
H &= ( \sum_{t=0}^{T-1} (y_t - d_t) \mathbb{E}[x_t]^T )
( \sum_{t=0}^{T-1} \mathbb{E}[x_t x_t^T] )^-1
'''
res1 = np.zeros((self.n_dim_obs, self.n_dim_sys), dtype = self.dtype)
res2 = np.zeros((self.n_dim_sys, self.n_dim_sys), dtype = self.dtype)
for t in range(T):
# only use y_t without missing values
if not np.any(np.ma.getmask(self.y[t])):
d = _last_dims(self.d, t, 1)
# take elementwise products via the outer product
res1 += np.outer(self.y[t] - d, self.x_smooth[t])
res2 += self.V_smooth[t] \
+ np.outer(self.x_smooth[t], self.x_smooth[t])
# update observation_matrices (H)
self.H = np.dot(res1, linalg.pinv(res2))
# next, update observation_covariance
if 'observation_covariance' not in given:
'''math
R : observation_covariance, H_t : observation_matrices,
x_t : system, d_t : observation_offsets, y_t : observation
R &= \frac{1}{T} \sum_{t=0}^{T-1}
[y_t - H_t \mathbb{E}[x_t] - d_t]
[y_t - H_t \mathbb{E}[x_t] - d_t]^T
+ H_t Var(x_t) H_t^T
'''
# accumulator
res1 = np.zeros((self.n_dim_obs, self.n_dim_obs), dtype = self.dtype)
n_obs = 0
for t in range(T):
if not np.any(np.ma.getmask(self.y[t])):
H = _last_dims(self.H, t)
d = _last_dims(self.d, t, 1)
err = self.y[t] - np.dot(H, self.x_smooth[t]) - d
res1 += np.outer(err, err) \
+ np.dot(H, np.dot(self.V_smooth[t], H.T))
n_obs += 1
# temporary
# tmp = self.R
# if at least one observation was available
if n_obs > 0:
self.R = (1.0 / n_obs) * res1
else:
self.R = res1
# branch on the covariance structure
if self.observation_cs == 'triD1':
# define the new R
new_R = np.zeros_like(self.R, dtype=self.dtype)
# average the diagonal entries
np.fill_diagonal(new_R, self.R.diagonal().mean())
# average the first off-diagonal entries
rho = (self.R.diagonal(1).mean() + self.R.diagonal(-1).mean()) / 2
# combine the results
self.R = new_R + np.diag(rho * np.ones(self.n_dim_obs - 1), 1) \
+ np.diag(rho * np.ones(self.n_dim_obs - 1), -1)
elif self.observation_cs == 'triD2':
# define the new R
new_R = np.zeros_like(self.R, dtype=self.dtype)
# average the diagonal entries
np.fill_diagonal(new_R, self.R.diagonal().mean())
# average the first off-diagonal and grid-adjacent entries
td = np.ones(self.n_dim_obs - 1)
td[self.observation_v-1::self.observation_v-1] = 0
# symmetric bands at offsets +-1 and +-observation_v
condition = np.diag(td, 1) + np.diag(td, -1) \
+ np.diag(
np.ones(self.n_dim_obs - self.observation_v),
self.observation_v
) \
+ np.diag(
np.ones(self.n_dim_obs - self.observation_v),
-self.observation_v
)
rho = self.R[condition.astype(bool)].mean()
# combine the results
self.R = new_R + rho * condition.astype(self.dtype)
# next, update transition_matrices
if 'transition_matrices' not in given:
'''math
F : transition_matrices, x_t : system,
b_t : transition_offsets
F &= ( \sum_{t=1}^{T-1} \mathbb{E}[x_t x_{t-1}^{T}]
- b_{t-1} \mathbb{E}[x_{t-1}]^T )
( \sum_{t=1}^{T-1} \mathbb{E}[x_{t-1} x_{t-1}^T] )^{-1}
'''
# accumulators
res1 = np.zeros((self.n_dim_sys, self.n_dim_sys), dtype = self.dtype)
res2 = np.zeros((self.n_dim_sys, self.n_dim_sys), dtype = self.dtype)
for t in range(1, T):
b = _last_dims(self.b, t - 1, 1)
res1 += self.V_pair[t] + np.outer(
self.x_smooth[t], self.x_smooth[t - 1]
)
res1 -= np.outer(b, self.x_smooth[t - 1])
res2 += self.V_smooth[t - 1] \
+ np.outer(self.x_smooth[t - 1], self.x_smooth[t - 1])
self.F = np.dot(res1, linalg.pinv(res2))
# next, update transition_covariance
if 'transition_covariance' not in given:
'''math
Q : transition_covariance, x_t : system,
b_t : transition_offsets, F_t : transition_matrices
Q &= \frac{1}{T-1} \sum_{t=0}^{T-2}
(\mathbb{E}[x_{t+1}] - F_t \mathbb{E}[x_t] - b_t)
(\mathbb{E}[x_{t+1}] - F_t \mathbb{E}[x_t] - b_t)^T
+ F_t Var(x_t) F_t^T + Var(x_{t+1})
- Cov(x_{t+1}, x_t) F_t^T - F_t Cov(x_t, x_{t+1})
'''
# accumulator
res1 = np.zeros((self.n_dim_sys, self.n_dim_sys), dtype = self.dtype)
# not everything is jointly optimized, so this is a naive computation
for t in range(T - 1):
F = _last_dims(self.F, t)
b = _last_dims(self.b, t, 1)
err = self.x_smooth[t + 1] - np.dot(F, self.x_smooth[t]) - b
Vt1t_F = np.dot(self.V_pair[t + 1], F.T)
res1 += (
np.outer(err, err)
+ np.dot(F, np.dot(self.V_smooth[t], F.T))
+ self.V_smooth[t + 1]
- Vt1t_F - Vt1t_F.T
)
self.Q = (1.0 / (T - 1)) * res1
# branch on the covariance structure
if self.transition_cs == 'triD1':
# define the new Q
new_Q = np.zeros_like(self.Q, dtype=self.dtype)
# average the diagonal entries
np.fill_diagonal(new_Q, self.Q.diagonal().mean())
# average the first off-diagonal entries
rho = (self.Q.diagonal(1).mean() + self.Q.diagonal(-1).mean()) / 2
# combine the results
self.Q = new_Q + np.diag(rho * np.ones(self.n_dim_sys - 1), 1)\
+ np.diag(rho * np.ones(self.n_dim_sys - 1), -1)
elif self.transition_cs == 'triD2':
# define the new Q
new_Q = np.zeros_like(self.Q, dtype=self.dtype)
# average the diagonal entries
np.fill_diagonal(new_Q, self.Q.diagonal().mean())
# average the first off-diagonal and grid-adjacent entries
td = np.ones(self.n_dim_sys - 1)
td[self.transition_v-1::self.transition_v-1] = 0
# symmetric bands at offsets +-1 and +-transition_v
condition = np.diag(td, 1) + np.diag(td, -1) \
+ np.diag(
np.ones(self.n_dim_sys - self.transition_v),
self.transition_v
) \
+ np.diag(
np.ones(self.n_dim_sys - self.transition_v),
-self.transition_v
)
rho = self.Q[condition.astype(bool)].mean()
# combine the results
self.Q = new_Q + rho * condition.astype(self.dtype)
# next, update initial_mean
if 'initial_mean' not in given:
'''math
x_0 : system state at t=0
\mu_0 = \mathbb{E}[x_0]
'''
self.initial_mean = self.x_smooth[0]
# next, update initial_covariance
if 'initial_covariance' not in given:
'''math
\mu_0 : mean of the initial state
\Sigma_0 = \mathbb{E}[x_0 x_0^T] - \mu_0 \mu_0^T
'''
x0 = self.x_smooth[0]
x0_x0 = self.V_smooth[0] + np.outer(x0, x0)
self.initial_covariance = x0_x0 - np.outer(self.initial_mean, x0)
self.initial_covariance += - np.outer(x0, self.initial_mean)\
+ np.outer(self.initial_mean, self.initial_mean)
# next, update transition_offsets
if 'transition_offsets' not in given:
'''math
b : transition_offsets, x_t : system
F_t : transition_matrices
b = \frac{1}{T-1} \sum_{t=1}^{T-1}
\mathbb{E}[x_t] - F_{t-1} \mathbb{E}[x_{t-1}]
'''
self.b = np.zeros(self.n_dim_sys, dtype = self.dtype)
# needs values at more than one time point
if T > 1:
for t in range(1, T):
F = _last_dims(self.F, t - 1)
self.b += self.x_smooth[t] - np.dot(F, self.x_smooth[t - 1])
self.b *= (1.0 / (T - 1))
# finally, update observation_offsets
if 'observation_offsets' not in given:
'''math
d : observation_offsets, y_t : observation
H_t : observation_matrices, x_t : system
d = \frac{1}{T} \sum_{t=0}^{T-1} y_t - H_{t} \mathbb{E}[x_{t}]
'''
self.d = np.zeros(self.n_dim_obs, dtype = self.dtype)
n_obs = 0
for t in range(T):
if not np.any(np.ma.getmask(self.y[t])):
H = _last_dims(self.H, t)
self.d += self.y[t] - np.dot(H, self.x_smooth[t])
n_obs += 1
if n_obs > 0:
self.d *= (1.0 / n_obs)
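# A minimal usage sketch (hypothetical, not part of the original file): fit and
# smooth a 1-d series with a missing value. The data and iteration count are
# illustrative only.
#
# import numpy.ma as ma
# y = ma.masked_invalid(np.array([[0.1], [np.nan], [0.3]], dtype=np.float32))
# kf = Kalman_Filter(observation=y, n_dim_sys=1, n_dim_obs=1)
# kf.em(n_iter=5)      # E step: filter + pair smoothing; M step: _calc_em
# kf.smooth()          # RTS smoothing with the fitted parameters
# x_hat = kf.get_smoothed_value(dim=0)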
| true | 902c2a8fbb0fc2cd2eb8d4353e6e37aa2d7f7255 | Python | koking0/Algorithm | /Interview/JD/JD2-ๆๅฐ็/solve.py | UTF-8 | 166 | 3.203125 | 3 | [] | no_license |
class Balls:
def calcDistance(self, A, B, C, D):
return 3 * (A + B + C + D)
if __name__ == '__main__':
print(Balls().calcDistance(100, 90, 80, 70))
| true | f09f97284b978dc953a9299e42b9e6466ada3a26 | Python | takecap/70puzzles | /src/q07.py | UTF-8 | 738 | 3.75 | 4 | [] | no_license |
# Q07: binary-converted dates
# Write a date as the 8-digit integer YYYYMMDD, convert it to binary, and
# reverse the digits; find the dates for which converting back gives the
# original date (i.e. the binary representation is a palindrome).
# The search range is 19641010 to 20200724.
from datetime import date
from datetime import timedelta
# convert dt (datetime.date) to a binary string (without the 0b prefix)
def date2bin(dt):
date_str = dt.strftime('%Y%m%d')
return bin(int(date_str))[2:]
def main():
dt = date(1964, 10, 10)
while dt < date(2020, 7, 25):
date_bin = date2bin(dt)
if date_bin == date_bin[::-1]:
print(dt, date_bin)
dt += timedelta(days=1)
if __name__ == '__main__':
main()
| true | a709a5540df63fb94564aeb15a9877b634190e37 | Python | bongho/codewar | /Build_Tower.py | UTF-8 | 415 | 3.34375 | 3 | [] | no_license |
# solution 1: build each floor with explicit padding
def tower_builder(n_floors):
floors = []
n = n_floors
for i in range(n_floors):
n -= 1
floors.append(' ' * n + '*' * (i * 2 + 1) + ' ' * n)
return floors
# solution 2 (alternative; overrides the definition above): use str.center
def tower_builder(n):
return [("*" * (i*2-1)).center(n*2-1) for i in range(1, n+1)]
# solution 3 (alternative; overrides the definition above): padding in a comprehension
def tower_builder(n):
return [" " * (n - i - 1) + "*" * (2*i + 1) + " " * (n - i - 1) for i in range(n)]
| true | 51071575356ec18dd7c1052239e290b2e2015306 | Python | joohyun333/programmers | /๋ฐฑ์ค/ํฌํฌ์ธํฐ/์๊ณ ๋ฅด๊ธฐ.py | UTF-8 | 396 | 3.015625 | 3 | [] | no_license |
# Two-pointer problem: among N numbers, find the smallest pairwise
# difference that is still at least M
import sys
input = sys.stdin.readline
num = []
N, M = map(int, input().split())
for i in range(N):
num.append(int(input()))
num.sort()
start = end = 0
min_result = sys.maxsize
while end<N and start <= end:
distance = num[end] - num[start]
if distance >=M :
if min_result> distance:
min_result = distance
start+=1
else:
end+=1
print(min_result)
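# Worked example (hypothetical input): N=4, M=3 and the numbers 1 3 5 7.
# After sorting, the right pointer advances until the gap reaches M
# (1..5 gives 4), then the left pointer advances; no later window beats 4,
# so 4 is printed.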
| true | cb67199473c1cbcbe8260232357653b949126b9f | Python | sijichun/MathStatsCode | /code_in_notes/MCMC_independent_mh.py | UTF-8 | 594 | 2.96875 | 3 | [] | no_license |
## mcmc_independent_mh.py
## Independence Metropolis-Hastings MCMC. Inputs:
## N_samples : number of draws
## pai(x) : target density function
## q(y) : proposal density function
## q_sampler : function that draws one sample from q
## x0 : initial value
from numpy import random as nprd
def MH_independent(N_samples, pai, q, q_sampler, x0):
X = []
x = x0
for i in range(N_samples):
y = q_sampler()
rho = min(1, pai(y) * q(x) / (pai(x) * q(y)))
if nprd.uniform() <= rho:
X.append(y)
x = y
else:
X.append(x)
return X
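# A minimal usage sketch (hypothetical, not from the original file): sample a
# standard normal target through a wider normal proposal. Both densities are
# unnormalized; the constants cancel inside the acceptance ratio rho.
if __name__ == '__main__':
    import math
    pai = lambda x: math.exp(-0.5 * x * x)      # target: N(0, 1), unnormalized
    q = lambda y: math.exp(-y * y / 8.0)        # proposal: N(0, 2^2), unnormalized
    q_sampler = lambda: nprd.normal(0.0, 2.0)   # draw from the proposal
    X = MH_independent(10000, pai, q, q_sampler, 0.0)
    print(sum(X) / len(X))                      # sample mean, close to 0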
| true | 3d7f34be3b3e8b6c86a8af1977fef8333bc7e4ef | Python | ulbergc/EQlocator | /Processing/tools.py | UTF-8 | 2,759 | 2.859375 | 3 | [] | no_license |
'''
Helper tools for process_data.py
Read and write datasets with Spark
Filename: tools.py
Cohort: Insight Data Engineering SEA '19C
Name: Carl Ulberg
'''
import pyspark.sql.types as T
from pyspark.sql import SQLContext
from pyspark.sql.functions import countDistinct
import os
def read_data(spark):
'''
Read earthquake arrival time data from S3
Return as Spark DataFrame
'''
# path_single is for testing a single 'day' ~37k events
path_single = 's3a://ulberg-insight/test1set/19800201.json'
archive_path = 's3a://ulberg-insight/archive'
molimit = 2 # 13 for all, 2 for single month
yrlimit = 1 # 5 for all, 1 for single year
mo = ['{:02d}01'.format(x) for x in range(1, molimit)]
months = []
for i in range(yrlimit):
tmpmo = ['198{}{}'.format(i, x) for x in mo]
months += tmpmo
path = ['{p1}/{mo}/{mo}.json'
.format(p1=archive_path, mo=month) for month in months]
df = spark.read.json(path)
df.printSchema()
return df
def read_station(spark):
'''
Read station location information from S3
Return as Spark DataFrame
'''
sql_context = SQLContext(spark)
sta = sql_context.read \
.load('s3a://ulberg-insight/info/stations_small.csv',
format='com.databricks.spark.csv')
# change stations schema
sta = sta.selectExpr("_c0 as sta",
"_c1 as Station_Longitude",
"_c2 as Station_Latitude",
"_c3 as Station_Depth")
sta = sta.withColumn("Station_Latitude", sta["Station_Latitude"]
.cast(T.DoubleType())) \
.withColumn("Station_Longitude", sta["Station_Longitude"]
.cast(T.DoubleType())) \
.withColumn("Station_Depth", sta["Station_Depth"]
.cast(T.DoubleType()))
return sta
def write_data(df, db_name, table_name):
'''
Write Spark DataFrame to database
'''
mysql_host = os.environ['MYSQL_PATH']
mysql_db = db_name
mysql_table = table_name
mysql_user = os.environ['MYSQL_USER']
mysql_pw = os.environ['MYSQL_PASSWORD']
df.write.format("jdbc").options(
url='jdbc:mysql://{}/{}'.format(mysql_host, mysql_db),
dbtable=mysql_table,
user=mysql_user,
password=mysql_pw,
driver="com.mysql.cj.jdbc.Driver") \
.save()
def count_station_obs(df):
'''
Count the number of times each station had an earthquake arrival time
'''
sta_count = df.groupby('sta').agg(countDistinct('Event_id'))
sta_count = sta_count \
.withColumn('count', sta_count['count(DISTINCT Event_id)'])
sta_count = sta_count.drop('count(DISTINCT Event_id)')
return sta_count
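# A minimal driver sketch (hypothetical, assuming a configured Spark cluster and
# the MYSQL_* environment variables used above; db/table names illustrative):
#
# from pyspark.sql import SparkSession
# spark = SparkSession.builder.appName('eq-locator').getOrCreate()
# df = read_data(spark)
# sta_count = count_station_obs(df)
# write_data(sta_count, 'eq_db', 'station_counts')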
| true | d95136a7973f954feae348ededae8a1f49a0c4e2 | Python | loganyu/leetcode | /problems/1347_minimum_number_of_steps_to_make_two_strings_anagram.py | UTF-8 | 1,118 | 3.8125 | 4 | [] | no_license |
'''
You are given two strings of the same length s and t. In one step you can choose any character of t and replace it with another character.
Return the minimum number of steps to make t an anagram of s.
An Anagram of a string is a string that contains the same characters with a different (or the same) ordering.
Example 1:
Input: s = "bab", t = "aba"
Output: 1
Explanation: Replace the first 'a' in t with b, t = "bba" which is anagram of s.
Example 2:
Input: s = "leetcode", t = "practice"
Output: 5
Explanation: Replace 'p', 'r', 'a', 'i' and 'c' from t with proper characters to make t anagram of s.
Example 3:
Input: s = "anagram", t = "mangaar"
Output: 0
Explanation: "anagram" and "mangaar" are anagrams.
Constraints:
1 <= s.length <= 5 * 104
s.length == t.length
s and t consist of lowercase English letters only.
'''
from collections import Counter
class Solution:
def minSteps(self, s: str, t: str) -> int:
counts = Counter(s)
steps = 0
for char in t:
if counts[char] > 0:
counts[char] -= 1
else:
steps += 1
return steps
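# Quick check against the examples above:
# Solution().minSteps("bab", "aba") == 1 and
# Solution().minSteps("leetcode", "practice") == 5.
# Runs in O(|s| + |t|) time with O(1) extra space (at most 26 counter keys).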
| true | cf361a0a26de4d4cff39cfb45d53ab251a650cf3 | Python | Vyalkoff/Codewars | /vowel_counter.py | UTF-8 | 250 | 3.375 | 3 | [] | no_license |
def get_count(input_str):
num_vowels = 0
list_vowels = ['a', 'e', 'i', 'o', 'u']
for i in input_str:
if i in list_vowels:
num_vowels += 1
return num_vowels
print(get_count('o a kak ushakov lil vo kashu kakao'))
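# An equivalent alternative (a sketch, not from the original file): count with
# a set for O(1) membership tests.
def get_count_set(input_str):
    vowels = set('aeiou')
    return sum(1 for c in input_str if c in vowels)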
| true | 46a485720df409b95c58a9a9924d1b8ea7e76ff1 | Python | 11Vladimir/algoritm | /lesson_3/task_9.py | UTF-8 | 647 | 3.90625 | 4 | [] | no_license |
#!/usr/bin/python3.8
# 9. Find the maximum element among the column minima of a matrix.
from random import random
M = 10
N = 5
def matrix():
    arr = []
    for _ in range(N):
        b = []
        for _ in range(M):
            n = int(random() * 200)
            b.append(n)
        arr.append(b)
    return arr
# build the matrix once, so every read sees the same random values
arr = matrix()
mx = -1
for j in range(M):
    mn = 200
    for i in range(N):
        if arr[i][j] < mn:
            mn = arr[i][j]
    if mn > mx:
        mx = mn
print(arr, '\n', "Maximum among the column minima: ", mx)
| true | 2f3c16f1e0f2e95d1f3f058dd4f98efd42a87035 | Python | Rakshit-Bhatt/Practice_Codes | /demo_codes/HACKERRANK/itertools_product.py | UTF-8 | 597 | 3.6875 | 4 | [] | no_license |
from itertools import product
# timeit module to measure the elapsed time
import timeit
def cart_product(list1, list2):
return (product(list1 , list2))
if __name__=="__main__":
a=[int(item) for item in input("Enter values for first list: ").split()]
b=[int(item) for item in input("Enter values for second list: ").split()]
START_TIME= timeit.default_timer()
if len(a)<30 and len(b)<30:
ans=cart_product(a,b)
for i in ans:
print(i, end=" ")
print("\nTime of execution is ", (timeit.default_timer() - START_TIME))
| true
|
53b19f07da4bae654cfcc6d262566f8fe5c04cb1
|
Python
|
DeadHeadRussell/iacd_projects
|
/interactive_map/csv_trim.py
|
UTF-8
| 479
| 3.0625
| 3
|
[] |
no_license
|
#!/usr/bin/python
import csv
input_name = 'usa_hotels.csv'
columns = ['id', 'latitude', 'longitude']
delimiter = '\t'
reader = csv.reader(file(input_name), delimiter=delimiter)
header = reader.next()
# indices of the wanted columns, in header order
column_ids = []
id = 0
for row in header:
if row in columns:
column_ids.append(id)
id += 1
print delimiter.join(columns)
for row in reader:
new_row = []
for id in column_ids:
if id < len(row):
new_row.append(row[id])
print delimiter.join(new_row)
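# A shorter alternative sketch (same Python 2 style, same input assumptions):
# keep the wanted columns by zipping each row with the header.
#
# for row in reader:
#     print delimiter.join(v for h, v in zip(header, row) if h in columns)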
| true | 0f2664fb0852800fc5bb86f8963b419bcfdd3597 | Python | ChinaChenp/Knowledge | /interview/interview_python/mianshixinde/lesson2/2.2.py | UTF-8 | 779 | 4.40625 | 4 | [] | no_license |
# Find two numbers that sum to a given value.
# Given an array and a number, find two numbers in the array whose sum is
# exactly that number. The required time complexity is O(N); if several pairs
# have the target sum, outputting any one pair is fine.
# Example: for the array 1, 2, 4, 7, 11, 15 and the number 15, since
# 4 + 11 = 15, the output is 4 and 11.
# Assumes the array is sorted in ascending order.
def two_number(arr, need):
beg, end = 0, len(arr) - 1
while beg < end:
#print(beg, end, arr[beg], arr[end])
total = arr[beg] + arr[end]
if total == need:
return arr[beg], arr[end]
elif total > need:
end -= 1
else:
beg += 1
return -1, -1
arr = [1, 2, 4, 7, 11, 15]
print(two_number(arr, 15))
print(two_number(arr, 20))
| true
|
2fb01107c035ce7e13d71ff8c89cbb1d1df594d1
|
Python
|
jtbish/ppl-da
|
/ppl/rng.py
|
UTF-8
| 256
| 2.59375
| 3
|
[] |
no_license
|
import numpy as np
_rng = np.random.RandomState()
_has_been_seeded = False
def seed_rng(seed):
seed = int(seed)
_rng.seed(seed)
global _has_been_seeded
_has_been_seeded = True
def get_rng():
assert _has_been_seeded
return _rng
| true
|
d52b78db0f093a05132c0044f7c7c41d7daf7a6a
|
Python
|
CateGitau/Python_programming
|
/ace_python_interview/sort_search/find_max_product.py
|
UTF-8
| 1,605
| 4.28125
| 4
|
[] |
no_license
|
'''
Implement a function find_max_prod(lst) that takes a list of numbers and returns a maximum product pair.
'''
# Decimal library to assign infinite numbers
from decimal import Decimal
#brute force approach
'''O(n^2)'''
def find_max_prod(lst):
"""
Finds the pair having maximum product in a given list
:param lst: A list of integers
:return: A pair of integers
"""
max_product = Decimal('-Infinity')
max_i = -1
max_j = -1
# compare by index so that equal values at different positions count as a
# valid pair (identity checks on ints are unreliable)
for i in range(len(lst)):
for j in range(len(lst)):
if i != j and max_product < lst[i] * lst[j]:
max_product = lst[i] * lst[j]
max_i = lst[i]
max_j = lst[j]
return max_i, max_j
#traversing the list once
def find_max_prod2(lst):
"""
Finds the pair having maximum product in a given list
:param lst: A list of integers
:return: A pair of integer
"""
max1 = lst[0]
max2 = Decimal('-Infinity')
min1 = lst[0]
min2 = Decimal('Infinity')
for number in lst:
if number > max1:
max2 = max1 # Second highest
max1 = number # First highest
elif number > max2:
max2 = number
if number < min1:
min2 = min1 # Second lowest
min1 = number # First lowest
elif number < min2:
min2 = number
# Checking which pair has the highest product
if max1 * max2 > min1 * min2:
return max2, max1
else:
return min2, min1
#Driver to test above code
if __name__ == '__main__':
lst = [1, 3, 5, 2, 6]
num1, num2 = find_max_prod2(lst)
print (num1, num2)
| true | 1aaea7829d891409223664cd2ffc97281ba0a80b | Python | whistle-boy/TIL-and-TIW | /python_nado_coding/practice11-3.py | UTF-8 | 2,510 | 3.34375 | 3 | [] | no_license |
####### installing packages with pip
# search for pypi on Google
# beautifulsoup4 4.8.2
# run `pip install beautifulsoup4` in a terminal
from bs4 import BeautifulSoup
soup = BeautifulSoup("<p>Some<b>bad<i>HTML", "html.parser")
print(soup.prettify())
# pip list : check the currently installed packages
# pip show beautifulsoup4 : show information about an installed package
# pip install --upgrade beautifulsoup4
# pip uninstall beautifulsoup4
####### built-in functions
# built in, so no separate import is needed
# input : reads input from the user
language = input("What language do you like? ")
print("{0} is a very good language!".format(language))
# dir : given an object, shows which variables and functions it has
import random  # external module
print(dir())
# ['BeautifulSoup', '__annotations__', '__builtins__', '__cached__', '__doc__',
#  '__file__', '__loader__', '__name__', '__package__', '__spec__',
#  '__warningregistry__', 'language', 'random', 'soup']
# note that random has been added to the list above
print(dir(random))
# shows what is available inside random, e.g. randint, randrange, sample, seed
# google "list of python builtins"
# to see the built-in functions available in Python
####### standard-library modules
# google "list of python modules"
# to see the library modules available in Python
# glob : list the folders / files in a path (like the Windows dir command)
import glob
print(glob.glob("*.py"))  # every file with the .py extension
# os : basic features provided by the operating system
# for example, creating and deleting folders
import os
print(os.getcwd())  # current directory
folder = "sample_dir"
if os.path.exists(folder):
    print("The folder already exists")
    os.rmdir(folder)  # delete the folder
    print(folder, "folder has been deleted")
else:
    os.makedirs(folder)  # create the folder
    print(folder, "folder has been created")
print(os.listdir())  # similar to glob
# time : time-related module
import time
print(time.localtime())
print(time.strftime("%Y-%m-%d %H:%M:%S"))
import datetime
print("Today's date is", datetime.date.today())
# timedelta : the interval between two dates
today = datetime.date.today()
td = datetime.timedelta(days=100)  # 100 days from today
print("The 100th day since we met is", today + td)  # 100 days after today
|
b41418bd45fda79cfee4042b22988f60137e434f
|
Python
|
sowrd299/SillyRobots
|
/localTextPlayerController.py
|
UTF-8
| 2,346
| 3.59375
| 4
|
[] |
no_license
|
from playerGameController import PlayerGameController
from boardTextDisplay import BoardTextDisplay
class LocalTextPlayerController(PlayerGameController):
'''
A player controller for local, human players
playing with a text-based interface
'''
card_chars_disp = "AbCdEFghIJklMNO" # characters to refer to cards
card_chars = card_chars_disp.upper()
end_turn_command = "T"
prompt = "~> "
def __init__(self, game, player_ind):
super().__init__(game, player_ind)
self.disp = BoardTextDisplay()
# DISPLAY FUNCTIONS
def disp_board(self):
hand_label = lambda i, _ : self.card_chars_disp[i]
for line in self.disp.disp(self.game, self.player_ind, hand_label = hand_label):
print("\t",line)
# TRANSITIONS
def hotseat_transition(player_controller, player):
# yes self is misnamed
prompt = player_controller.prompt
name = player.get_name()
prompt_str = "\n" * 100 + "{0}Here starts {1}'s next turn.\n{0}[ENTER]".format(prompt, name)
input(prompt_str)
def no_transition(self, _):
pass
# GAME CONTROL FUNCTIONS
def _take_actions(self):
# print a bunch of stuff
print(("\n"*5) + ("V"*100) + ("\n"*3)) # spacer between turns
self.disp_board()
print("\n"*2)
# TODO: input validation
# get the player's action
while True:
input_text = input(self.prompt+"[T] to end turn / Card [A...] and Position [1-4] to deploy: ").upper()
# handle end of turn
try:
if input_text == self.end_turn_command:
return True
# play cards
else:
card = self.card_chars.index(input_text[0])
pos = int(input_text[1])-1
if self.may_play_card(card, pos):
self.play_card(card, pos)
print(self.prompt+"Deploy sucessful")
return False
else:
print(self.prompt+"What nonsense is this? You can't put that there!")
except (ValueError, IndexError):
print(self.prompt+"Huh? What did you say?") # basic error handling
| true
|
cdf17767876180682da2be56aaeba4fd3c39d24b
|
Python
|
jstrasburger/Tweet_Sentiment_Analysis
|
/team.py
|
UTF-8
| 4,811
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
import dash_html_components as html
import dash_bootstrap_components as dbc
import os
import base64
### IMAGES ###
j_strasburger = "static/img/j_strasburger.jpg" # replace with your own image
jack_image = base64.b64encode(open(j_strasburger, 'rb').read())
luis_v = "static/img/luis_v.jpeg" # replace with your own image
luis_image = base64.b64encode(open(luis_v, 'rb').read())
sarah_z = "static/img/sarah_z.jpg" # replace with your own image
sarah_image = base64.b64encode(open(sarah_z, 'rb').read())
chris_k = "static/img/chris_k.jpg" # replace with your own image
chris_image = base64.b64encode(open(chris_k, 'rb').read())
murat_o = "static/img/murat_o.jpg" # replace with your own image
murat_image = base64.b64encode(open(murat_o, 'rb').read())
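# Note: the base64 strings above are computed but never used; the cards below
# pass the raw file paths to CardImg instead. A hedged sketch (hypothetical
# variable name) of the standard Dash data-URI pattern for the encoded bytes:
jack_data_uri = "data:image/jpg;base64,{}".format(jack_image.decode())
# ...which could then be wired in as dbc.CardImg(src=jack_data_uri, top=True)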
### BIOS ###
jack_bio = 'An avid outdoor adventurer, pilot and data analyst, Jack has spent many years refining his skills in data science. Jack works at General Motors in Austin, Texas.'
luis_bio = 'A fantastic musician, data scientist, and friend to all - Luis can be found spending his time pondering complex algorithms, writing new songs, and supporting the team. '
sarah_bio = 'Sarah leads with kindness and technological expertise; she specializes in Marketing Analytics and Statistics. Sarah works at Bolt in San Francisco, California.'
chris_bio = 'Our social media analytics specialist, Chris got his start in esports and analytics as a self-made entrepreneur. Chris works at General Motors in Austin, Texas.'
murat_bio = 'Murat is critical to our framework and web design. He is an active alumnus of Tulane University, where he graduated cum laude. Murat works at American First Finance.'
company_description = """Tweety Hunter is an application developed as part of an Applied Machine Learning course at Tulane University. The Tweety Hunter technology can be applied to inform marketing teams in a wide range of industries. The application is a Dash application, utilizes a Flask framework, and is written exclusively in Python. Much of the back-end processing runs on Amazon Web Services (AWS). We encourage future Tulane students and others to contact us with any questions."""
### LETS CODE SOME STYLED CARDS HERE ###
jack_card = dbc.Card(
[
dbc.CardImg(src=j_strasburger, top=True, style={'height':'100%', 'width':'100%'}),
dbc.CardBody(
[
html.H4("Jack Strasburger", className="card-title"),
html.P(jack_bio,className="card-text",
),
# dbc.Button("Jack' Linkedin", color="primary"),
]
)
]
)
luis_card = dbc.Card(
[
dbc.CardImg(src=luis_v, top=True, style={'height':'100%', 'width':'100%'}),
dbc.CardBody(
[
html.H4("Luis Villaseรฑor", className="card-title"),
html.P(luis_bio,className="card-text",
),
# dbc.Button("Luis' Linkedin", color="primary"),
]
)
]
)
sarah_card = dbc.Card(
[
dbc.CardImg(src=sarah_z, top=True, style={'height':'100%', 'width':'100%'}),
dbc.CardBody(
[
html.H4("Sarah Zimmerman", className="card-title"),
html.P(sarah_bio,className="card-text",
),
# dbc.Button("Sarah's Linkedin", color="primary"),
]
)
]
)
chris_card = dbc.Card(
[
dbc.CardImg(src=chris_k, top=True, style={'height':'100%', 'width':'100%'}),
dbc.CardBody(
[
html.H4("Chris Kornaros", className="card-title"),
html.P(chris_bio,className="card-text",
),
# dbc.Button("Chris' Linkedin", color="primary"),
]
)
]
)
murat_card = dbc.Card(
[
dbc.CardImg(src=murat_o, top=True, style={'height':'100%', 'width':'100%'}),
dbc.CardBody(
[
html.H4("Murat Ogeturk", className="card-title"),
html.P(murat_bio,className="card-text",
),
# dbc.Button("Murat's Linkedin", color="primary"),
]
)
]
)
### HTML PAGE LAYOUT ###
layout = html.Div([
dbc.Container([
dbc.Row([
dbc.Col([
html.H2("About Us"),
html.P(company_description),],className="mx-2 my-2")
]),
dbc.Row([
dbc.Col(html.H2("Team"), className="mx-2 my-2")
]),
dbc.Row([
dbc.Col(dbc.Card(jack_card), md=4),
dbc.Col(dbc.Card(sarah_card), md=4),
dbc.Col(dbc.Card(murat_card), md=4)
]),
dbc.Row([
dbc.Col(dbc.Card(chris_card), md=4),
dbc.Col(dbc.Card(luis_card), md=4)
], justify="center", className="mx-2 my-2")
])
])
| true
|
23b8b3401a55afae68248e9bd0fe7fe0564ecc29
|
Python
|
LikeLionSCH/9th_ASSIGNMENT
|
/20_์ด์๋น/session03/3.py
|
UTF-8
| 735
| 4.25
| 4
|
[
"MIT"
] |
permissive
|
prices = []  # create an empty list (renamed from 'list' to avoid shadowing the builtin)
total = 0  # running total (renamed from 'sum' to avoid shadowing the builtin)
for i in range(7):  # read the prices of 7 products in total
    print(i, end="")
    n = int(input("th product price: "))
    total += n  # add the entered price to the running total
    prices.append(n)  # append n to the end of the list
print(prices)
# When buying every product is not possible
price = int(input("Enter the money you have >> "))
if price < total:
    print("You don't have enough money. ", end="")
    i = 0
    while True:
        price -= prices[i]  # subtract the list elements from price one by one, starting with the first
        if price < 0:  # leave the loop once price drops below 0
            break
        i += 1
    print(f"You can buy {i} products.")
| true
|
72b879f2a4c4d5d89d43a0d819d143f732606264
|
Python
|
rkhal101/Thesis-Test-Results
|
/wapiti/wavsep/configured/report/extract-urls.py
|
UTF-8
| 1,080
| 2.578125
| 3
|
[] |
no_license
|
import itertools
input_file_loc = "/Users/ranakhalil/Desktop/git/Thesis/results/wapiti/wavsep/configured/report/wapiti-wavsep-configured.txt"
output_file_loc = "/Users/ranakhalil/Desktop/git/Thesis/results/wapiti/wavsep/wapiti-wavsep-configured.csv"
lines = []
new_vuln_string = "********************************************************************************"
new_vuln_url="cURL command PoC"
with open(input_file_loc, 'r+') as in_file:
i = 0
for line in in_file:
try:
            while i < 19:  # skip the first 19 lines of the report
                line = next(in_file)
                i = i + 1
            if new_vuln_string in line:
                next(in_file)
                line = next(in_file)
                lines.append(line)
            if new_vuln_url in line:
                # keep only the URL: drop the leading 'cURL command PoC : "curl '
                # prefix (26 characters) and everything from '.jsp' onward
                lines.append(line[26:line.find('.jsp')])
except StopIteration:
pass
with open(output_file_loc, 'w') as out_file:
for elem in lines:
out_file.write(elem + "\n")
"""
lines_seen = set()
with open(output_file_loc, 'w') as out_file:
for elem in lines:
if elem not in lines_seen:
out_file.write(elem + "\n")
lines_seen.add(elem)
"""
| true
|
41b33f6d91698868bb44727224c92d3d340743c6
|
Python
|
taoste/dirtysalt
|
/codes/leetcode/combinations.py
|
UTF-8
| 622
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# coding:utf-8
# Copyright (C) dirlt
class Solution(object):
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
res = []
def f(idx, r):
if len(r) == k:
res.append(r[:])
return
rest = k - len(r)
for i in range(idx, n - rest + 1):
r.append(i + 1)
f(i + 1, r)
r.pop()
r = []
f(0, r)
return res
if __name__ == '__main__':
s = Solution()
print s.combine(4, 2)
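# For reference, the standard library yields the same k-combinations
# (a hedged equivalence note, not part of the original solution):
#
#     from itertools import combinations
#     print [list(c) for c in combinations(range(1, 4 + 1), 2)]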
| true
|
930110825171484f4635326371b7d3e0301eaf4e
|
Python
|
DruiadinMonk/OHLC_Candles
|
/OHLC.py
|
UTF-8
| 777
| 4.1875
| 4
|
[] |
no_license
|
# Creating a candle (OHLC) price chart.
import random
prices_1 = [1.0000]
prices_2 = []
base_1 = 100
base_2 = 10
# Generate a random walk of prices with steps in the fourth decimal place (1/10000s).
for x in range(100):
r1 = random.randint(-5, 5) / 10000
r2 = round(r1 + prices_1[x], 4)
prices_1.append(r2)
# 2. Second loop: group the prices into candles and compute OHLC values.
a = 0  # index of the first price in the current candle (MIN)
b = 9  # index of the last price in the current candle (MAX)
for x in range(10):
    # Set OHLC values; the slice end is b + 1 so the close is included in the high/low.
    o = prices_1[a]             # open  = first price of the candle
    h = max(prices_1[a:b + 1])  # high  = highest price in the candle
    l = min(prices_1[a:b + 1])  # low   = lowest price in the candle
    c = prices_1[b]             # close = last price of the candle
    ohlc = [o, h, l, c]
    prices_2.append(ohlc)
    # Move the range over by 10
    a += 10
    b += 10
print(prices_1)
print(' ')
print(prices_2)
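# An equivalent sketch (not in the original) that derives the candle bounds
# from the list length instead of hand-maintained counters:
#
#     for a in range(0, len(prices_1) - 1, 10):
#         chunk = prices_1[a:a + 10]
#         prices_2.append([chunk[0], max(chunk), min(chunk), chunk[-1]])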
| true
|
a90de5a7f58af22ee5a75b2bd64af7dae466d9f9
|
Python
|
pankas87/Project-Euler---Python
|
/Problem-6/run.py
|
UTF-8
| 252
| 3.21875
| 3
|
[] |
no_license
|
edge = 100
total = edge * (edge + 1) // 2  # closed form for 1 + 2 + ... + n (avoids shadowing the builtin 'sum')
sum_squares = (2 * edge + 1) * (edge + 1) * edge // 6  # closed form for 1^2 + 2^2 + ... + n^2
print('sum', total)
print('sum_squared', total * total)
print('sum_squares', sum_squares)
print('sum_squared - sum_squares', total * total - sum_squares)
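# A quick brute-force check of the two closed forms above (a sketch, not part
# of the original solution):
assert total == sum(range(1, edge + 1))
assert sum_squares == sum(i * i for i in range(1, edge + 1))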
| true
|
c375be29d567426312ab5b8c4297d9abb0549117
|
Python
|
korz/ml-in-csharp
|
/src/Python/xor.py
|
UTF-8
| 1,254
| 3.03125
| 3
|
[] |
no_license
|
import numpy as np
from keras.models import Sequential
from keras.optimizers import Adam
from keras.layers.core import Dense
from keras.models import model_from_json
from keras import backend as K
def main():
    # Get training data ('inputs'/'outputs' avoid shadowing the builtin 'input')
    inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    outputs = np.array([0, 1, 1, 0])
#Create Model Structure
model = Sequential()
model.add(Dense(2))
model.add(Dense(32, activation= "relu"))
model.add(Dense(64, activation= "relu"))
model.add(Dense(1, activation= "sigmoid"))
model.compile(optimizer="sgd", loss="binary_crossentropy", metrics=["accuracy"])
#Train model with the data
    model.fit(inputs, outputs, batch_size=2, epochs=1_000, verbose=1)
#Make Prediction
print("[%i, %i] = %i" % (0,0, Predict(model, 0,0)))
print("[%i, %i] = %i" % (0,1, Predict(model, 0,1)))
print("[%i, %i] = %i" % (1,0, Predict(model, 1,0)))
print("[%i, %i] = %i" % (1,1, Predict(model, 1,1)))
def Predict(model: Sequential, p: int, q: int) -> int:
prediction = model.predict(np.array([[p, q]]))
predictedValue = prediction[0][0]
roundedPredictedValue = int(round(predictedValue))
return roundedPredictedValue
if __name__ == "__main__":
main()
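# A hedged sketch of what the unused model_from_json import above is for:
# serializing the architecture to JSON and rebuilding it, with the weights
# handled separately. Shown as comments only; the file name is hypothetical.
#
#     json_config = model.to_json()
#     rebuilt = model_from_json(json_config)
#     model.save_weights("xor_weights.h5")
#     rebuilt.load_weights("xor_weights.h5")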
| true
|
57ef705fe19007ceb2a046933eaeb3f9ecd509ee
|
Python
|
martey/django-redis-cache
|
/tests/benchmark.py
|
UTF-8
| 2,779
| 3.015625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
"""
A quick and dirty benchmarking script. GitPython is an optional dependency
which you can use to change branches via the command line.
Usage::
python benchmark.py
python benchmark.py master
python benchmark.py some-branch
"""
import os
import sys
from time import time
from django.core import cache
from hashlib import sha1 as sha
try:
from git import Repo
except ImportError:
pass
else:
if len(sys.argv) > 1:
repo_path = os.path.dirname(__file__)
repo = Repo(repo_path)
repo.branches[sys.argv[1]].checkout()
print "Testing %s" % repo.active_branch
def h(value):
return sha(str(value)).hexdigest()
class BenchmarkRegistry(type):
def __init__(cls, name, bases, attrs):
if not hasattr(cls, 'benchmarks'):
cls.benchmarks = []
else:
cls.benchmarks.append(cls)
class Benchmark(object):
__metaclass__ = BenchmarkRegistry
def setUp(self):
pass
def tearDown(self):
pass
def timetrial(self):
self.setUp()
start = time()
self.run()
t = time() - start
self.tearDown()
return t
def run(self):
pass
@classmethod
def run_benchmarks(cls):
for benchmark in cls.benchmarks:
benchmark = benchmark()
print benchmark.__doc__
print "Time: %s" % (benchmark.timetrial())
class GetAndSetBenchmark(Benchmark):
"Settings and Getting Mixed"
def setUp(self):
self.cache = cache.get_cache('default')
self.values = {}
for i in range(30000):
self.values[h(i)] = i
self.values[h(h(i))] = h(i)
def run(self):
for k, v in self.values.items():
self.cache.set(k, v)
for k, v in self.values.items():
value = self.cache.get(k)
class IncrBenchmark(Benchmark):
"Incrementing integers"
def setUp(self):
self.cache = cache.get_cache('default')
self.values = {}
self.ints = []
self.strings = []
for i in range(30000):
self.values[h(i)] = i
self.values[h(h(i))] = h(i)
self.ints.append(i)
self.strings.append(h(i))
def run(self):
for i in self.ints:
self.cache.incr(h(i), 100)
class MsetAndMGet(Benchmark):
"Getting and setting many mixed values"
def setUp(self):
self.cache = cache.get_cache('default')
self.values = {}
for i in range(30000):
self.values[h(i)] = i
self.values[h(h(i))] = h(i)
def run(self):
self.cache.set_many(self.values)
value = self.cache.get_many(self.values.keys())
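# Note: thanks to the BenchmarkRegistry metaclass, any further Benchmark
# subclass defined above this point is registered automatically; e.g. a
# hypothetical sketch:
#
#     class DeleteManyBenchmark(Benchmark):
#         "Deleting many keys"
#         def setUp(self):
#             self.cache = cache.get_cache('default')
#         def run(self):
#             self.cache.delete_many([h(i) for i in range(30000)])
#
# would run with no extra wiring when Benchmark.run_benchmarks() is called.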
if __name__ == "__main__":
Benchmark.run_benchmarks()
| true
|