| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12-1.05M | stringlengths 5-86 | stringlengths 4-191 | stringclasses 1 value | stringclasses 15 values | int32 12-1.05M | listlengths 1-23 | stringlengths 64-64 |
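The schema above describes one source-file record per row: the raw file contents (text), its origin (repo_name, path), language, license, file size, a list of detected keywords, and a content hash. As a quick orientation, here is a minimal sketch of loading and filtering such a dump with the Hugging Face datasets library; the dataset path is a hypothetical placeholder, not the real identifier.

```python
# Minimal sketch: load a dump with this schema and filter by keyword.
# NOTE: "user/python-code-keywords" is a hypothetical placeholder path,
# not the actual dataset identifier.
from datasets import load_dataset

ds = load_dataset("user/python-code-keywords", split="train")

# Column names follow the schema above: repo_name, path, license, size, keyword, text_hash.
bwa_rows = ds.filter(lambda row: "BWA" in row["keyword"])

for row in bwa_rows.select(range(min(3, len(bwa_rows)))):
    print(row["repo_name"], row["path"], row["size"], row["license"])
```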
import subprocess
import os
import click
import yaml
from dgl import library_constants
from Bio import AlignIO
@click.group()
def cli():
pass
#@cli.command()
#@click.argument('embl_gtf')
def parse_gtf(embl_gtf):
"""
organise embl gtf into genes
output in yaml format
"""
with open(embl_gtf) as file:
contents = file.readlines()
anns = [line.strip().split(
'\t') for line in contents if line.startswith('#') == False]
strand = {'+':'1',
'-':'-1'}
gene_dict = {}
for ann in anns:
info_parsed = ann[8].split('; ')
info_dict = {}
for info in info_parsed:
info_dict[info.split(' "')[0]] = info.split('"')[1]
ann_info = [ann[2], # annotation type
ann[0], # chr
int(ann[3]), # start
int(ann[4]), # end
strand[ann[6]], # strand
info_dict] # details
if ann[2] == 'gene':
gene_dict[info_dict['gene_id']] = ann_info
elif ann[2] == 'transcript':
gene_dict[info_dict['gene_id'] + '|' +
info_dict['transcript_id']] = ann_info
elif ann[2] == 'exon':
gene_dict[info_dict['gene_id'] + '|' +
info_dict['exon_id']] = ann_info
elif ann[2] == 'CDS':
gene_dict[info_dict['gene_id'] + '|' +
info_dict['protein_id'] + '|' +
info_dict['exon_number']] = ann_info
return gene_dict
@cli.command()
@click.argument('embl_gtf')
@click.argument('fasta_prefix')
@click.argument('nt')
@click.argument('type')
@click.argument('biodata')
@click.argument('chromosome_constant')
def get_ann_seqs_fasta(embl_gtf, fasta_prefix, nt, type, biodata, chromosome_constant):
"""
get annotation sequences and output as fasta
nt = range of nts (a-b)
type = gene, transcript, exon or CDS
"""
chromosomes = getattr(library_constants, chromosome_constant)
if embl_gtf.endswith('.gtf'):
ann_dict = parse_gtf(embl_gtf)
elif embl_gtf.endswith('.yaml'):
with open(embl_gtf, 'r') as file:
contents = file.read()
ann_dict = yaml.load(contents)
for chr in chromosomes:
f=open(fasta_prefix + chr + '.fasta', 'w')
f.close()
with open(biodata + chr + '.fasta') as file:
sequence = file.read()
chr_fasta = sequence.strip().split('\n')[1]
for gene_id in ann_dict:
if ann_dict[gene_id][1] == chr:
if ann_dict[gene_id][0] == type:
length = ann_dict[gene_id][3] - ann_dict[gene_id][2]
nt_range = nt.split('-')
if int(nt_range[0]) <= length:
if int(nt_range[1]) >= length:
with open(fasta_prefix + chr + '.fasta', 'a') as file:
file.write('>' + gene_id + '\n')
file.write(
chr_fasta[ann_dict[gene_id][2]-1:
ann_dict[gene_id][3]] + '\n')
@cli.command()
@click.argument('index_prefix')
@click.argument('fasta_prefix')
@click.argument('chromosome_constant')
@click.argument('fasta_number', default='many')
def run_bwa_mem(index_prefix, fasta_prefix, chromosome_constant, fasta_number):
"""
run bwa-mem
"""
chromosomes = getattr(library_constants, chromosome_constant)
if fasta_number != '1':
for chr in chromosomes:
outfile = open(fasta_prefix + chr + '.sam', 'w')
subprocess.check_call([
'bwa', 'mem',
'-k', '10', # minimum seed length
'-a', # show all alignments
index_prefix + chr,
fasta_prefix + chr + '.fasta'],
stdout=outfile)
else:
for chr in chromosomes:
outfile = open(fasta_prefix + chr + '.sam', 'w')
subprocess.check_call([
'bwa', 'mem',
'-k', '10', # minimum seed length
'-a', # show all alignments
index_prefix + chr,
fasta_prefix + '.fasta'],
stdout=outfile)
@cli.command()
@click.argument('fasta_in')
@click.argument('index_prefix')
@click.argument('mm')
@click.argument('chromosome_constant')
@click.argument('fasta_number')
@click.argument('five_prime_trim', default='0')
def run_bowtie(fasta_in, index_prefix, mm, chromosome_constant, fasta_number, five_prime_trim):
"""
use bowtie with mm mismatches to find sequences
"""
chromosomes = getattr(library_constants, chromosome_constant)
if fasta_number != '1':
for chr in chromosomes:
subprocess.call(
['bowtie', index_prefix + chr, # specify index
'-f', fasta_in + chr + '.fasta', # input is fasta
'-S', # output is sam
'-n', str(mm), # allow mismatches
'-p', '8', # parallel search threads
'-k', '10', # allow 10 alignments per read
fasta_in + chr + '.sam'])
else:
for chr in chromosomes:
subprocess.call(
['bowtie', index_prefix + chr, # specify index
'-f', fasta_in + '.fasta', # input is fasta
'-S', # output is sam
'-n', str(mm), # allow mismatches
'-p', '8', # parallel search threads
'-k', '10', # allow 10 alignments per read
fasta_in + chr + '.sam'])
@cli.command()
@click.argument('fasta_prefix')
@click.argument('embl_gtf')
@click.argument('nt')
@click.argument('outfile')
@click.argument('chromosome_constant')
def confirm_features(fasta_prefix, embl_gtf, nt, outfile, chromosome_constant):
"""
find unique alignments from bwa output.
outfile = new, multi or not_found
nt = range of nts (a-b)
"""
chromosomes = getattr(library_constants, chromosome_constant)
if embl_gtf.endswith('.gtf'):
ann_dict = parse_gtf(embl_gtf)
elif embl_gtf.endswith('.yaml'):
with open(embl_gtf, 'r') as file:
contents = file.read()
ann_dict = yaml.load(contents)
strand = {'+':'1',
'-':'-1',
'0':'1',
'16':'-1'}
aln_dict = {}
for chr in chromosomes:
with open(fasta_prefix + chr + '.sam', 'r') as file:
contents = file.readlines()
sam_lines = [line.strip().split('\t') for line in contents \
if line.startswith("@") == False]
for aln in sam_lines:
if aln[1] == '0':
details = {'chr':chr,
'start':int(aln[3]) - 1,
'end':int(aln[3]) + len(aln[9]),
'strand':ann_dict[aln[0]][4]
}
if aln_dict.get(aln[0]):
aln_dict[aln[0]].append(details)
else:
aln_dict[aln[0]] = [details]
multi = {}
new_anns = {}
for feature_id in aln_dict:
if len(aln_dict[feature_id]) > 1:
multi[feature_id] = ann_dict[feature_id]
elif len(aln_dict[feature_id]) == 1:
new_anns[feature_id] = aln_dict[feature_id][0]
if outfile == 'new':
print yaml.dump(new_anns)
elif outfile == 'multi':
print yaml.dump(multi)
elif outfile == 'not_found':
not_found = {}
for feature_id in ann_dict:
if aln_dict.get(feature_id):
continue
else:
if ann_dict[feature_id][0] == 'exon':
nt_range = nt.split('-')
length = ann_dict[feature_id][3] - ann_dict[feature_id][2]
if int(nt_range[0]) <= length:
if int(nt_range[1]) >= length:
not_found[feature_id] = ann_dict[feature_id]
print yaml.dump(not_found)
@cli.command()
@click.argument('mapped')
@click.argument('exon_number_file')
def check_gene_representation(mapped, exon_number_file):
"""
identify which genes are lacking representation by mapped exons
"""
with open('/home/neil/deskgen_projects/rat_genome/Rnor_gene_list.txt', 'r') as file:
contents = file.read()
genes = contents.strip().split('\n')
with open(mapped, 'r') as file:
contents = file.read()
all_mapped = yaml.load(contents)
gene_rep = {}
for gene in genes:
n = 0
for exon in all_mapped:
if exon.startswith(gene):
n += 1
gene_rep[gene] = [n]
with open(exon_number_file, 'r') as file:
contents = file.readlines()
exon_numbers = {i.strip().split('\t')[0]:[i.strip().split('\t')[1]] for i in contents}
for gene in exon_numbers:
exon_numbers[gene].append(str(gene_rep[gene][0]))
exon_numbers[gene].append(str(int(exon_numbers[gene][0])-int(exon_numbers[gene][1])))
exon_numbers[gene].append(str(float(exon_numbers[gene][1])/float(exon_numbers[gene][0])))
lines=[]
for i in exon_numbers:
lines.append(i + '\t' + '\t'.join(exon_numbers[i]))
with open('exon_representation.txt','w') as file:
file.write('\n'.join(lines))
@cli.command()
def count_exons():
with open('/home/neil/biodata/embl_annotations/Rattus_norvegicus.Rnor_5.0.79.gtf','r') as file:
contents = file.readlines()
anns = [line.strip().split('\t') for line in contents if line.startswith('#') == False]
with open('/home/neil/deskgen_projects/rat_genome/Rnor_gene_list.txt', 'r') as file:
contents = file.read()
genes = contents.strip().split('\n')
exon_no = {}
for gene in genes:
n = 0
for ann in anns:
if ann[2] == 'exon':
if ann[8].find(gene) > -1:
n += 1
exon_no[gene] = n
lines = []
for i in exon_no:
lines.append(i + '\t' + str(exon_no[i]))
with open('exon_numbers.txt', 'w') as file:
file.write('\n'.join(lines))
@cli.command()
@click.argument('exon_rep_file')
@click.argument('mapped')
@click.argument('not_mapped')
@click.argument('list_out')
def get_exon_lists(exon_rep_file, mapped, not_mapped, list_out):
"""
list_out = complete, unrepresented, incomplete_yes or incomplete_no
"""
with open(exon_rep_file, 'r') as file:
contents = file.readlines()
exon_details = [line.strip().split('\t') for line in contents]
with open(mapped, 'r') as file:
contents = file.read()
mapped_dict = yaml.load(contents)
with open(not_mapped, 'r') as file:
contents = file.read()
not_mapped_dict = yaml.load(contents)
complete = {}
incomplete_yes = {}
incomplete_no = {}
unrepresented = {}
if list_out == 'complete':
for gene in exon_details:
if gene[1] == gene[2]:
for exon in mapped_dict:
if exon.startswith(gene[0]):
complete[exon] = mapped_dict[exon]
print yaml.dump(complete)
if list_out == 'unrepresented':
for gene in exon_details:
if gene[2] == '0':
for exon in not_mapped_dict:
if exon.startswith(gene[0]):
unrepresented[exon] = not_mapped_dict[exon]
print yaml.dump(unrepresented)
if list_out == 'incomplete_yes':
for gene in exon_details:
if gene[2] != '0' and gene[1] != gene[2]:
for exon in mapped_dict:
if exon.startswith(gene[0]):
incomplete_yes[exon] = mapped_dict[exon]
print yaml.dump(incomplete_yes)
if list_out == 'incomplete_no':
for gene in exon_details:
if gene[2] != '0' and gene[1] != gene[2]:
for exon in not_mapped_dict:
if exon.startswith(gene[0]):
incomplete_no[exon] = not_mapped_dict[exon]
print yaml.dump(incomplete_no)
@cli.command()
@click.argument('complete_list')
@click.argument('gene_list')
@click.argument('embl_gtf')
def check_complete_genes(complete_list, gene_list, embl_gtf):
"""
confirm that the order of exons is correct on the same
chromosome for complete genes
"""
with open(gene_list, 'r') as file:
contents = file.read()
genes = contents.strip().split('\n')
with open(complete_list, 'r') as file:
contents = file.read()
complete = yaml.load(contents)
ann_dict = parse_gtf(embl_gtf)
incorrect = {}
for gene in genes:
gene_exons = {exon_id:complete[exon_id] \
for exon_id in complete \
if exon_id.startswith(gene)}
gtf_exons = {exon_id:ann_dict[exon_id] \
for exon_id in ann_dict \
if exon_id.startswith(gene)}
# organise exons into transcripts
transcripts = {}
for exon in gene_exons:
transcript_id = gtf_exons[exon][5]['transcript_id']
if transcripts.get(transcript_id):
pass
else:
transcripts[transcript_id] = {}
transcripts[transcript_id][gtf_exons[exon][5]['exon_number']] = [
gene_exons[exon]['chr'],
gene_exons[exon]['start'],
gene_exons[exon]['end'],
gene_exons[exon]['strand'],
exon]
# check if exons are in correct order for each transcript
for transcript in transcripts:
exon_nos = [int(i) for i in transcripts[transcript]]
exon_nos.sort()
t_strand = transcripts[transcript][str(exon_nos[0])][3]
t_chr = transcripts[transcript][str(exon_nos[0])][0]
if t_strand == '1':
for exon_n in exon_nos[1:]:
if transcripts[transcript][str(exon_n)][0] == t_chr:
if transcripts[transcript][str(exon_n)][3] == t_strand:
if transcripts[transcript][str(exon_n -1)][2] < transcripts[transcript][str(exon_n)][1]:
continue
else:
incorrect[transcripts[transcript][str(exon_n)][4]] = gene_exons[transcripts[transcript][str(exon_n)][4]]
elif t_strand == '-1':
for exon_n in exon_nos[:len(exon_nos)-1]:
if transcripts[transcript][str(exon_n)][0] == t_chr:
if transcripts[transcript][str(exon_n)][3] == t_strand:
if transcripts[transcript][str(exon_n)][2] > transcripts[transcript][str(exon_n+1)][1]:
continue
else:
incorrect[transcripts[transcript][str(exon_n)][4]] = gene_exons[transcripts[transcript][str(exon_n)][4]]
print yaml.dump(incorrect)
def get_complete_gene_list(complete_list, incorrect_list):
with open(complete_list, 'r') as file:
contents = file.read()
complete = yaml.load(contents)
gene_complete = set([exon_id.split('|')[0] for exon_id in complete])
with open(incorrect_list, 'r') as file:
contents = file.read()
incorrect = yaml.load(contents)
gene_incorrect = set([exon_id.split('|')[0] for exon_id in incorrect])
complete_genes = []
for gene in gene_complete:
if gene in gene_incorrect:
continue
else:
complete_genes.append(gene)
with open('complete_gene_list.txt', 'w') as file:
file.write('\n'.join(complete_genes))
@cli.command()
@click.argument('complete_gene_list')
@click.argument('complete_exons')
@click.argument('embl_gtf')
def get_cdss_from_exons(complete_gene_list, complete_exons, embl_gtf):
"""
map CDSs by comparing to exon positions:
if both boundaries are exactly the same, they are middle exons;
otherwise they are start or end exons and can be adjusted
"""
with open(complete_exons, 'r') as file:
contents = file.read()
complete = yaml.load(contents)
ann_dict = parse_gtf(embl_gtf)
with open(complete_gene_list, 'r') as file:
contents = file.read()
genes = contents.strip().split('\n')
cdss = {}
not_same = []
for gene in genes:
gene_exons = {exon_id:complete[exon_id] \
for exon_id in complete \
if exon_id.startswith(gene)}
gtf_cdss = {exon_id:ann_dict[exon_id] \
for exon_id in ann_dict \
if exon_id.startswith(gene + '|ENSRNOP')}
gtf_exons = {exon_id:ann_dict[exon_id] \
for exon_id in ann_dict \
if exon_id.startswith(gene + '|ENSRNOE')}
for exon in gene_exons:
exon_start = gtf_exons[exon][2]
exon_end = gtf_exons[exon][3]
transcript = gtf_exons[exon][5]['transcript_id']
for cds in gtf_cdss:
if transcript == gtf_cdss[cds][5]['transcript_id']:
cds_start = gtf_cdss[cds][2]
cds_end = gtf_cdss[cds][3]
# middle exon
if cds_start == exon_start and cds_end == exon_end:
cdss[cds] = [
gene_exons[exon]['chr'],
gene_exons[exon]['start'],
gene_exons[exon]['end'],
gene_exons[exon]['strand']]
# 5' exon
elif cds_start > exon_start and cds_end == exon_end:
cdss[cds] = [
gene_exons[exon]['chr'],
gene_exons[exon]['end'] - (exon_end-exon_start+1),
gene_exons[exon]['end'],
gene_exons[exon]['strand']]
# 3' exon
elif cds_start == exon_start and cds_end < exon_end:
cdss[cds] = [
gene_exons[exon]['chr'],
gene_exons[exon]['start'],
gene_exons[exon]['start'] + (exon_end-exon_start+1),
gene_exons[exon]['strand']]
#with open('no_matching_CDS.txt', 'w') as file:
# file.write('\n'.join(not_same))
print yaml.dump(cdss)
@cli.command()
@click.argument('incomplete_list')
@click.argument('embl_gtf')
@click.argument('biodata_prefix_fischer')
@click.argument('biodata_prefix_rnor')
@click.argument('chromosome_constant')
def align_transcripts(incomplete_list, embl_gtf, biodata_prefix_fischer, biodata_prefix_rnor, chromosome_constant):
"""
get hypothetical transcripts to search for other exons
"""
with open(incomplete_list, 'r') as file:
contents = file.read()
incomplete = yaml.load(contents)
ann_dict = parse_gtf(embl_gtf)
genes = set([id.split('|')[0] for id in incomplete])
new_exon_ann = {}
transcripts = {}
for gene in genes:
gene_exons = {exon_id:incomplete[exon_id] \
for exon_id in incomplete \
if exon_id.startswith(gene)}
gtf_exons = {exon_id:ann_dict[exon_id] \
for exon_id in ann_dict \
if exon_id.startswith(gene + '|ENSRNOE')}
gtf_transcripts = {exon_id:ann_dict[exon_id] \
for exon_id in ann_dict \
if exon_id.startswith(gene + '|ENSRNOT')}
for exon in gene_exons:
tranny = gene + '|' + gtf_exons[exon][5]['transcript_id']
if transcripts.get(tranny) == None:
start_diff = gtf_exons[exon][2] - gtf_transcripts[tranny][2] + 1000
end_diff = gtf_transcripts[tranny][3] - gtf_exons[exon][3] + 1000
transcripts[tranny] = {'chr':gtf_transcripts[tranny][1],
'start_Fischer':gene_exons[exon]['start'] - start_diff,
'end_Fischer':gene_exons[exon]['end'] + end_diff,
'start_Rnor':gtf_transcripts[tranny][2],
'end_Rnor':gtf_transcripts[tranny][3],
'strand':gtf_transcripts[tranny][4],
'exon':exon}
chromosomes = getattr(library_constants, chromosome_constant)
for chr in chromosomes:
with open(biodata_prefix_fischer + chr + '.fasta') as file:
sequence = file.read()
chr_fasta_Fischer = sequence.strip().split('\n')[1]
with open(biodata_prefix_rnor + chr + '.fasta') as file:
sequence = file.read()
chr_fasta_Rnor = sequence.strip().split('\n')[1]
chr_transcripts = {t:transcripts[t] for t in transcripts \
if transcripts[t]['chr'] == chr}
for trans in chr_transcripts:
with open('transcript_sequences_Fischer.fasta', 'w') as file:
file.write('>' + '|'.join([trans,
chr,
str(transcripts[trans]['start_Fischer']),
str(transcripts[trans]['end_Fischer']),
transcripts[trans]['strand'],
transcripts[trans]['exon'],
'Fischer']) + '\n')
file.write(chr_fasta_Fischer[transcripts[trans]['start_Fischer']:
transcripts[trans]['end_Fischer']] + '\n')
with open('transcript_sequences_Rnor.fasta', 'w') as file:
file.write('>' + '|'.join([trans,
chr,
str(transcripts[trans]['start_Rnor']),
str(transcripts[trans]['end_Rnor']),
transcripts[trans]['strand'],
transcripts[trans]['exon'],
'Rnor']) + '\n')
file.write(chr_fasta_Rnor[transcripts[trans]['start_Rnor']-1:
transcripts[trans]['end_Rnor']] + '\n')
subprocess.call(['needle',
'-asequence', 'transcript_sequences_Fischer.fasta',
'-bsequence', 'transcript_sequences_Rnor.fasta',
'-gapopen', '1.0',
'-gapextend', '0.5',
'-outfile', 'Fischer-Rnor.align'])
alignment = AlignIO.read('Fischer-Rnor.align', 'emboss')
fischer_seq = str(alignment.get_all_seqs()[0].seq)
rnor_seq = str(alignment.get_all_seqs()[1].seq)
fischer_pos = transcripts[trans]['start_Fischer']
rnor_pos = transcripts[trans]['start_Rnor']-1
gap_dict = {rnor_pos:fischer_pos}
for nt in range(1,len(fischer_seq),1):
if fischer_seq[nt] != '-':
fischer_pos += 1
if rnor_seq[nt] != '-':
rnor_pos += 1
if gap_dict.get(rnor_pos) == None:
gap_dict[rnor_pos] = [fischer_pos]
for exon in gtf_exons:
if trans.split('|')[0] == gtf_exons[exon][5]['transcript_id']:
new_exon_ann[exon] = {'chr':gtf_exons[exon][1],
'start':gap_dict[gtf_exons[exon][2]],
'end':gap_dict[gtf_exons[exon][3]],
'strand':gtf_exons[exon][4]
}
print yaml.dump(new_exon_ann)
if __name__ == '__main__':
cli()
| DeskGen/dgcli | library_design/dgl/genome_remap3.py | Python | gpl-2.0 | 24,336 | ["BWA", "Bowtie"] | 024affc3702da5d631c840a58ea0d8d50c17b24eeaf3335b78770ae45dcbcf46 |
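In the file above, parse_gtf keys every feature on its Ensembl identifiers and stores a list of [type, chr, start, end, strand, attributes]. The following self-contained sketch replays that attribute parsing on a single invented GTF line; the identifiers and coordinates are made up for illustration, not taken from the annotation files used by the script.

```python
# Standalone sketch of the attribute parsing performed by parse_gtf above.
# The GTF line is an invented Ensembl-style example, not real annotation data.
gtf_line = ('1\tensembl\tgene\t1000\t2000\t.\t+\t.\t'
            'gene_id "ENSRNOG00000000001"; gene_version "5"; gene_name "Foo"')

ann = gtf_line.split('\t')
info_dict = {}
for info in ann[8].split('; '):
    info_dict[info.split(' "')[0]] = info.split('"')[1]

strand = {'+': '1', '-': '-1'}
ann_info = [ann[2], ann[0], int(ann[3]), int(ann[4]), strand[ann[6]], info_dict]
print(info_dict['gene_id'], ann_info[:5])
# -> ENSRNOG00000000001 ['gene', '1', 1000, 2000, '1']
```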
# Copyright 2010 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to use the Closure Compiler CLI from Python."""
import logging
import os
import re
import subprocess
import tempfile
# Pulls just the major and minor version numbers from the first line of
# 'java -version'. Versions are in the format of [0-9]+(\.[0-9]+)? See:
# http://openjdk.java.net/jeps/223
_VERSION_REGEX = re.compile(r'"([0-9]+)(?:\.([0-9]+))?')
class JsCompilerError(Exception):
"""Raised if there's an error in calling the compiler."""
pass
def _GetJavaVersionString():
"""Get the version string from the Java VM."""
return subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)
def _ParseJavaVersion(version_string):
"""Returns a 2-tuple for the current version of Java installed.
Args:
version_string: String of the Java version (e.g. '1.7.2-ea').
Returns:
The major and minor versions, as a 2-tuple (e.g. (1, 7)).
"""
match = _VERSION_REGEX.search(version_string)
if match:
version = tuple(int(x or 0) for x in match.groups())
assert len(version) == 2
return version
def _JavaSupports32BitMode():
"""Determines whether the JVM supports 32-bit mode on the platform."""
# Suppresses process output to stderr and stdout from showing up in the
# console as we're only trying to determine 32-bit JVM support.
supported = False
try:
devnull = open(os.devnull, 'wb')
return subprocess.call(
['java', '-d32', '-version'], stdout=devnull, stderr=devnull) == 0
except IOError:
pass
else:
devnull.close()
return supported
def _GetJsCompilerArgs(compiler_jar_path, java_version, jvm_flags):
"""Assembles arguments for call to JsCompiler."""
if java_version < (1, 7):
raise JsCompilerError('Closure Compiler requires Java 1.7 or higher. '
'Please visit http://www.java.com/getjava')
args = ['java']
# Add JVM flags we believe will produce the best performance. See
# https://groups.google.com/forum/#!topic/closure-library-discuss/7w_O9-vzlj4
# Attempt 32-bit mode if available (Java 7 on Mac OS X does not support 32-bit
# mode, for example).
if _JavaSupports32BitMode():
args += ['-d32']
# Prefer the "client" VM.
args += ['-client']
# Add JVM flags, if any
if jvm_flags:
args += jvm_flags
# Add the application JAR.
args += ['-jar', compiler_jar_path]
return args
def _GetFlagFile(source_paths, compiler_flags):
"""Writes given source paths and compiler flags to a --flagfile.
The given source_paths will be written as '--js' flags and the compiler_flags
are written as-is.
Args:
source_paths: List of string js source paths.
compiler_flags: List of string compiler flags.
Returns:
The file to which the flags were written.
"""
args = []
for path in source_paths:
args += ['--js', path]
# Add compiler flags, if any.
if compiler_flags:
args += compiler_flags
flags_file = tempfile.NamedTemporaryFile(delete=False)
flags_file.write(' '.join(args))
flags_file.close()
return flags_file
def Compile(compiler_jar_path,
source_paths,
jvm_flags=None,
compiler_flags=None):
"""Prepares command-line call to Closure Compiler.
Args:
compiler_jar_path: Path to the Closure compiler .jar file.
source_paths: Source paths to build, in order.
jvm_flags: A list of additional flags to pass on to JVM.
compiler_flags: A list of additional flags to pass on to Closure Compiler.
Returns:
The compiled source, as a string, or None if compilation failed.
"""
java_version = _ParseJavaVersion(str(_GetJavaVersionString()))
args = _GetJsCompilerArgs(compiler_jar_path, java_version, jvm_flags)
# Write source path arguments to flag file for avoiding "The filename or
# extension is too long" error in big projects. See
# https://github.com/google/closure-library/pull/678
flags_file = _GetFlagFile(source_paths, compiler_flags)
args += ['--flagfile', flags_file.name]
logging.info('Compiling with the following command: %s', ' '.join(args))
try:
return subprocess.check_output(args)
except subprocess.CalledProcessError:
raise JsCompilerError('JavaScript compilation failed.')
finally:
os.remove(flags_file.name)
| isabela-angelo/scratch-tangible-blocks | scratch-blocks/node_modules/google-closure-library/closure/bin/build/jscompiler.py | Python | bsd-3-clause | 4,870 | ["VisIt"] | 7f0266e4755723336d6d12191dfba34bdfca5f2f123a4249de781ebfe21e0447 |
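The _GetFlagFile helper above works around command-line length limits by writing every source path as a "--js" flag, plus any extra compiler flags, into a temporary flag file that is later handed to the compiler via --flagfile. Below is a standalone sketch of that convention with placeholder paths and flags (not a real Closure project).

```python
# Standalone sketch of the --flagfile convention described above:
# each source path becomes a "--js <path>" pair, extra compiler flags are
# appended verbatim, and everything is written to a temporary file.
# The paths and flags below are placeholders, not real project inputs.
import tempfile

source_paths = ["src/base.js", "src/app.js"]          # hypothetical inputs
compiler_flags = ["--compilation_level", "ADVANCED"]  # hypothetical flags

args = []
for path in source_paths:
    args += ["--js", path]
args += compiler_flags

with tempfile.NamedTemporaryFile(mode="w", suffix=".flags", delete=False) as flags_file:
    flags_file.write(" ".join(args))

print("flag file:", flags_file.name)
print("contents :", " ".join(args))
# The compiler would then be invoked roughly as:
#   java -jar compiler.jar --flagfile <flags_file.name>
```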
# -*- coding: utf-8 -*-
'''
Copyright (c) 2018 by Tobias Houska
This file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).
:author: Tobias Houska and Motjaba Sadegh
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from . import _algorithm
import numpy as np
import random
import time
class dream(_algorithm):
"""
Implements the DiffeRential Evolution Adaptive Metropolis (DREAM) algorithm
based on:
Vrugt, J. A. (2016) Markov chain Monte Carlo simulation using the DREAM software package.
"""
def __init__(self, *args, **kwargs):
"""
Input
----------
spot_setup: class
model: function
Should be callable with a parameter combination of the parameter-function
and return an list of simulation results (as long as evaluation list)
parameter: function
When called, it should return a random parameter combination. Which can
be e.g. uniform or Gaussian
objectivefunction: function
Should return the objectivefunction for a given list of a model simulation and
observation.
evaluation: function
Should return the true values as returned by the model.
dbname: str
* Name of the database where parameter, objectivefunction value and simulation results will be saved.
dbformat: str
* ram: fast suited for short sampling time. no file will be created and results are saved in an array.
* csv: A csv file will be created, which you can import afterwards.
parallel: str
* seq: Sequential sampling (default): normal iterations on one core of your CPU.
* mpi: Message Passing Interface: parallel computing on cluster PCs (recommended for Unix OS).
save_sim: boolean
* True: Simulation results will be saved
* False: Simulation results will not be saved
"""
kwargs['optimization_direction'] = 'maximize'
kwargs['algorithm_name'] = 'DiffeRential Evolution Adaptive Metropolis (DREAM) algorithm'
super(dream, self).__init__(*args, **kwargs)
def check_par_validity_bound(self, par):
if len(par) == len(self.min_bound) and len(par) == len(self.max_bound):
for i in range(len(par)):
if par[i] < self.min_bound[i]:
par[i] = self.min_bound[i]
if par[i] > self.max_bound[i]:
par[i] = self.max_bound[i]
else:
print('ERROR: Bounds do not have the same length as the parameter array')
return par
def get_regular_startingpoint(self,nChains):
randompar=self.parameter()['random']
for i in range(1000):
randompar=np.column_stack((randompar,self.parameter()['random']))
startpoints = []
for j in range(nChains):
startpoints.append(np.percentile(randompar,(j+1)/float(nChains+1)*100,axis=1))#,np.amax(randompar,axis=1)
startpoints = np.array(startpoints)
for k in range(len(randompar)):
random.shuffle(startpoints[:, k])
return startpoints
def check_par_validity_reflect(self, par):
if len(par) == len(self.min_bound) and len(par) == len(self.max_bound):
for i in range(len(par)):
if par[i] < self.min_bound[i]:
par[i] = self.min_bound[i] + (self.min_bound[i]- par[i])
elif par[i] > self.max_bound[i]:
par[i] = self.max_bound[i] - (par[i] - self.max_bound[i])
# Postprocessing if reflecting jumped out of bounds
for i in range(len(par)):
if par[i] < self.min_bound[i]:
par[i] = self.min_bound[i]
if par[i] > self.max_bound[i]:
par[i] = self.max_bound[i]
else:
print('ERROR: Bounds do not have the same length as the parameter array')
return par
def _get_gamma(self,N):
#N = Number of parameters
p = np.random.uniform(low=0,high=1)
if p >=0.2:
gamma = 2.38/np.sqrt(2*int(N))#/self.gammalevel
else:
gamma = 1
return gamma
def get_other_random_chains(self,cur_chain):
valid=False
while valid == False:
random_chain1 = np.random.randint(0,self.nChains)
random_chain2 = np.random.randint(0,self.nChains)
if random_chain1!=cur_chain and random_chain2!=cur_chain and random_chain1!=random_chain2:
valid=True
return random_chain1, random_chain2
def get_new_proposal_vector(self,cur_chain,newN,nrN):
gamma = self._get_gamma(nrN)
random_chain1,random_chain2 = self.get_other_random_chains(cur_chain)
new_parameterset=[]
#position = self.chain_samples-1#self.nChains*self.chain_samples+self.chain_samples+cur_chain-1
cur_par_set = list(self.bestpar[cur_chain][self.nChainruns[cur_chain]-1])
random_par_set1 = list(self.bestpar[random_chain1][self.nChainruns[random_chain1]-1])
random_par_set2 = list(self.bestpar[random_chain2][self.nChainruns[random_chain2]-1])
for i in range(self.N):#Go through parameters
if newN[i] == True:
new_parameterset.append(cur_par_set[i] + gamma*np.array(random_par_set1[i]-random_par_set2[i]) + np.random.normal(0,self.eps))
else:
new_parameterset.append(cur_par_set[i])
new_parameter=self.check_par_validity_reflect(new_parameterset)
#new_parameter=self.check_par_validity_bound(new_parameterset)
return new_parameter
# new_par = np.random.normal(loc=old_par, scale=self.stepsizes)
# new_par = self.check_par_validity_reflect(new_par)
# return new_par
def update_mcmc_status(self,par,like,sim,cur_chain):
self.bestpar[cur_chain][self.nChainruns[cur_chain]]=list(par)
self.bestlike[cur_chain]=like
self.bestsim[cur_chain]=list(sim)
def get_r_hat(self, parameter_array):
"""
Based on some fancy Matlab code, it returns an array [R_stat, MR_stat]
:param parameter_array: 3 dim array of parameter estimation sets
:type parameter_array: list
:return: [R_stat, MR_stat]
:rtype: list
"""
n, d, N = parameter_array.shape
# Use only the last 50% of each chain (Vrugt 2009), i.e. only half of "d", because "d" is the number
# of repetitions; we use the d/2 to d portion of those values which are already not NaN
whereIsNoNAN = np.logical_not(np.isnan(parameter_array))
alreadyToNum = np.sum(whereIsNoNAN[0, :, 0])
if alreadyToNum > 3:
parameter_array = parameter_array[:, int(np.floor(alreadyToNum / 2)): alreadyToNum, :]
else:
# the later functions need some data to work correctly, so in this case we use 100% of the non-NaN values
parameter_array = parameter_array[:, 0: alreadyToNum, :]
# d, n and N are easy to confuse; their roles here were figured out by tests
if n > 3:
mean_chains = np.zeros((n, N))
for i in range(n):
for j in range(N):
mean_chains[i, j] = np.nanmean(parameter_array[i, :, j])
B_uni = np.zeros(N)
for i in range(N):
B_uni[i] = d * np.nanvar(mean_chains[:, i],
ddof=1) # make numpy Matlab-like: https://stackoverflow.com/a/27600240/5885054
var_chains = np.zeros((n, N))
for i in range(n):
for j in range(N):
var_chains[i, j] = np.nanvar(parameter_array[i, :, j], ddof=1)
W_uni = np.zeros(N)
for i in range(N):
W_uni[i] = np.mean(var_chains[:, i])
sigma2 = ((d - 1) / d) * W_uni + (1 / d) * B_uni
whichW_UNIIsNull = W_uni == 0.0
W_uni[whichW_UNIIsNull] = np.random.uniform(0.1,1,1)
R_stat = np.sqrt((n + 1) / n * (np.divide(sigma2, W_uni)) - (d - 1) / (n * d))
# W_mult = 0
# for ii in range(n):
# W_mult = W_mult + np.cov(np.nan_to_num(np.transpose(parameter_array[ii, :, :])), ddof=1)
#
# W_mult = W_mult / n + 2e-52 * np.eye(N)
#
# # Note that numpy.cov() considers its input data matrix to have observations in each column,
# # and variables in each row, so to get numpy.cov() to return what other packages do,
# # you have to pass the transpose of the data matrix to numpy.cov().
# # https://stats.stackexchange.com/a/263508/168054
#
# B_mult = np.cov(np.nan_to_num(np.transpose(mean_chains))) + 2e-52 * np.eye(N) # 2e-52 avoids problems with eig if var = 0
# M = np.linalg.lstsq(W_mult, B_mult)
# R = np.max(np.abs(np.linalg.eigvals(M[0])))
# MR_stat = np.sqrt((n + 1) / n * R + (d - 1) / d)
return R_stat#[R_stat, MR_stat]
def sample(self, repetitions,nChains=5, nCr=3, eps=10e-6, convergence_limit=1.2, runs_after_convergence=100,acceptance_test_option=6):
self.set_repetiton(repetitions)
print('Starting the DREAM algorithm with '+str(repetitions)+ ' repetitions...')
if nChains <3:
print('Please use at least n=3 chains!')
return None
# Prepare storing MCMC chain as array of arrays.
# define stepsize of MCMC.
self.repetitions = int(repetitions)
self.nChains = int(nChains)
#Ensure initialisation of chains and database
self.burnIn = self.nChains
self.stepsizes = self.parameter()['step'] # array of stepsizes
self.nr_of_pars = len(self.stepsizes)
self.gammalevel=1
starttime = time.time()
intervaltime = starttime
# Metropolis-Hastings iterations.
self.bestpar=np.array([[[np.nan]*self.nr_of_pars]*self.repetitions]*self.nChains)
#[0]->chain #[0][0]->parameter #[0][0][0]->repetitons
self.bestlike=[[-np.inf]]*self.nChains
self.bestsim=[[np.nan]]*self.nChains
self.accepted=np.zeros(self.nChains)
self.nChainruns=[0]*self.nChains
self.min_bound, self.max_bound = self.parameter(
)['minbound'], self.parameter()['maxbound']
#firstcall = True
print('Initialize ', self.nChains, ' chain(s)...')
self.iter=0
#for i in range(10):
startpoints = self.get_regular_startingpoint(nChains)
#param_generator = ((curChain,list(self.parameter()['random'])) for curChain in range(int(self.nChains))) #TODO: Start with regular interval raster
param_generator = ((curChain,list(startpoints[curChain])) for curChain in range(int(self.nChains))) #TODO: Start with regular interval raster
for curChain,par,sim in self.repeat(param_generator):
like = self.postprocessing(self.iter, par, sim, chains=curChain)
self.update_mcmc_status(par,like,sim,curChain)
self.iter+=1
self.nChainruns[curChain] +=1
print('Beginning of random walk')
convergence = False
# Walk through chains
self.r_hats=[]
self.eps = eps
self.CR = []
for i in range(nCr):
self.CR.append((i+1)/nCr)
self.N = len(self.parameter()['random'])
nrN=1
newN = [True]*self.N
while self.iter < self.repetitions:
param_generator = ((curChain,self.get_new_proposal_vector(curChain,newN,nrN)) for curChain in range(int(self.nChains)))
for cChain,par,sim in self.repeat(param_generator):
pCr = np.random.randint(0,nCr)
ids=[]
for i in range(self.N):
ids.append(np.random.uniform(low=0,high=1))
newN = []
nrN = 0
for i in range(len(ids)):
if ids[i] < self.CR[pCr]:
newN.append(True)
nrN+=1
else:
newN.append(False)
if nrN == 0:
ids=[np.random.randint(0,self.N)]
nrN=1
like = self.postprocessing(self.iter, par, sim, chains=cChain)
# set an option for which type of comparison should be chosen:
metro_opt=acceptance_test_option
if metro_opt == 1:
logMetropHastRatio = like/self.bestlike[cChain]
elif metro_opt == 2 or metro_opt == 4:
logMetropHastRatio = np.exp(like - self.bestlike[cChain])
elif metro_opt == 3:
# SSR probability evaluation
# nrN is defined in this loop so it will increase every step
logMetropHastRatio = (like / self.bestlike[cChain]) ** (-nrN * (1 + self._get_gamma(nrN)) / 2)
elif metro_opt == 5:
# SSR probability evaluation, but now weighted with measurement error
# Note that measurement error is a single number --> homoscedastic; variance can be taken out of the sum sign
# SIGMA will be calculated from the original data
Sigma = np.mean(np.array(self.evaluation)*0.1)
logMetropHastRatio = np.exp(-0.5 * (-like + self.bestlike[cChain])/ (Sigma ** 2)) # signs are different because we write -SSR
elif metro_opt == 6: # SSR probability evaluation, but now weighted with measurement error
# Note that measurement error is a vector --> heteroscedastic; variance within sum sign -- see CompDensity.m
logMetropHastRatio = np.exp(-0.5 * (-like + self.bestlike[cChain])) # signs are different because we write -SSR
u = np.random.uniform(low=0.0, high=1)
if logMetropHastRatio>u:
self.update_mcmc_status(par,like,sim,cChain)
self.accepted[cChain] += 1 # monitor acceptance
else:
self.update_mcmc_status(self.bestpar[cChain][self.nChainruns[cChain]-1],self.bestlike[cChain],self.bestsim[cChain],cChain)
if self.status.stop:
self.iter = self.repetitions
print('Stopping sampling')
break
self.iter+=1
self.nChainruns[cChain] +=1
r_hat = self.get_r_hat(self.bestpar)
self.r_hats.append(r_hat)
# Refresh progressbar every two seconds
acttime = time.time()
if acttime - intervaltime >= 2 and self.iter >=2 and self.nChainruns[-1] >=3:
text = "Acceptance rates [%] =" +str(np.around((self.accepted)/float(((self.iter-self.burnIn)/self.nChains)),decimals=4)*100).strip('array([])')
print(text)
text = "Convergence rates =" +str(np.around((r_hat),decimals=4)).strip('array([])')
print(text)
intervaltime = time.time()
if (np.array(r_hat) < convergence_limit).all() and not convergence and self.nChainruns[-1] >=5:
#Stop sampling
print('#############')
print('Convergence has been achieved after '+str(self.iter)+' of '+str(self.repetitions)+' runs! Finally, '+str(runs_after_convergence)+' runs will be additionally sampled to form the posterior distribution')
print('#############')
self.repetitions = self.iter + runs_after_convergence
self.set_repetiton(self.repetitions)
#self.iter =self.repetitions - runs_after_convergence
convergence=True
self.final_call()
#try:
# self.datawriter.finalize()
#except AttributeError: # Happens if no database was assigned
# pass
#print('End of sampling')
#text = '%i of %i (best like=%g)' % (
# self.status.rep, repetitions, self.status.objectivefunction)
#print(text)
#print('Best parameter set')
#print(self.status.params)
#text = 'Duration:' + str(round((acttime - starttime), 2)) + ' s'
#print(text)
return self.r_hats
| bees4ever/spotpy | spotpy/algorithms/dream.py | Python | mit | 16,847 | ["Gaussian"] | 7a351bfc001547e477496f846ebe70a0deca3f8d102b80b702f6ad4799d14574 |
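get_r_hat above implements the univariate Gelman-Rubin convergence diagnostic: the between-chain variance of the chain means (B) is combined with the mean within-chain variance (W) into a pooled estimate, and R_stat approaches 1 as the chains converge. A minimal numpy sketch of that formula on toy chains (random numbers, not SPOTPY output):

```python
# Minimal sketch of the univariate Gelman-Rubin R_stat computed in get_r_hat above,
# for n chains of d samples of a single parameter (toy random data).
import numpy as np

rng = np.random.default_rng(0)
n, d = 5, 200                       # chains, samples per chain
chains = rng.normal(size=(n, d))    # toy parameter samples

chain_means = chains.mean(axis=1)
B = d * chain_means.var(ddof=1)             # between-chain variance
W = chains.var(axis=1, ddof=1).mean()       # mean within-chain variance
sigma2 = ((d - 1) / d) * W + B / d
R_stat = np.sqrt((n + 1) / n * (sigma2 / W) - (d - 1) / (n * d))
print(round(float(R_stat), 3))              # values near 1 indicate convergence
```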
# LIBTBX_SET_DISPATCHER_NAME dev.dials.ssx_index
"""
This program runs indexing on the spotfinding results from a
still sequence i.e. SSX data. This wraps a call to the regular
indexing code, and so all parameters from dials.index can be set.
If a unit cell is given, indexing of each image will be attempted with the
fft1d algorithm, followed by the real_space_grid_search algorithm if indexing
with the fft1d algorithm was unsuccessful. If no unit cell is given, only fft1d
indexing can be attempted.
Indexing statistics are reported in a table and a unit cell clustering analysis
is performed, which can be useful for assessing the crystal symmetry if the unit
cell is unknown. An extensive html report is generated showing the indexing
and clustering statistics. The indexed data are saved into a single reflection
file and a single experiment list file, with a joint detector and beam model.
Further program documentation can be found at dials.github.io/ssx_processing_guide.html
Usage:
dev.dials.ssx_index imported.expt strong.refl
dev.dials.ssx_index imported.expt strong.refl unit_cell=x space_group=y
"""
from __future__ import annotations
import json
import logging
import sys
import time
from cctbx import crystal
from libtbx import Auto, phil
from libtbx.introspection import number_of_processors
from dials.algorithms.indexing.ssx.analysis import (
generate_html_report,
generate_plots,
make_summary_table,
report_on_crystal_clusters,
)
from dials.algorithms.indexing.ssx.processing import index
from dials.util import log, show_mail_handle_errors
from dials.util.options import ArgumentParser, reflections_and_experiments_from_files
from dials.util.version import dials_version
try:
from typing import List
except ImportError:
pass
logger = logging.getLogger("dials")
program_defaults_phil_str = """
indexing {
method = fft1d
stills {
indexer = stills
}
}
output.log = dials.ssx_index.log
refinement {
parameterisation {
auto_reduction {
min_nref_per_parameter = 1
action = fix
}
beam.fix = all
detector.fix = all
scan_varying = False
}
reflections {
weighting_strategy.override = stills
outlier.algorithm = null
}
}
"""
phil_scope = phil.parse(
"""
method = *fft1d *real_space_grid_search
.type = choice(multi=True)
nproc = Auto
.type = int
.expert_level = 1
.help = "Set the number of processors to use in indexing"
output.html = dials.ssx_index.html
.type = str
output.json = None
.type = str
include scope dials.command_line.index.phil_scope
""",
process_includes=True,
).fetch(phil.parse(program_defaults_phil_str))
phil_scope.adopt_scope(
phil.parse(
"""
individual_log_verbosity = 1
.type =int
"""
)
)
@show_mail_handle_errors()
def run(args: List[str] = None, phil: phil.scope = phil_scope) -> None:
"""
Run dev.dials.ssx_index as from the command line.
This program takes an imported experiment list and a reflection table
of strong spots and performs parallelised indexing for synchrotron
serial crystallography experiments. This is done by calling the regular
dials indexing code and capturing output to provide a html report, and
outputs a multi-image indexed.expt and indexed.refl file containing the
indexed data.
"""
parser = ArgumentParser(
usage="dev.dials.ssx_index imported.expt strong.refl [options]",
read_experiments=True,
read_reflections=True,
phil=phil_scope,
check_format=False,
epilog=__doc__,
)
params, options = parser.parse_args(args=args, show_diff_phil=False)
if not params.input.experiments or not params.input.reflections:
parser.print_help()
sys.exit()
reflections, experiments = reflections_and_experiments_from_files(
params.input.reflections, params.input.experiments
)
log.config(verbosity=options.verbose, logfile=params.output.log)
params.individual_log_verbosity = options.verbose
logger.info(dials_version())
diff_phil = parser.diff_phil.as_str()
if diff_phil:
logger.info("The following parameters have been modified:\n%s", diff_phil)
if params.nproc is Auto:
params.nproc = number_of_processors(return_value_if_unknown=1)
if params.nproc > 1:
params.indexing.nproc = params.nproc
logger.info(f"Using {params.indexing.nproc} processes for indexing")
st = time.time()
indexed_experiments, indexed_reflections, summary_data = index(
experiments, reflections[0], params
)
summary_table = make_summary_table(summary_data)
logger.info("\nSummary of images sucessfully indexed\n" + summary_table)
n_images = len({e.imageset.get_path(0) for e in indexed_experiments})
logger.info(f"{indexed_reflections.size()} spots indexed on {n_images} images\n")
crystal_symmetries = [
crystal.symmetry(
unit_cell=expt.crystal.get_unit_cell(),
space_group=expt.crystal.get_space_group(),
)
for expt in indexed_experiments
]
cluster_plots, _ = report_on_crystal_clusters(
crystal_symmetries,
make_plots=(params.output.html or params.output.json),
)
logger.info(f"Saving indexed experiments to {params.output.experiments}")
indexed_experiments.as_file(params.output.experiments)
logger.info(f"Saving indexed reflections to {params.output.reflections}")
indexed_reflections.as_file(params.output.reflections)
if params.output.html or params.output.json:
summary_plots = generate_plots(summary_data)
if cluster_plots:
summary_plots.update(cluster_plots)
if params.output.html:
generate_html_report(summary_plots, params.output.html)
if params.output.json:
with open(params.output.json, "w") as outfile:
json.dump(summary_plots, outfile)
logger.info(f"Total time: {time.time() - st:.2f}s")
logger.info(
"Further program documentation can be found at dials.github.io/ssx_processing_guide.html"
)
if __name__ == "__main__":
run()
| dials/dials | command_line/ssx_index.py | Python | bsd-3-clause | 6,171 | ["CRYSTAL"] | 2b32dd22c152d69e951f0ec6713e1bde0aba3e0c5b381151f66d1afed65dac95 |
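The docstring above describes a per-image fallback: fft1d is tried first, and real_space_grid_search is only attempted when a known unit cell makes it possible. The sketch below illustrates that control flow only; it is not the dials implementation, and index_with is a hypothetical stand-in for a call into the indexing code.

```python
# Illustrative sketch (NOT the dials implementation) of the per-image fallback
# described above. index_with() and the file name are hypothetical placeholders.
def index_image(image, methods, unit_cell=None):
    # real_space_grid_search is only attempted when a unit cell is known
    attempts = [m for m in methods if m != "real_space_grid_search" or unit_cell]
    for method in attempts:
        result = index_with(image, method, unit_cell)  # hypothetical helper
        if result is not None:
            return method, result
    return None, None

def index_with(image, method, unit_cell):
    # Placeholder: pretend only real_space_grid_search succeeds for this image.
    return {"image": image, "method": method} if method == "real_space_grid_search" else None

print(index_image("img_0001.cbf", ["fft1d", "real_space_grid_search"], unit_cell="x"))
```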
# -*- coding: utf-8 -*-
"""Vibrational modes."""
import pickle
from math import sin, pi, sqrt
from os import remove
from os.path import isfile
import sys
import numpy as np
import ase.units as units
from ase.io.trajectory import PickleTrajectory
from ase.parallel import rank, barrier, paropen
class Vibrations:
"""Class for calculating vibrational modes using finite difference.
The vibrational modes are calculated from a finite difference approximation
of the Hessian matrix.
The *summary()*, *get_energies()* and *get_frequencies()* methods all take
an optional *method* keyword. Use method='Frederiksen' to use the method
described in:
T. Frederiksen, M. Paulsson, M. Brandbyge, A. P. Jauho:
"Inelastic transport theory from first-principles: methodology and
applications for nanoscale devices",
Phys. Rev. B 75, 205413 (2007)
atoms: Atoms object
The atoms to work on.
indices: list of int
List of indices of atoms to vibrate. Default behavior is
to vibrate all atoms.
name: str
Name to use for files.
delta: float
Magnitude of displacements.
nfree: int
Number of displacements per atom and cartesian coordinate, 2 and 4 are
supported. Default is 2 which will displace each atom +delta and -delta
for each cartesian coordinate.
Example:
>>> from ase import Atoms
>>> from ase.calculators.emt import EMT
>>> from ase.optimizers import BFGS
>>> from ase.vibrations import Vibrations
>>> n2 = Atoms('N2', [(0, 0, 0), (0, 0, 1.1)],
... calculator=EMT())
>>> BFGS(n2).run(fmax=0.01)
BFGS: 0 19:16:06 0.042171 2.9357
BFGS: 1 19:16:07 0.104197 3.9270
BFGS: 2 19:16:07 0.000963 0.4142
BFGS: 3 19:16:07 0.000027 0.0698
BFGS: 4 19:16:07 0.000000 0.0010
>>> vib = Vibrations(n2)
>>> vib.run()
>>> vib.summary()
---------------------
# meV cm^-1
---------------------
0 0.0i 0.0i
1 0.0i 0.0i
2 0.0i 0.0i
3 1.6 13.1
4 1.6 13.1
5 232.7 1877.2
---------------------
Zero-point energy: 0.118 eV
Thermodynamic properties at 298.00 K
Enthalpy: 0.050 eV
Entropy : 0.648 meV/K
T*S : 0.193 eV
E->G : -0.025 eV
>>> vib.write_mode(-1) # write last mode to trajectory file
"""
def __init__(self, atoms, indices=None, name='vib', delta=0.01, nfree=2):
assert nfree in [2, 4]
self.atoms = atoms
if indices is None:
indices = range(len(atoms))
self.indices = np.asarray(indices)
self.name = name
self.delta = delta
self.nfree = nfree
self.H = None
self.ir = None
def run(self):
"""Run the vibration calculations.
This will calculate the forces for 6 displacements per atom ±x, ±y, ±z.
Only those calculations that are not already done will be started. Be
aware that an interrupted calculation may produce an empty file (ending
with .pckl), which must be deleted before restarting the job. Otherwise
the forces will not be calculated for that displacement.
Note that the calculations for the different displacements can be done
simultaneously by several independent processes. This feature relies on
the existence of files and the subsequent creation of the file in case
it is not found.
"""
filename = self.name + '.eq.pckl'
if not isfile(filename):
barrier()
if rank == 0:
fd = open(filename, 'w')
forces = self.atoms.get_forces()
if self.ir:
dipole = self.calc.get_dipole_moment(self.atoms)
if rank == 0:
if self.ir:
pickle.dump([forces, dipole], fd)
sys.stdout.write(
'Writing %s, dipole moment = (%.6f %.6f %.6f)\n' %
(filename, dipole[0], dipole[1], dipole[2]))
else:
pickle.dump(forces, fd)
sys.stdout.write('Writing %s\n' % filename)
fd.close()
sys.stdout.flush()
p = self.atoms.positions.copy()
for a in self.indices:
for i in range(3):
for sign in [-1, 1]:
for ndis in range(1, self.nfree//2+1):
filename = ('%s.%d%s%s.pckl' %
(self.name, a, 'xyz'[i], ndis*' +-'[sign]))
if isfile(filename):
continue
barrier()
if rank == 0:
fd = open(filename, 'w')
self.atoms.positions[a, i] = (p[a, i] +
ndis * sign * self.delta)
forces = self.atoms.get_forces()
if self.ir:
dipole = self.calc.get_dipole_moment(self.atoms)
if rank == 0:
if self.ir:
pickle.dump([forces, dipole], fd)
sys.stdout.write(
'Writing %s, ' % filename +
'dipole moment = (%.6f %.6f %.6f)\n' %
(dipole[0], dipole[1], dipole[2]))
else:
pickle.dump(forces, fd)
sys.stdout.write('Writing %s\n' % filename)
fd.close()
sys.stdout.flush()
self.atoms.positions[a, i] = p[a, i]
self.atoms.set_positions(p)
def clean(self):
if isfile(self.name + '.eq.pckl'):
remove(self.name + '.eq.pckl')
for a in self.indices:
for i in 'xyz':
for sign in '-+':
for ndis in range(1, self.nfree//2+1):
name = '%s.%d%s%s.pckl' % (self.name, a, i, ndis*sign)
if isfile(name):
remove(name)
def read(self, method='standard', direction='central'):
self.method = method.lower()
self.direction = direction.lower()
assert self.method in ['standard', 'frederiksen']
assert self.direction in ['central', 'forward', 'backward']
n = 3 * len(self.indices)
H = np.empty((n, n))
r = 0
if direction != 'central':
feq = pickle.load(open(self.name + '.eq.pckl'))
for a in self.indices:
for i in 'xyz':
name = '%s.%d%s' % (self.name, a, i)
fminus = pickle.load(open(name + '-.pckl'))
fplus = pickle.load(open(name + '+.pckl'))
if self.method == 'frederiksen':
fminus[a] -= fminus.sum(0)
fplus[a] -= fplus.sum(0)
if self.nfree == 4:
fminusminus = pickle.load(open(name + '--.pckl'))
fplusplus = pickle.load(open(name + '++.pckl'))
if self.method == 'frederiksen':
fminusminus[a] -= fminusminus.sum(0)
fplusplus[a] -= fplusplus.sum(0)
if self.direction == 'central':
if self.nfree == 2:
H[r] = .5 * (fminus - fplus)[self.indices].ravel()
else:
H[r] = (-fminusminus +
8 * fminus -
8 * fplus +
fplusplus)[self.indices].ravel() / 12.0
elif self.direction == 'forward':
H[r] = (feq - fplus)[self.indices].ravel()
else: # self.direction == 'backward':
H[r] = (fminus - feq)[self.indices].ravel()
H[r] /= 2 * self.delta
r += 1
H += H.copy().T
self.H = H
m = self.atoms.get_masses()
if 0 in [m[index] for index in self.indices]:
raise RuntimeError('Zero mass encountered in one or more of '
'the vibrated atoms. Use Atoms.set_masses()'
' to set all masses to non-zero values.')
self.im = np.repeat(m[self.indices]**-0.5, 3)
omega2, modes = np.linalg.eigh(self.im[:, None] * H * self.im)
self.modes = modes.T.copy()
# Conversion factor:
s = units._hbar * 1e10 / sqrt(units._e * units._amu)
self.hnu = s * omega2.astype(complex)**0.5
def get_energies(self, method='standard', direction='central'):
"""Get vibration energies in eV."""
if (self.H is None or method.lower() != self.method or
direction.lower() != self.direction):
self.read(method, direction)
return self.hnu
def get_frequencies(self, method='standard', direction='central'):
"""Get vibration frequencies in cm^-1."""
s = 0.01 * units._e / units._c / units._hplanck
return s * self.get_energies(method, direction)
def summary(self, method='standard', direction='central', T=298.,
threshold=10, freq=None, log=sys.stdout):
"""Print a summary of the vibrational frequencies.
Parameters:
method : string
Can be 'standard'(default) or 'Frederiksen'.
direction: string
Direction for finite differences. Can be one of 'central'
(default), 'forward', 'backward'.
freq : numpy array
Optional. Can be used to create a summary on a set of known
frequencies.
log : if specified, write output to a different location than
stdout. Can be an object with a write() method or the name of a
file to create.
"""
if isinstance(log, str):
log = paropen(log, 'a')
write = log.write
s = 0.01 * units._e / units._c / units._hplanck
if freq != None:
hnu = freq / s
else:
hnu = self.get_energies(method, direction)
write('---------------------\n')
write(' # meV cm^-1\n')
write('---------------------\n')
for n, e in enumerate(hnu):
if e.imag != 0:
c = 'i'
e = e.imag
else:
c = ' '
e = e.real
write('%3d %6.1f%s %7.1f%s\n' % (n, 1000 * e, c, s * e, c))
write('---------------------\n')
write('Zero-point energy: %.3f eV\n' %
self.get_zero_point_energy(freq=freq))
def get_zero_point_energy(self, freq=None):
if freq is None:
return 0.5 * self.hnu.real.sum()
else:
s = 0.01 * units._e / units._c / units._hplanck
return 0.5 * freq.real.sum() / s
def get_mode(self, n):
mode = np.zeros((len(self.atoms), 3))
mode[self.indices] = (self.modes[n] * self.im).reshape((-1, 3))
return mode
def write_mode(self, n, kT=units.kB * 300, nimages=30):
"""Write mode to trajectory file."""
mode = self.get_mode(n) * sqrt(kT / abs(self.hnu[n]))
p = self.atoms.positions.copy()
n %= 3 * len(self.indices)
traj = PickleTrajectory('%s.%d.traj' % (self.name, n), 'w')
calc = self.atoms.get_calculator()
self.atoms.set_calculator()
for x in np.linspace(0, 2 * pi, nimages, endpoint=False):
self.atoms.set_positions(p + sin(x) * mode)
traj.write(self.atoms)
self.atoms.set_positions(p)
self.atoms.set_calculator(calc)
traj.close()
def write_jmol(self):
"""Writes file for viewing of the modes with jmol."""
fd = open(self.name + '.xyz', 'w')
symbols = self.atoms.get_chemical_symbols()
f = self.get_frequencies()
for n in range(3 * len(self.indices)):
fd.write('%6d\n' % len(self.atoms))
if f[n].imag != 0:
c = 'i'
f[n] = f[n].imag
else:
c = ' '
fd.write('Mode #%d, f = %.1f%s cm^-1' % (n, f[n], c))
if self.ir:
fd.write(', I = %.4f (D/Å)^2 amu^-1.\n' % self.intensities[n])
else:
fd.write('.\n')
mode = self.get_mode(n)
for i, pos in enumerate(self.atoms.positions):
fd.write('%2s %12.5f %12.5f %12.5f %12.5f %12.5f %12.5f \n' %
(symbols[i], pos[0], pos[1], pos[2],
mode[i,0], mode[i,1], mode[i,2]))
fd.close()
| slabanja/ase | ase/vibrations.py | Python | gpl-2.0 | 13,163 | ["ASE", "Jmol"] | 8e3dae8f0c4aa66f07f33741e2e2b7a2831364558a63b86b6fcc97c9a4aef260 |
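Vibrations.read() above builds each Hessian row from the central difference 0.5*(F(-delta) - F(+delta)), divides by 2*delta, symmetrises with H += H.T, and mass-weights before diagonalising. A one-degree-of-freedom sketch of the same scheme on a toy harmonic potential (invented numbers, no ASE objects involved):

```python
# Standalone sketch of the central finite-difference scheme used in
# Vibrations.read() (nfree=2, direction='central'), applied to a toy
# 1-D harmonic potential. All numbers are illustrative.
import numpy as np

k_true = 5.0    # toy force constant
mass = 2.0      # toy mass
delta = 0.01    # displacement magnitude, as in Vibrations(delta=0.01)

def force(x):
    # Force of the harmonic potential V = 0.5 * k_true * x**2
    return -k_true * x

fminus = force(-delta)                       # force at the -delta displacement
fplus = force(+delta)                        # force at the +delta displacement
H = 0.5 * (fminus - fplus) / (2 * delta)     # one Hessian element
H = H + H                                    # 1x1 analogue of the H += H.T symmetrisation
omega2 = H / mass                            # mass-weighted "eigenvalue" for one coordinate
print(np.sqrt(omega2), np.sqrt(k_true / mass))   # both ~1.5811
```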
# Generated by Django 3.1.8 on 2021-06-10 17:39
import TWLight.resources.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("resources", "0076_auto_20210607_1312"),
]
operations = [
migrations.AlterField(
model_name="language",
name="language",
field=models.CharField(
choices=[
("aa", "Qafár af"),
("ab", "Аҧсшәа"),
("abe", "Wôbanakiôdwawôgan"),
("abs", "Bahasa Ambon"),
("ace", "Acèh"),
("acf", "kwéyòl"),
("ady", "Адыгабзэ"),
("ady-cyrl", "ady"),
("ady-latn", "Adygabze"),
("aeb", "aeb-arab"),
("aeb-arab", "تونسي"),
("aeb-latn", "Tûnsî"),
("af", "Afrikaans"),
("ahr", "अहिराणी"),
("ak", "Akan"),
("akz", "Albaamo innaaɬiilka"),
("aln", "Gegë"),
("alt", "алтай тил"),
("am", "አማርኛ"),
("ami", "Pangcah"),
("an", "aragonés"),
("ang", "Ænglisc"),
("ann", "Obolo"),
("anp", "अङ्गिका"),
("ar", "العربية"),
("arc", "ܐܪܡܝܐ"),
("arn", "mapudungun"),
("aro", "Araona"),
("arq", "جازايرية"),
("ary", "الدارجة"),
("ary-arab", "ary"),
("ary-latn", "ed-dārija"),
("arz", "مصرى"),
("as", "অসমীয়া"),
("ase", "American sign language"),
("ast", "asturianu"),
("atj", "atikamekw"),
("atv", "тÿндÿк алтай тил"),
("av", "авар"),
("avk", "Kotava"),
("awa", "अवधी"),
("ay", "Aymar aru"),
("az", "az-latn"),
("az-arab", "تۆرکجه"),
("az-latn", "azərbaycanca"),
("az-cyrl", "азәрбајҹанҹа"),
("azb", "az-arab"),
("azj", "az-latn"),
("ba", "башҡортса"),
("ban", "Bali"),
("ban-bali", "ᬩᬮᬶ"),
("bar", "Boarisch"),
("bas", "ɓasaá"),
("bat-smg", "sgs"),
("bbc-latn", "Batak Toba"),
("bbc-batk", "ᯅᯖᯂ᯲ ᯖᯬᯅ"),
("bbc", "bbc-latn"),
("bcc", "جهلسری بلوچی"),
("bci", "wawle"),
("bcl", "Bikol Central"),
("bdr", "Bajau Sama"),
("be-tarask", "беларуская (тарашкевіца)"),
("be-x-old", "be-tarask"),
("be", "беларуская"),
("bew", "Bahasa Betawi"),
("bfa", "Bari"),
("bft", "بلتی"),
("bfq", "படகா"),
("bg", "български"),
("bgn", "روچ کپتین بلوچی"),
("bh", "bho"),
("bho", "भोजपुरी"),
("bi", "Bislama"),
("bjn", "Banjar"),
("bkm", "Itaŋikom"),
("blc", "ItNuxalkmc"),
("bm", "bamanankan"),
("bn", "বাংলা"),
("bnn", "Bunun"),
("bo", "བོད་ཡིག"),
("bpy", "বিষ্ণুপ্রিয়া মণিপুরী"),
("bqi", "بختیاری"),
("br", "brezhoneg"),
("brh", "Bráhuí"),
("brx", "बर'"),
("bs", "bosanski"),
("btm", "Mandailing"),
("bto", "Iriga Bicolano"),
("bug", "ᨅᨔ ᨕᨘᨁᨗ"),
("bxr", "буряад"),
("byn", "ብሊን"),
("bzj", "Bileez Kriol"),
("ca", "català"),
("cak", "Kaqchikel"),
("cbk", "Chavacano de Zamboanga"),
("cbk-zam", "cbk"),
("ccp", "𑄌𑄋𑄴𑄟𑄳𑄦"),
("cdo", "Mìng-dĕ̤ng-ngṳ̄"),
("cdo-latn", "Mìng-dĕ̤ng-ngṳ̄ Bàng-uâ-cê"),
("cdo-hani", "閩東語(漢字)"),
("ce", "нохчийн"),
("ceb", "Cebuano"),
("ch", "Chamoru"),
("chm", "mhr"),
("chn", "chinuk wawa"),
("cho", "Choctaw"),
("chr", "ᏣᎳᎩ"),
("chy", "Tsetsêhestâhese"),
("ciw", "Anishinaabemowin"),
("cjy", "cjy-hant"),
("cjy-hans", "晋语(简化字)"),
("cjy-hant", "晉語"),
("ckb", "کوردی"),
("ckt", "ԓыгъоравэтԓьэн"),
("cnh", "Lai holh"),
("cnr", "cnr-latn"),
("cnr-cyrl", "црногорски"),
("cnr-latn", "crnogorski"),
("co", "corsu"),
("cop", "ϯⲙⲉⲧⲣⲉⲙⲛ̀ⲭⲏⲙⲓ"),
("cps", "Capiceño"),
("cr", "ᓀᐦᐃᔭᐍᐏᐣ"),
("cr-cans", "cr"),
("cr-latn", "Nēhiyawēwin"),
("crh", "qırımtatarca"),
("crh-cyrl", "къырымтатарджа"),
("crh-latn", "crh"),
("cs", "čeština"),
("csb", "kaszëbsczi"),
("cu", "словѣньскъ / ⰔⰎⰑⰂⰡⰐⰠⰔⰍⰟ"),
("cv", "Чӑвашла"),
("cy", "Cymraeg"),
("da", "dansk"),
("dag", "dagbanli"),
("dar", "дарган"),
("de-at", "Österreichisches Deutsch"),
("de-ch", "Schweizer Hochdeutsch"),
("de-formal", "Deutsch (Sie-Form)"),
("de", "Deutsch"),
("din", "Thuɔŋjäŋ"),
("diq", "Zazaki"),
("doi", "डोगरी"),
("dsb", "dolnoserbski"),
("dtp", "Dusun Bundu-liwan"),
("dty", "डोटेली"),
("dv", "ދިވެހިބަސް"),
("dz", "ཇོང་ཁ"),
("ee", "eʋegbe"),
("egl", "Emiliàn"),
("el", "Ελληνικά"),
("elm", "Eleme"),
("eml", "emiliàn e rumagnòl"),
("en-ca", "Canadian English"),
("en-gb", "British English"),
("en-simple", "Simple English"),
("en", "English"),
("eo", "Esperanto"),
("es-419", "español de América Latina"),
("es-formal", "español (formal)"),
("es", "español"),
("es-ni", "español nicaragüense"),
("esu", "Yup'ik"),
("et", "eesti"),
("eu", "euskara"),
("ext", "estremeñu"),
("eya", "I·ya·q"),
("fa", "فارسی"),
("fan", "Faŋ"),
("fax", "Fala"),
("ff", "Fulfulde"),
("fi", "suomi"),
("fil", "tl"),
("fit", "meänkieli"),
("fiu-vro", "vro"),
("fj", "Na Vosa Vakaviti"),
("fkv", "kvääni"),
("fo", "føroyskt"),
("fon", "fɔ̀ngbè"),
("fr", "français"),
("frc", "français cadien"),
("frp", "arpetan"),
("frr", "Nordfriisk"),
("fuf", "Fuuta Jalon"),
("fur", "furlan"),
("fy", "Frysk"),
("ga", "Gaeilge"),
("gaa", "Ga"),
("gag", "Gagauz"),
("gah", "Alekano"),
("gan-hans", "赣语(简体)"),
("gan-hant", "gan"),
("gan", "贛語"),
("gbm", "गढ़वळि"),
("gbz", "Dari-e Mazdeyasnā"),
("gcf", "Guadeloupean Creole French"),
("gcr", "kriyòl gwiyannen"),
("gd", "Gàidhlig"),
("gez", "ግዕዝ"),
("gl", "galego"),
("gld", "на̄ни"),
("glk", "گیلکی"),
("gn", "Avañe'ẽ"),
("gom", "gom-deva"),
("gom-deva", "गोंयची कोंकणी"),
("gom-latn", "Gõychi Konknni"),
("gor", "Bahasa Hulontalo"),
("got", "𐌲𐌿𐍄𐌹𐍃𐌺"),
("grc", "Ἀρχαία ἑλληνικὴ"),
("gsw", "Alemannisch"),
("gu", "ગુજરાતી"),
("guc", "wayuunaiki"),
("gum", "Namtrik"),
("gur", "Gurenɛ"),
("guw", "gungbe"),
("gv", "Gaelg"),
("ha", "Hausa"),
("ha-arab", "هَوُسَ"),
("ha-latn", "ha"),
("hai", "X̱aat Kíl"),
("hak", "Hak-kâ-fa"),
("haw", "Hawai`i"),
("he", "עברית"),
("hak-hans", "客家语(简体)"),
("hak-hant", "客家語(繁體)"),
("hi", "हिन्दी"),
("hif", "Fiji Hindi"),
("hif-deva", "फ़ीजी हिन्दी"),
("hif-latn", "hif"),
("hil", "Ilonggo"),
("hne", "छत्तीसगढ़ी"),
("ho", "Hiri Motu"),
("hoc", "𑢹𑣉𑣉"),
("hr", "hrvatski"),
("hrx", "Hunsrik"),
("hsb", "hornjoserbsce"),
("hsn", "湘语"),
("ht", "Kreyòl ayisyen"),
("hu-formal", "Magyar (magázó)"),
("hu", "magyar"),
("hy", "հայերեն"),
("hyw", "Արեւմտահայերէն"),
("hz", "Otsiherero"),
("ia", "interlingua"),
("id", "Bahasa Indonesia"),
("ie", "Interlingue"),
("ig", "Igbo"),
("ii", "ꆇꉙ"),
("ik", "Iñupiak"),
("ike-cans", "ᐃᓄᒃᑎᑐᑦ"),
("ike-latn", "inuktitut"),
("ilo", "Ilokano"),
("inh", "ГӀалгӀай"),
("io", "Ido"),
("is", "íslenska"),
("it", "italiano"),
("iu", "ike-cans"),
("izh", "ižoran keel"),
("ja", "日本語"),
("jam", "Patois"),
("jbo", "lojban"),
("jdt", "jdt-cyrl"),
("jdt-cyrl", "жугьури"),
("jje", "제주말"),
("jut", "jysk"),
("jv", "Jawa"),
("jv-java", "ꦗꦮ"),
("ka", "ქართული"),
("kaa", "Qaraqalpaqsha"),
("kab", "Taqbaylit"),
("kac", "Jinghpaw"),
("kbd-cyrl", "kbd"),
("kbd-latn", "Qabardjajəbza"),
("kbd", "Адыгэбзэ"),
("kbp", "Kabɩyɛ"),
("kcg", "Tyap"),
("kea", "Kabuverdianu"),
("kg", "Kongo"),
("kgp", "Kaingáng"),
("khw", "کھوار"),
("ki", "Gĩkũyũ"),
("kiu", "Kırmancki"),
("kj", "Kwanyama"),
("kjh", "хакас"),
("kjp", "ဖၠုံလိက်"),
("kk", "kk-cyrl"),
("kk-arab", "قازاقشا (تٶتە)"),
("kk-cn", "kk-arab"),
("kk-cyrl", "қазақша"),
("kk-kz", "kk-cyrl"),
("kk-latn", "qazaqşa"),
("kk-tr", "kk-latn"),
("kl", "kalaallisut"),
("km", "ភាសាខ្មែរ"),
("kn", "ಕನ್ನಡ"),
("knn", "महाराष्ट्रीय कोंकणी"),
("ko-kp", "조선말"),
("ko", "한국어"),
("koi", "перем коми"),
("koy", "Denaakkenaageʼ"),
("kr", "Kanuri"),
("krc", "къарачай-малкъар"),
("kri", "Krio"),
("krj", "Kinaray-a"),
("krl", "Karjala"),
("ks-arab", "کٲشُر"),
("ks-deva", "कॉशुर"),
("ks", "ks-arab"),
("ksf", "Bafia"),
("ksh", "Ripoarisch"),
("ksw", "စှီၤ ကညီကျိာ်"),
("ku", "ku-latn"),
("ku-arab", "كوردي"),
("ku-latn", "kurdî"),
("kum", "къумукъ"),
("kv", "коми"),
("kw", "kernowek"),
("ky", "Кыргызча"),
("la", "Latina"),
("lad", "lad-latn"),
("lad-latn", "Ladino"),
("lad-hebr", "לאדינו"),
("lag", "Kilaangi"),
("lb", "Lëtzebuergesch"),
("lbe", "лакку"),
("lez", "лезги"),
("lfn", "Lingua Franca Nova"),
("lg", "Luganda"),
("li", "Limburgs"),
("lij", "Ligure"),
("liv", "Līvõ kēļ"),
("lki", "لەکی"),
("lkt", "Lakȟótiyapi"),
("lld", "Ladin"),
("lmo", "lombard"),
("ln", "lingála"),
("lo", "ລາວ"),
("loz", "Silozi"),
("lt", "lietuvių"),
("lrc", "لۊری شومالی"),
("ltg", "latgaļu"),
("lud", "lüüdi"),
("lus", "Mizo ţawng"),
("lut", "dxʷləšucid"),
("luz", "لئری دوٙمینی"),
("lv", "latviešu"),
("lzh", "文言"),
("lzz", "Lazuri"),
("mad", "Madhurâ"),
("mai", "मैथिली"),
("map-bms", "Basa Banyumasan"),
("mdf", "мокшень"),
("mfe", "Morisyen"),
("mg", "Malagasy"),
("mh", "Ebon"),
("mhr", "олык марий"),
("mi", "Māori"),
("mic", "Mi'kmaq"),
("min", "Minangkabau"),
("miq", "Mískitu"),
("mk", "македонски"),
("ml", "മലയാളം"),
("mn", "монгол"),
("mn-cyrl", "mn"),
("mn-mong", "mvf"),
("mnc", "ᠮᠠᠨᠵᡠ ᡤᡳᠰᡠᠨ"),
("mni", "ꯃꯤꯇꯩ ꯂꯣꯟ"),
("mni-beng", "মেইতেই লোন্"),
("mnw", "ဘာသာ မန်"),
("mo", "молдовеняскэ"),
("moe", "innu-aimun"),
("mr", "मराठी"),
("mrh", "Mara"),
("mrj", "кырык мары"),
("mrv", "Magareva"),
("ms", "Bahasa Melayu"),
("ms-arab", "بهاس ملايو"),
("mt", "Malti"),
("mui", "Musi"),
("mus", "Mvskoke"),
("mvf", "ᠮᠣᠩᠭᠣᠯ"),
("mwl", "Mirandés"),
("mwv", "Behase Mentawei"),
("mww", "mww-latn"),
("mww-latn", "Hmoob Dawb"),
("my", "မြန်မာဘာသာ"),
("myv", "эрзянь"),
("mzn", "مازِرونی"),
("na", "Dorerin Naoero"),
("nah", "Nāhuatl"),
("nan", "Bân-lâm-gú"),
("nan-hani", "閩南語(漢字)"),
("nap", "Napulitano"),
("nb", "norsk (bokmål)"),
("nd", "siNdebele saseNyakatho"),
("nds-nl", "Nedersaksisch"),
("nds", "Plattdüütsch"),
("ne", "नेपाली"),
("new", "नेपाल भाषा"),
("ng", "Oshiwambo"),
("nia", "Li Niha"),
("niu", "ko e vagahau Niuē"),
("njo", "Ao"),
("nl-informal", "Nederlands (informeel)"),
("nl", "Nederlands"),
("nn", "norsk (nynorsk)"),
("no", "norsk"),
("nod", "คำเมือง"),
("nog", "ногайша"),
("nov", "Novial"),
("nqo", "ߒߞߏ"),
("nr", "isiNdebele seSewula"),
("nrm", "Nouormand"),
("nso", "Sesotho sa Leboa"),
("nus", "Thok Naath"),
("nv", "Diné bizaad"),
("ny", "Chi-Chewa"),
("nys", "Nyungar"),
("oc", "occitan"),
("ojb", "Ojibwemowin"),
("oka", "n̓səl̓xcin̓"),
("olo", "livvinkarjala"),
("om", "Oromoo"),
("ood", "ʼOʼodham ha-ñeʼokĭ"),
("or", "ଓଡ଼ିଆ"),
("os", "Ирон"),
("osi", "Using"),
("ota", "لسان عثمانى"),
("ovd", "övdalsk"),
("pa", "pa-guru"),
("pa-guru", "ਪੰਜਾਬੀ"),
("pag", "Pangasinan"),
("pam", "Kapampangan"),
("pap", "Papiamentu"),
("pap-aw", "Papiamento"),
("pbb", "Nasa Yuwe"),
("pcd", "Picard"),
("pdc", "Deitsch"),
("pdt", "Plautdietsch"),
("pfl", "Pälzisch"),
("pi", "पालि"),
("pih", "Norfuk / Pitkern"),
("pis", "Pijin"),
("pjt", "Pitjantjatjara"),
("pko", "Pökoot"),
("pl", "polski"),
("pms", "Piemontèis"),
("pnb", "پنجابی"),
("pnt", "Ποντιακά"),
("pov", "guinensi"),
("ppl", "Nawat"),
("prg", "Prūsiskan"),
("prs", "دری"),
("ps", "پښتو"),
("pt-br", "português do Brasil"),
("pt", "português"),
("pwn", "pinayuanan"),
("qu", "Runa Simi"),
("quc", "K'iche'"),
("qug", "Runa shimi"),
("qwh", "anqash qichwa"),
("rap", "arero rapa nui"),
("rcf", "Kreol Réyoné"),
("rej", "Jang"),
("rgn", "Rumagnôl"),
("rhg", "𐴌𐴟𐴇𐴥𐴝𐴚𐴒𐴙𐴝"),
("rif", "Tarifit"),
("rki", "ရခိုင်"),
("rm", "rumantsch"),
("rmc", "romaňi čhib"),
("rmf", "kaalengo tšimb"),
("rmy", "Romani"),
("rn", "Kirundi"),
("ro", "română"),
("roa-rup", "rup"),
("roa-tara", "tarandíne"),
("rtm", "Faeag Rotuma"),
("ru", "русский"),
("rue", "русиньскый"),
("rup", "armãneashti"),
("ruq", "Влахесте"),
("ruq-cyrl", "ruq"),
("ruq-grek", "Megleno-Romanian (Greek script)"),
("ruq-latn", "Vlăheşte"),
("rut", "мыхаӀбишды"),
("rw", "Kinyarwanda"),
("rwr", "मारवाड़ी"),
("ryu", "ʔucināguci"),
("sa", "संस्कृतम्"),
("sah", "саха тыла"),
("sat", "ᱥᱟᱱᱛᱟᱲᱤ"),
("saz", "ꢱꣃꢬꢵꢯ꣄ꢡ꣄ꢬꢵ"),
("sc", "sardu"),
("scn", "sicilianu"),
("sco", "Scots"),
("sd", "سنڌي"),
("sdc", "Sassaresu"),
("sdh", "کوردی خوارگ"),
("se", "davvisámegiella"),
("ses", "Koyraboro Senni"),
("sei", "Cmique Itom"),
("sg", "Sängö"),
("sgs", "žemaitėška"),
("sh", "srpskohrvatski"),
("shi-latn", "Taclḥit"),
("shi-tfng", "ⵜⴰⵛⵍⵃⵉⵜ"),
("shi", "shi-latn"),
("shn", "လိၵ်ႈတႆး"),
("shy-latn", "tacawit"),
("si", "සිංහල"),
("simple", "en-simple"),
("sjd", "кӣллт са̄мь кӣлл"),
("sje", "bidumsámegiella"),
("sjo", "ᠰᡞᠪᡝ ᡤᡞᠰᡠᠨ"),
("sju", "ubmejesámiengiälla"),
("sk", "slovenčina"),
("sl", "slovenščina"),
("sli", "Schläsch"),
("slr", "Salırça"),
("sly", "Bahasa Selayar"),
("skr-arab", "سرائیکی"),
("skr", "skr-arab"),
("syc", "ܣܘܪܝܝܐ"),
("syl", "ꠍꠤꠟꠐꠤ"),
("syl-beng", "সিলেটি"),
("syl-sylo", "syl"),
("sm", "Gagana Samoa"),
("sma", "åarjelsaemien"),
("smj", "julevsámegiella"),
("smn", "anarâškielâ"),
("sms", "nuõrttsääʹmǩiõll"),
("sn", "chiShona"),
("so", "Soomaaliga"),
("son", "soŋay"),
("sq", "shqip"),
("sr", "sr-cyrl"),
("sr-ec", "sr-cyrl"),
("sr-cyrl", "српски"),
("sr-el", "sr-latn"),
("sr-latn", "srpski"),
("srn", "Sranantongo"),
("ss", "SiSwati"),
("st", "Sesotho"),
("stq", "Seeltersk"),
("sty", "себертатар"),
("su", "Sunda"),
("sv", "svenska"),
("sw", "Kiswahili"),
("swb", "Shikomoro"),
("sxu", "Säggssch"),
("szl", "ślůnski"),
("szy", "Sakizaya"),
("ta", "தமிழ்"),
("tay", "Tayal"),
("tcy", "ತುಳು"),
("te", "తెలుగు"),
("tet", "tetun"),
("tg-cyrl", "тоҷикӣ"),
("tg-latn", "tojikī"),
("tg", "tg-cyrl"),
("th", "ไทย"),
("ti", "ትግርኛ"),
("tig", "ትግረ"),
("tk", "Türkmençe"),
("tkr", "ЦӀаӀхна миз"),
("tl", "Tagalog"),
("tly", "tolışi"),
("tly-cyrl", "толыши"),
("tmr", "ארמית בבלית"),
("tn", "Setswana"),
("to", "lea faka-Tonga"),
("tokipona", "Toki Pona"),
("tpi", "Tok Pisin"),
("tr", "Türkçe"),
("trp", "Kokborok (Tripuri)"),
("tru", "Ṫuroyo"),
("trv", "Sediq Taroko"),
("ts", "Xitsonga"),
("tsd", "Τσακωνικά"),
("tt", "татарча"),
("tt-cyrl", "tt"),
("tt-latn", "tatarça"),
("ttt", "Tati"),
("tum", "chiTumbuka"),
("tw", "Twi"),
("twd", "Tweants"),
("ty", "reo tahiti"),
("tyv", "тыва дыл"),
("tzl", "Talossan"),
("tzm", "ⵜⴰⵎⴰⵣⵉⵖⵜ"),
("udm", "удмурт"),
("ug", "ug-arab"),
("ug-arab", "ئۇيغۇرچە"),
("ug-latn", "uyghurche"),
("ug-cyrl", "уйғурчә"),
("uk", "українська"),
("umu", "Huluníixsuwaakan"),
("ur", "اردو"),
("uz", "oʻzbekcha"),
("ve", "Tshivenda"),
("vai", "ꕙꔤ"),
("vec", "vèneto"),
("vep", "vepsän kel’"),
("vi", "Tiếng Việt"),
("vls", "West-Vlams"),
("vmf", "Mainfränkisch"),
("vo", "Volapük"),
("vot", "Vaďďa"),
("vro", "võro"),
("wa", "walon"),
("war", "Winaray"),
("wls", "Faka'uvea"),
("wo", "Wolof"),
("wuu", "吴语"),
("xal", "хальмг"),
("xh", "isiXhosa"),
("xmf", "მარგალური"),
("xsy", "SaiSiyat"),
("ydd", "Eastern Yiddish"),
("yi", "ייִדיש"),
("yo", "Yorùbá"),
("yrk", "Ненэцяʼ вада"),
("yrl", "ñe'engatú"),
("yua", "Maaya T'aan"),
("yue", "粵語"),
("za", "Vahcuengh"),
("zea", "Zeêuws"),
("zgh", "ⵜⴰⵎⴰⵣⵉⵖⵜ ⵜⴰⵏⴰⵡⴰⵢⵜ"),
("zh", "中文"),
("zh-classical", "lzh"),
("zh-cn", "中文(中国大陆)"),
("zh-hans", "中文(简体)"),
("zh-hant", "中文(繁體)"),
("zh-hk", "中文(香港)"),
("zh-min-nan", "nan"),
("zh-mo", "中文(澳門)"),
("zh-my", "中文(马来西亚)"),
("zh-sg", "中文(新加坡)"),
("zh-tw", "中文(台灣)"),
("zh-yue", "yue"),
("zh-cdo", "cdo"),
("zu", "isiZulu"),
("zun", "Shiwi'ma"),
],
max_length=12,
unique=True,
validators=[TWLight.resources.models.validate_language_code],
),
),
]
|
WikipediaLibrary/TWLight
|
TWLight/resources/migrations/0077_auto_20210610_1739.py
|
Python
|
mit
| 29,851
|
[
"ASE",
"MOE"
] |
1fc1934e5a0a987a1c73eceb08a8d14e36c7da73200f5c1ac7d27387d2a778c1
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import email.parser
import logging
import math
import os
import pickle
import rfc822
import sys
import unittest
from contextlib import closing, contextmanager, nested
from gzip import GzipFile
from shutil import rmtree
import gc
import time
from textwrap import dedent
from urllib import quote
from hashlib import md5
from pyeclib.ec_iface import ECDriverError
from tempfile import mkdtemp, NamedTemporaryFile
import weakref
import operator
import functools
from swift.obj import diskfile
import re
import random
from collections import defaultdict
import mock
from eventlet import sleep, spawn, wsgi, listen, Timeout, debug
from eventlet.green import httplib
from six import BytesIO
from six import StringIO
from six.moves import range
from swift.common.utils import hash_path, json, storage_directory, \
parse_content_type, iter_multipart_mime_documents, public
from test.unit import (
connect_tcp, readuntil2crlfs, FakeLogger, fake_http_connect, FakeRing,
FakeMemcache, debug_logger, patch_policies, write_fake_ring,
mocked_http_conn)
from swift.proxy import server as proxy_server
from swift.proxy.controllers.obj import ReplicatedObjectController
from swift.account import server as account_server
from swift.container import server as container_server
from swift.obj import server as object_server
from swift.common.middleware import proxy_logging, versioned_writes
from swift.common.middleware.acl import parse_acl, format_acl
from swift.common.exceptions import ChunkReadTimeout, DiskFileNotExist, \
APIVersionError
from swift.common import utils, constraints
from swift.common.ring import RingData
from swift.common.utils import mkdirs, normalize_timestamp, NullLogger
from swift.common.wsgi import monkey_patch_mimetools, loadapp
from swift.proxy.controllers import base as proxy_base
from swift.proxy.controllers.base import get_container_memcache_key, \
get_account_memcache_key, cors_validation
import swift.proxy.controllers
import swift.proxy.controllers.obj
from swift.common.swob import Request, Response, HTTPUnauthorized, \
HTTPException, HeaderKeyDict, HTTPBadRequest
from swift.common import storage_policy
from swift.common.storage_policy import StoragePolicy, ECStoragePolicy, \
StoragePolicyCollection, POLICIES
from swift.common.request_helpers import get_sys_meta_prefix
# mocks
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
STATIC_TIME = time.time()
_test_coros = _test_servers = _test_sockets = _orig_container_listing_limit = \
_testdir = _orig_SysLogHandler = _orig_POLICIES = _test_POLICIES = None
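# do_setup() builds a complete in-process Swift cluster for the functional
# style tests below: one proxy, two account servers, two container servers
# and three object servers, all listening on ephemeral localhost ports and
# sharing a temporary on-disk layout under _testdir. The module-level
# globals above hold references to the servers, sockets and greenthreads so
# that setup()/teardown() can manage them for the whole module.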
def do_setup(the_object_server):
utils.HASH_PATH_SUFFIX = 'endcap'
global _testdir, _test_servers, _test_sockets, \
_orig_container_listing_limit, _test_coros, _orig_SysLogHandler, \
_orig_POLICIES, _test_POLICIES
_orig_POLICIES = storage_policy._POLICIES
_orig_SysLogHandler = utils.SysLogHandler
utils.SysLogHandler = mock.MagicMock()
monkey_patch_mimetools()
# Since we're starting up a lot here, we're going to test more than
# just chunked puts; we're also going to test parts of
# proxy_server.Application we couldn't get to easily otherwise.
_testdir = \
os.path.join(mkdtemp(), 'tmp_test_proxy_server_chunked')
mkdirs(_testdir)
rmtree(_testdir)
for drive in ('sda1', 'sdb1', 'sdc1', 'sdd1', 'sde1',
'sdf1', 'sdg1', 'sdh1', 'sdi1'):
mkdirs(os.path.join(_testdir, drive, 'tmp'))
conf = {'devices': _testdir, 'swift_dir': _testdir,
'mount_check': 'false', 'allowed_headers':
'content-encoding, x-object-manifest, content-disposition, foo',
'allow_versions': 't'}
prolis = listen(('localhost', 0))
acc1lis = listen(('localhost', 0))
acc2lis = listen(('localhost', 0))
con1lis = listen(('localhost', 0))
con2lis = listen(('localhost', 0))
obj1lis = listen(('localhost', 0))
obj2lis = listen(('localhost', 0))
obj3lis = listen(('localhost', 0))
objsocks = [obj1lis, obj2lis, obj3lis]
_test_sockets = \
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis, obj3lis)
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
account_devs = [
{'port': acc1lis.getsockname()[1]},
{'port': acc2lis.getsockname()[1]},
]
write_fake_ring(account_ring_path, *account_devs)
container_ring_path = os.path.join(_testdir, 'container.ring.gz')
container_devs = [
{'port': con1lis.getsockname()[1]},
{'port': con2lis.getsockname()[1]},
]
write_fake_ring(container_ring_path, *container_devs)
storage_policy._POLICIES = StoragePolicyCollection([
StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False),
ECStoragePolicy(3, 'ec', ec_type='jerasure_rs_vand',
ec_ndata=2, ec_nparity=1, ec_segment_size=4096)])
obj_rings = {
0: ('sda1', 'sdb1'),
1: ('sdc1', 'sdd1'),
2: ('sde1', 'sdf1'),
# sdg1, sdh1, sdi1 taken by policy 3 (see below)
}
for policy_index, devices in obj_rings.items():
policy = POLICIES[policy_index]
obj_ring_path = os.path.join(_testdir, policy.ring_name + '.ring.gz')
obj_devs = [
{'port': objsock.getsockname()[1], 'device': dev}
for objsock, dev in zip(objsocks, devices)]
write_fake_ring(obj_ring_path, *obj_devs)
# write_fake_ring can't handle a 3-element ring, and the EC policy needs
# at least 3 devs to work with, so we do it manually
devs = [{'id': 0, 'zone': 0, 'device': 'sdg1', 'ip': '127.0.0.1',
'port': obj1lis.getsockname()[1]},
{'id': 1, 'zone': 0, 'device': 'sdh1', 'ip': '127.0.0.1',
'port': obj2lis.getsockname()[1]},
{'id': 2, 'zone': 0, 'device': 'sdi1', 'ip': '127.0.0.1',
'port': obj3lis.getsockname()[1]}]
pol3_replica2part2dev_id = [[0, 1, 2, 0],
[1, 2, 0, 1],
[2, 0, 1, 2]]
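# Hand-built ring table for the EC policy: one row per replica, one column
# per partition, each entry a device id from `devs` above. With four
# partitions the part power is 2, so part_shift is 32 - 2 = 30 (a sketch of
# the RingData convention as used here, not a general recipe).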
obj3_ring_path = os.path.join(_testdir, POLICIES[3].ring_name + '.ring.gz')
part_shift = 30
with closing(GzipFile(obj3_ring_path, 'wb')) as fh:
pickle.dump(RingData(pol3_replica2part2dev_id, devs, part_shift), fh)
prosrv = proxy_server.Application(conf, FakeMemcacheReturnsNone(),
logger=debug_logger('proxy'))
for policy in POLICIES:
# make sure all the rings are loaded
prosrv.get_object_ring(policy.idx)
# don't lose this one!
_test_POLICIES = storage_policy._POLICIES
acc1srv = account_server.AccountController(
conf, logger=debug_logger('acct1'))
acc2srv = account_server.AccountController(
conf, logger=debug_logger('acct2'))
con1srv = container_server.ContainerController(
conf, logger=debug_logger('cont1'))
con2srv = container_server.ContainerController(
conf, logger=debug_logger('cont2'))
obj1srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj1'))
obj2srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj2'))
obj3srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj3'))
_test_servers = \
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv, obj3srv)
nl = NullLogger()
logging_prosv = proxy_logging.ProxyLoggingMiddleware(prosrv, conf,
logger=prosrv.logger)
prospa = spawn(wsgi.server, prolis, logging_prosv, nl)
acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl)
acc2spa = spawn(wsgi.server, acc2lis, acc2srv, nl)
con1spa = spawn(wsgi.server, con1lis, con1srv, nl)
con2spa = spawn(wsgi.server, con2lis, con2srv, nl)
obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl)
obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl)
obj3spa = spawn(wsgi.server, obj3lis, obj3srv, nl)
_test_coros = \
(prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa, obj3spa)
# Create account
ts = normalize_timestamp(time.time())
partition, nodes = prosrv.account_ring.get_nodes('a')
for node in nodes:
conn = swift.proxy.controllers.obj.http_connect(node['ip'],
node['port'],
node['device'],
partition, 'PUT', '/a',
{'X-Timestamp': ts,
'x-trans-id': 'test'})
resp = conn.getresponse()
assert(resp.status == 201)
# Create another account
# used for account-to-account tests
ts = normalize_timestamp(time.time())
partition, nodes = prosrv.account_ring.get_nodes('a1')
for node in nodes:
conn = swift.proxy.controllers.obj.http_connect(node['ip'],
node['port'],
node['device'],
partition, 'PUT',
'/a1',
{'X-Timestamp': ts,
'x-trans-id': 'test'})
resp = conn.getresponse()
assert(resp.status == 201)
# Create containers, 1 per test policy
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
exp, headers[:len(exp)])
# Create container in other account
# used for account-to-account tests
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a1/c1 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
exp, headers[:len(exp)])
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write(
'PUT /v1/a/c1 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: one\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, \
"Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write(
'PUT /v1/a/c2 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: two\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, \
"Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
def unpatch_policies(f):
"""
This will unset a TestCase-level patch_policies so that the module-level
policies set up for the _test_servers are used instead.
N.B. You should NEVER modify the _test_servers policies or rings during a
test because they persist for the life of the entire module!
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
with patch_policies(_test_POLICIES):
return f(*args, **kwargs)
return wrapper
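# Module-level setup()/teardown() hooks (nose-style): setup() spins up the
# in-process cluster via do_setup(), and teardown() kills the server
# greenthreads, removes the temporary directory and restores the patched
# SysLogHandler and storage policies.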
def setup():
do_setup(object_server)
def teardown():
for server in _test_coros:
server.kill()
rmtree(os.path.dirname(_testdir))
utils.SysLogHandler = _orig_SysLogHandler
storage_policy._POLICIES = _orig_POLICIES
def sortHeaderNames(headerNames):
"""
Return the given comma-delimited string of header names, sorted.
headerNames: a comma-delimited string of header names
"""
headers = [a.strip() for a in headerNames.split(',') if a.strip()]
headers.sort()
return ', '.join(headers)
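# Parse a raw CRLF-separated HTTP header blob (as read off a test socket)
# into a HeaderKeyDict for easier assertions.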
def parse_headers_string(headers_str):
headers_dict = HeaderKeyDict()
for line in headers_str.split('\r\n'):
if ': ' in line:
header, value = line.split(': ', 1)
headers_dict[header] = value
return headers_dict
def node_error_count(proxy_app, ring_node):
# Reach into the proxy's internals to get the error count for a
# particular node
node_key = proxy_app._error_limit_node_key(ring_node)
return proxy_app._error_limiting.get(node_key, {}).get('errors', 0)
def node_last_error(proxy_app, ring_node):
# Reach into the proxy's internals to get the last error for a
# particular node
node_key = proxy_app._error_limit_node_key(ring_node)
return proxy_app._error_limiting.get(node_key, {}).get('last_error')
def set_node_errors(proxy_app, ring_node, value, last_error):
# Set the node's error count to value
node_key = proxy_app._error_limit_node_key(ring_node)
stats = proxy_app._error_limiting.setdefault(node_key, {})
stats['errors'] = value
stats['last_error'] = last_error
class FakeMemcacheReturnsNone(FakeMemcache):
def get(self, key):
# Returns None as the timestamp of the container; assumes we're only
# using the FakeMemcache for container existence checks.
return None
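# save_globals() snapshots the module-level http_connect and the
# Controller.account_info/container_info attributes and restores them on
# exit, so tests can monkeypatch them freely via set_http_connect() below.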
@contextmanager
def save_globals():
orig_http_connect = getattr(swift.proxy.controllers.base, 'http_connect',
None)
orig_account_info = getattr(swift.proxy.controllers.Controller,
'account_info', None)
orig_container_info = getattr(swift.proxy.controllers.Controller,
'container_info', None)
try:
yield True
finally:
swift.proxy.controllers.Controller.account_info = orig_account_info
swift.proxy.controllers.base.http_connect = orig_http_connect
swift.proxy.controllers.obj.http_connect = orig_http_connect
swift.proxy.controllers.account.http_connect = orig_http_connect
swift.proxy.controllers.container.http_connect = orig_http_connect
swift.proxy.controllers.Controller.container_info = orig_container_info
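# set_http_connect() swaps a fake_http_connect() stub into every proxy
# controller module so the canned status codes passed in are returned
# instead of responses from real backend servers.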
def set_http_connect(*args, **kwargs):
new_connect = fake_http_connect(*args, **kwargs)
swift.proxy.controllers.base.http_connect = new_connect
swift.proxy.controllers.obj.http_connect = new_connect
swift.proxy.controllers.account.http_connect = new_connect
swift.proxy.controllers.container.http_connect = new_connect
return new_connect
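# Helper for tests that need to observe backend requests: returns a callback
# that records the method, path and headers of each connection attempt into
# the supplied `calls` list (presumably wired into fake_http_connect by the
# tests that use it).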
def _make_callback_func(calls):
def callback(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
context = {}
context['method'] = method
context['path'] = path
context['headers'] = headers or {}
calls.append(context)
return callback
def _limit_max_file_size(f):
"""
This will limit constraints.MAX_FILE_SIZE for the duration of the
wrapped function, based on whether MAX_FILE_SIZE exceeds the
sys.maxsize limit on the system running the tests.
This allows the tests to run successfully on 32-bit systems.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
test_max_file_size = constraints.MAX_FILE_SIZE
if constraints.MAX_FILE_SIZE >= sys.maxsize:
test_max_file_size = (2 ** 30 + 2)
with mock.patch.object(constraints, 'MAX_FILE_SIZE',
test_max_file_size):
return f(*args, **kwargs)
return wrapper
# tests
class TestController(unittest.TestCase):
def setUp(self):
self.account_ring = FakeRing()
self.container_ring = FakeRing()
self.memcache = FakeMemcache()
app = proxy_server.Application(None, self.memcache,
account_ring=self.account_ring,
container_ring=self.container_ring)
self.controller = swift.proxy.controllers.Controller(app)
class FakeReq(object):
def __init__(self):
self.url = "/foo/bar"
self.method = "METHOD"
def as_referer(self):
return self.method + ' ' + self.url
self.account = 'some_account'
self.container = 'some_container'
self.request = FakeReq()
self.read_acl = 'read_acl'
self.write_acl = 'write_acl'
def test_transfer_headers(self):
src_headers = {'x-remove-base-meta-owner': 'x',
'x-base-meta-size': '151M',
'new-owner': 'Kun'}
dst_headers = {'x-base-meta-owner': 'Gareth',
'x-base-meta-size': '150M'}
self.controller.transfer_headers(src_headers, dst_headers)
expected_headers = {'x-base-meta-owner': '',
'x-base-meta-size': '151M'}
self.assertEqual(dst_headers, expected_headers)
def check_account_info_return(self, partition, nodes, is_none=False):
if is_none:
p, n = None, None
else:
p, n = self.account_ring.get_nodes(self.account)
self.assertEqual(p, partition)
self.assertEqual(n, nodes)
def test_account_info_container_count(self):
with save_globals():
set_http_connect(200, count=123)
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 123)
with save_globals():
set_http_connect(200, count='123')
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 123)
with save_globals():
cache_key = get_account_memcache_key(self.account)
account_info = {'status': 200, 'container_count': 1234}
self.memcache.set(cache_key, account_info)
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 1234)
with save_globals():
cache_key = get_account_memcache_key(self.account)
account_info = {'status': 200, 'container_count': '1234'}
self.memcache.set(cache_key, account_info)
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 1234)
def test_make_requests(self):
with save_globals():
set_http_connect(200)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
set_http_connect(201, raise_timeout_exc=True)
self.controller._make_request(
nodes, partition, 'POST', '/', '', '',
self.controller.app.logger.thread_locals)
# tests if 200 is cached and used
def test_account_info_200(self):
with save_globals():
set_http_connect(200)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes)
self.assertEqual(count, 12345)
# Test the internal representation in memcache
# 'container_count' changed from int to str
cache_key = get_account_memcache_key(self.account)
container_info = {'status': 200,
'container_count': '12345',
'total_object_count': None,
'bytes': None,
'meta': {},
'sysmeta': {}}
self.assertEqual(container_info,
self.memcache.get(cache_key))
set_http_connect()
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes)
self.assertEqual(count, 12345)
# tests if 404 is cached and used
def test_account_info_404(self):
with save_globals():
set_http_connect(404, 404, 404)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes, True)
self.assertEqual(count, None)
# Test the internal representation in memcache
# 'container_count' changed from 0 to None
cache_key = get_account_memcache_key(self.account)
account_info = {'status': 404,
'container_count': None, # internally keep None
'total_object_count': None,
'bytes': None,
'meta': {},
'sysmeta': {}}
self.assertEqual(account_info,
self.memcache.get(cache_key))
set_http_connect()
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes, True)
self.assertEqual(count, None)
# tests if some http status codes are not cached
def test_account_info_no_cache(self):
def test(*status_list):
set_http_connect(*status_list)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.assertEqual(len(self.memcache.keys()), 0)
self.check_account_info_return(partition, nodes, True)
self.assertEqual(count, None)
with save_globals():
# We cache if we have two 404 responses - fail if only one
test(503, 503, 404)
test(504, 404, 503)
test(404, 507, 503)
test(503, 503, 503)
def test_account_info_no_account(self):
with save_globals():
self.memcache.store = {}
set_http_connect(404, 404, 404)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes, is_none=True)
self.assertEqual(count, None)
def check_container_info_return(self, ret, is_none=False):
if is_none:
partition, nodes, read_acl, write_acl = None, None, None, None
else:
partition, nodes = self.container_ring.get_nodes(self.account,
self.container)
read_acl, write_acl = self.read_acl, self.write_acl
self.assertEqual(partition, ret['partition'])
self.assertEqual(nodes, ret['nodes'])
self.assertEqual(read_acl, ret['read_acl'])
self.assertEqual(write_acl, ret['write_acl'])
def test_container_info_invalid_account(self):
def account_info(self, account, request, autocreate=False):
return None, None
with save_globals():
swift.proxy.controllers.Controller.account_info = account_info
ret = self.controller.container_info(self.account,
self.container,
self.request)
self.check_container_info_return(ret, True)
# tests if 200 is cached and used
def test_container_info_200(self):
with save_globals():
headers = {'x-container-read': self.read_acl,
'x-container-write': self.write_acl}
set_http_connect(200, # account_info is found
200, headers=headers) # container_info is found
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret)
cache_key = get_container_memcache_key(self.account,
self.container)
cache_value = self.memcache.get(cache_key)
self.assertTrue(isinstance(cache_value, dict))
self.assertEqual(200, cache_value.get('status'))
set_http_connect()
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret)
# tests if 404 is cached and used
def test_container_info_404(self):
def account_info(self, account, request):
return True, True, 0
with save_globals():
set_http_connect(503, 204, # account_info found
504, 404, 404) # container_info 'NotFound'
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
cache_key = get_container_memcache_key(self.account,
self.container)
cache_value = self.memcache.get(cache_key)
self.assertTrue(isinstance(cache_value, dict))
self.assertEqual(404, cache_value.get('status'))
set_http_connect()
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
set_http_connect(503, 404, 404) # account_info 'NotFound'
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
cache_key = get_container_memcache_key(self.account,
self.container)
cache_value = self.memcache.get(cache_key)
self.assertTrue(isinstance(cache_value, dict))
self.assertEqual(404, cache_value.get('status'))
set_http_connect()
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
# tests if some http status codes are not cached
def test_container_info_no_cache(self):
def test(*status_list):
set_http_connect(*status_list)
ret = self.controller.container_info(
self.account, self.container, self.request)
self.assertEqual(len(self.memcache.keys()), 0)
self.check_container_info_return(ret, True)
with save_globals():
# We cache if we have two 404 responses - fail if only one
test(503, 503, 404)
test(504, 404, 503)
test(404, 507, 503)
test(503, 503, 503)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestProxyServer(unittest.TestCase):
def test_get_object_ring(self):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
with patch_policies([
StoragePolicy(0, 'a', False, object_ring=123),
StoragePolicy(1, 'b', True, object_ring=456),
StoragePolicy(2, 'd', False, object_ring=789)
]):
# None means legacy so always use policy 0
ring = baseapp.get_object_ring(None)
self.assertEqual(ring, 123)
ring = baseapp.get_object_ring('')
self.assertEqual(ring, 123)
ring = baseapp.get_object_ring('0')
self.assertEqual(ring, 123)
ring = baseapp.get_object_ring('1')
self.assertEqual(ring, 456)
ring = baseapp.get_object_ring('2')
self.assertEqual(ring, 789)
# illegal values
self.assertRaises(ValueError, baseapp.get_object_ring, '99')
self.assertRaises(ValueError, baseapp.get_object_ring, 'asdf')
def test_unhandled_exception(self):
class MyApp(proxy_server.Application):
def get_controller(self, path):
raise Exception('this shouldn\'t be caught')
app = MyApp(None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
app.update_request(req)
resp = app.handle_request(req)
self.assertEqual(resp.status_int, 500)
def test_internal_method_request(self):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a', environ={'REQUEST_METHOD': '__init__'}))
self.assertEqual(resp.status, '405 Method Not Allowed')
def test_inexistent_method_request(self):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a', environ={'REQUEST_METHOD': '!invalid'}))
self.assertEqual(resp.status, '405 Method Not Allowed')
def test_calls_authorize_allow(self):
called = [False]
def authorize(req):
called[0] = True
with save_globals():
set_http_connect(200)
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/v1/a')
req.environ['swift.authorize'] = authorize
app.update_request(req)
app.handle_request(req)
self.assertTrue(called[0])
def test_calls_authorize_deny(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/v1/a')
req.environ['swift.authorize'] = authorize
app.update_request(req)
app.handle_request(req)
self.assertTrue(called[0])
def test_negative_content_length(self):
swift_dir = mkdtemp()
try:
baseapp = proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), FakeLogger(),
FakeRing(), FakeRing())
resp = baseapp.handle_request(
Request.blank('/', environ={'CONTENT_LENGTH': '-1'}))
self.assertEqual(resp.status, '400 Bad Request')
self.assertEqual(resp.body, 'Invalid Content-Length')
resp = baseapp.handle_request(
Request.blank('/', environ={'CONTENT_LENGTH': '-123'}))
self.assertEqual(resp.status, '400 Bad Request')
self.assertEqual(resp.body, 'Invalid Content-Length')
finally:
rmtree(swift_dir, ignore_errors=True)
def test_adds_transaction_id(self):
swift_dir = mkdtemp()
try:
logger = FakeLogger()
baseapp = proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), logger,
container_ring=FakeRing(),
account_ring=FakeRing())
baseapp.handle_request(
Request.blank('/info',
environ={'HTTP_X_TRANS_ID_EXTRA': 'sardine',
'REQUEST_METHOD': 'GET'}))
# This is kind of a hokey way to get the transaction ID; it'd be
# better to examine response headers, but the catch_errors
# middleware is what sets the X-Trans-Id header, and we don't have
# that available here.
self.assertTrue(logger.txn_id.endswith('-sardine'))
finally:
rmtree(swift_dir, ignore_errors=True)
def test_adds_transaction_id_length_limit(self):
swift_dir = mkdtemp()
try:
logger = FakeLogger()
baseapp = proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), logger,
container_ring=FakeRing(),
account_ring=FakeRing())
baseapp.handle_request(
Request.blank('/info',
environ={'HTTP_X_TRANS_ID_EXTRA': 'a' * 1000,
'REQUEST_METHOD': 'GET'}))
self.assertTrue(logger.txn_id.endswith(
'-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))
finally:
rmtree(swift_dir, ignore_errors=True)
def test_denied_host_header(self):
swift_dir = mkdtemp()
try:
baseapp = proxy_server.Application({'swift_dir': swift_dir,
'deny_host_headers':
'invalid_host.com'},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a/c/o',
environ={'HTTP_HOST': 'invalid_host.com'}))
self.assertEqual(resp.status, '403 Forbidden')
finally:
rmtree(swift_dir, ignore_errors=True)
def test_node_timing(self):
baseapp = proxy_server.Application({'sorting_method': 'timing'},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertEqual(baseapp.node_timings, {})
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
baseapp.update_request(req)
resp = baseapp.handle_request(req)
self.assertEqual(resp.status_int, 503) # couldn't connect to anything
exp_timings = {}
self.assertEqual(baseapp.node_timings, exp_timings)
times = [time.time()]
exp_timings = {'127.0.0.1': (0.1, times[0] + baseapp.timing_expiry)}
with mock.patch('swift.proxy.server.time', lambda: times.pop(0)):
baseapp.set_node_timing({'ip': '127.0.0.1'}, 0.1)
self.assertEqual(baseapp.node_timings, exp_timings)
nodes = [{'ip': '127.0.0.1'}, {'ip': '127.0.0.2'}, {'ip': '127.0.0.3'}]
with mock.patch('swift.proxy.server.shuffle', lambda l: l):
res = baseapp.sort_nodes(nodes)
exp_sorting = [{'ip': '127.0.0.2'}, {'ip': '127.0.0.3'},
{'ip': '127.0.0.1'}]
self.assertEqual(res, exp_sorting)
def test_node_affinity(self):
baseapp = proxy_server.Application({'sorting_method': 'affinity',
'read_affinity': 'r1=1'},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
nodes = [{'region': 2, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 1, 'zone': 2, 'ip': '127.0.0.2'}]
with mock.patch('swift.proxy.server.shuffle', lambda x: x):
app_sorted = baseapp.sort_nodes(nodes)
exp_sorted = [{'region': 1, 'zone': 2, 'ip': '127.0.0.2'},
{'region': 2, 'zone': 1, 'ip': '127.0.0.1'}]
self.assertEqual(exp_sorted, app_sorted)
def test_info_defaults(self):
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
self.assertTrue(app.expose_info)
self.assertTrue(isinstance(app.disallowed_sections, list))
self.assertEqual(1, len(app.disallowed_sections))
self.assertEqual(['swift.valid_api_versions'],
app.disallowed_sections)
self.assertTrue(app.admin_key is None)
def test_get_info_controller(self):
req = Request.blank('/info')
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
controller, path_parts = app.get_controller(req)
self.assertTrue('version' in path_parts)
self.assertTrue(path_parts['version'] is None)
self.assertTrue('disallowed_sections' in path_parts)
self.assertTrue('expose_info' in path_parts)
self.assertTrue('admin_key' in path_parts)
self.assertEqual(controller.__name__, 'InfoController')
def test_error_limit_methods(self):
logger = debug_logger('test')
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing(),
logger=logger)
node = app.container_ring.get_part_nodes(0)[0]
# error occurred
app.error_occurred(node, 'test msg')
self.assertTrue('test msg' in
logger.get_lines_for_level('error')[-1])
self.assertEqual(1, node_error_count(app, node))
# exception occurred
try:
raise Exception('kaboom1!')
except Exception as e1:
app.exception_occurred(node, 'test1', 'test1 msg')
line = logger.get_lines_for_level('error')[-1]
self.assertTrue('test1 server' in line)
self.assertTrue('test1 msg' in line)
log_args, log_kwargs = logger.log_dict['error'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertEqual(log_kwargs['exc_info'][1], e1)
self.assertEqual(2, node_error_count(app, node))
# warning exception occurred
try:
raise Exception('kaboom2!')
except Exception as e2:
app.exception_occurred(node, 'test2', 'test2 msg',
level=logging.WARNING)
line = logger.get_lines_for_level('warning')[-1]
self.assertTrue('test2 server' in line)
self.assertTrue('test2 msg' in line)
log_args, log_kwargs = logger.log_dict['warning'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertEqual(log_kwargs['exc_info'][1], e2)
self.assertEqual(3, node_error_count(app, node))
# custom exception occurred
try:
raise Exception('kaboom3!')
except Exception as e3:
e3_info = sys.exc_info()
try:
raise Exception('kaboom4!')
except Exception:
pass
app.exception_occurred(node, 'test3', 'test3 msg',
level=logging.WARNING, exc_info=e3_info)
line = logger.get_lines_for_level('warning')[-1]
self.assertTrue('test3 server' in line)
self.assertTrue('test3 msg' in line)
log_args, log_kwargs = logger.log_dict['warning'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertEqual(log_kwargs['exc_info'][1], e3)
self.assertEqual(4, node_error_count(app, node))
def test_valid_api_version(self):
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
# The version string is only checked for account, container and object
# requests; the raised APIVersionError returns a 404 to the client
for path in [
'/v2/a',
'/v2/a/c',
'/v2/a/c/o']:
req = Request.blank(path)
self.assertRaises(APIVersionError, app.get_controller, req)
# Default valid API versions are ok
for path in [
'/v1/a',
'/v1/a/c',
'/v1/a/c/o',
'/v1.0/a',
'/v1.0/a/c',
'/v1.0/a/c/o']:
req = Request.blank(path)
controller, path_parts = app.get_controller(req)
self.assertTrue(controller is not None)
# Ensure that setting a valid_api_versions constraint works
for version in ["42", 42]:
try:
with NamedTemporaryFile() as f:
f.write('[swift-constraints]\n')
f.write('valid_api_versions = %s\n' % version)
f.flush()
with mock.patch.object(utils, 'SWIFT_CONF_FILE', f.name):
constraints.reload_constraints()
req = Request.blank('/%s/a' % version)
controller, _ = app.get_controller(req)
self.assertTrue(controller is not None)
# In this case v1 is invalid
req = Request.blank('/v1/a')
self.assertRaises(APIVersionError, app.get_controller, req)
finally:
constraints.reload_constraints()
# Check that valid_api_versions is not exposed by default
req = Request.blank('/info')
controller, path_parts = app.get_controller(req)
self.assertTrue('swift.valid_api_versions' in
path_parts.get('disallowed_sections'))
@patch_policies([
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
])
class TestProxyServerLoading(unittest.TestCase):
def setUp(self):
self._orig_hash_suffix = utils.HASH_PATH_SUFFIX
utils.HASH_PATH_SUFFIX = 'endcap'
self.tempdir = mkdtemp()
def tearDown(self):
rmtree(self.tempdir)
utils.HASH_PATH_SUFFIX = self._orig_hash_suffix
for policy in POLICIES:
policy.object_ring = None
def test_load_policy_rings(self):
for policy in POLICIES:
self.assertFalse(policy.object_ring)
conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
conf_body = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
""" % self.tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
account_ring_path = os.path.join(self.tempdir, 'account.ring.gz')
write_fake_ring(account_ring_path)
container_ring_path = os.path.join(self.tempdir, 'container.ring.gz')
write_fake_ring(container_ring_path)
for policy in POLICIES:
object_ring_path = os.path.join(self.tempdir,
policy.ring_name + '.ring.gz')
write_fake_ring(object_ring_path)
app = loadapp(conf_path)
# find the end of the pipeline
while hasattr(app, 'app'):
app = app.app
# validate loaded rings
self.assertEqual(app.account_ring.serialized_path,
account_ring_path)
self.assertEqual(app.container_ring.serialized_path,
container_ring_path)
for policy in POLICIES:
self.assertEqual(policy.object_ring,
app.get_object_ring(int(policy)))
def test_missing_rings(self):
conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
conf_body = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
""" % self.tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
ring_paths = [
os.path.join(self.tempdir, 'account.ring.gz'),
os.path.join(self.tempdir, 'container.ring.gz'),
]
for policy in POLICIES:
self.assertFalse(policy.object_ring)
object_ring_path = os.path.join(self.tempdir,
policy.ring_name + '.ring.gz')
ring_paths.append(object_ring_path)
for policy in POLICIES:
self.assertFalse(policy.object_ring)
for ring_path in ring_paths:
self.assertFalse(os.path.exists(ring_path))
self.assertRaises(IOError, loadapp, conf_path)
write_fake_ring(ring_path)
# all rings exist, app should load
loadapp(conf_path)
for policy in POLICIES:
self.assertTrue(policy.object_ring)
@patch_policies([StoragePolicy(0, 'zero', True,
object_ring=FakeRing(base_port=3000))])
class TestObjectController(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(
None, FakeMemcache(),
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(),
container_ring=FakeRing())
# clear proxy logger result for each test
_test_servers[0].logger._clear()
def tearDown(self):
self.app.account_ring.set_replicas(3)
self.app.container_ring.set_replicas(3)
for policy in POLICIES:
policy.object_ring = FakeRing(base_port=3000)
def put_container(self, policy_name, container_name):
# Note: only works if called with unpatched policies
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: %s\r\n'
'\r\n' % (container_name, policy_name))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2'
self.assertEqual(headers[:len(exp)], exp)
def assert_status_map(self, method, statuses, expected, raise_exc=False):
with save_globals():
kwargs = {}
if raise_exc:
kwargs['raise_exc'] = raise_exc
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
try:
res = method(req)
except HTTPException as res:
pass
self.assertEqual(res.status_int, expected)
# repeat test
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
try:
res = method(req)
except HTTPException as res:
pass
self.assertEqual(res.status_int, expected)
@unpatch_policies
def test_policy_IO(self):
def check_file(policy, cont, devs, check_val):
partition, nodes = policy.object_ring.get_nodes('a', cont, 'o')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
for dev in devs:
file = df_mgr.get_diskfile(dev, partition, 'a',
cont, 'o',
policy=policy)
if check_val is True:
file.open()
else:
self.assertRaises(DiskFileNotExist, file.open)
prolis = _test_sockets[0]
prosrv = _test_servers[0]
# check policy 0: put file on c, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'test_object0'
path = '/v1/a/c/o'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: text/plain\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
check_file(POLICIES[0], 'c', ['sda1', 'sdb1'], True)
check_file(POLICIES[0], 'c', ['sdc1', 'sdd1', 'sde1', 'sdf1'], False)
# check policy 1: put file on c1, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
path = '/v1/a/c1/o'
obj = 'test_object1'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: text/plain\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
check_file(POLICIES[1], 'c1', ['sdc1', 'sdd1'], True)
check_file(POLICIES[1], 'c1', ['sda1', 'sdb1', 'sde1', 'sdf1'], False)
# check policy 2: put file on c2, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
path = '/v1/a/c2/o'
obj = 'test_object2'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: text/plain\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
check_file(POLICIES[2], 'c2', ['sde1', 'sdf1'], True)
check_file(POLICIES[2], 'c2', ['sda1', 'sdb1', 'sdc1', 'sdd1'], False)
@unpatch_policies
def test_policy_IO_override(self):
if hasattr(_test_servers[-1], '_filesystem'):
# ironically, the _filesystem attribute on the object server means
# the in-memory diskfile is in use, so this test does not apply
return
prosrv = _test_servers[0]
# validate container policy is 1
req = Request.blank('/v1/a/c1', method='HEAD')
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 204) # sanity check
self.assertEqual(POLICIES[1].name, res.headers['x-storage-policy'])
# check overrides: put it in policy 2 (not where the container says)
req = Request.blank(
'/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': BytesIO(b"hello")},
headers={'Content-Type': 'text/plain',
'Content-Length': '5',
'X-Backend-Storage-Policy-Index': '2'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 201) # sanity check
# go to disk to make sure it's there
partition, nodes = prosrv.get_object_ring(2).get_nodes(
'a', 'c1', 'wrong-o')
node = nodes[0]
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
df = df_mgr.get_diskfile(node['device'], partition, 'a',
'c1', 'wrong-o', policy=POLICIES[2])
with df.open():
contents = ''.join(df.reader())
self.assertEqual(contents, "hello")
# can't get it from the normal place
req = Request.blank('/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 404) # sanity check
# but we can get it from policy 2
req = Request.blank('/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'text/plain',
'X-Backend-Storage-Policy-Index': '2'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, 'hello')
# and we can delete it the same way
req = Request.blank('/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Content-Type': 'text/plain',
'X-Backend-Storage-Policy-Index': '2'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 204)
df = df_mgr.get_diskfile(node['device'], partition, 'a',
'c1', 'wrong-o', policy=POLICIES[2])
try:
df.open()
except DiskFileNotExist as e:
self.assertTrue(float(e.timestamp) > 0)
else:
self.fail('did not raise DiskFileNotExist')
@unpatch_policies
def test_GET_newest_large_file(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'a' * (1024 * 1024)
path = '/v1/a/c/o.large'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'application/octet-stream',
'X-Newest': 'true'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
@unpatch_policies
def test_GET_ranges(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = (''.join(
('beans lots of beans lots of beans lots of beans yeah %04d ' % i)
for i in range(100)))
path = '/v1/a/c/o.beans'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# one byte range
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=10-200'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 206)
self.assertEqual(res.body, obj[10:201])
# multiple byte ranges
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=10-200,1000-1099,4123-4523'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 206)
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges')
boundary = dict(params).get('boundary')
self.assertTrue(boundary is not None)
got_mime_docs = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(res.body),
boundary):
headers = HeaderKeyDict(rfc822.Message(mime_doc_fh, 0).items())
body = mime_doc_fh.read()
got_mime_docs.append((headers, body))
self.assertEqual(len(got_mime_docs), 3)
first_range_headers = got_mime_docs[0][0]
first_range_body = got_mime_docs[0][1]
self.assertEqual(first_range_headers['Content-Range'],
'bytes 10-200/5800')
self.assertEqual(first_range_body, obj[10:201])
second_range_headers = got_mime_docs[1][0]
second_range_body = got_mime_docs[1][1]
self.assertEqual(second_range_headers['Content-Range'],
'bytes 1000-1099/5800')
self.assertEqual(second_range_body, obj[1000:1100])
third_range_headers = got_mime_docs[2][0]
third_range_body = got_mime_docs[2][1]
self.assertEqual(third_range_headers['Content-Range'],
'bytes 4123-4523/5800')
self.assertEqual(third_range_body, obj[4123:4524])
@unpatch_policies
def test_GET_bad_range_zero_byte(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
path = '/v1/a/c/o.zerobyte'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: 0\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (path,))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# bad byte-range
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=spaghetti-carbonara'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, '')
# not a byte-range
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'Kotta'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, '')
@unpatch_policies
def test_GET_ranges_resuming(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = (''.join(
('Smurf! The smurfing smurf is completely smurfed. %03d ' % i)
for i in range(1000)))
path = '/v1/a/c/o.smurfs'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/smurftet-stream\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
kaboomed = [0]
bytes_before_timeout = [None]
class FileLikeKaboom(object):
def __init__(self, inner_file_like):
self.inner_file_like = inner_file_like
# close(), etc.
def __getattr__(self, attr):
return getattr(self.inner_file_like, attr)
def readline(self, *a, **kw):
if bytes_before_timeout[0] <= 0:
kaboomed[0] += 1
raise ChunkReadTimeout(None)
result = self.inner_file_like.readline(*a, **kw)
if len(result) > bytes_before_timeout[0]:
result = result[:bytes_before_timeout[0]]
bytes_before_timeout[0] -= len(result)
return result
def read(self, length=None):
result = self.inner_file_like.read(length)
if bytes_before_timeout[0] <= 0:
kaboomed[0] += 1
raise ChunkReadTimeout(None)
if len(result) > bytes_before_timeout[0]:
result = result[:bytes_before_timeout[0]]
bytes_before_timeout[0] -= len(result)
return result
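        # FileLikeKaboom wraps the per-range file-like object handed back by
        # http_response_to_document_iters: it serves bytes until the shared
        # bytes_before_timeout budget is exhausted, then raises
        # ChunkReadTimeout, so the test controls exactly where a backend
        # read appears to fail.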
orig_hrtdi = proxy_base.http_response_to_document_iters
# Use this to mock out http_response_to_document_iters. On the first
# call, the result will be sabotaged to blow up with
# ChunkReadTimeout after some number of bytes are read. On
# subsequent calls, no sabotage will be added.
def sabotaged_hrtdi(*a, **kw):
resp_parts = orig_hrtdi(*a, **kw)
for sb, eb, l, h, range_file in resp_parts:
if bytes_before_timeout[0] <= 0:
# simulate being unable to read MIME part of
# multipart/byteranges response
kaboomed[0] += 1
raise ChunkReadTimeout(None)
boomer = FileLikeKaboom(range_file)
yield sb, eb, l, h, boomer
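        # single_sabotage_hrtdi (below) only breaks the first response it
        # sees, so the proxy's retry after the ChunkReadTimeout gets clean
        # data; using sabotaged_hrtdi directly keeps every response broken.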
sabotaged = [False]
def single_sabotage_hrtdi(*a, **kw):
if not sabotaged[0]:
sabotaged[0] = True
return sabotaged_hrtdi(*a, **kw)
else:
return orig_hrtdi(*a, **kw)
# We want sort of an end-to-end test of object resuming, so what we
# do is mock out stuff so the proxy thinks it only read a certain
# number of bytes before it got a timeout.
bytes_before_timeout[0] = 300
with mock.patch.object(proxy_base, 'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=0-500'})
res = req.get_response(prosrv)
body = res.body # read the whole thing
self.assertEqual(kaboomed[0], 1) # sanity check
self.assertEqual(res.status_int, 206)
self.assertEqual(len(body), 501)
self.assertEqual(body, obj[:501])
# Sanity-check for multi-range resume: make sure we actually break
# in the middle of the second byterange. This test is partially
# about what happens when all the object servers break at once, and
# partially about validating all these mocks we do. After all, the
# point of resuming is that the client can't tell anything went
# wrong, so we need a test where we can't resume and something
# *does* go wrong so we can observe it.
bytes_before_timeout[0] = 700
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base, 'http_response_to_document_iters',
sabotaged_hrtdi): # perma-broken
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''
try:
for chunk in res.app_iter:
body += chunk
except ChunkReadTimeout:
pass
self.assertEqual(res.status_int, 206)
self.assertTrue(kaboomed[0] > 0) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertTrue(boundary is not None) # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
rfc822.Message(mime_doc_fh, 0)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(len(got_byteranges[1]), 199) # partial
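        # The 700-byte budget was spent as the full 501-byte first range plus
        # 199 bytes of the second before the fatal timeout; with every
        # connection sabotaged there is nothing to resume from, so the client
        # observes the truncation.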
# Multi-range resume, resuming in the middle of the first byterange
bytes_before_timeout[0] = 300
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base, 'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''.join(res.app_iter)
self.assertEqual(res.status_int, 206)
self.assertEqual(kaboomed[0], 1) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertTrue(boundary is not None) # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
rfc822.Message(mime_doc_fh, 0)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 3)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(got_byteranges[0], obj[:501])
self.assertEqual(len(got_byteranges[1]), 501)
self.assertEqual(got_byteranges[1], obj[1000:1501])
self.assertEqual(len(got_byteranges[2]), 501)
self.assertEqual(got_byteranges[2], obj[2000:2501])
# Multi-range resume, first GET dies in the middle of the second set
# of MIME headers
bytes_before_timeout[0] = 501
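        # A 501-byte budget is consumed exactly by the 501-byte first range
        # body, so the timeout fires while reading the second part's MIME
        # framing rather than its payload.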
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base, 'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''.join(res.app_iter)
self.assertEqual(res.status_int, 206)
self.assertTrue(kaboomed[0] >= 1) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertTrue(boundary is not None) # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
rfc822.Message(mime_doc_fh, 0)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 3)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(got_byteranges[0], obj[:501])
self.assertEqual(len(got_byteranges[1]), 501)
self.assertEqual(got_byteranges[1], obj[1000:1501])
self.assertEqual(len(got_byteranges[2]), 501)
self.assertEqual(got_byteranges[2], obj[2000:2501])
# Multi-range resume, first GET dies in the middle of the second
# byterange
bytes_before_timeout[0] = 750
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base, 'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''.join(res.app_iter)
self.assertEqual(res.status_int, 206)
self.assertTrue(kaboomed[0] >= 1) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertTrue(boundary is not None) # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
rfc822.Message(mime_doc_fh, 0)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 3)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(got_byteranges[0], obj[:501])
self.assertEqual(len(got_byteranges[1]), 501)
self.assertEqual(got_byteranges[1], obj[1000:1501])
self.assertEqual(len(got_byteranges[2]), 501)
self.assertEqual(got_byteranges[2], obj[2000:2501])
@unpatch_policies
def test_PUT_ec(self):
policy = POLICIES[3]
self.put_container("ec", "ec-con")
obj = 'abCD' * 10 # small, so we don't get multiple EC stripes
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/o1 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: "%s"\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
ecd = policy.pyeclib_driver
expected_pieces = set(ecd.encode(obj))
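        # For this 2+1 scheme pyeclib's encode() returns k + m = 3 fragments;
        # since the object fits in a single segment, each fragment archive on
        # disk should be exactly one of these encoded pieces.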
# go to disk to make sure it's there and all erasure-coded
partition, nodes = policy.object_ring.get_nodes('a', 'ec-con', 'o1')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[policy]
got_pieces = set()
got_indices = set()
got_durable = []
for node_index, node in enumerate(nodes):
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'o1',
policy=policy)
with df.open():
meta = df.get_metadata()
contents = ''.join(df.reader())
got_pieces.add(contents)
                # check for the presence of a .durable file for the timestamp
durable_file = os.path.join(
_testdir, node['device'], storage_directory(
diskfile.get_data_dir(policy),
partition, hash_path('a', 'ec-con', 'o1')),
utils.Timestamp(df.timestamp).internal + '.durable')
if os.path.isfile(durable_file):
got_durable.append(True)
lmeta = dict((k.lower(), v) for k, v in meta.items())
got_indices.add(
lmeta['x-object-sysmeta-ec-frag-index'])
self.assertEqual(
lmeta['x-object-sysmeta-ec-etag'],
md5(obj).hexdigest())
self.assertEqual(
lmeta['x-object-sysmeta-ec-content-length'],
str(len(obj)))
self.assertEqual(
lmeta['x-object-sysmeta-ec-segment-size'],
'4096')
self.assertEqual(
lmeta['x-object-sysmeta-ec-scheme'],
'jerasure_rs_vand 2+1')
self.assertEqual(
lmeta['etag'],
md5(contents).hexdigest())
self.assertEqual(expected_pieces, got_pieces)
self.assertEqual(set(('0', '1', '2')), got_indices)
        # verify that at least 2 puts made it all the way to the end of the
        # 2nd phase, i.e. at least 2 .durable files were written
num_durable_puts = sum(d is True for d in got_durable)
self.assertTrue(num_durable_puts >= 2)
@unpatch_policies
def test_PUT_ec_multiple_segments(self):
ec_policy = POLICIES[3]
self.put_container("ec", "ec-con")
pyeclib_header_size = len(ec_policy.pyeclib_driver.encode("")[0])
segment_size = ec_policy.ec_segment_size
# Big enough to have multiple segments. Also a multiple of the
# segment size to get coverage of that path too.
obj = 'ABC' * segment_size
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/o2 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# it's a 2+1 erasure code, so each fragment archive should be half
# the length of the object, plus three inline pyeclib metadata
        # headers (one per segment)
expected_length = (len(obj) / 2 + pyeclib_header_size * 3)
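        # With the 4096-byte segment size asserted for this policy above, the
        # object is 3 * 4096 = 12288 bytes, so each archive should carry
        # three 2048-byte data fragments plus three pyeclib fragment headers.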
partition, nodes = ec_policy.object_ring.get_nodes(
'a', 'ec-con', 'o2')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy]
got_durable = []
fragment_archives = []
for node in nodes:
df = df_mgr.get_diskfile(
node['device'], partition, 'a',
'ec-con', 'o2', policy=ec_policy)
with df.open():
contents = ''.join(df.reader())
fragment_archives.append(contents)
self.assertEqual(len(contents), expected_length)
                # check for the presence of a .durable file for the timestamp
durable_file = os.path.join(
_testdir, node['device'], storage_directory(
diskfile.get_data_dir(ec_policy),
partition, hash_path('a', 'ec-con', 'o2')),
utils.Timestamp(df.timestamp).internal + '.durable')
if os.path.isfile(durable_file):
got_durable.append(True)
# Verify that we can decode each individual fragment and that they
# are all the correct size
fragment_size = ec_policy.fragment_size
nfragments = int(
math.ceil(float(len(fragment_archives[0])) / fragment_size))
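        # Each archive is a concatenation of per-segment fragments; slicing
        # the same fragment-sized window out of every archive yields the
        # fragment set for one segment, which pyeclib should decode back to
        # the matching slice of the original object.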
for fragment_index in range(nfragments):
fragment_start = fragment_index * fragment_size
fragment_end = (fragment_index + 1) * fragment_size
try:
frags = [fa[fragment_start:fragment_end]
for fa in fragment_archives]
seg = ec_policy.pyeclib_driver.decode(frags)
except ECDriverError:
self.fail("Failed to decode fragments %d; this probably "
"means the fragments are not the sizes they "
"should be" % fragment_index)
segment_start = fragment_index * segment_size
segment_end = (fragment_index + 1) * segment_size
self.assertEqual(seg, obj[segment_start:segment_end])
        # verify that at least 2 puts made it all the way to the end of the
        # 2nd phase, i.e. at least 2 .durable files were written
num_durable_puts = sum(d is True for d in got_durable)
self.assertTrue(num_durable_puts >= 2)
@unpatch_policies
def test_PUT_ec_object_etag_mismatch(self):
ec_policy = POLICIES[3]
self.put_container("ec", "ec-con")
obj = '90:6A:02:60:B1:08-96da3e706025537fc42464916427727e'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/o3 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5('something else').hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 422'
self.assertEqual(headers[:len(exp)], exp)
# nothing should have made it to disk on the object servers
partition, nodes = prosrv.get_object_ring(3).get_nodes(
'a', 'ec-con', 'o3')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy]
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'o3', policy=POLICIES[3])
self.assertRaises(DiskFileNotExist, df.open)
@unpatch_policies
def test_PUT_ec_fragment_archive_etag_mismatch(self):
ec_policy = POLICIES[3]
self.put_container("ec", "ec-con")
# Cause a hash mismatch by feeding one particular MD5 hasher some
        # extra data. The goal here is to corrupt exactly one of the hashers
        # used by the object servers.
countdown = [1]
def busted_md5_constructor(initial_str=""):
hasher = md5(initial_str)
if countdown[0] == 0:
hasher.update('wrong')
countdown[0] -= 1
return hasher
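        # Exactly one hasher constructed via the patched swift.obj.server.md5
        # (the one created once the countdown hits zero) gets the extra
        # update, so a single object server computes a mismatched fragment
        # etag.  For this 2+1 policy that one failure is enough to miss
        # quorum (hence the 503 below) even though two archives reach disk.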
obj = 'uvarovite-esurience-cerated-symphysic'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
with mock.patch('swift.obj.server.md5', busted_md5_constructor):
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/pimento HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 503' # no quorum
self.assertEqual(headers[:len(exp)], exp)
# 2/3 of the fragment archives should have landed on disk
partition, nodes = prosrv.get_object_ring(3).get_nodes(
'a', 'ec-con', 'pimento')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy]
found = 0
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'pimento',
policy=POLICIES[3])
try:
# diskfile open won't succeed because no durable was written,
# so look under the hood for data files.
files = os.listdir(df._datadir)
num_data_files = len([f for f in files if f.endswith('.data')])
self.assertEqual(1, num_data_files)
found += 1
except OSError:
pass
self.assertEqual(found, 2)
@unpatch_policies
def test_PUT_ec_fragment_quorum_archive_etag_mismatch(self):
ec_policy = POLICIES[3]
self.put_container("ec", "ec-con")
def busted_md5_constructor(initial_str=""):
hasher = md5(initial_str)
hasher.update('wrong')
return hasher
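        # Here every hasher is corrupted, so every object server rejects its
        # fragment; the proxy should then skip the second (commit) phase
        # entirely, which the mocked send_commit_confirmation below verifies.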
obj = 'uvarovite-esurience-cerated-symphysic'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
call_count = [0]
def mock_committer(self):
call_count[0] += 1
commit_confirmation = \
'swift.proxy.controllers.obj.ECPutter.send_commit_confirmation'
with nested(
mock.patch('swift.obj.server.md5', busted_md5_constructor),
mock.patch(commit_confirmation, mock_committer)) as \
(_junk, commit_call):
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/quorum HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 503' # no quorum
self.assertEqual(headers[:len(exp)], exp)
        # The proxy must not send the commit to the object-servers when the
        # quorum of responses is 4xx
self.assertEqual(0, call_count[0])
# no fragment archives should have landed on disk
partition, nodes = prosrv.get_object_ring(3).get_nodes(
'a', 'ec-con', 'quorum')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy]
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'quorum',
policy=POLICIES[3])
self.assertFalse(os.path.exists(df._datadir))
@unpatch_policies
def test_PUT_ec_fragment_quorum_bad_request(self):
ec_policy = POLICIES[3]
self.put_container("ec", "ec-con")
obj = 'uvarovite-esurience-cerated-symphysic'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
call_count = [0]
def mock_committer(self):
call_count[0] += 1
read_footer = \
'swift.obj.server.ObjectController._read_metadata_footer'
commit_confirmation = \
'swift.proxy.controllers.obj.ECPutter.send_commit_confirmation'
with nested(
mock.patch(read_footer),
mock.patch(commit_confirmation, mock_committer)) as \
(read_footer_call, commit_call):
# Emulate missing footer MIME doc in all object-servers
read_footer_call.side_effect = HTTPBadRequest(
body="couldn't find footer MIME doc")
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/quorum HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
        # The result of the bad conversation between proxy-server and
        # object-server should not be exposed to the client
exp = 'HTTP/1.1 503'
self.assertEqual(headers[:len(exp)], exp)
        # The proxy must not send the commit to the object-servers when the
        # quorum of responses is 4xx
self.assertEqual(0, call_count[0])
# no fragment archives should have landed on disk
partition, nodes = prosrv.get_object_ring(3).get_nodes(
'a', 'ec-con', 'quorum')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy]
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'quorum',
policy=POLICIES[3])
self.assertFalse(os.path.exists(df._datadir))
@unpatch_policies
def test_PUT_ec_if_none_match(self):
self.put_container("ec", "ec-con")
obj = 'ananepionic-lepidophyllous-ropewalker-neglectful'
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/inm HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: "%s"\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/inm HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'If-None-Match: *\r\n'
'Etag: "%s"\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_GET_ec(self):
self.put_container("ec", "ec-con")
obj = '0123456' * 11 * 17
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/go-get-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'X-Object-Meta-Color: chartreuse\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/go-get-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
gotten_obj = ''
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
self.assertEqual(gotten_obj, obj)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
        self.assertEqual(len(error_lines), 0)  # sanity
        self.assertEqual(len(warn_lines), 0)  # sanity
@unpatch_policies
def test_conditional_GET_ec(self):
self.put_container("ec", "ec-con")
obj = 'this object has an etag and is otherwise unimportant'
etag = md5(obj).hexdigest()
not_etag = md5(obj + "blahblah").hexdigest()
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/conditionals HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
for verb, body in (('GET', obj), ('HEAD', '')):
# If-Match
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-Match': etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-Match': not_etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 412)
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-Match': "*"})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
# If-None-Match
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-None-Match': etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 304)
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-None-Match': not_etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-None-Match': "*"})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 304)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
        self.assertEqual(len(error_lines), 0)  # sanity
        self.assertEqual(len(warn_lines), 0)  # sanity
@unpatch_policies
def test_GET_ec_big(self):
self.put_container("ec", "ec-con")
# our EC segment size is 4 KiB, so this is multiple (3) segments;
# we'll verify that with a sanity check
obj = 'a moose once bit my sister' * 400
self.assertTrue(
len(obj) > POLICIES.get_by_name("ec").ec_segment_size * 2,
"object is too small for proper testing")
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/big-obj-get HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/big-obj-get HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
gotten_obj = ''
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
# This may look like a redundant test, but when things fail, this
# has a useful failure message while the subsequent one spews piles
# of garbage and demolishes your terminal's scrollback buffer.
self.assertEqual(len(gotten_obj), len(obj))
self.assertEqual(gotten_obj, obj)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
        self.assertEqual(len(error_lines), 0)  # sanity
        self.assertEqual(len(warn_lines), 0)  # sanity
@unpatch_policies
def test_GET_ec_failure_handling(self):
self.put_container("ec", "ec-con")
obj = 'look at this object; it is simply amazing ' * 500
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/crash-test-dummy HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
def explodey_iter(inner_iter):
yield next(inner_iter)
raise Exception("doom ba doom")
def explodey_doc_parts_iter(inner_iter_iter):
for item in inner_iter_iter:
item = item.copy() # paranoia about mutable data
item['part_iter'] = explodey_iter(item['part_iter'])
yield item
real_ec_app_iter = swift.proxy.controllers.obj.ECAppIter
def explodey_ec_app_iter(path, policy, iterators, *a, **kw):
# Each thing in `iterators` here is a document-parts iterator,
# and we want to fail after getting a little into each part.
#
# That way, we ensure we've started streaming the response to
# the client when things go wrong.
return real_ec_app_iter(
path, policy,
[explodey_doc_parts_iter(i) for i in iterators],
*a, **kw)
with mock.patch("swift.proxy.controllers.obj.ECAppIter",
explodey_ec_app_iter):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/crash-test-dummy HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
gotten_obj = ''
try:
with Timeout(300): # don't hang the testrun when this fails
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
except Timeout:
self.fail("GET hung when connection failed")
# Ensure we failed partway through, otherwise the mocks could
# get out of date without anyone noticing
self.assertTrue(0 < len(gotten_obj) < len(obj))
@unpatch_policies
def test_HEAD_ec(self):
self.put_container("ec", "ec-con")
obj = '0123456' * 11 * 17
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/go-head-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'X-Object-Meta-Color: chartreuse\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a/ec-con/go-head-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
        self.assertEqual(len(error_lines), 0)  # sanity
        self.assertEqual(len(warn_lines), 0)  # sanity
@unpatch_policies
def test_GET_ec_404(self):
self.put_container("ec", "ec-con")
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/yes-we-have-no-bananas HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
        self.assertEqual(len(error_lines), 0)  # sanity
        self.assertEqual(len(warn_lines), 0)  # sanity
@unpatch_policies
def test_HEAD_ec_404(self):
self.put_container("ec", "ec-con")
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a/ec-con/yes-we-have-no-bananas HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
        self.assertEqual(len(error_lines), 0)  # sanity
        self.assertEqual(len(warn_lines), 0)  # sanity
def test_PUT_expect_header_zero_content_length(self):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
if 'expect' in headers or 'Expect' in headers:
test_errors.append('Expect was in headers for object '
'server!')
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
# The (201, Exception('test')) tuples in there have the effect of
# changing the status of the initial expect response. The default
# expect response from FakeConn for 201 is 100.
            # But the object server won't send a 100 continue line if the
            # client doesn't send an Expect: 100-continue header (as is the
            # case with zero-byte PUTs, as validated by this test);
            # nevertheless the object controller calls getexpect without
            # prejudice. In this
# case the status from the response shows up early in getexpect
# instead of having to wait until getresponse. The Exception is
# in there to ensure that the object controller also *uses* the
# result of getexpect instead of calling getresponse in which case
# our FakeConn will blow up.
success_codes = [(201, Exception('test'))] * 3
set_http_connect(200, 200, *success_codes,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
self.assertEqual(test_errors, [])
self.assertTrue(res.status.startswith('201 '), res.status)
def test_PUT_expect_header_nonzero_content_length(self):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
if 'Expect' not in headers:
test_errors.append('Expect was not in headers for '
'non-zero byte PUT!')
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
# the (100, 201) tuples in there are just being extra explicit
# about the FakeConn returning the 100 Continue status when the
            # object controller calls getexpect, which is FakeConn's default
# for 201 if no expect_status is specified.
success_codes = [(100, 201)] * 3
set_http_connect(200, 200, *success_codes,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 1
req.body = 'a'
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
self.assertEqual(test_errors, [])
self.assertTrue(res.status.startswith('201 '))
def test_PUT_respects_write_affinity(self):
written_to = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
written_to.append((ipaddr, port, device))
with save_globals():
def is_r0(node):
return node['region'] == 0
object_ring = self.app.get_object_ring(None)
object_ring.max_more_nodes = 100
self.app.write_affinity_is_local_fn = is_r0
self.app.write_affinity_node_count = lambda r: 3
controller = \
ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
set_http_connect(200, 200, 201, 201, 201,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 1
req.body = 'a'
self.app.memcache.store = {}
res = controller.PUT(req)
self.assertTrue(res.status.startswith('201 '))
self.assertEqual(3, len(written_to))
for ip, port, device in written_to:
# this is kind of a hokey test, but in FakeRing, the port is even
# when the region is 0, and odd when the region is 1, so this test
# asserts that we only wrote to nodes in region 0.
self.assertEqual(0, port % 2)
def test_PUT_respects_write_affinity_with_507s(self):
written_to = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
written_to.append((ipaddr, port, device))
with save_globals():
def is_r0(node):
return node['region'] == 0
object_ring = self.app.get_object_ring(None)
object_ring.max_more_nodes = 100
self.app.write_affinity_is_local_fn = is_r0
self.app.write_affinity_node_count = lambda r: 3
controller = \
ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
self.app.error_limit(
object_ring.get_part_nodes(1)[0], 'test')
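            # Error-limiting the first primary makes the proxy skip it, so
            # with the local-write affinity above the writes should land on
            # two region-0 nodes and spill over to one non-local node.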
set_http_connect(200, 200, # account, container
201, 201, 201, # 3 working backends
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 1
req.body = 'a'
self.app.memcache.store = {}
res = controller.PUT(req)
self.assertTrue(res.status.startswith('201 '))
self.assertEqual(3, len(written_to))
# this is kind of a hokey test, but in FakeRing, the port is even when
# the region is 0, and odd when the region is 1, so this test asserts
# that we wrote to 2 nodes in region 0, then went to 1 non-r0 node.
self.assertEqual(0, written_to[0][1] % 2) # it's (ip, port, device)
self.assertEqual(0, written_to[1][1] % 2)
self.assertNotEqual(0, written_to[2][1] % 2)
@unpatch_policies
def test_PUT_no_etag_fallocate(self):
with mock.patch('swift.obj.diskfile.fallocate') as mock_fallocate:
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'hemoleucocytic-surfactant'
fd.write('PUT /v1/a/c/o HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# one for each obj server; this test has 2
self.assertEqual(len(mock_fallocate.mock_calls), 2)
@unpatch_policies
def test_PUT_message_length_using_content_length(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'j' * 20
fd.write('PUT /v1/a/c/o.content-length HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_using_transfer_encoding(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_using_both(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_bad_message_length(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n'
'Transfer-Encoding: gzip\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 400'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_unsup_xfr_encoding(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n'
'Transfer-Encoding: gzip,chunked\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 501'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_too_large(self):
with mock.patch('swift.common.constraints.MAX_FILE_SIZE', 10):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n\r\n'
'oh say can you see by the dawns\'\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 413'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_last_modified(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
lm_hdr = 'Last-Modified: '
self.assertEqual(headers[:len(exp)], exp)
last_modified_put = [line for line in headers.split('\r\n')
if lm_hdr in line][0][len(lm_hdr):]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
last_modified_head = [line for line in headers.split('\r\n')
if lm_hdr in line][0][len(lm_hdr):]
self.assertEqual(last_modified_put, last_modified_head)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'If-Modified-Since: %s\r\n'
'X-Storage-Token: t\r\n\r\n' % last_modified_put)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 304'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'If-Unmodified-Since: %s\r\n'
'X-Storage-Token: t\r\n\r\n' % last_modified_put)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
def test_PUT_auto_content_type(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_content_type(filename, expected):
                # The five responses here are for account_info() (HEAD to the
                # account server), container_info() (HEAD to the container
                # server) and three calls to _connect_put_node() (PUT to
                # three object servers)
set_http_connect(201, 201, 201, 201, 201,
give_content_type=lambda content_type:
self.assertEqual(content_type,
next(expected)))
                # We need to include a transfer-encoding header to get past
# constraints.check_object_creation()
req = Request.blank('/v1/a/c/%s' % filename, {},
headers={'transfer-encoding': 'chunked'})
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
# If we don't check the response here we could miss problems
# in PUT()
self.assertEqual(res.status_int, 201)
test_content_type('test.jpg', iter(['', '', 'image/jpeg',
'image/jpeg', 'image/jpeg']))
test_content_type('test.html', iter(['', '', 'text/html',
'text/html', 'text/html']))
test_content_type('test.css', iter(['', '', 'text/css',
'text/css', 'text/css']))
def test_custom_mime_types_files(self):
swift_dir = mkdtemp()
try:
with open(os.path.join(swift_dir, 'mime.types'), 'w') as fp:
fp.write('foo/bar foo\n')
proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), FakeLogger(),
FakeRing(), FakeRing())
self.assertEqual(proxy_server.mimetypes.guess_type('blah.foo')[0],
'foo/bar')
self.assertEqual(proxy_server.mimetypes.guess_type('blah.jpg')[0],
'image/jpeg')
finally:
rmtree(swift_dir, ignore_errors=True)
def test_PUT(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, 201, 201), 201)
test_status_map((200, 200, 201, 201, 500), 201)
test_status_map((200, 200, 204, 404, 404), 404)
test_status_map((200, 200, 204, 500, 404), 503)
test_status_map((200, 200, 202, 202, 204), 204)
def test_PUT_connect_exceptions(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
try:
res = controller.PUT(req)
except HTTPException as res:
pass
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
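            # In these status maps, -1 simulates a connect exception, while a
            # (expect_status, response_status) tuple lets the 100-continue
            # phase and the final getresponse fail independently in FakeConn.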
test_status_map((200, 200, 201, 201, -1), 201) # connect exc
# connect errors
test_status_map((200, 200, Timeout(), 201, 201, ), 201)
test_status_map((200, 200, 201, 201, Exception()), 201)
# expect errors
test_status_map((200, 200, (Timeout(), None), 201, 201), 201)
test_status_map((200, 200, (Exception(), None), 201, 201), 201)
# response errors
test_status_map((200, 200, (100, Timeout()), 201, 201), 201)
test_status_map((200, 200, (100, Exception()), 201, 201), 201)
test_status_map((200, 200, 507, 201, 201), 201) # error limited
test_status_map((200, 200, -1, 201, -1), 503)
test_status_map((200, 200, 503, -1, 503), 503)
def test_PUT_send_exceptions(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
self.app.memcache.store = {}
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg',
environ={'REQUEST_METHOD': 'PUT'},
body='some data')
self.app.update_request(req)
try:
res = controller.PUT(req)
except HTTPException as res:
pass
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, -1, 201), 201)
test_status_map((200, 200, 201, -1, -1), 503)
test_status_map((200, 200, 503, 503, -1), 503)
def test_PUT_max_size(self):
with save_globals():
set_http_connect(201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
'Content-Type': 'foo/bar'})
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int, 413)
def test_PUT_bad_content_type(self):
with save_globals():
set_http_connect(201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
'Content-Length': 0, 'Content-Type': 'foo/bar;swift_hey=45'})
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int, 400)
def test_PUT_getresponse_exceptions(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
self.app.memcache.store = {}
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
try:
res = controller.PUT(req)
except HTTPException as res:
pass
expected = str(expected)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
test_status_map((200, 200, 201, 201, -1), 201)
test_status_map((200, 200, 201, -1, -1), 503)
test_status_map((200, 200, 503, 503, -1), 503)
def test_POST(self):
with save_globals():
self.app.object_post_as_copy = False
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'Content-Type': 'foo/bar'})
self.app.update_request(req)
res = req.get_response(self.app)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 202, 202, 202), 202)
test_status_map((200, 200, 202, 202, 500), 202)
test_status_map((200, 200, 202, 500, 500), 503)
test_status_map((200, 200, 202, 404, 500), 503)
test_status_map((200, 200, 202, 404, 404), 404)
test_status_map((200, 200, 404, 500, 500), 503)
test_status_map((200, 200, 404, 404, 404), 404)
@patch_policies([
StoragePolicy(0, 'zero', is_default=True, object_ring=FakeRing()),
StoragePolicy(1, 'one', object_ring=FakeRing()),
])
def test_POST_backend_headers(self):
# reset the router post patch_policies
self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
self.app.object_post_as_copy = False
self.app.sort_nodes = lambda nodes: nodes
backend_requests = []
def capture_requests(ip, port, method, path, headers, *args,
**kwargs):
backend_requests.append((method, path, headers))
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'X-Object-Meta-Color': 'Blue'})
        # we want the container_info response to say a policy index of 1
resp_headers = {'X-Backend-Storage-Policy-Index': 1}
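        # This POST should fan out to five backend requests: an account HEAD,
        # a container HEAD, and one POST per object replica.  The policy
        # index returned in the container HEAD headers determines which
        # policy the object POSTs are addressed to.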
with mocked_http_conn(
200, 200, 202, 202, 202,
headers=resp_headers, give_connect=capture_requests
) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 202)
self.assertEqual(len(backend_requests), 5)
def check_request(req, method, path, headers=None):
req_method, req_path, req_headers = req
self.assertEqual(method, req_method)
# caller can ignore leading path parts
self.assertTrue(req_path.endswith(path),
'expected path to end with %s, it was %s' % (
path, req_path))
headers = headers or {}
# caller can ignore some headers
for k, v in headers.items():
self.assertEqual(req_headers[k], v)
account_request = backend_requests.pop(0)
check_request(account_request, method='HEAD', path='/sda/0/a')
container_request = backend_requests.pop(0)
check_request(container_request, method='HEAD', path='/sda/0/a/c')
# make sure backend requests included expected container headers
container_headers = {}
for request in backend_requests:
req_headers = request[2]
device = req_headers['x-container-device']
host = req_headers['x-container-host']
container_headers[device] = host
expectations = {
'method': 'POST',
'path': '/0/a/c/o',
'headers': {
'X-Container-Partition': '0',
'Connection': 'close',
'User-Agent': 'proxy-server %s' % os.getpid(),
'Host': 'localhost:80',
'Referer': 'POST http://localhost/v1/a/c/o',
'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': '1'
},
}
check_request(request, **expectations)
expected = {}
for i, device in enumerate(['sda', 'sdb', 'sdc']):
expected[device] = '10.0.0.%d:100%d' % (i, i)
self.assertEqual(container_headers, expected)
# and again with policy override
self.app.memcache.store = {}
backend_requests = []
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': 0})
with mocked_http_conn(
200, 200, 202, 202, 202,
headers=resp_headers, give_connect=capture_requests
) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 202)
self.assertEqual(len(backend_requests), 5)
for request in backend_requests[2:]:
expectations = {
'method': 'POST',
'path': '/0/a/c/o', # ignore device bit
'headers': {
'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': '0',
}
}
check_request(request, **expectations)
# and this time with post as copy
self.app.object_post_as_copy = True
self.app.memcache.store = {}
backend_requests = []
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': 0})
with mocked_http_conn(
200, 200, 200, 200, 200, 201, 201, 201,
headers=resp_headers, give_connect=capture_requests
) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 202)
self.assertEqual(len(backend_requests), 8)
policy0 = {'X-Backend-Storage-Policy-Index': '0'}
policy1 = {'X-Backend-Storage-Policy-Index': '1'}
expected = [
# account info
{'method': 'HEAD', 'path': '/0/a'},
# container info
{'method': 'HEAD', 'path': '/0/a/c'},
            # x-newest GETs
{'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1},
{'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1},
{'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1},
# new writes
{'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0},
{'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0},
{'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0},
]
for request, expectations in zip(backend_requests, expected):
check_request(request, **expectations)
def test_POST_as_copy(self):
with save_globals():
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar'})
self.app.update_request(req)
res = req.get_response(self.app)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 200, 200, 200, 202, 202, 202), 202)
test_status_map((200, 200, 200, 200, 200, 202, 202, 500), 202)
test_status_map((200, 200, 200, 200, 200, 202, 500, 500), 503)
test_status_map((200, 200, 200, 200, 200, 202, 404, 500), 503)
test_status_map((200, 200, 200, 200, 200, 202, 404, 404), 404)
test_status_map((200, 200, 200, 200, 200, 404, 500, 500), 503)
test_status_map((200, 200, 200, 200, 200, 404, 404, 404), 404)
def test_DELETE(self):
with save_globals():
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'DELETE'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
test_status_map((200, 200, 204, 204, 204), 204)
test_status_map((200, 200, 204, 204, 500), 204)
test_status_map((200, 200, 204, 404, 404), 404)
test_status_map((200, 204, 500, 500, 404), 503)
test_status_map((200, 200, 404, 404, 404), 404)
test_status_map((200, 200, 400, 400, 400), 400)
def test_HEAD(self):
with save_globals():
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
if expected < 400:
self.assertTrue('x-works' in res.headers)
self.assertEqual(res.headers['x-works'], 'yes')
self.assertTrue('accept-ranges' in res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
test_status_map((200, 200, 200, 404, 404), 200)
test_status_map((200, 200, 200, 500, 404), 200)
test_status_map((200, 200, 304, 500, 404), 304)
test_status_map((200, 200, 404, 404, 404), 404)
test_status_map((200, 200, 404, 404, 500), 404)
test_status_map((200, 200, 500, 500, 500), 503)
def test_HEAD_newest(self):
with save_globals():
def test_status_map(statuses, expected, timestamps,
expected_timestamp):
set_http_connect(*statuses, timestamps=timestamps)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'},
headers={'x-newest': 'true'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
self.assertEqual(res.headers.get('last-modified'),
expected_timestamp)
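            # With X-Newest the proxy consults every object replica and
            # serves the response carrying the newest timestamp, so the
            # expected last-modified is the maximum of the object timestamps.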
# acct cont obj obj obj
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'2', '3'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '2'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '3',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, None), None)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, '1'), '1')
test_status_map((200, 200, 404, 404, 200), 200, ('0', '0', None,
None, '1'), '1')
def test_GET_newest(self):
with save_globals():
def test_status_map(statuses, expected, timestamps,
expected_timestamp):
set_http_connect(*statuses, timestamps=timestamps)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'GET'},
headers={'x-newest': 'true'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
self.assertEqual(res.headers.get('last-modified'),
expected_timestamp)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'2', '3'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '2'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '3',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, None), None)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, '1'), '1')
with save_globals():
def test_status_map(statuses, expected, timestamps,
expected_timestamp):
set_http_connect(*statuses, timestamps=timestamps)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
self.assertEqual(res.headers.get('last-modified'),
expected_timestamp)
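            # Without X-Newest the proxy returns the first usable object
            # response, so the last-modified reflects that node's timestamp
            # rather than the newest one.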
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'2', '3'), '1')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '2'), '1')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '1'), '1')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '3',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
'1', '2'), None)
def test_POST_meta_val_len(self):
with save_globals():
limit = constraints.MAX_META_VALUE_LENGTH
self.app.object_post_as_copy = False
ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * limit})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * (limit + 1)})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_as_copy_meta_val_len(self):
with save_globals():
limit = constraints.MAX_META_VALUE_LENGTH
set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
# acct cont objc objc objc obj obj obj
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * limit})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * (limit + 1)})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_meta_key_len(self):
with save_globals():
limit = constraints.MAX_META_NAME_LENGTH
self.app.object_post_as_copy = False
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * limit): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * (limit + 1)): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_as_copy_meta_key_len(self):
with save_globals():
limit = constraints.MAX_META_NAME_LENGTH
set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
# acct cont objc objc objc obj obj obj
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * limit): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * (limit + 1)): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_meta_count(self):
with save_globals():
limit = constraints.MAX_META_COUNT
headers = dict(
(('X-Object-Meta-' + str(i), 'a') for i in range(limit + 1)))
headers.update({'Content-Type': 'foo/bar'})
set_http_connect(202, 202, 202)
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers=headers)
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_meta_size(self):
with save_globals():
limit = constraints.MAX_META_OVERALL_SIZE
count = limit // 256  # enough headers to reach the overall size limit
headers = dict(
(('X-Object-Meta-' + str(i), 'a' * 256)
for i in range(count + 1)))
headers.update({'Content-Type': 'foo/bar'})
set_http_connect(202, 202, 202)
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers=headers)
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
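
# Illustrative sketch (not from the Swift source): the POST metadata tests
# above drive the proxy's metadata constraints.  The checker below is a
# simplified stand-in for constraints.check_metadata(); the default limits are
# assumptions chosen to mirror common Swift defaults, not authoritative values.
def exceeds_object_meta_limits(headers, max_name_len=128, max_value_len=256,
                               max_count=90, max_overall_size=4096):
    """Return True when the X-Object-Meta-* headers break any limit."""
    prefix = 'x-object-meta-'
    count = overall = 0
    for name, value in headers.items():
        if not name.lower().startswith(prefix):
            continue
        key = name[len(prefix):]
        count += 1
        overall += len(key) + len(value)
        if len(key) > max_name_len or len(value) > max_value_len:
            return True
    return count > max_count or overall > max_overall_size
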
def test_PUT_not_autodetect_content_type(self):
with save_globals():
headers = {'Content-Type': 'something/right', 'Content-Length': 0}
it_worked = []
def verify_content_type(ipaddr, port, device, partition,
method, path, headers=None,
query_string=None):
if path == '/a/c/o.html':
it_worked.append(
headers['Content-Type'].startswith('something/right'))
set_http_connect(204, 204, 201, 201, 201,
give_connect=verify_content_type)
req = Request.blank('/v1/a/c/o.html', {'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
req.get_response(self.app)
self.assertNotEqual(it_worked, [])
self.assertTrue(all(it_worked))
def test_PUT_autodetect_content_type(self):
with save_globals():
headers = {'Content-Type': 'something/wrong', 'Content-Length': 0,
'X-Detect-Content-Type': 'True'}
it_worked = []
def verify_content_type(ipaddr, port, device, partition,
method, path, headers=None,
query_string=None):
if path == '/a/c/o.html':
it_worked.append(
headers['Content-Type'].startswith('text/html'))
set_http_connect(204, 204, 201, 201, 201,
give_connect=verify_content_type)
req = Request.blank('/v1/a/c/o.html', {'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
req.get_response(self.app)
self.assertNotEqual(it_worked, [])
self.assertTrue(all(it_worked))
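
# Illustrative sketch (not from the Swift source): the two tests above pin
# down when the proxy may replace the client's Content-Type.  The helper
# below is an invented, simplified version using the standard mimetypes
# module; the real proxy has its own detection path.
import mimetypes

def choose_content_type(object_path, client_type, detect_requested):
    """Keep the client's type unless detection was explicitly requested."""
    if not detect_requested:
        return client_type
    guessed, _encoding = mimetypes.guess_type(object_path)
    return guessed or 'application/octet-stream'

# e.g. choose_content_type('/v1/a/c/o.html', 'something/wrong', True)
# returns 'text/html', matching the autodetect test's expectation.
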
def test_client_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
class SlowBody(object):
def __init__(self):
self.sent = 0
def read(self, size=-1):
if self.sent < 4:
sleep(0.1)
self.sent += 1
return ' '
return ''
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': SlowBody()},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
self.app.client_timeout = 0.05
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': SlowBody()},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(201, 201, 201)
# obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 408)
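
# Illustrative sketch (not from the Swift source): test_client_timeout above
# expects a 408 when the client trickles the body in too slowly.  The reader
# below is a wall-clock simplification with invented names; the proxy actually
# wraps each chunk read in an eventlet timeout rather than budgeting the
# whole body.
import time

def read_body_with_client_timeout(wsgi_input, content_length, client_timeout,
                                  chunk_size=65536):
    """Read content_length bytes or raise TimeoutError (mapped to 408)."""
    deadline = time.monotonic() + client_timeout
    received = b''
    while len(received) < content_length:
        chunk = wsgi_input.read(min(chunk_size, content_length - len(received)))
        if not chunk:
            break
        received += chunk
        if time.monotonic() > deadline:
            raise TimeoutError('client sent the request body too slowly')
    return received
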
def test_client_disconnect(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
class SlowBody(object):
def __init__(self):
self.sent = 0
def read(self, size=-1):
raise Exception('Disconnected')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': SlowBody()},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_node_read_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
set_http_connect(200, 200, 200, slow=0.1)
req.sent_size = 0
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(not got_exc)
self.app.recoverable_node_timeout = 0.1
set_http_connect(200, 200, 200, slow=1.0)
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
def test_node_read_timeout_retry(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
self.app.recoverable_node_timeout = 0.1
set_http_connect(200, 200, 200, slow=[1.0, 1.0, 1.0])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual('', resp.body)
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual(resp.body, 'lalala')
except ChunkReadTimeout:
got_exc = True
self.assertTrue(not got_exc)
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0], etags=['a', 'a', 'a'])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual(resp.body, 'lalala')
except ChunkReadTimeout:
got_exc = True
self.assertTrue(not got_exc)
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0], etags=['a', 'b', 'a'])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual(resp.body, 'lalala')
except ChunkReadTimeout:
got_exc = True
self.assertTrue(not got_exc)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0], etags=['a', 'b', 'b'])
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
def test_node_write_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'},
body=' ')
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201, slow=0.1)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
self.app.node_timeout = 0.1
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'},
body=' ')
self.app.update_request(req)
set_http_connect(201, 201, 201, slow=1.0)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_node_request_setting(self):
baseapp = proxy_server.Application({'request_node_count': '3'},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertEqual(baseapp.request_node_count(3), 3)
def test_iter_nodes(self):
with save_globals():
try:
object_ring = self.app.get_object_ring(None)
object_ring.max_more_nodes = 2
partition, nodes = object_ring.get_nodes('account',
'container',
'object')
collected_nodes = []
for node in self.app.iter_nodes(object_ring,
partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 5)
object_ring.max_more_nodes = 6
self.app.request_node_count = lambda r: 20
partition, nodes = object_ring.get_nodes('account',
'container',
'object')
collected_nodes = []
for node in self.app.iter_nodes(object_ring,
partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 9)
# zero error-limited primary nodes -> no handoff warnings
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 7
object_ring.max_more_nodes = 20
partition, nodes = object_ring.get_nodes('account',
'container',
'object')
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 7)
self.assertEqual(self.app.logger.log_dict['warning'], [])
self.assertEqual(self.app.logger.get_increments(), [])
# one error-limited primary node -> one handoff warning
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 7
self.app._error_limiting = {} # clear out errors
set_node_errors(self.app, object_ring._devs[0], 999,
last_error=(2 ** 63 - 1))
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 7)
self.assertEqual(self.app.logger.log_dict['warning'], [
(('Handoff requested (5)',), {})])
self.assertEqual(self.app.logger.get_increments(),
['handoff_count'])
# two error-limited primary nodes -> two handoff warnings
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 7
self.app._error_limiting = {} # clear out errors
for i in range(2):
set_node_errors(self.app, object_ring._devs[i], 999,
last_error=(2 ** 63 - 1))
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 7)
self.assertEqual(self.app.logger.log_dict['warning'], [
(('Handoff requested (5)',), {}),
(('Handoff requested (6)',), {})])
self.assertEqual(self.app.logger.get_increments(),
['handoff_count',
'handoff_count'])
# all error-limited primary nodes -> four handoff warnings,
# plus a handoff-all metric
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 10
object_ring.set_replicas(4) # otherwise we run out of handoffs
self.app._error_limiting = {} # clear out errors
for i in range(4):
set_node_errors(self.app, object_ring._devs[i], 999,
last_error=(2 ** 63 - 1))
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 10)
self.assertEqual(self.app.logger.log_dict['warning'], [
(('Handoff requested (7)',), {}),
(('Handoff requested (8)',), {}),
(('Handoff requested (9)',), {}),
(('Handoff requested (10)',), {})])
self.assertEqual(self.app.logger.get_increments(),
['handoff_count',
'handoff_count',
'handoff_count',
'handoff_count',
'handoff_all_count'])
finally:
object_ring.max_more_nodes = 0
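
# Illustrative sketch (not from the Swift source): the iter_nodes tests above
# check that error-limited primaries are skipped, that handoffs top up the
# list to request_node_count, and that only the "extra" handoffs (those
# standing in for skipped primaries) produce a warning.  The generator below
# is a simplified sketch of that behaviour with invented parameter names; the
# real implementation also tracks the handoff_all metric.
def iter_nodes_sketch(primaries, handoffs, node_limit, is_error_limited,
                      log_handoff=lambda index: None):
    expected_handoffs = node_limit - len(primaries)
    yielded = handoff_index = 0
    for node in primaries:
        if yielded >= node_limit:
            return
        if is_error_limited(node):
            continue
        yield node
        yielded += 1
    for node in handoffs:
        if yielded >= node_limit:
            return
        if is_error_limited(node):
            continue
        handoff_index += 1
        if handoff_index > expected_handoffs:
            log_handoff(handoff_index)  # cf. 'Handoff requested (5)' above
        yield node
        yielded += 1
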
def test_iter_nodes_calls_sort_nodes(self):
with mock.patch.object(self.app, 'sort_nodes') as sort_nodes:
object_ring = self.app.get_object_ring(None)
for node in self.app.iter_nodes(object_ring, 0):
pass
sort_nodes.assert_called_once_with(
object_ring.get_part_nodes(0))
def test_iter_nodes_skips_error_limited(self):
with mock.patch.object(self.app, 'sort_nodes', lambda n: n):
object_ring = self.app.get_object_ring(None)
first_nodes = list(self.app.iter_nodes(object_ring, 0))
second_nodes = list(self.app.iter_nodes(object_ring, 0))
self.assertTrue(first_nodes[0] in second_nodes)
self.app.error_limit(first_nodes[0], 'test')
second_nodes = list(self.app.iter_nodes(object_ring, 0))
self.assertTrue(first_nodes[0] not in second_nodes)
def test_iter_nodes_gives_extra_if_error_limited_inline(self):
object_ring = self.app.get_object_ring(None)
with nested(
mock.patch.object(self.app, 'sort_nodes', lambda n: n),
mock.patch.object(self.app, 'request_node_count',
lambda r: 6),
mock.patch.object(object_ring, 'max_more_nodes', 99)):
first_nodes = list(self.app.iter_nodes(object_ring, 0))
second_nodes = []
for node in self.app.iter_nodes(object_ring, 0):
if not second_nodes:
self.app.error_limit(node, 'test')
second_nodes.append(node)
self.assertEqual(len(first_nodes), 6)
self.assertEqual(len(second_nodes), 7)
def test_iter_nodes_with_custom_node_iter(self):
object_ring = self.app.get_object_ring(None)
node_list = [dict(id=n, ip='1.2.3.4', port=n, device='D')
for n in range(10)]
with nested(
mock.patch.object(self.app, 'sort_nodes', lambda n: n),
mock.patch.object(self.app, 'request_node_count',
lambda r: 3)):
got_nodes = list(self.app.iter_nodes(object_ring, 0,
node_iter=iter(node_list)))
self.assertEqual(node_list[:3], got_nodes)
with nested(
mock.patch.object(self.app, 'sort_nodes', lambda n: n),
mock.patch.object(self.app, 'request_node_count',
lambda r: 1000000)):
got_nodes = list(self.app.iter_nodes(object_ring, 0,
node_iter=iter(node_list)))
self.assertEqual(node_list, got_nodes)
def test_best_response_sets_headers(self):
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object', headers=[{'X-Test': '1'},
{'X-Test': '2'},
{'X-Test': '3'}])
self.assertEqual(resp.headers['X-Test'], '1')
def test_best_response_sets_etag(self):
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object')
self.assertEqual(resp.etag, None)
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object',
etag='68b329da9893e34099c7d8ad5cb9c940'
)
self.assertEqual(resp.etag, '68b329da9893e34099c7d8ad5cb9c940')
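
# Illustrative sketch (not from the Swift source): best_response() picks one
# status out of the per-node results.  The helper below captures the rough
# idea only - prefer the lowest status family that reaches a quorum, else
# 503; the real method also carries through reasons, bodies, headers and the
# etag asserted in the two tests above.
from collections import Counter

def best_status_sketch(statuses, quorum):
    for family in (2, 3, 4, 5):
        members = [s for s in statuses if s // 100 == family]
        if len(members) >= quorum:
            return Counter(members).most_common(1)[0][0]
    return 503

# e.g. best_status_sketch([200, 200, 200], quorum=2) == 200
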
def test_proxy_passes_content_type(self):
with save_globals():
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
set_http_connect(200, 200, 200)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'x-application/test')
set_http_connect(200, 200, 200)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 0)
set_http_connect(200, 200, 200, slow=True)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 4)
def test_proxy_passes_content_length_on_head(self):
with save_globals():
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 0)
set_http_connect(200, 200, 200, slow=True)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 4)
def test_error_limiting(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
200)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]), 2)
self.assertTrue(
node_last_error(controller.app, object_ring.devs[0])
is not None)
for _junk in range(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD, (200, 200, 503, 503,
503), 503)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]),
self.app.error_suppression_limit + 1)
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
503)
self.assertTrue(
node_last_error(controller.app, object_ring.devs[0])
is not None)
self.assert_status_map(controller.PUT, (200, 200, 200, 201, 201,
201), 503)
self.assert_status_map(controller.POST,
(200, 200, 200, 200, 200, 200, 202, 202,
202), 503)
self.assert_status_map(controller.DELETE,
(200, 200, 200, 204, 204, 204), 503)
self.app.error_suppression_interval = -300
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
200)
self.assertRaises(BaseException,
self.assert_status_map, controller.DELETE,
(200, 200, 200, 204, 204, 204), 503,
raise_exc=True)
def test_error_limiting_survives_ring_reload(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
200)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]), 2)
self.assertTrue(
node_last_error(controller.app, object_ring.devs[0])
is not None)
for _junk in range(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD, (200, 200, 503, 503,
503), 503)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]),
self.app.error_suppression_limit + 1)
# wipe out any state in the ring
for policy in POLICIES:
policy.object_ring = FakeRing(base_port=3000)
# and we still get an error, which proves that the
# error-limiting info survived a ring reload
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
503)
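
# Illustrative sketch (not from the Swift source): the error-limiting tests
# above rely on per-node error counters that are keyed on the node's address
# rather than on the ring object, which is why the state survives a ring
# reload.  ErrorLimiter is an invented, minimal stand-in for that bookkeeping.
import time

class ErrorLimiter(object):
    def __init__(self, suppression_limit=10, suppression_interval=60):
        self.suppression_limit = suppression_limit
        self.suppression_interval = suppression_interval
        self._errors = {}

    @staticmethod
    def _key(node):
        return (node['ip'], node['port'], node.get('device'))

    def record_error(self, node):
        entry = self._errors.setdefault(self._key(node),
                                        {'errors': 0, 'last_error': 0})
        entry['errors'] += 1
        entry['last_error'] = time.time()

    def is_limited(self, node):
        entry = self._errors.get(self._key(node))
        if not entry:
            return False
        if time.time() - entry['last_error'] > self.suppression_interval:
            return False
        return entry['errors'] > self.suppression_limit
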
def test_PUT_error_limiting(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
# acc con obj obj obj
self.assert_status_map(controller.PUT, (200, 200, 503, 200, 200),
200)
# 2, not 1, because assert_status_map() calls the method twice
odevs = object_ring.devs
self.assertEqual(node_error_count(controller.app, odevs[0]), 2)
self.assertEqual(node_error_count(controller.app, odevs[1]), 0)
self.assertEqual(node_error_count(controller.app, odevs[2]), 0)
self.assertTrue(
node_last_error(controller.app, odevs[0]) is not None)
self.assertTrue(node_last_error(controller.app, odevs[1]) is None)
self.assertTrue(node_last_error(controller.app, odevs[2]) is None)
def test_PUT_error_limiting_last_node(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
# acc con obj obj obj
self.assert_status_map(controller.PUT, (200, 200, 200, 200, 503),
200)
# 2, not 1, because assert_status_map() calls the method twice
odevs = object_ring.devs
self.assertEqual(node_error_count(controller.app, odevs[0]), 0)
self.assertEqual(node_error_count(controller.app, odevs[1]), 0)
self.assertEqual(node_error_count(controller.app, odevs[2]), 2)
self.assertTrue(node_last_error(controller.app, odevs[0]) is None)
self.assertTrue(node_last_error(controller.app, odevs[1]) is None)
self.assertTrue(
node_last_error(controller.app, odevs[2]) is not None)
def test_acc_or_con_missing_returns_404(self):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
self.app._error_limiting = {}
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
self.app.update_request(req)
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 200)
set_http_connect(404, 404, 404)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 404, 404)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 503, 404)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 503, 503)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 200, 204, 204, 204)
# acct cont obj obj obj
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 204)
set_http_connect(200, 404, 404, 404)
# acct cont cont cont
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 503, 503, 503)
# acct cont cont cont
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
for dev in self.app.account_ring.devs:
set_node_errors(
self.app, dev, self.app.error_suppression_limit + 1,
time.time())
set_http_connect(200)
# acct [isn't actually called since everything
# is error limited]
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
for dev in self.app.account_ring.devs:
set_node_errors(self.app, dev, 0, last_error=None)
for dev in self.app.container_ring.devs:
set_node_errors(self.app, dev,
self.app.error_suppression_limit + 1,
time.time())
set_http_connect(200, 200)
# acct cont [isn't actually called since
# everything is error limited]
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
def test_PUT_POST_requires_container_exist(self):
with save_globals():
self.app.object_post_as_copy = False
self.app.memcache = FakeMemcacheReturnsNone()
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 404, 404, 404, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 404, 404, 404, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'text/plain'})
self.app.update_request(req)
resp = controller.POST(req)
self.assertEqual(resp.status_int, 404)
def test_PUT_POST_as_copy_requires_container_exist(self):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 404, 404, 404, 200, 200, 200)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 404, 404, 404, 200, 200, 200, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'text/plain'})
self.app.update_request(req)
resp = controller.POST(req)
self.assertEqual(resp.status_int, 404)
def test_bad_metadata(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Object-Meta-' + (
'a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'Content-Length': '0',
'X-Object-Meta-' + (
'a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Object-Meta-Too-Long': 'a' *
constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Object-Meta-Too-Long': 'a' *
(constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
for x in range(constraints.MAX_META_COUNT):
headers['X-Object-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
for x in range(constraints.MAX_META_COUNT + 1):
headers['X-Object-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < constraints.MAX_META_OVERALL_SIZE - 4 - \
constraints.MAX_META_VALUE_LENGTH:
size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Object-Meta-%04d' % x] = header_value
x += 1
if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Object-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Object-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
@contextmanager
def controller_context(self, req, *args, **kwargs):
_v, account, container, obj = utils.split_path(req.path, 4, 4, True)
controller = ReplicatedObjectController(
self.app, account, container, obj)
self.app.update_request(req)
self.app.memcache.store = {}
with save_globals():
new_connect = set_http_connect(*args, **kwargs)
yield controller
unused_status_list = []
while True:
try:
unused_status_list.append(next(new_connect.code_iter))
except StopIteration:
break
if unused_status_list:
self.fail('Unused status codes: %r' % unused_status_list)
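
# Usage note (not from the Swift source): the while/try/except loop above
# simply drains the canned response iterator and fails if any status codes
# were never consumed.  Because list() also stops at StopIteration, the same
# leftover check can be written as the one-liner below; assert_exhausted is
# an invented name, shown only for illustration.
def assert_exhausted(code_iter):
    leftovers = list(code_iter)
    if leftovers:
        raise AssertionError('unused canned status codes: %r' % (leftovers,))
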
def test_basic_put_with_x_copy_from(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
def test_basic_put_with_x_copy_from_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_basic_put_with_x_copy_from_across_container(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c2/o'})
status_list = (200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont conc objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c2/o')
def test_basic_put_with_x_copy_from_across_container_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c2/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c2/o')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_copy_non_zero_content_length(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5',
'X-Copy-From': 'c/o'})
status_list = (200, 200)
# acct cont
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
def test_copy_non_zero_content_length_with_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5',
'X-Copy-From': 'c/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200)
# acct cont
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
def test_copy_with_slashes_in_x_copy_from(self):
# extra source path parsing
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o/o2'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
def test_copy_with_slashes_in_x_copy_from_and_account(self):
# extra source path parsing
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o/o2',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_copy_with_spaces_in_x_copy_from(self):
# space in source path
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o%20o2'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o%20o2')
def test_copy_with_spaces_in_x_copy_from_and_account(self):
# space in source path
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o%20o2',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o%20o2')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_copy_with_leading_slash_in_x_copy_from(self):
# repeat tests with leading /
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
def test_copy_with_leading_slash_in_x_copy_from_and_account(self):
# repeat tests with leading /
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_copy_with_leading_slash_and_slashes_in_x_copy_from(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o/o2'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
def test_copy_with_leading_slash_and_slashes_in_x_copy_from_acct(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o/o2',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_copy_with_no_object_in_x_copy_from(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c'})
status_list = (200, 200)
# acct cont
with self.controller_context(req, *status_list) as controller:
try:
controller.PUT(req)
except HTTPException as resp:
self.assertEqual(resp.status_int // 100, 4) # client error
else:
raise self.fail('Invalid X-Copy-From did not raise '
'client error')
def test_copy_with_no_object_in_x_copy_from_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c',
'X-Copy-From-Account': 'a'})
status_list = (200, 200)
# acct cont
with self.controller_context(req, *status_list) as controller:
try:
controller.PUT(req)
except HTTPException as resp:
self.assertEqual(resp.status_int // 100, 4) # client error
else:
raise self.fail('Invalid X-Copy-From did not raise '
'client error')
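
# Illustrative sketch (not from the Swift source): the copy tests above pin
# down how an X-Copy-From value is split - a leading slash is tolerated,
# slashes inside the object name are kept, and a value with no object part is
# a client error.  parse_copy_source is an invented helper; the proxy's real
# parsing also unquotes the path, which this sketch deliberately skips (note
# that 'c/o%20o2' is echoed back percent-encoded in the tests above).
def parse_copy_source(copy_from):
    """Return (container, object) or raise ValueError for a bad value."""
    path = copy_from.lstrip('/')
    container, _slash, obj = path.partition('/')
    if not container or not obj:
        raise ValueError('X-Copy-From must look like <container>/<object>')
    return container, obj

# e.g. parse_copy_source('/c/o/o2') == ('c', 'o/o2')
#      parse_copy_source('/c') raises ValueError (-> 4xx, as asserted above)
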
def test_copy_server_error_reading_source(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
status_list = (200, 200, 503, 503, 503)
# acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 503)
def test_copy_server_error_reading_source_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 503, 503, 503)
# acct cont acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 503)
def test_copy_not_found_reading_source(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
# not found
status_list = (200, 200, 404, 404, 404)
# acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 404)
def test_copy_not_found_reading_source_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
# not found
status_list = (200, 200, 200, 200, 404, 404, 404)
# acct cont acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 404)
def test_copy_with_some_missing_sources(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
status_list = (200, 200, 404, 404, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
def test_copy_with_some_missing_sources_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 404, 404, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
def test_copy_with_object_metadata(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Object-Meta-Ours': 'okay'})
# test object metadata
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers.get('x-object-meta-test'), 'testing')
self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEqual(resp.headers.get('x-delete-at'), '9876543210')
def test_copy_with_object_metadata_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Object-Meta-Ours': 'okay',
'X-Copy-From-Account': 'a'})
# test object metadata
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers.get('x-object-meta-test'), 'testing')
self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEqual(resp.headers.get('x-delete-at'), '9876543210')
@_limit_max_file_size
def test_copy_source_larger_than_max_file_size(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
# copy-from object is too large to fit in target object
class LargeResponseBody(object):
def __len__(self):
return constraints.MAX_FILE_SIZE + 1
def __getitem__(self, key):
return ''
copy_from_obj_body = LargeResponseBody()
status_list = (200, 200, 200, 200, 200)
# acct cont objc objc objc
kwargs = dict(body=copy_from_obj_body)
with self.controller_context(req, *status_list,
**kwargs) as controller:
self.app.update_request(req)
self.app.memcache.store = {}
try:
resp = controller.PUT(req)
except HTTPException as resp:
pass
self.assertEqual(resp.status_int, 413)
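
# Illustrative sketch (not from the Swift source): the test above fakes a
# copy source whose __len__ reports more than MAX_FILE_SIZE and expects a
# 413.  The guard below is an invented, minimal statement of that rule.
def copy_source_error_status(source_length, max_file_size):
    """Return 413 when the source cannot fit, else None (copy may proceed)."""
    if source_length > max_file_size:
        return 413
    return None

# e.g. copy_source_error_status(10 + 1, 10) == 413
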
def test_basic_COPY(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c/o2'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
def test_basic_COPY_account(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c1/o2',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_COPY_across_containers(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c2/o'})
status_list = (200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont c2 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
def test_COPY_source_with_slashes_in_name(self):
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c/o'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
def test_COPY_account_source_with_slashes_in_name(self):
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_COPY_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
def test_COPY_account_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_COPY_source_with_slashes_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
def test_COPY_account_source_with_slashes_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_COPY_no_object_in_destination(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c_o'})
status_list = [] # no requests needed
with self.controller_context(req, *status_list) as controller:
self.assertRaises(HTTPException, controller.COPY, req)
def test_COPY_account_no_object_in_destination(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c_o',
'Destination-Account': 'a1'})
status_list = [] # no requests needed
with self.controller_context(req, *status_list) as controller:
self.assertRaises(HTTPException, controller.COPY, req)
def test_COPY_server_error_reading_source(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status_list = (200, 200, 503, 503, 503)
# acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 503)
def test_COPY_account_server_error_reading_source(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 503, 503, 503)
# acct cont acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 503)
def test_COPY_not_found_reading_source(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status_list = (200, 200, 404, 404, 404)
# acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 404)
def test_COPY_account_not_found_reading_source(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 404, 404, 404)
# acct cont acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 404)
def test_COPY_with_some_missing_sources(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status_list = (200, 200, 404, 404, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
def test_COPY_account_with_some_missing_sources(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 404, 404, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
def test_COPY_with_metadata(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o',
'X-Object-Meta-Ours': 'okay'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers.get('x-object-meta-test'),
'testing')
self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEqual(resp.headers.get('x-delete-at'), '9876543210')
def test_COPY_account_with_metadata(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'X-Object-Meta-Ours': 'okay',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers.get('x-object-meta-test'),
'testing')
self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEqual(resp.headers.get('x-delete-at'), '9876543210')
@_limit_max_file_size
def test_COPY_source_larger_than_max_file_size(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
class LargeResponseBody(object):
def __len__(self):
return constraints.MAX_FILE_SIZE + 1
def __getitem__(self, key):
return ''
copy_from_obj_body = LargeResponseBody()
status_list = (200, 200, 200, 200, 200)
# acct cont objc objc objc
kwargs = dict(body=copy_from_obj_body)
with self.controller_context(req, *status_list,
**kwargs) as controller:
try:
resp = controller.COPY(req)
except HTTPException as resp:
pass
self.assertEqual(resp.status_int, 413)
@_limit_max_file_size
def test_COPY_account_source_larger_than_max_file_size(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
class LargeResponseBody(object):
def __len__(self):
return constraints.MAX_FILE_SIZE + 1
def __getitem__(self, key):
return ''
copy_from_obj_body = LargeResponseBody()
status_list = (200, 200, 200, 200, 200)
# acct cont objc objc objc
kwargs = dict(body=copy_from_obj_body)
with self.controller_context(req, *status_list,
**kwargs) as controller:
try:
resp = controller.COPY(req)
except HTTPException as resp:
pass
self.assertEqual(resp.status_int, 413)
def test_COPY_newest(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
req.account = 'a'
controller.object_name = 'o'
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201,
# act cont objc objc objc obj obj obj
timestamps=('1', '1', '1', '3', '2', '4', '4',
'4'))
self.app.memcache.store = {}
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from-last-modified'],
'3')
def test_COPY_account_newest(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
req.account = 'a'
controller.object_name = 'o'
set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201,
# act cont acct cont objc objc objc obj obj obj
timestamps=('1', '1', '1', '1', '3', '2', '1',
'4', '4', '4'))
self.app.memcache.store = {}
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from-last-modified'],
'3')
def test_COPY_delete_at(self):
with save_globals():
backend_requests = []
def capture_requests(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
backend_requests.append((method, path, headers))
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201,
give_connect=capture_requests)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
self.app.update_request(req)
resp = controller.COPY(req)
self.assertEqual(201, resp.status_int) # sanity
for method, path, given_headers in backend_requests:
if method != 'PUT':
continue
self.assertEqual(given_headers.get('X-Delete-At'),
'9876543210')
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
def test_COPY_account_delete_at(self):
with save_globals():
backend_requests = []
def capture_requests(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
backend_requests.append((method, path, headers))
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201,
give_connect=capture_requests)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
self.app.update_request(req)
resp = controller.COPY(req)
self.assertEqual(201, resp.status_int) # sanity
for method, path, given_headers in backend_requests:
if method != 'PUT':
continue
self.assertEqual(given_headers.get('X-Delete-At'),
'9876543210')
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
def test_chunked_put(self):
class ChunkedFile(object):
def __init__(self, bytes):
self.bytes = bytes
self.read_bytes = 0
@property
def bytes_left(self):
return self.bytes - self.read_bytes
def read(self, amt=None):
if self.read_bytes >= self.bytes:
raise StopIteration()
if not amt:
amt = self.bytes_left
data = 'a' * min(amt, self.bytes_left)
self.read_bytes += len(data)
return data
with save_globals():
set_http_connect(201, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Transfer-Encoding': 'chunked',
'Content-Type': 'foo/bar'})
req.body_file = ChunkedFile(10)
self.app.memcache.store = {}
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int // 100, 2) # success
            # test 413 entity too large
set_http_connect(201, 201, 201, 201)
req = Request.blank('/v1/a/c/o',
                                environ={'REQUEST_METHOD': 'PUT'},
headers={'Transfer-Encoding': 'chunked',
'Content-Type': 'foo/bar'})
req.body_file = ChunkedFile(11)
self.app.memcache.store = {}
self.app.update_request(req)
with mock.patch('swift.common.constraints.MAX_FILE_SIZE', 10):
res = controller.PUT(req)
self.assertEqual(res.status_int, 413)
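    # Illustrative sketch (not the proxy code under test): a chunked request
    # carries no Content-Length to validate up front, so a server has to
    # count bytes as they stream in and give up with 413 once the configured
    # maximum is exceeded, which is what the assertion above relies on. The
    # helper below is hypothetical; it only demonstrates the counting idea,
    # and it tolerates the StopIteration convention ChunkedFile.read uses.
    @staticmethod
    def _illustrate_streaming_size_limit(body_file, max_size, chunk_size=8):
        received = 0
        while True:
            try:
                piece = body_file.read(chunk_size)
            except StopIteration:
                break
            if not piece:
                break
            received += len(piece)
            if received > max_size:
                return 413  # entity too large
        return 201  # the whole body fit under the limit
    # e.g. a 11-byte body against max_size=10 returns 413, while a 10-byte
    # body returns 201, mirroring the two cases in test_chunked_put above.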
@unpatch_policies
def test_chunked_put_bad_version(self):
# Check bad version
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v0 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_path(self):
# Check bad path
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET invalid HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_utf8(self):
# Check invalid utf-8
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a%80 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_path_no_controller(self):
# Check bad path, no controller
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_method(self):
# Check bad method
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('LICK /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 405'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_unhandled_exception(self):
# Check unhandled exception
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
obj2srv, obj3srv) = _test_servers
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
orig_update_request = prosrv.update_request
def broken_update_request(*args, **kwargs):
raise Exception('fake: this should be printed')
prosrv.update_request = broken_update_request
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 500'
self.assertEqual(headers[:len(exp)], exp)
prosrv.update_request = orig_update_request
@unpatch_policies
def test_chunked_put_head_account(self):
        # HEAD the account; this is mostly a double check, but it also
        # exercises the part of Application.log_request that 'enforces' a
        # content_length on the response.
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 204'
self.assertEqual(headers[:len(exp)], exp)
self.assertTrue('\r\nContent-Length: 0\r\n' in headers)
@unpatch_policies
def test_chunked_put_utf8_all_the_way_down(self):
# Test UTF-8 Unicode all the way through the system
ustr = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xba \xe1\xbc\xb0\xce' \
'\xbf\xe1\xbd\xbb\xce\x87 \xcf\x84\xe1\xbd\xb0 \xcf' \
'\x80\xe1\xbd\xb1\xce\xbd\xcf\x84\xca\xbc \xe1\xbc' \
'\x82\xce\xbd \xe1\xbc\x90\xce\xbe\xe1\xbd\xb5\xce' \
'\xba\xce\xbf\xce\xb9 \xcf\x83\xce\xb1\xcf\x86\xe1' \
'\xbf\x86.Test'
ustr_short = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xbatest'
# Create ustr container
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' % quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# List account with ustr container (test plain)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
containers = fd.read().split('\n')
self.assertTrue(ustr in containers)
# List account with ustr container (test json)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a?format=json HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
listing = json.loads(fd.read())
self.assertTrue(ustr.decode('utf8') in [l['name'] for l in listing])
# List account with ustr container (test xml)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a?format=xml HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertTrue('<name>%s</name>' % ustr in fd.read())
# Create ustr object with ustr metadata in ustr container
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'X-Object-Meta-%s: %s\r\nContent-Length: 0\r\n\r\n' %
(quote(ustr), quote(ustr), quote(ustr_short),
quote(ustr)))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# List ustr container with ustr object (test plain)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' % quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
objects = fd.read().split('\n')
self.assertTrue(ustr in objects)
# List ustr container with ustr object (test json)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?format=json HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n' %
quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
listing = json.loads(fd.read())
self.assertEqual(listing[0]['name'], ustr.decode('utf8'))
# List ustr container with ustr object (test xml)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?format=xml HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n' %
quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertTrue('<name>%s</name>' % ustr in fd.read())
# Retrieve ustr object with ustr metadata
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' %
(quote(ustr), quote(ustr)))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertTrue('\r\nX-Object-Meta-%s: %s\r\n' %
(quote(ustr_short).lower(), quote(ustr)) in headers)
@unpatch_policies
def test_chunked_put_chunked_put(self):
# Do chunked object put
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
# Also happens to assert that x-storage-token is taken as a
# replacement for x-auth-token.
fd.write('PUT /v1/a/c/o/chunky HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'2\r\noh\r\n4\r\n hai\r\nf\r\n123456789abcdef\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Ensure we get what we put
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/o/chunky HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
self.assertEqual(body, 'oh hai123456789abcdef')
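    # A minimal sketch of the chunked transfer-encoding framing the PUT above
    # writes by hand: each chunk is "<hex length>\r\n<data>\r\n" and a
    # zero-length chunk terminates the body. The real decoding is done by the
    # WSGI server, not by this helper, but it shows why the subsequent GET
    # returns 'oh hai123456789abcdef'.
    @staticmethod
    def _decode_chunked(framed):
        body = ''
        rest = framed
        while True:
            size_line, rest = rest.split('\r\n', 1)
            size = int(size_line, 16)
            if size == 0:
                return body
            body += rest[:size]
            rest = rest[size + 2:]  # skip the chunk data and trailing CRLF
    # e.g. _decode_chunked('2\r\noh\r\n4\r\n hai\r\nf\r\n123456789abcdef\r\n'
    #                      '0\r\n\r\n') == 'oh hai123456789abcdef'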
@unpatch_policies
def test_conditional_range_get(self):
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis,
obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# make a container
fd = sock.makefile()
fd.write('PUT /v1/a/con HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
exp = 'HTTP/1.1 201'
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
# put an object in it
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/con/o HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: 10\r\n'
'Content-Type: text/plain\r\n'
'\r\n'
'abcdefghij\r\n')
fd.flush()
exp = 'HTTP/1.1 201'
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
# request with both If-None-Match and Range
etag = md5("abcdefghij").hexdigest()
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/con/o HTTP/1.1\r\n' +
'Host: localhost\r\n' +
'Connection: close\r\n' +
'X-Storage-Token: t\r\n' +
'If-None-Match: "' + etag + '"\r\n' +
'Range: bytes=3-8\r\n' +
'\r\n')
fd.flush()
exp = 'HTTP/1.1 304'
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
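        # Why 304 rather than 206: conditional headers are evaluated before
        # Range (RFC 7232/7233), so a matching If-None-Match short-circuits
        # the GET into "304 Not Modified" and the Range header is simply
        # ignored. The test relies on the object's ETag being the MD5 of its
        # bytes, which is why the header value is built from
        # md5("abcdefghij").hexdigest() above.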
def test_mismatched_etags(self):
with save_globals():
# no etag supplied, object servers return success w/ diff values
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0'})
self.app.update_request(req)
set_http_connect(200, 201, 201, 201,
etags=[None,
'68b329da9893e34099c7d8ad5cb9c940',
'68b329da9893e34099c7d8ad5cb9c940',
'68b329da9893e34099c7d8ad5cb9c941'])
resp = controller.PUT(req)
self.assertEqual(resp.status_int // 100, 5) # server error
# req supplies etag, object servers return 422 - mismatch
headers = {'Content-Length': '0',
'ETag': '68b329da9893e34099c7d8ad5cb9c940'}
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
set_http_connect(200, 422, 422, 503,
etags=['68b329da9893e34099c7d8ad5cb9c940',
'68b329da9893e34099c7d8ad5cb9c941',
None,
None])
resp = controller.PUT(req)
self.assertEqual(resp.status_int // 100, 4) # client error
def test_response_get_accept_ranges_header(self):
with save_globals():
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.GET(req)
self.assertTrue('accept-ranges' in resp.headers)
self.assertEqual(resp.headers['accept-ranges'], 'bytes')
def test_response_head_accept_ranges_header(self):
with save_globals():
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assertTrue('accept-ranges' in resp.headers)
self.assertEqual(resp.headers['accept-ranges'], 'bytes')
def test_GET_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.GET(req)
self.assertTrue(called[0])
def test_HEAD_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.HEAD(req)
self.assertTrue(called[0])
def test_POST_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
self.app.object_post_as_copy = False
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
def test_POST_as_copy_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
def test_PUT_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.PUT(req)
self.assertTrue(called[0])
def test_COPY_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c/o'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.COPY(req)
self.assertTrue(called[0])
def test_POST_converts_delete_after_to_delete_at(self):
with save_globals():
self.app.object_post_as_copy = False
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
self.app.memcache.store = {}
orig_time = time.time
try:
t = time.time()
time.time = lambda: t
req = Request.blank('/v1/a/c/o', {},
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '60'})
self.app.update_request(req)
res = controller.POST(req)
self.assertEqual(res.status, '202 Fake')
self.assertEqual(req.headers.get('x-delete-at'),
str(int(t + 60)))
finally:
time.time = orig_time
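    # The conversion asserted above is simple arithmetic; a sketch of it
    # (with a hypothetical helper name, not the proxy's real function) looks
    # like the following.
    @staticmethod
    def _delete_after_to_delete_at(delete_after, now=None):
        # X-Delete-After is a relative offset in seconds; X-Delete-At is an
        # absolute Unix timestamp, truncated to whole seconds.
        if now is None:
            now = time.time()
        return str(int(now + int(delete_after)))
    # e.g. _delete_after_to_delete_at('60', now=t) == str(int(t + 60)),
    # matching the x-delete-at assertion in the test above.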
@unpatch_policies
def test_ec_client_disconnect(self):
prolis = _test_sockets[0]
# create connection
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
# create container
fd.write('PUT /v1/a/ec-discon HTTP/1.1\r\n'
'Host: localhost\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: ec\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2'
self.assertEqual(headers[:len(exp)], exp)
# create object
obj = 'a' * 4 * 64 * 2 ** 10
fd.write('PUT /v1/a/ec-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# get object
fd.write('GET /v1/a/ec-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
        # read just the start of the object, then disconnect early
fd.read(10)
fd.close()
sock.close()
sleep(0)
# check for disconnect message!
expected = ['Client disconnected on read'] * 2
self.assertEqual(
_test_servers[0].logger.get_lines_for_level('warning'),
expected)
@unpatch_policies
def test_leak_1(self):
_request_instances = weakref.WeakKeyDictionary()
_orig_init = Request.__init__
def request_init(self, *args, **kwargs):
_orig_init(self, *args, **kwargs)
_request_instances[self] = None
with mock.patch.object(Request, "__init__", request_init):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
obj_len = prosrv.client_chunk_size * 2
# PUT test file
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/test_leak_1 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Auth-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (obj_len, 'a' * obj_len))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Remember Request instance count, make sure the GC is run for
# pythons without reference counting.
for i in range(4):
sleep(0) # let eventlet do its thing
gc.collect()
else:
sleep(0)
before_request_instances = len(_request_instances)
# GET test file, but disconnect early
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/test_leak_1 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Auth-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
fd.read(1)
fd.close()
sock.close()
# Make sure the GC is run again for pythons without reference
# counting
for i in range(4):
sleep(0) # let eventlet do its thing
gc.collect()
else:
sleep(0)
self.assertEqual(
before_request_instances, len(_request_instances))
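    # The leak check above works because WeakKeyDictionary entries vanish as
    # soon as their keys lose their last strong reference; if proxy code kept
    # a stray reference to a Request, its entry (and the count) would stick
    # around. A tiny self-contained demonstration of that property:
    @staticmethod
    def _weakref_dict_drops_collected_keys():
        class Thing(object):
            pass
        tracked = weakref.WeakKeyDictionary()
        thing = Thing()
        tracked[thing] = None
        assert len(tracked) == 1
        del thing            # drop the only strong reference
        gc.collect()         # force collection on non-refcounting VMs
        return len(tracked)  # 0 -- the entry disappeared with its key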
def test_OPTIONS(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
def my_empty_container_info(*args):
return {}
controller.container_info = my_empty_container_info
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_empty_origin_container_info(*args):
return {'cors': {'allow_origin': None}}
controller.container_info = my_empty_origin_container_info
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_container_info(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar:8080 https://foo.bar',
'max_age': '999',
}
}
controller.container_info = my_container_info
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
'https://foo.bar',
resp.headers['access-control-allow-origin'])
for verb in 'OPTIONS COPY GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
7)
self.assertEqual('999', resp.headers['access-control-max-age'])
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank('/v1/a/c/o.jpg', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS COPY GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 7)
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.bar',
'Access-Control-Request-Method': 'GET'})
controller.app.cors_allow_origin = ['http://foo.bar', ]
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
def my_container_info_wildcard(*args):
return {
'cors': {
'allow_origin': '*',
'max_age': '999',
}
}
controller.container_info = my_container_info_wildcard
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://bar.baz',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual('*', resp.headers['access-control-allow-origin'])
for verb in 'OPTIONS COPY GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
7)
self.assertEqual('999', resp.headers['access-control-max-age'])
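    # A rough sketch of the origin check the OPTIONS assertions above
    # exercise (hypothetical helper, not the middleware's actual code):
    # container metadata stores allowed origins as a space-separated list,
    # '*' allows everything, and a preflight with no matching origin or no
    # Access-Control-Request-Method is rejected with 401. The proxy's own
    # cors_allow_origin setting can whitelist further origins, as the test
    # also shows.
    @staticmethod
    def _preflight_allowed(origin, request_method, allow_origin):
        if not origin or not request_method:
            return False
        allowed = (allow_origin or '').split()
        return '*' in allowed or origin in allowed
    # e.g. _preflight_allowed('https://foo.bar', 'GET',
    #                         'http://foo.bar:8080 https://foo.bar') is True,
    # while an Origin of 'http://foo.com' is refused, matching the 401s above.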
def test_CORS_valid(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
def stubContainerInfo(*args):
return {
'cors': {
'allow_origin': 'http://not.foo.bar'
}
}
controller.container_info = stubContainerInfo
controller.app.strict_cors_mode = False
def objectGET(controller, req):
return Response(headers={
'X-Object-Meta-Color': 'red',
'X-Super-Secret': 'hush',
})
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(objectGET)(controller, req)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://foo.bar',
resp.headers['access-control-allow-origin'])
self.assertEqual('red', resp.headers['x-object-meta-color'])
# X-Super-Secret is in the response, but not "exposed"
self.assertEqual('hush', resp.headers['x-super-secret'])
self.assertTrue('access-control-expose-headers' in resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
expected_exposed = set(['cache-control', 'content-language',
'content-type', 'expires', 'last-modified',
'pragma', 'etag', 'x-timestamp',
'x-trans-id', 'x-object-meta-color'])
self.assertEqual(expected_exposed, exposed)
controller.app.strict_cors_mode = True
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(objectGET)(controller, req)
self.assertEqual(200, resp.status_int)
self.assertTrue('access-control-allow-origin' not in resp.headers)
def test_CORS_valid_with_obj_headers(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
def stubContainerInfo(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar'
}
}
controller.container_info = stubContainerInfo
def objectGET(controller, req):
return Response(headers={
'X-Object-Meta-Color': 'red',
'X-Super-Secret': 'hush',
'Access-Control-Allow-Origin': 'http://obj.origin',
'Access-Control-Expose-Headers': 'x-trans-id'
})
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(objectGET)(controller, req)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://obj.origin',
resp.headers['access-control-allow-origin'])
self.assertEqual('x-trans-id',
resp.headers['access-control-expose-headers'])
def _gather_x_container_headers(self, controller_call, req, *connect_args,
**kwargs):
header_list = kwargs.pop('header_list', ['X-Container-Device',
'X-Container-Host',
'X-Container-Partition'])
seen_headers = []
def capture_headers(ipaddr, port, device, partition, method,
path, headers=None, query_string=None):
captured = {}
for header in header_list:
captured[header] = headers.get(header)
seen_headers.append(captured)
with save_globals():
self.app.allow_account_management = True
set_http_connect(*connect_args, give_connect=capture_headers,
**kwargs)
resp = controller_call(req)
self.assertEqual(2, resp.status_int // 100) # sanity check
# don't care about the account/container HEADs, so chuck
# the first two requests
return sorted(seen_headers[2:],
key=lambda d: d.get(header_list[0]) or 'z')
def test_PUT_x_container_headers_with_equal_replicas(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000',
'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_PUT_x_container_headers_with_fewer_container_replicas(self):
self.app.container_ring.set_replicas(2)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000',
'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.0:1000',
'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'}])
def test_PUT_x_container_headers_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
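    # The expected headers above follow a simple spreading rule: container
    # replica i is attached to object-server PUT "i % object_replica_count",
    # so with four container replicas and three object PUTs the extra node
    # doubles up on the first PUT as a comma-separated value. A hypothetical
    # sketch of just that spreading (not Swift's actual code):
    @staticmethod
    def _spread_container_devices(container_devs, object_replicas):
        spread = [[] for _ in range(object_replicas)]
        for i, dev in enumerate(container_devs):
            spread[i % object_replicas].append(dev)
        return [','.join(devs) for devs in spread]
    # e.g. _spread_container_devices(['sda', 'sdb', 'sdc', 'sdd'], 3)
    #      == ['sda,sdd', 'sdb', 'sdc'], matching the expected devices above.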
def test_POST_x_container_headers_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
self.app.object_post_as_copy = False
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'application/stuff'})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.POST, req,
200, 200, 200, 200, 200) # HEAD HEAD POST POST POST
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_DELETE_x_container_headers_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Content-Type': 'application/stuff'})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.DELETE, req,
200, 200, 200, 200, 200) # HEAD HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}
])
@mock.patch('time.time', new=lambda: STATIC_TIME)
def test_PUT_x_delete_at_with_fewer_container_replicas(self):
self.app.container_ring.set_replicas(2)
delete_at_timestamp = int(time.time()) + 100000
delete_at_container = utils.get_expirer_container(
delete_at_timestamp, self.app.expiring_objects_container_divisor,
'a', 'c', 'o')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Type': 'application/stuff',
'Content-Length': '0',
'X-Delete-At': str(delete_at_timestamp)})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
header_list=('X-Delete-At-Host', 'X-Delete-At-Device',
'X-Delete-At-Partition', 'X-Delete-At-Container'))
self.assertEqual(seen_headers, [
{'X-Delete-At-Host': '10.0.0.0:1000',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sda'},
{'X-Delete-At-Host': '10.0.0.1:1001',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdb'},
{'X-Delete-At-Host': None,
'X-Delete-At-Container': None,
'X-Delete-At-Partition': None,
'X-Delete-At-Device': None}
])
@mock.patch('time.time', new=lambda: STATIC_TIME)
def test_PUT_x_delete_at_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
self.app.expiring_objects_account = 'expires'
self.app.expiring_objects_container_divisor = 60
delete_at_timestamp = int(time.time()) + 100000
delete_at_container = utils.get_expirer_container(
delete_at_timestamp, self.app.expiring_objects_container_divisor,
'a', 'c', 'o')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Type': 'application/stuff',
'Content-Length': 0,
'X-Delete-At': str(delete_at_timestamp)})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
header_list=('X-Delete-At-Host', 'X-Delete-At-Device',
'X-Delete-At-Partition', 'X-Delete-At-Container'))
self.assertEqual(seen_headers, [
{'X-Delete-At-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sda,sdd'},
{'X-Delete-At-Host': '10.0.0.1:1001',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdb'},
{'X-Delete-At-Host': '10.0.0.2:1002',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdc'}
])
class TestECMismatchedFA(unittest.TestCase):
def tearDown(self):
prosrv = _test_servers[0]
# don't leak error limits and poison other tests
prosrv._error_limiting = {}
def test_mixing_different_objects_fragment_archives(self):
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
obj2srv, obj3srv) = _test_servers
ec_policy = POLICIES[3]
@public
def bad_disk(req):
return Response(status=507, body="borken")
ensure_container = Request.blank(
"/v1/a/ec-crazytown",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Storage-Policy": "ec", "X-Auth-Token": "t"})
resp = ensure_container.get_response(prosrv)
self.assertTrue(resp.status_int in (201, 202))
obj1 = "first version..."
put_req1 = Request.blank(
"/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Auth-Token": "t"})
put_req1.body = obj1
obj2 = u"versión segundo".encode("utf-8")
put_req2 = Request.blank(
"/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Auth-Token": "t"})
put_req2.body = obj2
# pyeclib has checks for unequal-length; we don't want to trip those
self.assertEqual(len(obj1), len(obj2))
# Server obj1 will have the first version of the object (obj2 also
# gets it, but that gets stepped on later)
prosrv._error_limiting = {}
with nested(
mock.patch.object(obj3srv, 'PUT', bad_disk),
mock.patch(
'swift.common.storage_policy.ECStoragePolicy.quorum')):
type(ec_policy).quorum = mock.PropertyMock(return_value=2)
resp = put_req1.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# Servers obj2 and obj3 will have the second version of the object.
prosrv._error_limiting = {}
with nested(
mock.patch.object(obj1srv, 'PUT', bad_disk),
mock.patch(
'swift.common.storage_policy.ECStoragePolicy.quorum')):
type(ec_policy).quorum = mock.PropertyMock(return_value=2)
resp = put_req2.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# A GET that only sees 1 fragment archive should fail
get_req = Request.blank("/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
with nested(
mock.patch.object(obj1srv, 'GET', bad_disk),
mock.patch.object(obj2srv, 'GET', bad_disk)):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 503)
# A GET that sees 2 matching FAs will work
get_req = Request.blank("/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
with mock.patch.object(obj1srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, obj2)
# A GET that sees 2 mismatching FAs will fail
get_req = Request.blank("/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
with mock.patch.object(obj2srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 503)
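    # To summarise the scenario above: each PUT succeeds with only two of the
    # three fragment archives written (quorum is mocked down to 2), so after
    # both PUTs the cluster holds one fragment of "first version..." and two
    # fragments of the second object. A GET that can reach only a single
    # fragment archive, or that reaches two archives from different versions,
    # cannot reconstruct the object and fails with 503, while a GET that
    # reaches the two matching second-version fragments succeeds.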
class TestObjectDisconnectCleanup(unittest.TestCase):
    # update this pattern if do_setup creates more or differently named devices
device_pattern = re.compile('sd[a-z][0-9]')
def _cleanup_devices(self):
# make sure all the object data is cleaned up
for dev in os.listdir(_testdir):
if not self.device_pattern.match(dev):
continue
device_path = os.path.join(_testdir, dev)
for datadir in os.listdir(device_path):
if 'object' not in datadir:
continue
data_path = os.path.join(device_path, datadir)
rmtree(data_path, ignore_errors=True)
mkdirs(data_path)
def setUp(self):
debug.hub_exceptions(False)
self._cleanup_devices()
def tearDown(self):
debug.hub_exceptions(True)
self._cleanup_devices()
def _check_disconnect_cleans_up(self, policy_name, is_chunked=False):
proxy_port = _test_sockets[0].getsockname()[1]
def put(path, headers=None, body=None):
conn = httplib.HTTPConnection('localhost', proxy_port)
try:
conn.connect()
conn.putrequest('PUT', path)
for k, v in (headers or {}).items():
conn.putheader(k, v)
conn.endheaders()
body = body or ['']
for chunk in body:
if is_chunked:
chunk = '%x\r\n%s\r\n' % (len(chunk), chunk)
conn.send(chunk)
resp = conn.getresponse()
body = resp.read()
finally:
# seriously - shut this mother down
if conn.sock:
conn.sock.fd._sock.close()
return resp, body
# ensure container
container_path = '/v1/a/%s-disconnect-test' % policy_name
resp, _body = put(container_path, headers={
'Connection': 'close',
'X-Storage-Policy': policy_name,
'Content-Length': '0',
})
self.assertIn(resp.status, (201, 202))
def exploding_body():
for i in range(3):
yield '\x00' * (64 * 2 ** 10)
raise Exception('kaboom!')
headers = {}
if is_chunked:
headers['Transfer-Encoding'] = 'chunked'
else:
headers['Content-Length'] = 64 * 2 ** 20
obj_path = container_path + '/disconnect-data'
try:
resp, _body = put(obj_path, headers=headers,
body=exploding_body())
except Exception as e:
if str(e) != 'kaboom!':
raise
else:
self.fail('obj put connection did not ka-splod')
sleep(0.1)
def find_files(self):
found_files = defaultdict(list)
for root, dirs, files in os.walk(_testdir):
for fname in files:
filename, ext = os.path.splitext(fname)
found_files[ext].append(os.path.join(root, fname))
return found_files
def test_repl_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('zero')
found_files = self.find_files()
self.assertEqual(found_files['.data'], [])
def test_ec_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('ec')
found_files = self.find_files()
self.assertEqual(found_files['.durable'], [])
self.assertEqual(found_files['.data'], [])
def test_repl_chunked_transfer_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('zero', is_chunked=True)
found_files = self.find_files()
self.assertEqual(found_files['.data'], [])
def test_ec_chunked_transfer_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('ec', is_chunked=True)
found_files = self.find_files()
self.assertEqual(found_files['.durable'], [])
self.assertEqual(found_files['.data'], [])
class TestObjectECRangedGET(unittest.TestCase):
def setUp(self):
_test_servers[0].logger._clear()
self.app = proxy_server.Application(
None, FakeMemcache(),
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(),
container_ring=FakeRing())
def tearDown(self):
prosrv = _test_servers[0]
self.assertFalse(prosrv.logger.get_lines_for_level('error'))
self.assertFalse(prosrv.logger.get_lines_for_level('warning'))
@classmethod
def setUpClass(cls):
cls.obj_name = 'range-get-test'
cls.tiny_obj_name = 'range-get-test-tiny'
cls.aligned_obj_name = 'range-get-test-aligned'
# Note: only works if called with unpatched policies
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: ec\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2'
assert headers[:len(exp)] == exp, "container PUT failed"
seg_size = POLICIES.get_by_name("ec").ec_segment_size
cls.seg_size = seg_size
# EC segment size is 4 KiB, hence this gives 4 segments, which we
# then verify with a quick sanity check
cls.obj = ' my hovercraft is full of eels '.join(
str(s) for s in range(431))
assert seg_size * 4 > len(cls.obj) > seg_size * 3, \
"object is wrong number of segments"
cls.tiny_obj = 'tiny, tiny object'
assert len(cls.tiny_obj) < seg_size, "tiny_obj too large"
cls.aligned_obj = "".join(
"abcdEFGHijkl%04d" % x for x in range(512))
assert len(cls.aligned_obj) % seg_size == 0, "aligned obj not aligned"
for obj_name, obj in ((cls.obj_name, cls.obj),
(cls.tiny_obj_name, cls.tiny_obj),
(cls.aligned_obj_name, cls.aligned_obj)):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n%s' % (obj_name, len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, \
"object PUT failed %s" % obj_name
def _get_obj(self, range_value, obj_name=None):
if obj_name is None:
obj_name = self.obj_name
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Range: %s\r\n'
'\r\n' % (obj_name, range_value))
fd.flush()
headers = readuntil2crlfs(fd)
# e.g. "HTTP/1.1 206 Partial Content\r\n..."
status_code = int(headers[9:12])
headers = parse_headers_string(headers)
gotten_obj = ''
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
# if we get this wrong, clients will either get truncated data or
# they'll hang waiting for bytes that aren't coming, so it warrants
# being asserted for every test case
if 'Content-Length' in headers:
self.assertEqual(int(headers['Content-Length']), len(gotten_obj))
# likewise, if we say MIME and don't send MIME or vice versa,
# clients will be horribly confused
if headers.get('Content-Type', '').startswith('multipart/byteranges'):
self.assertEqual(gotten_obj[:2], "--")
else:
# In general, this isn't true, as you can start an object with
# "--". However, in this test, we don't start any objects with
# "--", or even include "--" in their contents anywhere.
self.assertNotEqual(gotten_obj[:2], "--")
return (status_code, headers, gotten_obj)
def _parse_multipart(self, content_type, body):
parser = email.parser.FeedParser()
parser.feed("Content-Type: %s\r\n\r\n" % content_type)
parser.feed(body)
root_message = parser.close()
self.assertTrue(root_message.is_multipart())
byteranges = root_message.get_payload()
self.assertFalse(root_message.defects)
for i, message in enumerate(byteranges):
self.assertFalse(message.defects, "Part %d had defects" % i)
self.assertFalse(message.is_multipart(),
"Nested multipart at %d" % i)
return byteranges
def test_bogus(self):
status, headers, gotten_obj = self._get_obj("tacos=3-5")
self.assertEqual(status, 200)
self.assertEqual(len(gotten_obj), len(self.obj))
self.assertEqual(gotten_obj, self.obj)
def test_unaligned(self):
# One segment's worth of data, but straddling two segment boundaries
# (so it has data from three segments)
status, headers, gotten_obj = self._get_obj("bytes=3783-7878")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "4096")
self.assertEqual(headers['Content-Range'], "bytes 3783-7878/14513")
self.assertEqual(len(gotten_obj), 4096)
self.assertEqual(gotten_obj, self.obj[3783:7879])
def test_aligned_left(self):
# First byte is aligned to a segment boundary, last byte is not
status, headers, gotten_obj = self._get_obj("bytes=0-5500")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "5501")
self.assertEqual(headers['Content-Range'], "bytes 0-5500/14513")
self.assertEqual(len(gotten_obj), 5501)
self.assertEqual(gotten_obj, self.obj[:5501])
def test_aligned_range(self):
# Ranged GET that wants exactly one segment
status, headers, gotten_obj = self._get_obj("bytes=4096-8191")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "4096")
self.assertEqual(headers['Content-Range'], "bytes 4096-8191/14513")
self.assertEqual(len(gotten_obj), 4096)
self.assertEqual(gotten_obj, self.obj[4096:8192])
def test_aligned_range_end(self):
# Ranged GET that wants exactly the last segment
status, headers, gotten_obj = self._get_obj("bytes=12288-14512")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "2225")
self.assertEqual(headers['Content-Range'], "bytes 12288-14512/14513")
self.assertEqual(len(gotten_obj), 2225)
self.assertEqual(gotten_obj, self.obj[12288:])
def test_aligned_range_aligned_obj(self):
# Ranged GET that wants exactly the last segment, which is full-size
status, headers, gotten_obj = self._get_obj("bytes=4096-8191",
self.aligned_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "4096")
self.assertEqual(headers['Content-Range'], "bytes 4096-8191/8192")
self.assertEqual(len(gotten_obj), 4096)
self.assertEqual(gotten_obj, self.aligned_obj[4096:8192])
def test_byte_0(self):
# Just the first byte, but it's index 0, so that's easy to get wrong
status, headers, gotten_obj = self._get_obj("bytes=0-0")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "1")
self.assertEqual(headers['Content-Range'], "bytes 0-0/14513")
self.assertEqual(gotten_obj, self.obj[0])
def test_unsatisfiable(self):
# Goes just one byte too far off the end of the object, so it's
# unsatisfiable
status, _junk, _junk = self._get_obj(
"bytes=%d-%d" % (len(self.obj), len(self.obj) + 100))
self.assertEqual(status, 416)
def test_off_end(self):
# Ranged GET that's mostly off the end of the object, but overlaps
# it in just the last byte
status, headers, gotten_obj = self._get_obj(
"bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) + 100))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '1')
self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
self.assertEqual(gotten_obj, self.obj[-1])
def test_aligned_off_end(self):
# Ranged GET that starts on a segment boundary but asks for a whole lot
status, headers, gotten_obj = self._get_obj(
"bytes=%d-%d" % (8192, len(self.obj) + 100))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '6321')
self.assertEqual(headers['Content-Range'], 'bytes 8192-14512/14513')
self.assertEqual(gotten_obj, self.obj[8192:])
def test_way_off_end(self):
# Ranged GET that's mostly off the end of the object, but overlaps
# it in just the last byte, and wants multiple segments' worth off
# the end
status, headers, gotten_obj = self._get_obj(
"bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) * 1000))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '1')
self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
self.assertEqual(gotten_obj, self.obj[-1])
def test_boundaries(self):
# Wants the last byte of segment 1 + the first byte of segment 2
status, headers, gotten_obj = self._get_obj("bytes=4095-4096")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '2')
self.assertEqual(headers['Content-Range'], 'bytes 4095-4096/14513')
self.assertEqual(gotten_obj, self.obj[4095:4097])
def test_until_end(self):
# Wants the last byte of segment 1 + the rest
status, headers, gotten_obj = self._get_obj("bytes=4095-")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '10418')
self.assertEqual(headers['Content-Range'], 'bytes 4095-14512/14513')
self.assertEqual(gotten_obj, self.obj[4095:])
def test_small_suffix(self):
# Small range-suffix GET: the last 100 bytes (less than one segment)
status, headers, gotten_obj = self._get_obj("bytes=-100")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '100')
self.assertEqual(headers['Content-Range'], 'bytes 14413-14512/14513')
self.assertEqual(len(gotten_obj), 100)
self.assertEqual(gotten_obj, self.obj[-100:])
def test_small_suffix_aligned(self):
# Small range-suffix GET: the last 100 bytes, last segment is
# full-size
status, headers, gotten_obj = self._get_obj("bytes=-100",
self.aligned_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '100')
self.assertEqual(headers['Content-Range'], 'bytes 8092-8191/8192')
self.assertEqual(len(gotten_obj), 100)
def test_suffix_two_segs(self):
# Ask for enough data that we need the last two segments. The last
# segment is short, though, so this ensures we compensate for that.
#
# Note that the total range size is less than one full-size segment.
suffix_len = len(self.obj) % self.seg_size + 1
status, headers, gotten_obj = self._get_obj("bytes=-%d" % suffix_len)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], str(suffix_len))
self.assertEqual(headers['Content-Range'],
'bytes %d-%d/%d' % (len(self.obj) - suffix_len,
len(self.obj) - 1,
len(self.obj)))
self.assertEqual(len(gotten_obj), suffix_len)
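    # Worked numbers for the suffix math above, using the sizes these tests
    # assert elsewhere (segment size 4096, object length 14513): the last
    # segment holds 14513 % 4096 == 2225 bytes, so suffix_len == 2226 and the
    # expected Content-Range is
    #     'bytes %d-%d/%d' % (14513 - 2226, 14513 - 1, 14513)
    # i.e. 'bytes 12287-14512/14513', straddling the last two segments.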
def test_large_suffix(self):
# Large range-suffix GET: the last 5000 bytes (more than one segment)
status, headers, gotten_obj = self._get_obj("bytes=-5000")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '5000')
self.assertEqual(headers['Content-Range'], 'bytes 9513-14512/14513')
self.assertEqual(len(gotten_obj), 5000)
self.assertEqual(gotten_obj, self.obj[-5000:])
def test_overlarge_suffix(self):
# The last N+1 bytes of an N-byte object
status, headers, gotten_obj = self._get_obj(
"bytes=-%d" % (len(self.obj) + 1))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '14513')
self.assertEqual(headers['Content-Range'], 'bytes 0-14512/14513')
self.assertEqual(len(gotten_obj), len(self.obj))
self.assertEqual(gotten_obj, self.obj)
def test_small_suffix_tiny_object(self):
status, headers, gotten_obj = self._get_obj(
"bytes=-5", self.tiny_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '5')
self.assertEqual(headers['Content-Range'], 'bytes 12-16/17')
self.assertEqual(gotten_obj, self.tiny_obj[12:])
def test_overlarge_suffix_tiny_object(self):
status, headers, gotten_obj = self._get_obj(
"bytes=-1234567890", self.tiny_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '17')
self.assertEqual(headers['Content-Range'], 'bytes 0-16/17')
self.assertEqual(len(gotten_obj), len(self.tiny_obj))
self.assertEqual(gotten_obj, self.tiny_obj)
def test_multiple_ranges(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,4490-5010", self.obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers["Content-Length"], str(len(gotten_obj)))
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
content_type_params = dict(content_type_params)
self.assertEqual(content_type, 'multipart/byteranges')
boundary = content_type_params.get('boundary')
self.assertTrue(boundary is not None)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
first_byterange, second_byterange = got_byteranges
self.assertEqual(first_byterange['Content-Range'],
'bytes 0-100/14513')
self.assertEqual(first_byterange.get_payload(), self.obj[:101])
self.assertEqual(second_byterange['Content-Range'],
'bytes 4490-5010/14513')
self.assertEqual(second_byterange.get_payload(), self.obj[4490:5011])
def test_multiple_ranges_overlapping_in_segment(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-9,20-29,40-49,60-69,80-89")
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 5)
def test_multiple_ranges_off_end(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-10,14500-14513") # there is no byte 14513, only 0-14512
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(got_byteranges[0]['Content-Range'],
"bytes 0-10/14513")
self.assertEqual(got_byteranges[1]['Content-Range'],
"bytes 14500-14512/14513")
def test_multiple_ranges_suffix_off_end(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-10,-13")
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(got_byteranges[0]['Content-Range'],
"bytes 0-10/14513")
self.assertEqual(got_byteranges[1]['Content-Range'],
"bytes 14500-14512/14513")
def test_multiple_ranges_one_barely_unsatisfiable(self):
# The thing about 14515-14520 is that it comes from the last segment
# in the object. When we turn this range into a fragment range,
# it'll be for the last fragment, so the object servers see
# something satisfiable.
#
# Basically, we'll get 3 byteranges from the object server, but we
# have to filter out the unsatisfiable one on our own.
status, headers, gotten_obj = self._get_obj(
"bytes=0-10,14515-14520,40-50")
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(got_byteranges[0]['Content-Range'],
"bytes 0-10/14513")
self.assertEqual(got_byteranges[0].get_payload(), self.obj[0:11])
self.assertEqual(got_byteranges[1]['Content-Range'],
"bytes 40-50/14513")
self.assertEqual(got_byteranges[1].get_payload(), self.obj[40:51])
def test_multiple_ranges_some_unsatisfiable(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,4090-5010,999999-9999999", self.obj_name)
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
content_type_params = dict(content_type_params)
self.assertEqual(content_type, 'multipart/byteranges')
boundary = content_type_params.get('boundary')
self.assertTrue(boundary is not None)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
first_byterange, second_byterange = got_byteranges
self.assertEqual(first_byterange['Content-Range'],
'bytes 0-100/14513')
self.assertEqual(first_byterange.get_payload(), self.obj[:101])
self.assertEqual(second_byterange['Content-Range'],
'bytes 4090-5010/14513')
self.assertEqual(second_byterange.get_payload(), self.obj[4090:5011])
def test_two_ranges_one_unsatisfiable(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,999999-9999999", self.obj_name)
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
# According to RFC 7233, this could be either a multipart/byteranges
# response with one part or it could be a single-part response (just
# the bytes, no MIME). We're locking it down here: single-part
# response. That's what replicated objects do, and we don't want any
# client-visible differences between EC objects and replicated ones.
self.assertEqual(content_type, 'donuts')
self.assertEqual(gotten_obj, self.obj[:101])
def test_two_ranges_one_unsatisfiable_same_segment(self):
# Like test_two_ranges_one_unsatisfiable(), but where both ranges
# fall within the same EC segment.
status, headers, gotten_obj = self._get_obj(
"bytes=14500-14510,14520-14530")
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
self.assertEqual(content_type, 'donuts')
self.assertEqual(gotten_obj, self.obj[14500:14511])
def test_multiple_ranges_some_unsatisfiable_out_of_order(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,99999998-99999999,4090-5010", self.obj_name)
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
content_type_params = dict(content_type_params)
self.assertEqual(content_type, 'multipart/byteranges')
boundary = content_type_params.get('boundary')
self.assertTrue(boundary is not None)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
first_byterange, second_byterange = got_byteranges
self.assertEqual(first_byterange['Content-Range'],
'bytes 0-100/14513')
self.assertEqual(first_byterange.get_payload(), self.obj[:101])
self.assertEqual(second_byterange['Content-Range'],
'bytes 4090-5010/14513')
self.assertEqual(second_byterange.get_payload(), self.obj[4090:5011])
@patch_policies([
StoragePolicy(0, 'zero', True, object_ring=FakeRing(base_port=3000)),
StoragePolicy(1, 'one', False, object_ring=FakeRing(base_port=3000)),
StoragePolicy(2, 'two', False, True, object_ring=FakeRing(base_port=3000))
])
class TestContainerController(unittest.TestCase):
"Test swift.proxy_server.ContainerController"
def setUp(self):
self.app = proxy_server.Application(
None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing(base_port=2000),
logger=debug_logger())
def test_convert_policy_to_index(self):
controller = swift.proxy.controllers.ContainerController(self.app,
'a', 'c')
expected = {
'zero': 0,
'ZeRo': 0,
'one': 1,
'OnE': 1,
}
for name, index in expected.items():
req = Request.blank('/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Storage-Policy': name})
self.assertEqual(controller._convert_policy_to_index(req), index)
# default test
req = Request.blank('/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.assertEqual(controller._convert_policy_to_index(req), None)
# negative test
req = Request.blank('/a/c',
headers={'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Storage-Policy': 'nada'})
self.assertRaises(HTTPException, controller._convert_policy_to_index,
req)
# storage policy two is deprecated
req = Request.blank('/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Storage-Policy': 'two'})
self.assertRaises(HTTPException, controller._convert_policy_to_index,
req)
def test_convert_index_to_name(self):
policy = random.choice(list(POLICIES))
req = Request.blank('/v1/a/c')
with mocked_http_conn(
200, 200,
headers={'X-Backend-Storage-Policy-Index': int(policy)},
) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Storage-Policy'], policy.name)
def test_no_convert_index_to_name_when_container_not_found(self):
policy = random.choice(list(POLICIES))
req = Request.blank('/v1/a/c')
with mocked_http_conn(
200, 404, 404, 404,
headers={'X-Backend-Storage-Policy-Index':
int(policy)}) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Storage-Policy'], None)
def test_error_convert_index_to_name(self):
req = Request.blank('/v1/a/c')
with mocked_http_conn(
200, 200,
headers={'X-Backend-Storage-Policy-Index': '-1'}) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Storage-Policy'], None)
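        # an index of -1 matches no policy, so no X-Storage-Policy header is
        # added and the failed translation is logged as an error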
error_lines = self.app.logger.get_lines_for_level('error')
self.assertEqual(2, len(error_lines))
for msg in error_lines:
expected = "Could not translate " \
"X-Backend-Storage-Policy-Index ('-1')"
self.assertTrue(expected in msg)
def test_transfer_headers(self):
src_headers = {'x-remove-versions-location': 'x',
'x-container-read': '*:user',
'x-remove-container-sync-key': 'x'}
dst_headers = {'x-versions-location': 'backup'}
controller = swift.proxy.controllers.ContainerController(self.app,
'a', 'c')
controller.transfer_headers(src_headers, dst_headers)
expected_headers = {'x-versions-location': '',
'x-container-read': '*:user',
'x-container-sync-key': ''}
self.assertEqual(dst_headers, expected_headers)
def assert_status_map(self, method, statuses, expected,
raise_exc=False, missing_container=False):
with save_globals():
kwargs = {}
if raise_exc:
kwargs['raise_exc'] = raise_exc
kwargs['missing_container'] = missing_container
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/', headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
def test_HEAD_GET(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
def test_status_map(statuses, expected,
c_expected=None, a_expected=None, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
self.app.update_request(req)
res = controller.HEAD(req)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
if expected < 400:
self.assertTrue('x-works' in res.headers)
self.assertEqual(res.headers['x-works'], 'yes')
if c_expected:
self.assertTrue('swift.container/a/c' in res.environ)
self.assertEqual(
res.environ['swift.container/a/c']['status'],
c_expected)
else:
self.assertTrue('swift.container/a/c' not in res.environ)
if a_expected:
self.assertTrue('swift.account/a' in res.environ)
self.assertEqual(res.environ['swift.account/a']['status'],
a_expected)
else:
self.assertTrue('swift.account/a' not in res.environ)
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
if expected < 400:
self.assertTrue('x-works' in res.headers)
self.assertEqual(res.headers['x-works'], 'yes')
if c_expected:
self.assertTrue('swift.container/a/c' in res.environ)
self.assertEqual(
res.environ['swift.container/a/c']['status'],
c_expected)
else:
self.assertTrue('swift.container/a/c' not in res.environ)
if a_expected:
self.assertTrue('swift.account/a' in res.environ)
self.assertEqual(res.environ['swift.account/a']['status'],
a_expected)
else:
self.assertTrue('swift.account/a' not in res.environ)
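            # The statuses tuple below is consumed in order by the mocked
            # backend connections: the account info request(s) first, then
            # the container nodes; c_expected/a_expected are the statuses
            # expected to be cached in the request environ, if any.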
            # In all the following tests the account return is cached as 200
            # and the cached container status varies per test
            # return 200 and cache 200 for container
test_status_map((200, 200, 404, 404), 200, 200, 200)
test_status_map((200, 200, 500, 404), 200, 200, 200)
# return 304 don't cache container
test_status_map((200, 304, 500, 404), 304, None, 200)
# return 404 and cache 404 for container
test_status_map((200, 404, 404, 404), 404, 404, 200)
test_status_map((200, 404, 404, 500), 404, 404, 200)
# return 503, don't cache container
test_status_map((200, 500, 500, 500), 503, None, 200)
self.assertFalse(self.app.account_autocreate)
# In all the following tests cache 404 for account
# return 404 (as account is not found) and don't cache container
test_status_map((404, 404, 404), 404, None, 404)
# This should make no difference
self.app.account_autocreate = True
test_status_map((404, 404, 404), 404, None, 404)
def test_PUT_policy_headers(self):
backend_requests = []
def capture_requests(ipaddr, port, device, partition, method,
path, headers=None, query_string=None):
if method == 'PUT':
backend_requests.append(headers)
def test_policy(requested_policy):
with save_globals():
mock_conn = set_http_connect(200, 201, 201, 201,
give_connect=capture_requests)
self.app.memcache.store = {}
req = Request.blank('/v1/a/test', method='PUT',
headers={'Content-Length': 0})
if requested_policy:
expected_policy = requested_policy
req.headers['X-Storage-Policy'] = policy.name
else:
expected_policy = POLICIES.default
res = req.get_response(self.app)
if expected_policy.is_deprecated:
self.assertEqual(res.status_int, 400)
self.assertEqual(0, len(backend_requests))
expected = 'is deprecated'
self.assertTrue(expected in res.body,
'%r did not include %r' % (
res.body, expected))
return
self.assertEqual(res.status_int, 201)
self.assertEqual(
expected_policy.object_ring.replicas,
len(backend_requests))
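                # With no explicit policy the proxy should pass its default
                # along as X-Backend-Storage-Policy-Default; an explicitly
                # requested policy should be forwarded as
                # X-Backend-Storage-Policy-Index instead.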
for headers in backend_requests:
if not requested_policy:
self.assertFalse('X-Backend-Storage-Policy-Index' in
headers)
self.assertTrue(
'X-Backend-Storage-Policy-Default' in headers)
self.assertEqual(
int(expected_policy),
int(headers['X-Backend-Storage-Policy-Default']))
else:
self.assertTrue('X-Backend-Storage-Policy-Index' in
headers)
self.assertEqual(int(headers
['X-Backend-Storage-Policy-Index']),
int(policy))
# make sure all mocked responses are consumed
self.assertRaises(StopIteration, mock_conn.code_iter.next)
test_policy(None) # no policy header
for policy in POLICIES:
backend_requests = [] # reset backend requests
test_policy(policy)
def test_PUT(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 201, 201, 201), 201, missing_container=True)
test_status_map((200, 201, 201, 500), 201, missing_container=True)
test_status_map((200, 204, 404, 404), 404, missing_container=True)
test_status_map((200, 204, 500, 404), 503, missing_container=True)
self.assertFalse(self.app.account_autocreate)
test_status_map((404, 404, 404), 404, missing_container=True)
self.app.account_autocreate = True
# fail to retrieve account info
test_status_map(
(503, 503, 503), # account_info fails on 503
404, missing_container=True)
# account fail after creation
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
404, 404, 404), # account_info fail
404, missing_container=True)
test_status_map(
(503, 503, 404, # account_info fails on 404
503, 503, 503, # PUT account
503, 503, 404), # account_info fail
404, missing_container=True)
# put fails
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
503, 503, 201), # put container fail
503, missing_container=True)
# all goes according to plan
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
201, 201, 201), # put container success
201, missing_container=True)
test_status_map(
(503, 404, 404, # account_info fails on 404
503, 201, 201, # PUT account
503, 200, # account_info success
503, 201, 201), # put container success
201, missing_container=True)
def test_PUT_autocreate_account_with_sysmeta(self):
# x-account-sysmeta headers in a container PUT request should be
# transferred to the account autocreate PUT request
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
def test_status_map(statuses, expected, headers=None, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {}, headers=headers)
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
self.app.account_autocreate = True
calls = []
callback = _make_callback_func(calls)
key, value = 'X-Account-Sysmeta-Blah', 'something'
headers = {key: value}
# all goes according to plan
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
201, 201, 201), # put container success
201, missing_container=True,
headers=headers,
give_connect=callback)
self.assertEqual(10, len(calls))
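            # calls[3:6] are the three autocreate PUT requests to the account
            # servers (the 201, 201, 201 block above); each should carry the
            # sysmeta header from the container PUT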
for call in calls[3:6]:
self.assertEqual('/account', call['path'])
self.assertTrue(key in call['headers'],
'%s call, key %s missing in headers %s' %
(call['method'], key, call['headers']))
self.assertEqual(value, call['headers'][key])
def test_POST(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
req.content_length = 0
self.app.update_request(req)
res = controller.POST(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 201, 201, 201), 201, missing_container=True)
test_status_map((200, 201, 201, 500), 201, missing_container=True)
test_status_map((200, 204, 404, 404), 404, missing_container=True)
test_status_map((200, 204, 500, 404), 503, missing_container=True)
self.assertFalse(self.app.account_autocreate)
test_status_map((404, 404, 404), 404, missing_container=True)
self.app.account_autocreate = True
test_status_map((404, 404, 404), 404, missing_container=True)
def test_PUT_max_containers_per_account(self):
with save_globals():
self.app.max_containers_per_account = 12346
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT,
(200, 201, 201, 201), 201,
missing_container=True)
self.app.max_containers_per_account = 12345
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT,
(200, 200, 201, 201, 201), 201,
missing_container=True)
controller = proxy_server.ContainerController(self.app, 'account',
'container_new')
self.assert_status_map(controller.PUT, (200, 404, 404, 404), 403,
missing_container=True)
self.app.max_containers_per_account = 12345
self.app.max_containers_whitelist = ['account']
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT,
(200, 201, 201, 201), 201,
missing_container=True)
def test_PUT_max_container_name_length(self):
with save_globals():
limit = constraints.MAX_CONTAINER_NAME_LENGTH
controller = proxy_server.ContainerController(self.app, 'account',
'1' * limit)
self.assert_status_map(controller.PUT,
(200, 201, 201, 201), 201,
missing_container=True)
controller = proxy_server.ContainerController(self.app, 'account',
'2' * (limit + 1))
self.assert_status_map(controller.PUT, (201, 201, 201), 400,
missing_container=True)
def test_PUT_connect_exceptions(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT, (200, 201, 201, -1), 201,
missing_container=True)
self.assert_status_map(controller.PUT, (200, 201, -1, -1), 503,
missing_container=True)
self.assert_status_map(controller.PUT, (200, 503, 503, -1), 503,
missing_container=True)
def test_acc_missing_returns_404(self):
for meth in ('DELETE', 'PUT'):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
self.app._error_limiting = {}
controller = proxy_server.ContainerController(self.app,
'account',
'container')
if meth == 'PUT':
set_http_connect(200, 200, 200, 200, 200, 200,
missing_container=True)
else:
set_http_connect(200, 200, 200, 200)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
self.app.update_request(req)
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 200)
set_http_connect(404, 404, 404, 200, 200, 200)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 404, 404)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 404, raise_exc=True)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
for dev in self.app.account_ring.devs:
set_node_errors(self.app, dev,
self.app.error_suppression_limit + 1,
time.time())
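                # with every account node error-limited the proxy skips them
                # all and treats the account as missing, so even all-200
                # backends still yield a 404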
set_http_connect(200, 200, 200, 200, 200, 200)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
def test_put_locking(self):
class MockMemcache(FakeMemcache):
def __init__(self, allow_lock=None):
self.allow_lock = allow_lock
super(MockMemcache, self).__init__()
@contextmanager
def soft_lock(self, key, timeout=0, retries=5):
if self.allow_lock:
yield True
else:
raise NotImplementedError
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.app.memcache = MockMemcache(allow_lock=True)
set_http_connect(200, 201, 201, 201,
missing_container=True)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'})
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int, 201)
def test_error_limiting(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
container_ring = controller.app.container_ring
controller.app.sort_nodes = lambda l: l
self.assert_status_map(controller.HEAD, (200, 503, 200, 200), 200,
missing_container=False)
self.assertEqual(
node_error_count(controller.app, container_ring.devs[0]), 2)
self.assertTrue(
node_last_error(controller.app, container_ring.devs[0])
is not None)
for _junk in range(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD,
(200, 503, 503, 503), 503)
self.assertEqual(
node_error_count(controller.app, container_ring.devs[0]),
self.app.error_suppression_limit + 1)
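            # the first container node is now error-limited, so the requests
            # below fail even though the mocked backends would succeed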
self.assert_status_map(controller.HEAD, (200, 200, 200, 200), 503)
self.assertTrue(
node_last_error(controller.app, container_ring.devs[0])
is not None)
self.assert_status_map(controller.PUT, (200, 201, 201, 201), 503,
missing_container=True)
self.assert_status_map(controller.DELETE,
(200, 204, 204, 204), 503)
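            # a negative suppression interval makes the error limit expire
            # immediately, so requests reach the backends again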
self.app.error_suppression_interval = -300
self.assert_status_map(controller.HEAD, (200, 200, 200, 200), 200)
self.assert_status_map(controller.DELETE, (200, 204, 204, 204),
404, raise_exc=True)
def test_DELETE(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.DELETE,
(200, 204, 204, 204), 204)
self.assert_status_map(controller.DELETE,
(200, 204, 204, 503), 204)
self.assert_status_map(controller.DELETE,
(200, 204, 503, 503), 503)
self.assert_status_map(controller.DELETE,
(200, 204, 404, 404), 404)
self.assert_status_map(controller.DELETE,
(200, 404, 404, 404), 404)
self.assert_status_map(controller.DELETE,
(200, 204, 503, 404), 503)
self.app.memcache = FakeMemcacheReturnsNone()
# 200: Account check, 404x3: Container check
self.assert_status_map(controller.DELETE,
(200, 404, 404, 404), 404)
def test_response_get_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c?format=json')
self.app.update_request(req)
res = controller.GET(req)
self.assertTrue('accept-ranges' in res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_response_head_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c?format=json')
self.app.update_request(req)
res = controller.HEAD(req)
self.assertTrue('accept-ranges' in res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_PUT_metadata(self):
self.metadata_helper('PUT')
def test_POST_metadata(self):
self.metadata_helper('POST')
def metadata_helper(self, method):
for test_header, test_value in (
('X-Container-Meta-TestHeader', 'TestValue'),
('X-Container-Meta-TestHeader', ''),
('X-Remove-Container-Meta-TestHeader', 'anything'),
('X-Container-Read', '.r:*'),
('X-Remove-Container-Read', 'anything'),
('X-Container-Write', 'anyone'),
('X-Remove-Container-Write', 'anything')):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c':
find_header = test_header
find_value = test_value
if find_header.lower().startswith('x-remove-'):
find_header = \
find_header.lower().replace('-remove', '', 1)
find_value = ''
for k, v in headers.items():
if k.lower() == find_header.lower() and \
v == find_value:
break
else:
test_errors.append('%s: %s not in %s' %
(find_header, find_value, headers))
with save_globals():
controller = \
proxy_server.ContainerController(self.app, 'a', 'c')
set_http_connect(200, 201, 201, 201, give_connect=test_connect)
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': method, 'swift_owner': True},
headers={test_header: test_value})
self.app.update_request(req)
getattr(controller, method)(req)
self.assertEqual(test_errors, [])
def test_PUT_bad_metadata(self):
self.bad_metadata_helper('PUT')
def test_POST_bad_metadata(self):
self.bad_metadata_helper('POST')
def bad_metadata_helper(self, method):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
set_http_connect(200, 201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-' +
('a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-' +
('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-Too-Long':
'a' * constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-Too-Long':
'a' * (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT):
headers['X-Container-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT + 1):
headers['X-Container-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < (constraints.MAX_META_OVERALL_SIZE - 4
- constraints.MAX_META_VALUE_LENGTH):
size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Container-Meta-%04d' % x] = header_value
x += 1
if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Container-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Container-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
def test_POST_calls_clean_acl(self):
called = [False]
def clean_acl(header, value):
called[0] = True
raise ValueError('fake error')
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Container-Read': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
called[0] = False
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Container-Write': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
def test_PUT_calls_clean_acl(self):
called = [False]
def clean_acl(header, value):
called[0] = True
raise ValueError('fake error')
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Container-Read': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.PUT(req)
self.assertTrue(called[0])
called[0] = False
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Container-Write': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.PUT(req)
self.assertTrue(called[0])
def test_GET_no_content(self):
with save_globals():
set_http_connect(200, 204, 204, 204)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c')
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(res.status_int, 204)
self.assertEqual(
res.environ['swift.container/a/c']['status'], 204)
self.assertEqual(res.content_length, 0)
self.assertTrue('transfer-encoding' not in res.headers)
def test_GET_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(res.environ['swift.container/a/c']['status'], 201)
self.assertTrue(called[0])
def test_HEAD_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': 'HEAD'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.HEAD(req)
self.assertTrue(called[0])
def test_unauthorized_requests_when_account_not_found(self):
# verify unauthorized container requests always return response
# from swift.authorize
called = [0, 0]
def authorize(req):
called[0] += 1
return HTTPUnauthorized(request=req)
def account_info(*args):
called[1] += 1
return None, None, None
def _do_test(method):
with save_globals():
swift.proxy.controllers.Controller.account_info = account_info
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': method})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = app.handle_request(req)
return res
for method in ('PUT', 'POST', 'DELETE'):
# no delay_denial on method, expect one call to authorize
called = [0, 0]
res = _do_test(method)
self.assertEqual(401, res.status_int)
self.assertEqual([1, 0], called)
for method in ('HEAD', 'GET'):
# delay_denial on method, expect two calls to authorize
called = [0, 0]
res = _do_test(method)
self.assertEqual(401, res.status_int)
self.assertEqual([2, 1], called)
def test_authorized_requests_when_account_not_found(self):
# verify authorized container requests always return 404 when
# account not found
called = [0, 0]
def authorize(req):
called[0] += 1
def account_info(*args):
called[1] += 1
return None, None, None
def _do_test(method):
with save_globals():
swift.proxy.controllers.Controller.account_info = account_info
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': method})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = app.handle_request(req)
return res
for method in ('PUT', 'POST', 'DELETE', 'HEAD', 'GET'):
# expect one call to authorize
called = [0, 0]
res = _do_test(method)
self.assertEqual(404, res.status_int)
self.assertEqual([1, 1], called)
def test_OPTIONS_get_info_drops_origin(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
count = [0]
def my_get_info(app, env, account, container=None,
ret_not_found=False, swift_source=None):
if count[0] > 11:
return {}
count[0] += 1
if not container:
return {'some': 'stuff'}
return proxy_base.was_get_info(
app, env, account, container, ret_not_found, swift_source)
proxy_base.was_get_info = proxy_base.get_info
with mock.patch.object(proxy_base, 'get_info', my_get_info):
proxy_base.get_info = my_get_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
controller.OPTIONS(req)
self.assertTrue(count[0] < 11)
def test_OPTIONS(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
def my_empty_container_info(*args):
return {}
controller.container_info = my_empty_container_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_empty_origin_container_info(*args):
return {'cors': {'allow_origin': None}}
controller.container_info = my_empty_origin_container_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_container_info(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar:8080 https://foo.bar',
'max_age': '999',
}
}
controller.container_info = my_container_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
'https://foo.bar',
resp.headers['access-control-allow-origin'])
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
6)
self.assertEqual('999', resp.headers['access-control-max-age'])
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 6)
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.bar',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.bar',
'Access-Control-Request-Method': 'GET'})
controller.app.cors_allow_origin = ['http://foo.bar', ]
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
def my_container_info_wildcard(*args):
return {
'cors': {
'allow_origin': '*',
'max_age': '999',
}
}
controller.container_info = my_container_info_wildcard
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://bar.baz',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual('*', resp.headers['access-control-allow-origin'])
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
6)
self.assertEqual('999', resp.headers['access-control-max-age'])
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://bar.baz',
'Access-Control-Request-Headers':
'x-foo, x-bar, x-auth-token',
'Access-Control-Request-Method': 'GET'}
)
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
sortHeaderNames('x-foo, x-bar, x-auth-token'),
sortHeaderNames(resp.headers['access-control-allow-headers']))
def test_CORS_valid(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
def stubContainerInfo(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar'
}
}
controller.container_info = stubContainerInfo
def containerGET(controller, req):
return Response(headers={
'X-Container-Meta-Color': 'red',
'X-Super-Secret': 'hush',
})
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(containerGET)(controller, req)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://foo.bar',
resp.headers['access-control-allow-origin'])
self.assertEqual('red', resp.headers['x-container-meta-color'])
# X-Super-Secret is in the response, but not "exposed"
self.assertEqual('hush', resp.headers['x-super-secret'])
self.assertTrue('access-control-expose-headers' in resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
expected_exposed = set(['cache-control', 'content-language',
'content-type', 'expires', 'last-modified',
'pragma', 'etag', 'x-timestamp',
'x-trans-id', 'x-container-meta-color'])
self.assertEqual(expected_exposed, exposed)
def _gather_x_account_headers(self, controller_call, req, *connect_args,
**kwargs):
seen_headers = []
to_capture = ('X-Account-Partition', 'X-Account-Host',
'X-Account-Device')
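        # these headers tell each container server where to send its account
        # update for this container PUT/DELETE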
def capture_headers(ipaddr, port, device, partition, method,
path, headers=None, query_string=None):
captured = {}
for header in to_capture:
captured[header] = headers.get(header)
seen_headers.append(captured)
with save_globals():
self.app.allow_account_management = True
set_http_connect(*connect_args, give_connect=capture_headers,
**kwargs)
resp = controller_call(req)
self.assertEqual(2, resp.status_int // 100) # sanity check
# don't care about the account HEAD, so throw away the
# first element
return sorted(seen_headers[1:],
key=lambda d: d['X-Account-Host'] or 'Z')
def test_PUT_x_account_headers_with_fewer_account_replicas(self):
self.app.account_ring.set_replicas(2)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.PUT, req,
200, 201, 201, 201) # HEAD PUT PUT PUT
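        # only two account replicas for three container nodes, so one
        # container node gets no account-update headers (the None entries)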
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000',
'X-Account-Partition': '0',
'X-Account-Device': 'sda'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': None,
'X-Account-Partition': None,
'X-Account-Device': None}
])
def test_PUT_x_account_headers_with_more_account_replicas(self):
self.app.account_ring.set_replicas(4)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.PUT, req,
200, 201, 201, 201) # HEAD PUT PUT PUT
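        # four account replicas for three container nodes, so one container
        # node is told about two account hosts/devices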
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Account-Partition': '0',
'X-Account-Device': 'sda,sdd'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': '10.0.0.2:1002',
'X-Account-Partition': '0',
'X-Account-Device': 'sdc'}
])
def test_DELETE_x_account_headers_with_fewer_account_replicas(self):
self.app.account_ring.set_replicas(2)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.DELETE, req,
200, 204, 204, 204) # HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000',
'X-Account-Partition': '0',
'X-Account-Device': 'sda'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': None,
'X-Account-Partition': None,
'X-Account-Device': None}
])
def test_DELETE_x_account_headers_with_more_account_replicas(self):
self.app.account_ring.set_replicas(4)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.DELETE, req,
200, 204, 204, 204) # HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Account-Partition': '0',
'X-Account-Device': 'sda,sdd'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': '10.0.0.2:1002',
'X-Account-Partition': '0',
'X-Account-Device': 'sdc'}
])
def test_PUT_backed_x_timestamp_header(self):
timestamps = []
def capture_timestamps(*args, **kwargs):
headers = kwargs['headers']
timestamps.append(headers.get('X-Timestamp'))
req = Request.blank('/v1/a/c', method='PUT', headers={'': ''})
with save_globals():
new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
# sanity
self.assertRaises(StopIteration, new_connect.code_iter.next)
self.assertEqual(2, resp.status_int // 100)
timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
                self.assertTrue(re.match(r'[0-9]{10}\.[0-9]{5}', timestamp))
def test_DELETE_backed_x_timestamp_header(self):
timestamps = []
def capture_timestamps(*args, **kwargs):
headers = kwargs['headers']
timestamps.append(headers.get('X-Timestamp'))
req = Request.blank('/v1/a/c', method='DELETE', headers={'': ''})
self.app.update_request(req)
with save_globals():
new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
# sanity
self.assertRaises(StopIteration, new_connect.code_iter.next)
self.assertEqual(2, resp.status_int // 100)
timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
                self.assertTrue(re.match(r'[0-9]{10}\.[0-9]{5}', timestamp))
def test_node_read_timeout_retry_to_container(self):
with save_globals():
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'GET'})
self.app.node_timeout = 0.1
set_http_connect(200, 200, 200, body='abcdef', slow=[1.0, 1.0])
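            # both the first container node and the retry are slower than the
            # 0.1s node_timeout, so reading the body raises ChunkReadTimeout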
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountController(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
def assert_status_map(self, method, statuses, expected, env_expected=None,
headers=None, **kwargs):
headers = headers or {}
with save_globals():
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a', {}, headers=headers)
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
if env_expected:
self.assertEqual(res.environ['swift.account/a']['status'],
env_expected)
set_http_connect(*statuses)
req = Request.blank('/v1/a/', {})
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
if env_expected:
self.assertEqual(res.environ['swift.account/a']['status'],
env_expected)
def test_OPTIONS(self):
with save_globals():
self.app.allow_account_management = False
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST HEAD'.split():
self.assertTrue(
verb in resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 4)
# Test a CORS OPTIONS request (i.e. including Origin and
# Access-Control-Request-Method headers)
self.app.allow_account_management = False
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank(
'/v1/account', {'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST HEAD'.split():
self.assertTrue(
verb in resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 4)
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 6)
def test_GET(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
# GET returns after the first successful call to an Account Server
self.assert_status_map(controller.GET, (200,), 200, 200)
self.assert_status_map(controller.GET, (503, 200), 200, 200)
self.assert_status_map(controller.GET, (503, 503, 200), 200, 200)
self.assert_status_map(controller.GET, (204,), 204, 204)
self.assert_status_map(controller.GET, (503, 204), 204, 204)
self.assert_status_map(controller.GET, (503, 503, 204), 204, 204)
self.assert_status_map(controller.GET, (404, 200), 200, 200)
self.assert_status_map(controller.GET, (404, 404, 200), 200, 200)
self.assert_status_map(controller.GET, (404, 503, 204), 204, 204)
            # If account servers fail and autocreate is False, return the
            # majority response
self.assert_status_map(controller.GET, (404, 404, 404), 404, 404)
self.assert_status_map(controller.GET, (404, 404, 503), 404, 404)
self.assert_status_map(controller.GET, (404, 503, 503), 503)
self.app.memcache = FakeMemcacheReturnsNone()
self.assert_status_map(controller.GET, (404, 404, 404), 404, 404)
def test_GET_autocreate(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
self.app.memcache = FakeMemcacheReturnsNone()
self.assertFalse(self.app.account_autocreate)
# Repeat the test for autocreate = False and 404 by all
self.assert_status_map(controller.GET,
(404, 404, 404), 404)
self.assert_status_map(controller.GET,
(404, 503, 404), 404)
# When autocreate is True, if none of the nodes respond 2xx
# And quorum of the nodes responded 404,
# ALL nodes are asked to create the account
# If successful, the GET request is repeated.
controller.app.account_autocreate = True
self.assert_status_map(controller.GET,
(404, 404, 404), 204)
self.assert_status_map(controller.GET,
(404, 503, 404), 204)
# We always return 503 if no majority between 4xx, 3xx or 2xx found
self.assert_status_map(controller.GET,
(500, 500, 400), 503)
def test_HEAD(self):
# Same behaviour as GET
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
self.assert_status_map(controller.HEAD, (200,), 200, 200)
self.assert_status_map(controller.HEAD, (503, 200), 200, 200)
self.assert_status_map(controller.HEAD, (503, 503, 200), 200, 200)
self.assert_status_map(controller.HEAD, (204,), 204, 204)
self.assert_status_map(controller.HEAD, (503, 204), 204, 204)
self.assert_status_map(controller.HEAD, (204, 503, 503), 204, 204)
self.assert_status_map(controller.HEAD, (204,), 204, 204)
self.assert_status_map(controller.HEAD, (404, 404, 404), 404, 404)
self.assert_status_map(controller.HEAD, (404, 404, 200), 200, 200)
self.assert_status_map(controller.HEAD, (404, 200), 200, 200)
self.assert_status_map(controller.HEAD, (404, 404, 503), 404, 404)
self.assert_status_map(controller.HEAD, (404, 503, 503), 503)
self.assert_status_map(controller.HEAD, (404, 503, 204), 204, 204)
def test_HEAD_autocreate(self):
# Same behaviour as GET
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
self.app.memcache = FakeMemcacheReturnsNone()
self.assertFalse(self.app.account_autocreate)
self.assert_status_map(controller.HEAD,
(404, 404, 404), 404)
controller.app.account_autocreate = True
self.assert_status_map(controller.HEAD,
(404, 404, 404), 204)
self.assert_status_map(controller.HEAD,
(500, 404, 404), 204)
# We always return 503 if no majority between 4xx, 3xx or 2xx found
self.assert_status_map(controller.HEAD,
(500, 500, 400), 503)
def test_POST_autocreate(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
self.app.memcache = FakeMemcacheReturnsNone()
# first test with autocreate being False
self.assertFalse(self.app.account_autocreate)
self.assert_status_map(controller.POST,
(404, 404, 404), 404)
            # next turn it on and test account being created, then updated
controller.app.account_autocreate = True
self.assert_status_map(
controller.POST,
(404, 404, 404, 202, 202, 202, 201, 201, 201), 201)
# account_info PUT account POST account
self.assert_status_map(
controller.POST,
(404, 404, 503, 201, 201, 503, 204, 204, 504), 204)
# what if create fails
self.assert_status_map(
controller.POST,
(404, 404, 404, 403, 403, 403, 400, 400, 400), 400)
def test_POST_autocreate_with_sysmeta(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
self.app.memcache = FakeMemcacheReturnsNone()
# first test with autocreate being False
self.assertFalse(self.app.account_autocreate)
self.assert_status_map(controller.POST,
(404, 404, 404), 404)
            # next turn it on and test account being created, then updated
controller.app.account_autocreate = True
calls = []
callback = _make_callback_func(calls)
key, value = 'X-Account-Sysmeta-Blah', 'something'
headers = {key: value}
self.assert_status_map(
controller.POST,
(404, 404, 404, 202, 202, 202, 201, 201, 201), 201,
                # POST, autocreate PUT, POST again
headers=headers,
give_connect=callback)
self.assertEqual(9, len(calls))
for call in calls:
self.assertTrue(key in call['headers'],
'%s call, key %s missing in headers %s' %
(call['method'], key, call['headers']))
self.assertEqual(value, call['headers'][key])
def test_connection_refused(self):
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1 # can't connect on this port
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 503)
def test_other_socket_error(self):
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = -1 # invalid port number
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 503)
def test_response_get_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/a?format=json')
self.app.update_request(req)
res = controller.GET(req)
self.assertTrue('accept-ranges' in res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_response_head_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/a?format=json')
self.app.update_request(req)
res = controller.HEAD(req)
res.body
self.assertTrue('accept-ranges' in res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_PUT(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a', {})
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((201, 201, 201), 405)
self.app.allow_account_management = True
test_status_map((201, 201, 201), 201)
test_status_map((201, 201, 500), 201)
test_status_map((201, 500, 500), 503)
test_status_map((204, 500, 404), 503)
def test_PUT_max_account_name_length(self):
with save_globals():
self.app.allow_account_management = True
limit = constraints.MAX_ACCOUNT_NAME_LENGTH
controller = proxy_server.AccountController(self.app, '1' * limit)
self.assert_status_map(controller.PUT, (201, 201, 201), 201)
controller = proxy_server.AccountController(
self.app, '2' * (limit + 1))
self.assert_status_map(controller.PUT, (201, 201, 201), 400)
def test_PUT_connect_exceptions(self):
with save_globals():
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'account')
self.assert_status_map(controller.PUT, (201, 201, -1), 201)
self.assert_status_map(controller.PUT, (201, -1, -1), 503)
self.assert_status_map(controller.PUT, (503, 503, -1), 503)
def test_PUT_status(self):
with save_globals():
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'account')
self.assert_status_map(controller.PUT, (201, 201, 202), 202)
def test_PUT_metadata(self):
self.metadata_helper('PUT')
def test_POST_metadata(self):
self.metadata_helper('POST')
def metadata_helper(self, method):
for test_header, test_value in (
('X-Account-Meta-TestHeader', 'TestValue'),
('X-Account-Meta-TestHeader', ''),
('X-Remove-Account-Meta-TestHeader', 'anything')):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a':
find_header = test_header
find_value = test_value
if find_header.lower().startswith('x-remove-'):
find_header = \
find_header.lower().replace('-remove', '', 1)
find_value = ''
for k, v in headers.items():
if k.lower() == find_header.lower() and \
v == find_value:
break
else:
test_errors.append('%s: %s not in %s' %
(find_header, find_value, headers))
with save_globals():
self.app.allow_account_management = True
controller = \
proxy_server.AccountController(self.app, 'a')
set_http_connect(201, 201, 201, give_connect=test_connect)
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': method},
headers={test_header: test_value})
self.app.update_request(req)
getattr(controller, method)(req)
self.assertEqual(test_errors, [])
def test_PUT_bad_metadata(self):
self.bad_metadata_helper('PUT')
def test_POST_bad_metadata(self):
self.bad_metadata_helper('POST')
def bad_metadata_helper(self, method):
with save_globals():
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'a')
set_http_connect(200, 201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-' +
('a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-' +
('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-Too-Long':
'a' * constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-Too-Long':
'a' * (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT):
headers['X-Account-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT + 1):
headers['X-Account-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < (constraints.MAX_META_OVERALL_SIZE - 4
- constraints.MAX_META_VALUE_LENGTH):
size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Account-Meta-%04d' % x] = header_value
x += 1
if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Account-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Account-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
def test_DELETE(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a', {'REQUEST_METHOD': 'DELETE'})
req.content_length = 0
self.app.update_request(req)
res = controller.DELETE(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((201, 201, 201), 405)
self.app.allow_account_management = True
test_status_map((201, 201, 201), 201)
test_status_map((201, 201, 500), 201)
test_status_map((201, 500, 500), 503)
test_status_map((204, 500, 404), 503)
def test_DELETE_with_query_string(self):
# Extra safety in case someone typos a query string for an
# account-level DELETE request that was really meant to be caught by
# some middleware.
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a?whoops',
environ={'REQUEST_METHOD': 'DELETE'})
req.content_length = 0
self.app.update_request(req)
res = controller.DELETE(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((201, 201, 201), 400)
self.app.allow_account_management = True
test_status_map((201, 201, 201), 400)
test_status_map((201, 201, 500), 400)
test_status_map((201, 500, 500), 400)
test_status_map((204, 500, 404), 400)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountControllerFakeGetResponse(unittest.TestCase):
"""
Test all the faked-out GET responses for accounts that don't exist. They
have to match the responses for empty accounts that really exist.
"""
def setUp(self):
conf = {'account_autocreate': 'yes'}
self.app = proxy_server.Application(conf, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
self.app.memcache = FakeMemcacheReturnsNone()
def test_GET_autocreate_accept_json(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank(
'/v1/a', headers={'Accept': 'application/json'},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('application/json; charset=utf-8',
resp.headers['Content-Type'])
self.assertEqual("[]", resp.body)
def test_GET_autocreate_format_json(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a?format=json',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a',
'QUERY_STRING': 'format=json'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('application/json; charset=utf-8',
resp.headers['Content-Type'])
self.assertEqual("[]", resp.body)
def test_GET_autocreate_accept_xml(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a', headers={"Accept": "text/xml"},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('text/xml; charset=utf-8',
resp.headers['Content-Type'])
empty_xml_listing = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<account name="a">\n</account>')
self.assertEqual(empty_xml_listing, resp.body)
def test_GET_autocreate_format_xml(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a?format=xml',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a',
'QUERY_STRING': 'format=xml'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('application/xml; charset=utf-8',
resp.headers['Content-Type'])
empty_xml_listing = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<account name="a">\n</account>')
self.assertEqual(empty_xml_listing, resp.body)
def test_GET_autocreate_accept_unknown(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a', headers={"Accept": "mystery/meat"},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(406, resp.status_int)
def test_GET_autocreate_format_invalid_utf8(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a?format=\xff\xfe',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a',
'QUERY_STRING': 'format=\xff\xfe'})
resp = req.get_response(self.app)
self.assertEqual(400, resp.status_int)
def test_account_acl_header_access(self):
acl = {
'admin': ['AUTH_alice'],
'read-write': ['AUTH_bob'],
'read-only': ['AUTH_carol'],
}
prefix = get_sys_meta_prefix('account')
privileged_headers = {(prefix + 'core-access-control'): format_acl(
version=2, acl_dict=acl)}
app = proxy_server.Application(
None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing())
with save_globals():
# Mock account server will provide privileged information (ACLs)
set_http_connect(200, 200, 200, headers=privileged_headers)
req = Request.blank('/v1/a', environ={'REQUEST_METHOD': 'GET'})
resp = app.handle_request(req)
# Not a swift_owner -- ACLs should NOT be in response
header = 'X-Account-Access-Control'
self.assertTrue(header not in resp.headers, '%r was in %r' % (
header, resp.headers))
# Same setup -- mock acct server will provide ACLs
set_http_connect(200, 200, 200, headers=privileged_headers)
req = Request.blank('/v1/a', environ={'REQUEST_METHOD': 'GET',
'swift_owner': True})
resp = app.handle_request(req)
# For a swift_owner, the ACLs *should* be in response
self.assertTrue(header in resp.headers, '%r not in %r' % (
header, resp.headers))
def test_account_acls_through_delegation(self):
# Define a way to grab the requests sent out from the AccountController
# to the Account Server, and a way to inject responses we'd like the
# Account Server to return.
resps_to_send = []
@contextmanager
def patch_account_controller_method(verb):
old_method = getattr(proxy_server.AccountController, verb)
new_method = lambda self, req, *_, **__: resps_to_send.pop(0)
try:
setattr(proxy_server.AccountController, verb, new_method)
yield
finally:
setattr(proxy_server.AccountController, verb, old_method)
def make_test_request(http_method, swift_owner=True):
env = {
'REQUEST_METHOD': http_method,
'swift_owner': swift_owner,
}
acl = {
'admin': ['foo'],
'read-write': ['bar'],
'read-only': ['bas'],
}
headers = {} if http_method in ('GET', 'HEAD') else {
'x-account-access-control': format_acl(version=2, acl_dict=acl)
}
return Request.blank('/v1/a', environ=env, headers=headers)
# Our AccountController will invoke methods to communicate with the
# Account Server, and they will return responses like these:
def make_canned_response(http_method):
acl = {
'admin': ['foo'],
'read-write': ['bar'],
'read-only': ['bas'],
}
headers = {'x-account-sysmeta-core-access-control': format_acl(
version=2, acl_dict=acl)}
canned_resp = Response(headers=headers)
canned_resp.environ = {
'PATH_INFO': '/acct',
'REQUEST_METHOD': http_method,
}
resps_to_send.append(canned_resp)
app = proxy_server.Application(
None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing())
app.allow_account_management = True
ext_header = 'x-account-access-control'
with patch_account_controller_method('GETorHEAD_base'):
# GET/HEAD requests should remap sysmeta headers from acct server
for verb in ('GET', 'HEAD'):
make_canned_response(verb)
req = make_test_request(verb)
resp = app.handle_request(req)
h = parse_acl(version=2, data=resp.headers.get(ext_header))
self.assertEqual(h['admin'], ['foo'])
self.assertEqual(h['read-write'], ['bar'])
self.assertEqual(h['read-only'], ['bas'])
# swift_owner = False: GET/HEAD shouldn't return sensitive info
make_canned_response(verb)
req = make_test_request(verb, swift_owner=False)
resp = app.handle_request(req)
h = resp.headers
self.assertEqual(None, h.get(ext_header))
# swift_owner unset: GET/HEAD shouldn't return sensitive info
make_canned_response(verb)
req = make_test_request(verb, swift_owner=False)
del req.environ['swift_owner']
resp = app.handle_request(req)
h = resp.headers
self.assertEqual(None, h.get(ext_header))
# Verify that PUT/POST requests remap sysmeta headers from acct server
with patch_account_controller_method('make_requests'):
make_canned_response('PUT')
req = make_test_request('PUT')
resp = app.handle_request(req)
h = parse_acl(version=2, data=resp.headers.get(ext_header))
self.assertEqual(h['admin'], ['foo'])
self.assertEqual(h['read-write'], ['bar'])
self.assertEqual(h['read-only'], ['bas'])
make_canned_response('POST')
req = make_test_request('POST')
resp = app.handle_request(req)
h = parse_acl(version=2, data=resp.headers.get(ext_header))
self.assertEqual(h['admin'], ['foo'])
self.assertEqual(h['read-write'], ['bar'])
self.assertEqual(h['read-only'], ['bas'])
class FakeObjectController(object):
def __init__(self):
self.app = self
self.logger = self
self.account_name = 'a'
self.container_name = 'c'
self.object_name = 'o'
self.trans_id = 'tx1'
self.object_ring = FakeRing()
self.node_timeout = 1
self.rate_limit_after_segment = 3
self.rate_limit_segments_per_sec = 2
self.GETorHEAD_base_args = []
def exception(self, *args):
self.exception_args = args
self.exception_info = sys.exc_info()
def GETorHEAD_base(self, *args):
self.GETorHEAD_base_args.append(args)
req = args[0]
path = args[4]
body = data = path[-1] * int(path[-1])
if req.range:
r = req.range.ranges_for_length(len(data))
if r:
(start, stop) = r[0]
body = data[start:stop]
resp = Response(app_iter=iter(body))
return resp
def iter_nodes(self, ring, partition):
for node in ring.get_part_nodes(partition):
yield node
for node in ring.get_more_nodes(partition):
yield node
def sort_nodes(self, nodes):
return nodes
def set_node_timing(self, node, timing):
return
class TestProxyObjectPerformance(unittest.TestCase):
def setUp(self):
# This is just a simple test that can be used to verify and debug the
# various data paths between the proxy server and the object
# server. Used as a playground to debug buffer sizes for sockets.
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# Client is transmitting in 2 MB chunks
fd = sock.makefile('wb', 2 * 1024 * 1024)
# Small, fast for testing
obj_len = 2 * 64 * 1024
# Use 1 GB or more for measurements
# obj_len = 2 * 512 * 1024 * 1024
self.path = '/v1/a/c/o.large'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (self.path, str(obj_len)))
fd.write('a' * obj_len)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
self.obj_len = obj_len
def test_GET_debug_large_file(self):
for i in range(10):
start = time.time()
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# Client is reading in 2 MB chunks
fd = sock.makefile('wb', 2 * 1024 * 1024)
fd.write('GET %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.path)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
total = 0
while True:
buf = fd.read(100000)
if not buf:
break
total += len(buf)
self.assertEqual(total, self.obj_len)
end = time.time()
print("Run %02d took %07.03f" % (i, end - start))
@patch_policies([StoragePolicy(0, 'migrated', object_ring=FakeRing()),
StoragePolicy(1, 'ernie', True, object_ring=FakeRing()),
StoragePolicy(2, 'deprecated', is_deprecated=True,
object_ring=FakeRing()),
StoragePolicy(3, 'bert', object_ring=FakeRing())])
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
utils._swift_info = {}
utils._swift_admin_info = {}
def test_registered_defaults(self):
proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
si = utils.get_swift_info()['swift']
self.assertTrue('version' in si)
self.assertEqual(si['max_file_size'], constraints.MAX_FILE_SIZE)
self.assertEqual(si['max_meta_name_length'],
constraints.MAX_META_NAME_LENGTH)
self.assertEqual(si['max_meta_value_length'],
constraints.MAX_META_VALUE_LENGTH)
self.assertEqual(si['max_meta_count'], constraints.MAX_META_COUNT)
self.assertEqual(si['max_header_size'], constraints.MAX_HEADER_SIZE)
self.assertEqual(si['max_meta_overall_size'],
constraints.MAX_META_OVERALL_SIZE)
self.assertEqual(si['account_listing_limit'],
constraints.ACCOUNT_LISTING_LIMIT)
self.assertEqual(si['container_listing_limit'],
constraints.CONTAINER_LISTING_LIMIT)
self.assertEqual(si['max_account_name_length'],
constraints.MAX_ACCOUNT_NAME_LENGTH)
self.assertEqual(si['max_container_name_length'],
constraints.MAX_CONTAINER_NAME_LENGTH)
self.assertEqual(si['max_object_name_length'],
constraints.MAX_OBJECT_NAME_LENGTH)
self.assertTrue('strict_cors_mode' in si)
self.assertEqual(si['allow_account_management'], False)
self.assertEqual(si['account_autocreate'], False)
# This setting is by default excluded by disallowed_sections
self.assertEqual(si['valid_api_versions'],
constraints.VALID_API_VERSIONS)
# this next test is deliberately brittle in order to alert if
# other items are added to swift info
self.assertEqual(len(si), 18)
self.assertTrue('policies' in si)
sorted_pols = sorted(si['policies'], key=operator.itemgetter('name'))
self.assertEqual(len(sorted_pols), 3)
for policy in sorted_pols:
self.assertNotEqual(policy['name'], 'deprecated')
self.assertEqual(sorted_pols[0]['name'], 'bert')
self.assertEqual(sorted_pols[1]['name'], 'ernie')
self.assertEqual(sorted_pols[2]['name'], 'migrated')
class TestSocketObjectVersions(unittest.TestCase):
def setUp(self):
global _test_sockets
self.prolis = prolis = listen(('localhost', 0))
self._orig_prolis = _test_sockets[0]
allowed_headers = ', '.join([
'content-encoding',
'x-object-manifest',
'content-disposition',
'foo'
])
conf = {'devices': _testdir, 'swift_dir': _testdir,
'mount_check': 'false', 'allowed_headers': allowed_headers}
prosrv = versioned_writes.VersionedWritesMiddleware(
proxy_logging.ProxyLoggingMiddleware(
_test_servers[0], conf,
logger=_test_servers[0].logger),
{})
self.coro = spawn(wsgi.server, prolis, prosrv, NullLogger())
# replace global prosrv with one that's filtered with version
# middleware
self.sockets = list(_test_sockets)
self.sockets[0] = prolis
_test_sockets = tuple(self.sockets)
def tearDown(self):
self.coro.kill()
# put the global state back
global _test_sockets
self.sockets[0] = self._orig_prolis
_test_sockets = tuple(self.sockets)
def test_version_manifest(self, oc='versions', vc='vers', o='name'):
versions_to_create = 3
# Create a container for our versioned object testing
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
pre = quote('%03x' % len(o))
osub = '%s/sub' % o
presub = quote('%03x' % len(osub))
osub = quote(osub)
presub = quote(presub)
oc = quote(oc)
vc = quote(vc)
def put_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\nX-Versions-Location: %s\r\n\r\n'
% (oc, vc))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
headers = put_container()
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
def get_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
# check that the header was set
headers, body = get_container()
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('X-Versions-Location: %s' % vc, headers)
def put_version_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' % vc)
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
# make the container for the object versions
headers = put_version_container()
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
def put(version):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\nContent-Type: text/jibberish%s'
'\r\n\r\n%05d\r\n' % (oc, o, version, version))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
def get(container=oc, obj=o):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n'
'\r\n' % (container, obj))
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
# Create the versioned file
headers = put(0)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Create the object versions
for version in range(1, versions_to_create):
sleep(.01) # guarantee that the timestamp changes
headers = put(version)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Ensure retrieving the manifest file gets the latest version
headers, body = get()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('Content-Type: text/jibberish%s' % version, headers)
self.assertNotIn('X-Object-Meta-Foo: barbaz', headers)
self.assertEqual(body, '%05d' % version)
def get_version_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n\r\n' % vc)
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
# Ensure we have the right number of versions saved
headers, body = get_version_container()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
versions = [x for x in body.split('\n') if x]
self.assertEqual(len(versions), versions_to_create - 1)
def delete():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r'
'\nConnection: close\r\nX-Storage-Token: t\r\n\r\n'
% (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
def copy():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('COPY /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: '
't\r\nDestination: %s/copied_name\r\n'
'Content-Length: 0\r\n\r\n' % (oc, o, oc))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
# copy a version and make sure the version info is stripped
headers = copy()
exp = 'HTTP/1.1 2' # 2xx series response to the COPY
self.assertEqual(headers[:len(exp)], exp)
def get_copy():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/copied_name HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\n'
'X-Auth-Token: t\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
headers, body = get_copy()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertEqual(body, '%05d' % version)
def post():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('POST /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: '
't\r\nContent-Type: foo/bar\r\nContent-Length: 0\r\n'
'X-Object-Meta-Bar: foo\r\n\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
# post and make sure it's updated
headers = post()
exp = 'HTTP/1.1 2' # 2xx series response to the POST
self.assertEqual(headers[:len(exp)], exp)
headers, body = get()
self.assertIn('Content-Type: foo/bar', headers)
self.assertIn('X-Object-Meta-Bar: foo', headers)
self.assertEqual(body, '%05d' % version)
# check container listing
headers, body = get_container()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
# Delete the object versions
for segment in range(versions_to_create - 1, 0, -1):
headers = delete()
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
# Ensure retrieving the manifest file gets the latest version
headers, body = get()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('Content-Type: text/jibberish%s' % (segment - 1),
headers)
self.assertEqual(body, '%05d' % (segment - 1))
# Ensure we have the right number of versions saved
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r'
'\n' % (vc, pre, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
versions = [x for x in body.split('\n') if x]
self.assertEqual(len(versions), segment - 1)
# there is now one version left (in the manifest)
# Ensure we have no saved versions
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n'
% (vc, pre, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 204 No Content'
self.assertEqual(headers[:len(exp)], exp)
# delete the last version
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
# Ensure it's all gone
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n'
% (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
# make sure manifest files will be ignored
for _junk in range(1, versions_to_create):
sleep(.01) # guarantee that the timestamp changes
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 0\r\n'
'Content-Type: text/jibberish0\r\n'
'Foo: barbaz\r\nX-Object-Manifest: %s/%s/\r\n\r\n'
% (oc, o, oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nhost: '
'localhost\r\nconnection: close\r\nx-auth-token: t\r\n\r\n'
% (vc, pre, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 204 No Content'
self.assertEqual(headers[:len(exp)], exp)
# DELETE v1/a/c/obj shouldn't delete v1/a/c/obj/sub versions
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\n00000\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\n00001\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 4\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\nsub1\r\n' % (oc, osub))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 4\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\nsub2\r\n' % (oc, osub))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n'
% (vc, presub, osub))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
versions = [x for x in body.split('\n') if x]
self.assertEqual(len(versions), 1)
# Check for when the versions target container doesn't exist
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%swhoops HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\nX-Versions-Location: none\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Create the versioned file
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%swhoops/foo HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\n\r\n00000\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Create another version
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%swhoops/foo HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\n\r\n00001\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
# Delete the object
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%swhoops/foo HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx response
self.assertEqual(headers[:len(exp)], exp)
def test_version_manifest_utf8(self):
oc = '0_oc_non_ascii\xc2\xa3'
vc = '0_vc_non_ascii\xc2\xa3'
o = '0_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_container(self):
oc = '1_oc_non_ascii\xc2\xa3'
vc = '1_vc_ascii'
o = '1_o_ascii'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_version_container(self):
oc = '2_oc_ascii'
vc = '2_vc_non_ascii\xc2\xa3'
o = '2_o_ascii'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_containers(self):
oc = '3_oc_non_ascii\xc2\xa3'
vc = '3_vc_non_ascii\xc2\xa3'
o = '3_o_ascii'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_object(self):
oc = '4_oc_ascii'
vc = '4_vc_ascii'
o = '4_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_version_container_utf_object(self):
oc = '5_oc_ascii'
vc = '5_vc_non_ascii\xc2\xa3'
o = '5_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_container_utf_object(self):
oc = '6_oc_non_ascii\xc2\xa3'
vc = '6_vc_ascii'
o = '6_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
if __name__ == '__main__':
setup()
try:
unittest.main()
finally:
teardown()
|
dencaval/swift
|
test/unit/proxy/test_server.py
|
Python
|
apache-2.0
| 415,761
|
[
"MOOSE"
] |
e5f8850c845e4490d6baadd549c8d645517d46c6a37e7e085931121c0b85cdf1
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate decay functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.optimizers.schedules.LearningRateSchedule")
class LearningRateSchedule(object):
"""A serializable learning rate decay schedule.
`LearningRateSchedule`s can be passed in as the learning rate of optimizers in
`tf.keras.optimizers`. They can be serialized and deserialized using
`tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
"""
@abc.abstractmethod
def __call__(self, step):
raise NotImplementedError("Learning rate schedule must override __call__")
@abc.abstractmethod
def get_config(self):
raise NotImplementedError("Learning rate schedule must override get_config")
@classmethod
def from_config(cls, config):
"""Instantiates a `LearningRateSchedule` from its config.
Args:
config: Output of `get_config()`.
Returns:
A `LearningRateSchedule` instance.
"""
return cls(**config)
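# Illustrative sketch (added for this dump, not part of the TensorFlow API):
# a minimal custom schedule showing the two methods a subclass must provide.
# The halving interval and the assumption that `step` is a plain Python int
# are arbitrary choices made for the example.
class _ExampleHalvingSchedule(LearningRateSchedule):
  """Halves a fixed base rate every `halve_every` steps (example only)."""

  def __init__(self, base_rate=0.1, halve_every=1000):
    self.base_rate = base_rate
    self.halve_every = halve_every

  def __call__(self, step):
    # Assumes `step` is a Python integer for simplicity.
    return self.base_rate * 0.5 ** (step // self.halve_every)

  def get_config(self):
    return {"base_rate": self.base_rate, "halve_every": self.halve_every}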
@keras_export("keras.optimizers.schedules.ExponentialDecay")
class ExponentialDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses an exponential decay schedule.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies an exponential decay function
to an optimizer step, given a provided initial learning rate.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
return initial_learning_rate * decay_rate ^ (step / decay_steps)
```
If the argument `staircase` is `True`, then `step / decay_steps` is
an integer division and the decayed learning rate follows a
staircase function.
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: When fitting a Keras model, decay every 100000 steps with a base
of 0.96:
```python
initial_learning_rate = 0.1
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=100000,
decay_rate=0.96,
staircase=True)
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
The learning rate schedule is also serializable and deserializable using
`tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies exponential decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
decay_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The decay rate.
staircase: Boolean. If `True` decay the learning rate at discrete
intervals.
name: String. Optional name of the operation. Defaults to
'ExponentialDecay'.
"""
super(ExponentialDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.staircase = staircase
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "ExponentialDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
decay_rate = math_ops.cast(self.decay_rate, dtype)
global_step_recomp = math_ops.cast(step, dtype)
p = global_step_recomp / decay_steps
if self.staircase:
p = math_ops.floor(p)
return math_ops.multiply(
initial_learning_rate, math_ops.pow(decay_rate, p), name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"decay_rate": self.decay_rate,
"staircase": self.staircase,
"name": self.name
}
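# Illustrative sketch (added for this dump, not part of the TensorFlow API):
# a plain-Python mirror of the exponential decay formula documented above,
# handy for sanity-checking values without building a graph. Relies on the
# module-level `math` import.
def _exponential_decay_example(initial_learning_rate, decay_steps, decay_rate,
                               step, staircase=False):
  """Return the decayed rate for `step` using pure Python arithmetic."""
  p = step / decay_steps
  if staircase:
    p = math.floor(p)
  return initial_learning_rate * decay_rate ** p


# For instance, _exponential_decay_example(0.1, 100000, 0.96, 100000) is
# approximately 0.096, matching the docstring example after one decay period.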
@keras_export("keras.optimizers.schedules.PiecewiseConstantDecay")
class PiecewiseConstantDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a piecewise constant decay schedule.
The function returns a 1-arg callable to compute the piecewise constant
when passed the current optimizer step. This can be useful for changing the
learning rate value across different invocations of optimizer functions.
Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5
for the next 10000 steps, and 0.1 for any additional steps.
```python
step = tf.Variable(0, trainable=False)
boundaries = [100000, 110000]
values = [1.0, 0.5, 0.1]
learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries, values)
# Later, whenever we perform an optimization step, we pass in the step.
learning_rate = learning_rate_fn(step)
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as the boundary tensors.
The output of the 1-arg function that takes the `step`
is `values[0]` when `step <= boundaries[0]`,
`values[1]` when `step > boundaries[0]` and `step <= boundaries[1]`, ...,
and `values[-1]` when `step > boundaries[-1]`.
"""
def __init__(
self,
boundaries,
values,
name=None):
"""Piecewise constant from boundaries and interval values.
Args:
boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
increasing entries, and with all elements having the same type as the
optimizer step.
values: A list of `Tensor`s or `float`s or `int`s that specifies the
values for the intervals defined by `boundaries`. It should have one
more element than `boundaries`, and all elements should have the same
type.
name: A string. Optional name of the operation. Defaults to
'PiecewiseConstant'.
Raises:
ValueError: if the number of elements in the lists do not match.
"""
super(PiecewiseConstantDecay, self).__init__()
if len(boundaries) != len(values) - 1:
raise ValueError(
"The length of boundaries should be 1 less than the length of values")
self.boundaries = boundaries
self.values = values
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "PiecewiseConstant"):
boundaries = nest.map_structure(ops.convert_to_tensor_v2_with_dispatch,
nest.flatten(self.boundaries))
values = nest.map_structure(ops.convert_to_tensor_v2_with_dispatch,
nest.flatten(self.values))
x_recomp = ops.convert_to_tensor_v2_with_dispatch(step)
for i, b in enumerate(boundaries):
if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
# We cast the boundaries to have the same type as the step
b = math_ops.cast(b, x_recomp.dtype.base_dtype)
boundaries[i] = b
pred_fn_pairs = []
pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0]))
pred_fn_pairs.append((x_recomp > boundaries[-1], lambda: values[-1]))
for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
# Need to bind v here; can do this with lambda v=v: ...
pred = (x_recomp > low) & (x_recomp <= high)
pred_fn_pairs.append((pred, lambda v=v: v))
# The default isn't needed here because our conditions are mutually
# exclusive and exhaustive, but tf.case requires it.
default = lambda: values[0]
return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
def get_config(self):
return {
"boundaries": self.boundaries,
"values": self.values,
"name": self.name
}
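# Illustrative sketch (added for this dump, not part of the TensorFlow API):
# the boundary lookup documented above written out in plain Python, to make
# the interval semantics explicit. Assumes len(values) == len(boundaries) + 1.
def _piecewise_constant_example(boundaries, values, step):
  """Return the value for the interval that contains `step`."""
  for boundary, value in zip(boundaries, values):
    if step <= boundary:
      return value
  return values[-1]


# With the docstring example, _piecewise_constant_example(
#     [100000, 110000], [1.0, 0.5, 0.1], 105000) returns 0.5.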
@keras_export("keras.optimizers.schedules.PolynomialDecay")
class PolynomialDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a polynomial decay schedule.
It is commonly observed that a monotonically decreasing learning rate, whose
degree of change is carefully chosen, results in a better performing model.
This schedule applies a polynomial decay function to an optimizer step,
given a provided `initial_learning_rate`, to reach an `end_learning_rate`
in the given `decay_steps`.
It requires a `step` value to compute the decayed learning rate. You
can just pass a TensorFlow variable that you increment at each training
step.
The schedule is a 1-arg callable that produces a decayed learning rate
when passed the current optimizer step. This can be useful for changing the
learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
return ((initial_learning_rate - end_learning_rate) *
(1 - step / decay_steps) ^ (power)
) + end_learning_rate
```
If `cycle` is True then a multiple of `decay_steps` is used, the first one
that is bigger than `step`.
```python
def decayed_learning_rate(step):
decay_steps = decay_steps * ceil(step / decay_steps)
return ((initial_learning_rate - end_learning_rate) *
(1 - step / decay_steps) ^ (power)
) + end_learning_rate
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: Fit a model while decaying from 0.1 to 0.01 in 10000 steps using
sqrt (i.e. power=0.5):
```python
...
starter_learning_rate = 0.1
end_learning_rate = 0.01
decay_steps = 10000
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
starter_learning_rate,
decay_steps,
end_learning_rate,
power=0.5)
model.compile(optimizer=tf.keras.optimizers.SGD(
learning_rate=learning_rate_fn),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
The learning rate schedule is also serializable and deserializable using
`tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
end_learning_rate=0.0001,
power=1.0,
cycle=False,
name=None):
"""Applies a polynomial decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
end_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The minimal end learning rate.
power: A scalar `float32` or `float64` `Tensor` or a
Python number. The power of the polynomial. Defaults to linear, 1.0.
cycle: A boolean, whether or not it should cycle beyond decay_steps.
name: String. Optional name of the operation. Defaults to
'PolynomialDecay'.
"""
super(PolynomialDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.end_learning_rate = end_learning_rate
self.power = power
self.cycle = cycle
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "PolynomialDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
end_learning_rate = math_ops.cast(self.end_learning_rate, dtype)
power = math_ops.cast(self.power, dtype)
global_step_recomp = math_ops.cast(step, dtype)
decay_steps_recomp = math_ops.cast(self.decay_steps, dtype)
if self.cycle:
# Find the first multiple of decay_steps that is bigger than
# global_step. If global_step is zero set the multiplier to 1
multiplier = control_flow_ops.cond(
math_ops.equal(global_step_recomp, 0), lambda: 1.0,
lambda: math_ops.ceil(global_step_recomp / self.decay_steps))
decay_steps_recomp = math_ops.multiply(decay_steps_recomp, multiplier)
else:
# Make sure that the global_step used is not bigger than decay_steps.
global_step_recomp = math_ops.minimum(global_step_recomp,
decay_steps_recomp)
p = math_ops.divide(global_step_recomp, decay_steps_recomp)
return math_ops.add(
math_ops.multiply(initial_learning_rate - end_learning_rate,
math_ops.pow(1 - p, power)),
end_learning_rate,
name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"end_learning_rate": self.end_learning_rate,
"power": self.power,
"cycle": self.cycle,
"name": self.name
}
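# Illustrative sketch (added for this dump, not part of the TensorFlow API):
# the polynomial decay formula from the docstring above in plain Python,
# including the optional `cycle` behaviour. Relies on the module-level
# `math` import.
def _polynomial_decay_example(initial_learning_rate, decay_steps, step,
                              end_learning_rate=0.0001, power=1.0,
                              cycle=False):
  """Return the decayed rate for `step` using pure Python arithmetic."""
  if cycle:
    # Use the first multiple of decay_steps that is not smaller than step.
    decay_steps = decay_steps * max(1.0, math.ceil(step / decay_steps))
  else:
    step = min(step, decay_steps)
  p = step / decay_steps
  return ((initial_learning_rate - end_learning_rate) * (1 - p) ** power
          + end_learning_rate)


# Without cycling, the rate reaches end_learning_rate once step >= decay_steps,
# e.g. _polynomial_decay_example(0.1, 10000, 10000, 0.01, power=0.5) == 0.01.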
@keras_export("keras.optimizers.schedules.InverseTimeDecay")
class InverseTimeDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses an inverse time decay schedule.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies the inverse decay function
to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
return initial_learning_rate / (1 + decay_rate * step / decay_step)
```
or, if `staircase` is `True`, as:
```python
def decayed_learning_rate(step):
return initial_learning_rate / (1 + decay_rate * floor(step / decay_step))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: Fit a Keras model while decaying 1/t with a rate of 0.5:
```python
...
initial_learning_rate = 0.1
decay_steps = 1.0
decay_rate = 0.5
learning_rate_fn = keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate, decay_steps, decay_rate)
model.compile(optimizer=tf.keras.optimizers.SGD(
learning_rate=learning_rate_fn),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies inverse time decay to the initial learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase fashion, as
opposed to continuously.
name: String. Optional name of the operation. Defaults to
'InverseTimeDecay'.
"""
super(InverseTimeDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.staircase = staircase
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "InverseTimeDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
decay_rate = math_ops.cast(self.decay_rate, dtype)
global_step_recomp = math_ops.cast(step, dtype)
p = global_step_recomp / decay_steps
if self.staircase:
p = math_ops.floor(p)
const = math_ops.cast(constant_op.constant(1), dtype)
denom = math_ops.add(const, math_ops.multiply(decay_rate, p))
return math_ops.divide(initial_learning_rate, denom, name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"decay_rate": self.decay_rate,
"staircase": self.staircase,
"name": self.name
}
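# Illustrative sketch (added for this dump, not part of the TensorFlow API):
# the inverse time decay formula from the docstring above in plain Python.
# Relies on the module-level `math` import when `staircase` is used.
def _inverse_time_decay_example(initial_learning_rate, decay_steps, decay_rate,
                                step, staircase=False):
  """Return the decayed rate for `step` using pure Python arithmetic."""
  p = step / decay_steps
  if staircase:
    p = math.floor(p)
  return initial_learning_rate / (1 + decay_rate * p)


# With the docstring example (0.1, decay_steps=1.0, decay_rate=0.5), the rate
# at step 2 is 0.1 / (1 + 0.5 * 2) = 0.05.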
@keras_export("keras.experimental.CosineDecay")
class CosineDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a cosine decay schedule.
See [Loshchilov & Hutter, ICLR2016], SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies a cosine decay function
to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
cosine_decay = 0.5 * (1 + cos(pi * step / decay_steps))
decayed = (1 - alpha) * cosine_decay + alpha
return initial_learning_rate * decayed
```
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = tf.keras.experimental.CosineDecay(
initial_learning_rate, decay_steps)
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
alpha=0.0,
name=None):
"""Applies cosine decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
alpha: A scalar `float32` or `float64` Tensor or a Python number.
Minimum learning rate value as a fraction of initial_learning_rate.
name: String. Optional name of the operation. Defaults to 'CosineDecay'.
"""
super(CosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.alpha = alpha
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "CosineDecay"):
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
completed_fraction = global_step_recomp / decay_steps
cosine_decayed = 0.5 * (1.0 + math_ops.cos(
constant_op.constant(math.pi) * completed_fraction))
decayed = (1 - self.alpha) * cosine_decayed + self.alpha
return math_ops.multiply(initial_learning_rate, decayed)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"alpha": self.alpha,
"name": self.name
}
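# Illustrative sketch (added for this dump, not part of the TensorFlow API):
# the cosine decay formula from the docstring above in plain Python. Relies
# on the module-level `math` import.
def _cosine_decay_example(initial_learning_rate, decay_steps, step, alpha=0.0):
  """Return the decayed rate for `step` using pure Python arithmetic."""
  step = min(step, decay_steps)
  cosine_decay = 0.5 * (1 + math.cos(math.pi * step / decay_steps))
  decayed = (1 - alpha) * cosine_decay + alpha
  return initial_learning_rate * decayed


# With alpha=0 the multiplier is 0.5 halfway through the decay and bottoms out
# at 0 (i.e. at alpha) once step >= decay_steps.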
@keras_export("keras.experimental.CosineDecayRestarts")
class CosineDecayRestarts(LearningRateSchedule):
"""A LearningRateSchedule that uses a cosine decay schedule with restarts.
See [Loshchilov & Hutter, ICLR2016], SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies a cosine decay function with
restarts to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
The learning rate multiplier first decays
from 1 to `alpha` for `first_decay_steps` steps. Then, a warm
restart is performed. Each new warm restart runs for `t_mul` times more
steps and with `m_mul` times smaller initial learning rate.
Example usage:
```python
first_decay_steps = 1000
lr_decayed_fn = (
tf.keras.experimental.CosineDecayRestarts(
initial_learning_rate,
first_decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
first_decay_steps,
t_mul=2.0,
m_mul=1.0,
alpha=0.0,
name=None):
"""Applies cosine decay with restarts to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
number. The initial learning rate.
first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python
number. Number of steps to decay over.
t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
Used to derive the number of iterations in the i-th period.
m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
Used to derive the initial learning rate of the i-th period.
alpha: A scalar `float32` or `float64` Tensor or a Python number.
Minimum learning rate value as a fraction of the initial_learning_rate.
name: String. Optional name of the operation. Defaults to 'SGDRDecay'.
"""
super(CosineDecayRestarts, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.first_decay_steps = first_decay_steps
self._t_mul = t_mul
self._m_mul = m_mul
self.alpha = alpha
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "SGDRDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
first_decay_steps = math_ops.cast(self.first_decay_steps, dtype)
alpha = math_ops.cast(self.alpha, dtype)
t_mul = math_ops.cast(self._t_mul, dtype)
m_mul = math_ops.cast(self._m_mul, dtype)
global_step_recomp = math_ops.cast(step, dtype)
completed_fraction = global_step_recomp / first_decay_steps
def compute_step(completed_fraction, geometric=False):
"""Helper for `cond` operation."""
if geometric:
i_restart = math_ops.floor(
math_ops.log(1.0 - completed_fraction * (1.0 - t_mul)) /
math_ops.log(t_mul))
sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)
completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart
else:
i_restart = math_ops.floor(completed_fraction)
completed_fraction -= i_restart
return i_restart, completed_fraction
i_restart, completed_fraction = control_flow_ops.cond(
math_ops.equal(t_mul, 1.0),
lambda: compute_step(completed_fraction, geometric=False),
lambda: compute_step(completed_fraction, geometric=True))
m_fac = m_mul**i_restart
cosine_decayed = 0.5 * m_fac * (1.0 + math_ops.cos(
constant_op.constant(math.pi) * completed_fraction))
decayed = (1 - alpha) * cosine_decayed + alpha
return math_ops.multiply(initial_learning_rate, decayed, name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"first_decay_steps": self.first_decay_steps,
"t_mul": self._t_mul,
"m_mul": self._m_mul,
"alpha": self.alpha,
"name": self.name
}
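# Illustrative sketch (added for this dump, not part of the TensorFlow API):
# a plain-Python mirror of the warm-restart bookkeeping above, showing how the
# restart index and within-period fraction are derived. Relies on the
# module-level `math` import.
def _cosine_decay_restarts_example(initial_learning_rate, first_decay_steps,
                                   step, t_mul=2.0, m_mul=1.0, alpha=0.0):
  """Return the decayed rate for `step` using pure Python arithmetic."""
  completed_fraction = step / first_decay_steps
  if t_mul == 1.0:
    i_restart = math.floor(completed_fraction)
    completed_fraction -= i_restart
  else:
    i_restart = math.floor(
        math.log(1.0 - completed_fraction * (1.0 - t_mul)) / math.log(t_mul))
    sum_r = (1.0 - t_mul ** i_restart) / (1.0 - t_mul)
    completed_fraction = (completed_fraction - sum_r) / t_mul ** i_restart
  m_fac = m_mul ** i_restart
  cosine_decayed = 0.5 * m_fac * (1.0 + math.cos(math.pi * completed_fraction))
  decayed = (1 - alpha) * cosine_decayed + alpha
  return initial_learning_rate * decayed


# At step == first_decay_steps the first restart occurs: with the default
# m_mul=1.0 the multiplier jumps back to 1.0 and the next period is t_mul
# times longer.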
@keras_export("keras.experimental.LinearCosineDecay")
class LinearCosineDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a linear cosine decay schedule.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies a linear cosine decay
function to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
linear_decay = (decay_steps - step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * step / decay_steps))
decayed = (alpha + linear_decay) * cosine_decay + beta
return initial_learning_rate * decayed
```
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = (
tf.keras.experimental.LinearCosineDecay(
initial_learning_rate, decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None):
"""Applies linear cosine decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
num_periods: Number of periods in the cosine part of the decay.
See computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'LinearCosineDecay'.
"""
super(LinearCosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.num_periods = num_periods
self.alpha = alpha
self.beta = beta
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "LinearCosineDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
num_periods = math_ops.cast(self.num_periods, dtype)
alpha = math_ops.cast(self.alpha, dtype)
beta = math_ops.cast(self.beta, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
linear_decayed = (decay_steps - global_step_recomp) / decay_steps
completed_fraction = global_step_recomp / decay_steps
fraction = 2.0 * num_periods * completed_fraction
cosine_decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
linear_cosine_decayed = (alpha + linear_decayed) * cosine_decayed + beta
return math_ops.multiply(initial_learning_rate, linear_cosine_decayed,
name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"num_periods": self.num_periods,
"alpha": self.alpha,
"beta": self.beta,
"name": self.name
}
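# Illustrative sketch (added for this dump, not part of the TensorFlow API):
# the linear cosine decay formula from the docstring above in plain Python.
# Relies on the module-level `math` import.
def _linear_cosine_decay_example(initial_learning_rate, decay_steps, step,
                                 num_periods=0.5, alpha=0.0, beta=0.001):
  """Return the decayed rate for `step` using pure Python arithmetic."""
  step = min(step, decay_steps)
  linear_decay = (decay_steps - step) / decay_steps
  cosine_decay = 0.5 * (
      1 + math.cos(math.pi * 2 * num_periods * step / decay_steps))
  decayed = (alpha + linear_decay) * cosine_decay + beta
  return initial_learning_rate * decayed


# With the defaults the multiplier starts at (alpha + 1) + beta and decays to
# beta once step >= decay_steps.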
@keras_export("keras.experimental.NoisyLinearCosineDecay")
class NoisyLinearCosineDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a noisy linear cosine decay schedule.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies a noisy linear cosine decay
function to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
linear_decay = (decay_steps - step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * step / decay_steps))
decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta
return initial_learning_rate * decayed
```
where eps_t is 0-centered gaussian noise with variance
initial_variance / (1 + global_step) ** variance_decay
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = (
tf.keras.experimental.NoisyLinearCosineDecay(
initial_learning_rate, decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
initial_variance=1.0,
variance_decay=0.55,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None):
"""Applies noisy linear cosine decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
initial_variance: initial variance for the noise. See computation above.
variance_decay: decay for the noise's variance. See computation above.
num_periods: Number of periods in the cosine part of the decay.
See computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'NoisyLinearCosineDecay'.
"""
super(NoisyLinearCosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.initial_variance = initial_variance
self.variance_decay = variance_decay
self.num_periods = num_periods
self.alpha = alpha
self.beta = beta
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "NoisyLinearCosineDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
initial_variance = math_ops.cast(self.initial_variance, dtype)
variance_decay = math_ops.cast(self.variance_decay, dtype)
num_periods = math_ops.cast(self.num_periods, dtype)
alpha = math_ops.cast(self.alpha, dtype)
beta = math_ops.cast(self.beta, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
linear_decayed = (decay_steps - global_step_recomp) / decay_steps
variance = initial_variance / (
math_ops.pow(1.0 + global_step_recomp, variance_decay))
std = math_ops.sqrt(variance)
noisy_linear_decayed = (
linear_decayed + random_ops.random_normal(
linear_decayed.shape, stddev=std))
completed_fraction = global_step_recomp / decay_steps
fraction = 2.0 * num_periods * completed_fraction
cosine_decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
noisy_linear_cosine_decayed = (
(alpha + noisy_linear_decayed) * cosine_decayed + beta)
return math_ops.multiply(
initial_learning_rate, noisy_linear_cosine_decayed, name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"initial_variance": self.initial_variance,
"variance_decay": self.variance_decay,
"num_periods": self.num_periods,
"alpha": self.alpha,
"beta": self.beta,
"name": self.name
}
@keras_export("keras.optimizers.schedules.serialize")
def serialize(learning_rate_schedule):
return generic_utils.serialize_keras_object(learning_rate_schedule)
@keras_export("keras.optimizers.schedules.deserialize")
def deserialize(config, custom_objects=None):
return generic_utils.deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="decay")
|
karllessard/tensorflow
|
tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py
|
Python
|
apache-2.0
| 38,455
|
[
"Gaussian"
] |
99fff8c8f1d9d41426f30edb197f7272edda42626493d5c79f193c7ddb25a549
|
import math
import cStringIO
import os
import copy as cp
from itertools import chain, ifilter, ifilterfalse
from functools import partial
import re
import operator as op
from string import Template as T
import logging
from lib.typecheck import *
import lib.const as C
from lib.enum import enum
import util
import sample
from meta import methods, classes, class_lookup
from meta.template import Template
from meta.clazz import Clazz, find_fld, find_mtds_by_name, find_mtds_by_sig, find_base
from meta.method import Method, sig_match
from meta.field import Field
from meta.statement import Statement
import meta.statement as st
from meta.expression import Expression, typ_of_e
import meta.expression as exp
# constants regarding sketch
C.SK = enum(z=u"bit", self=u"self")
# global constants that should be placed at every sketch file
_const = u''
# among class declarations in the template
# exclude subclasses so that only the base class remains
# (will make a virtual struct representing all the classes in that hierarchy)
@takes(list_of(Clazz))
@returns(list_of(Clazz))
def rm_subs(clss):
# { cname: Clazz(cname, ...), ... }
decls = { cls.name: cls for cls in clss }
# remove subclasses
for cname in decls.keys():
if util.is_collection(cname): continue
cls = class_lookup(cname)
if not cls.is_class: continue
if cls.is_aux: continue # virtual relations; don't remove sub classes
for sub in cls.subs:
if sub.name in decls:
logging.debug("{} < {}".format(sub.name, cname))
del decls[sub.name]
for sup in util.ffilter([cls.sup]):
if sup in decls and cname in decls:
logging.debug("{} < {}".format(cname, sup))
del decls[cname]
return decls.values()
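# Illustrative sketch (hypothetical hierarchy; assumes class_lookup() is already
# populated): given classes A, B < A, and C < B, rm_subs([A, B, C]) keeps only
# [A], since B and C will be folded into the virtual struct built for A.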
# convert the given type name into a newer one
_ty = {} # { tname : new_tname }
@takes(dict_of(unicode, unicode))
@returns(nothing)
def add_ty_map(m):
global _ty
for key in m: _ty[key] = m[key]
@takes(unicode)
@returns(unicode)
def trans_ty(tname):
_tname = util.sanitize_ty(tname.strip())
array_regex = r"([^ \[\]]+)((\[\])+)"
m = re.match(array_regex, _tname)
global _ty
r_ty = _tname
# to avoid primitive types that Sketch doesn't support
if _tname == C.J.z: r_ty = C.SK.z
elif _tname in [C.J.b, C.J.s, C.J.j]: r_ty = C.J.i
# unboxing primitive Classes, e.g., Character -> char
elif _tname in C.autoboxing: r_ty = util.unboxing(_tname)
# TODO: parameterize len?
elif _tname in [C.J.c+"[]"]: r_ty = u"{}[51]".format(C.J.c)
elif _tname in [C.J.B, C.J.S, C.J.J, C.J.I]: r_ty = C.J.i
# array bounds
elif m:
r_ty = trans_ty(m.group(1)) + \
"[{}]".format(len(methods())) * len(re.findall(r"\[\]", m.group(2)))
# use memoized type conversion
elif _tname in _ty: r_ty = _ty[_tname]
# convert Java collections into an appropriate struct name
# Map<K,V> / List<T> / ... -> Map_K_V / List_T / ...
elif util.is_collection(_tname):
r_ty = '_'.join(util.of_collection(_tname))
logging.debug("{} => {}".format(_tname, r_ty))
_ty[_tname] = r_ty
return r_ty
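# Illustrative sketch of the conversion (exact results depend on the constants
# in lib.const and on len(methods()) at call time):
#   trans_ty(u"boolean")   => u"bit"
#   trans_ty(u"Map<K,V>")  => u"Map_K_V"
#   trans_ty(u"char[]")    => u"char[51]"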
# check whether the given type is replaced due to class hierarchy
@takes(unicode)
@returns(bool)
def is_replaced(tname):
return tname != trans_ty(tname)
# sanitize method name
# e.g., JComboBox(E[]) => JComboBox_JComboBox_E[] => JComboBox_JComboBox_Es
@takes(unicode)
@returns(unicode)
def sanitize_mname(mname):
return mname.replace("[]",'s')
# convert the given method name into a new one
# considering parameterized types (e.g., collections) and inheritances
_mtds = {} # { cname_mname_... : new_mname }
@takes(unicode, unicode, list_of(unicode))
@returns(unicode)
def trans_mname(cname, mname, arg_typs=[]):
global _mtds
r_mtd = mname
mid = u'_'.join([cname, mname] + arg_typs)
# use memoized method name conversion
if mid in _mtds:
return _mtds[mid]
# methods of Java collections
elif util.is_collection(cname):
_arg_typs = map(trans_ty, arg_typs)
r_mtd = u'_'.join([mname, trans_ty(cname)] + _arg_typs)
else:
if is_replaced(cname):
tr_name = trans_ty(cname)
cls = class_lookup(tr_name)
if cls and cls.is_aux: cname = tr_name
mtds = find_mtds_by_sig(cname, mname, arg_typs)
if mtds and 1 == len(mtds):
r_mtd = unicode(repr(mtds[0]))
else: # ambiguous or not found
r_mtd = '_'.join([mname, util.sanitize_ty(cname)])
r_mtd = sanitize_mname(r_mtd)
_mtds[mid] = r_mtd
return r_mtd
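# Illustrative sketch (hypothetical names): a resolved call reuses repr(mtd),
# a collection method such as Map<K,V>.get becomes something like
# u"get_Map_K_V_K", and an unresolved library method foo on class A falls
# back to u"foo_A".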
# basic Java libraries
@takes(nothing)
@returns(unicode)
def trans_lib():
return u''
# to avoid duplicate structs for collections
_collections = set([])
# Java collections -> C-style struct (along with basic functions)
@takes(Clazz)
@returns(unicode)
def col_to_struct(cls):
buf = cStringIO.StringIO()
cname = cls.name
sname = trans_ty(cname)
global _collections
if sname in _collections:
logging.debug("collection: {} (duplicated)".format(cname))
return u''
else:
_collections.add(sname)
logging.debug("collection: " + cname)
buf.write("struct ${sname} {\n int idx;\n")
if C.J.MAP in cname:
_, k, v = util.of_collection(cname)
k = trans_ty(k)
v = trans_ty(v)
# Map<K,V> -> struct Map_K_V { int idx; K[S] key; V[S] val; }
buf.write(" ${k}[S] key;\n ${v}[S] val;\n}\n")
# Map<K,V>.containsKey -> containsKey_Map_K_V
buf.write("""
bit {} (${{sname}} map, ${{k}} k) {{
int i;
for (i = 0; map.val[i] != null && i < S; i++) {{
if (map.key[i] == k) return 1;
}}
return 0;
}}
""".format(trans_mname(cname, u"containsKey", [k])))
# Map<K,V>.get -> get_Map_K_V
buf.write("""
${{v}} {} (${{sname}} map, ${{k}} k) {{
int i;
for (i = 0; map.val[i] != null && i < S; i++) {{
if (map.key[i] == k) return map.val[i];
}}
return null;
}}
""".format(trans_mname(cname, u"get", [k])))
# Map<K,V>.put -> put_Map_K_V
buf.write("""
void {} (${{sname}} map, ${{k}} k, ${{v}} v) {{
map.key[map.idx] = k;
map.val[map.idx] = v;
map.idx = (map.idx + 1) % S;
}}
""".format(trans_mname(cname, u"put", [k, v])))
# Map<K,V>.clear -> clear_Map_K_V
if util.is_class_name(k): default_k = "null"
else: default_k = "0"
buf.write("""
void {} (${{sname}} map) {{
map.idx = 0;
for (int i = 0; i < S; i++) {{
map.key[i] = {};
map.val[i] = null;
}}
}}
""".format(trans_mname(cname, u"clear", []), default_k))
else:
collection, t = util.of_collection(cname)
t = trans_ty(t)
if C.J.QUE in collection: buf.write(" int head;\n")
# Collection<T> -> struct Collection_T { int idx; T[S] elts; }
buf.write(" ${t}[S] elts;\n}\n")
if C.J.STK in collection:
# Stack<T>.peek -> peek_Stack_T
buf.write("""
${{t}} {} (${{sname}} stk) {{
if (stk.idx == 0) return null;
${{t}} top = stk.elts[stk.idx - 1];
return top;
}}
""".format(trans_mname(cname, u"peek", [])))
# Stack<T>.push -> push_Stack_T
buf.write("""
${{t}} {} (${{sname}} stk, ${{t}} elt) {{
stk.elts[stk.idx] = elt;
stk.idx = (stk.idx + 1) % S;
return elt;
}}
""".format(trans_mname(cname, u"push", [t])))
# Stack<T>.pop -> pop_Stack_T
buf.write("""
${{t}} {} (${{sname}} stk) {{
if (stk.idx == 0) return null;
stk.idx = stk.idx - 1;
${{t}} top = stk.elts[stk.idx];
stk.elts[stk.idx] = null;
return top;
}}
""".format(trans_mname(cname, u"pop", [])))
elif C.J.QUE in collection:
# Queue<T>.add -> add_Queue_T
buf.write("""
bit {} (${{sname}} que, ${{t}} elt) {{
que.elts[que.idx] = elt;
que.idx = (que.idx + 1) % S;
return true;
}}
""".format(trans_mname(cname, u"add", [t])))
# Queue<T>.remove -> remove_Queue_T
buf.write("""
${{t}} {} (${{sname}} que) {{
if (que.head == que.idx) return null;
${{t}} top = que.elts[que.head];
que.elts[que.head] = null;
que.head = (que.head + 1) % S;
return top;
}}
""".format(trans_mname(cname, u"remove", [])))
# Queue<T>.isEmpty -> isEmpty_Queue_T
buf.write("""
bit {} (${{sname}} que) {{
return que.head == que.idx;
}}
""".format(trans_mname(cname, u"isEmpty", [])))
elif C.J.LST in collection:
# List<T>.add -> add_List_T
buf.write("""
bit {} (${{sname}} lst, ${{t}} elt) {{
lst.elts[lst.idx] = elt;
lst.idx = (lst.idx + 1) % S;
return true;
}}
""".format(trans_mname(cname, u"add", [t])))
# List<T>.remove(T) -> remove_List_T_T
buf.write("""
bit {} (${{sname}} lst, ${{t}} elt) {{
int i;
for (i = 0; lst.elts[i] != null && i < S; i++) {{
if (lst.elts[i] == elt) {{
lst.elts[i] = null;
int j;
for (j = i + 1; lst.elts[j] != null && j < lst.idx; j++) {{
lst.elts[j-1] = lst.elts[j];
}}
lst.idx = (lst.idx - 1) % S;
return true;
}}
}}
return false;
}}
""".format(trans_mname(cname, u"remove", [t])))
# List<T>.remove(int) -> remove_List_T_int
buf.write("""
${{t}} {} (${{sname}} lst, int index) {{
${{t}} res = null;
if (0 <= index && index < lst.idx) {{
res = lst.elts[index];
lst.elts[index] = null;
int i;
for (i = index + 1; lst.elts[i] != null && i < lst.idx; i++) {{
lst.elts[i-1] = lst.elts[i];
}}
lst.idx = (lst.idx - 1) % S;
}}
return res;
}}
""".format(trans_mname(cname, u"remove", [C.J.i])))
# List<T>.get -> get_List_T
buf.write("""
${{t}} {} (${{sname}} lst, int index) {{
${{t}} res = null;
if (0 <= index && index < lst.idx) {{
res = lst.elts[index];
}}
return res;
}}
""".format(trans_mname(cname, u"get", [C.J.i])))
# List<T>.isEmpty -> isEmpty_List_T
buf.write("""
bit {} (${{sname}} lst) {{
return lst.idx == 0;
}}
""".format(trans_mname(cname, u"isEmpty", [])))
return T(buf.getvalue()).safe_substitute(locals())
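# Illustrative sketch: for a hypothetical u"Map<String,Object>", this emits
# struct Map_String_Object along with its containsKey/get/put/clear helpers;
# asking for the same collection again returns u'' (deduplicated).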
_flds = {} # { cname.fname : new_fname }
_s_flds = {} # { cname.fname : accessor }
# from the given base class,
# generate a virtual struct that encompasses all the class in the hierarchy
@takes(Clazz)
@returns(Clazz)
def to_v_struct(cls):
cls_v = Clazz(name=cls.name)
fld_ty = Field(clazz=cls_v, typ=C.J.i, name=u"__cid")
cls_v.flds.append(fld_ty)
global _ty, _flds, _s_flds
@takes(dict_of(unicode, Field), Clazz)
@returns(nothing)
def per_cls(sup_flds, cls):
aux_name = None
    # if this class is supposed to be replaced (due to pattern rewriting),
    # apply that replacement first, and then replace that aux type as well
if not cls.is_aux and cls.name in _ty:
aux_name = _ty[cls.name]
logging.debug("{} => {}".format(aux_name, cls_v.name))
      # make sure the aux type is mapped into this family as well
if aux_name not in _ty: _ty[aux_name] = cls_v.name
# keep mappings from original subclasses to the representative
# so that subclasses can refer to the representative
# e.g., for C < B < A, { B : A, C : A }
cname = util.sanitize_ty(cls.name)
if cname != cls_v.name: # exclude the root of this family
logging.debug("{} => {}".format(cname, cls_v.name))
_ty[cname] = cls_v.name
if cls.is_inner: # to handle inner class w/ outer class name
logging.debug("{} => {}".format(repr(cls), cls_v.name))
_ty[unicode(repr(cls))] = cls_v.name
# if this class implements an interface which has constants,
# then copy those constants
for itf in cls.itfs:
cls_i = class_lookup(itf)
if not cls_i or not cls_i.flds: continue
for fld in cls_i.flds:
sup_flds[fld.name] = fld
# also, keep mappings from original member fields to newer ones
# so that usage of old fields can be replaced accordingly
# e.g., for A.f1 and B.f2, { A.f1 : f1_A, B.f1 : f1_A, B.f2 : f2_B }
for sup_fld in sup_flds.keys():
fld = sup_flds[sup_fld]
fname = unicode(repr(fld))
fid = '.'.join([cname, sup_fld])
logging.debug("{} => {}".format(fid, fname))
if fld.is_static: _s_flds[fid] = fname
else: _flds[fid] = fname # { ..., B.f1 : f1_A }
cur_flds = cp.deepcopy(sup_flds) # { f1 : f1_A }
@takes(Field)
@returns(nothing)
def cp_fld(fld):
cur_flds[fld.name] = fld # { ..., f2 : f2_B }
fname = unicode(repr(fld))
fld_v = cp.deepcopy(fld)
fld_v.clazz = cls_v
fld_v.name = fname
cls_v.flds.append(fld_v)
def upd_flds(cname):
fid = '.'.join([cname, fld.name])
# if A.f1 exists and B redefines f1, then B.f1 : f1_A
# except for enum, which can (re)define its own fields
        # e.g., SwingConstants.LEADING vs. GroupLayout.Alignment.LEADING
if not cls.is_enum and (fid in _s_flds or fid in _flds): return
logging.debug("{} => {}".format(fid, fname))
if fld.is_static: _s_flds[fid] = fname
else: _flds[fid] = fname # { ..., B.f2 : f2_B }
upd_flds(cname)
if aux_name: upd_flds(aux_name)
map(cp_fld, cls.flds)
# subclass relations of aux types are virtual, so do not visit further
if not cls.is_aux:
map(partial(per_cls, cur_flds), cls.subs)
per_cls({}, cls)
return cls_v
@takes(Field)
@returns(str)
def trans_fld(fld):
buf = cStringIO.StringIO()
buf.write(' '.join([trans_ty(fld.typ), fld.name]))
if fld.is_static and fld.init and \
not fld.init.has_call and not fld.init.has_str and not fld.is_aliasing:
buf.write(" = " + trans_e(None, fld.init))
buf.write(';')
return buf.getvalue()
# Java class (along with subclasses) -> C-style struct
@takes(Clazz)
@returns(str)
def to_struct(cls):
# make mappings from static fields to corresponding accessors
def gen_s_flds_accessors(cls):
s_flds = filter(op.attrgetter("is_static"), cls.flds)
global _s_flds
for fld in ifilterfalse(op.attrgetter("is_private"), s_flds):
cname = fld.clazz.name
fid = '.'.join([cname, fld.name])
fname = unicode(repr(fld))
logging.debug("{} => {}".format(fid, fname))
_s_flds[fid] = fname
cname = util.sanitize_ty(cls.name)
global _ty
# if this is an interface, merge this into another family of classes
# as long as classes that implement this interface are in the same family
if cls.is_itf:
# interface may have static constants
gen_s_flds_accessors(cls)
subss = util.flatten_classes(cls.subs, "subs")
bases = util.rm_dup(map(lambda sub: find_base(sub), subss))
# filter out interfaces that extend other interfaces, e.g., Action
base_clss, _ = util.partition(op.attrgetter("is_class"), bases)
if not base_clss:
logging.debug("no implementer of {}".format(cname))
elif len(base_clss) > 1:
logging.debug("ambiguous inheritance of {}: {}".format(cname, base_clss))
else: # len(base_clss) == 1
base = base_clss[0]
base_name = base.name
logging.debug("{} => {}".format(cname, base_name))
_ty[cname] = base_name
if cls.is_inner: # to handle inner interface w/ outer class name
logging.debug("{} => {}".format(repr(cls), base_name))
_ty[unicode(repr(cls))] = base_name
return ''
# if this is the base class having subclasses,
# make a virtual struct first
if cls.subs and not cls.is_aux:
cls = to_v_struct(cls)
cname = cls.name
# cls can be modified above, thus generate static fields accessors here
gen_s_flds_accessors(cls)
# for unique class numbering, add an identity mapping
if cname not in _ty: _ty[cname] = cname
buf = cStringIO.StringIO()
buf.write("struct " + cname + " {\n int hash;\n")
# to avoid static fields, which will be bound to a class-representing package
_, i_flds = util.partition(op.attrgetter("is_static"), cls.flds)
buf.write('\n'.join(map(trans_fld, i_flds)))
if len(i_flds) > 0: buf.write('\n')
buf.write("}\n")
return buf.getvalue()
# convert the given field name into a newer one
# only if the field belongs to a virtual representative struct
@takes(unicode, unicode, optional(bool))
@returns(unicode)
def trans_fname(cname, fname, is_static=False):
global _flds, _s_flds
r_fld = fname
fid = '.'.join([cname, fname])
if is_static:
if fid in _s_flds: r_fld = _s_flds[fid]
else:
if fid in _flds: r_fld = _flds[fid]
return r_fld
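# Illustrative sketch (hypothetical hierarchy): once subclass B is merged into
# its base A, trans_fname(u"B", u"f1") returns u"f1_A" via _flds; a field that
# was never remapped comes back unchanged.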
# collect method/field declarations in the given class and its inner classes
@takes(Clazz)
@returns(list_of((Method, Field)))
def collect_decls(cls, attr):
clss = util.flatten_classes([cls], "inners")
declss = map(op.attrgetter(attr), clss)
return util.flatten(declss)
# TODO: no longer used?
# translate class <init> into sketch's initializer with named parameters
@takes(unicode, list_of(unicode), list_of(unicode))
@returns(str)
def trans_init(cls_name, arg_typs, args):
buf = cStringIO.StringIO()
cls = class_lookup(cls_name)
if util.is_collection(cls_name) or not cls:
buf.write(trans_ty(cls_name) + "()")
elif is_replaced(cls_name):
buf.write(trans_ty(cls_name) + "(hash=nonce())")
else:
add_on = []
if args:
# NOTE: assume the order of arguments is same as that of fields
# NOTE: for missing fields, just call default constructors
# TODO: use template.sig_match
kwargs = zip(cls.flds, args)
if kwargs: assigned, _ = zip(*kwargs)
else: assigned = []
not_assigned = [fld for fld in cls.flds if fld not in assigned]
if not_assigned:
def default_init(fld):
if util.is_class_name(fld.typ):
return C.J.NEW + ' ' + trans_init(fld.typ, [], [])
else: return '0'
add_on = map(default_init, not_assigned)
# else: # means, default constructor
flds = ["hash"] + map(op.attrgetter("name"), cls.flds)
vals = ["nonce()"] + args + add_on
kwargs = map(lambda (f, v): "{}={}".format(f, v), zip(flds, vals))
buf.write('_'.join([cls_name] + arg_typs))
buf.write('(' + ", ".join(kwargs) + ')')
return buf.getvalue()
# sanitize id by removing package name
# e.g., javax.swing.SwingUtilities.invokeLater -> SwingUtilities.invokeLater
@takes(unicode)
@returns(unicode)
def sanitize_id(dot_id):
pkg, cls, mtd = util.explode_mname(dot_id)
if cls and util.is_class_name(cls) and class_lookup(cls):
clazz = class_lookup(cls)
if clazz.pkg and pkg and clazz.pkg != pkg: # to avoid u'' != None
raise Exception("wrong package", pkg, clazz.pkg)
return '.'.join([cls, mtd])
return dot_id
# need to check log conformity except for calls inside the platform
# i.e., client -> client, platform -> client or vice versa
# also discard super calls towards the platform, e.g.,
# class MyActivity extends Activity {
# ... onCreate(...) { super.onCreate(...); ... }
# }
@takes(Method, Method)
@returns(bool)
def check_logging(caller, callee):
return (caller.clazz.client or callee.clazz.client) and \
not caller.is_supercall(callee)
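# e.g. (sketch): a client-side caller invoking a platform callee is checked
# against the log, whereas a platform-internal call, or a super call such as
# super.onCreate(...) towards the platform, is not.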
@takes(optional(Method), Expression)
@returns(str)
def trans_e(mtd, e):
curried = partial(trans_e, mtd)
buf = cStringIO.StringIO()
if e.kind == C.E.ANNO:
anno = e.anno
if anno.name == C.A.NEW: pass # TODO
elif anno.name == C.A.OBJ:
buf.write("retrieve_{}@log({})".format(util.sanitize_ty(anno.typ), anno.idx))
# @Compare(exps) => {| exps[0] (< | <= | == | != | >= | >) exps[1] |}
    # @CompareString(exps) => exps[0].equals(exps[1])
elif anno.name in [C.A.CMP, C.A.CMP_STR]:
le = curried(anno.exps[0])
re = curried(anno.exps[1])
if anno.name == C.A.CMP:
buf.write("{| " + le + " (< | <= | == | != | >= | >) " + re + " |}")
else:
buf.write("{}({},{})".format(trans_mname(C.J.STR, u"equals"), le, re))
elif e.kind == C.E.GEN:
if e.es:
buf.write("{| ")
buf.write(" | ".join(map(curried, e.es)))
buf.write(" |}")
else:
buf.write(C.T.HOLE)
elif e.kind == C.E.ID:
if hasattr(e, "ty"): buf.write(trans_ty(e.ty) + ' ')
fld = None
if mtd and e.id not in mtd.param_vars:
fld = find_fld(mtd.clazz.name, e.id)
if fld: # fname -> self.new_fname (unless the field is static)
new_fname = trans_fname(fld.clazz.name, e.id, fld.is_static)
if fld.is_static:
# access to the static field inside the same class
if fld.clazz.name == mtd.clazz.name: buf.write(e.id)
# o.w., e.g., static constant in an interface, call the accessor
else: buf.write(new_fname + "()")
else: buf.write('.'.join([C.SK.self, new_fname]))
elif e.id in [C.J.THIS, C.J.SUP]: buf.write(C.SK.self)
elif util.is_str(e.id): # constant string, such as "Hello, World"
str_init = trans_mname(C.J.STR, C.J.STR, [u"char[]", C.J.i, C.J.i])
s_hash = hash(e.id) % 256 # hash string value itself
buf.write("{}(new Object(hash={}), {}, 0, {})".format(str_init, s_hash, e.id, len(e.id)))
else: buf.write(e.id)
elif e.kind == C.E.UOP:
buf.write(' '.join([e.op, curried(e.e)]))
elif e.kind == C.E.BOP:
buf.write(' '.join([curried(e.le), e.op, curried(e.re)]))
elif e.kind == C.E.DOT:
# with package names, e.g., javax.swing.SwingUtilities
if util.is_class_name(e.re.id) and class_lookup(e.re.id):
buf.write(curried(e.re))
elif e.re.id == C.J.THIS: # ClassName.this
buf.write(C.SK.self)
else:
rcv_ty = typ_of_e(mtd, e.le)
fld = find_fld(rcv_ty, e.re.id)
new_fname = trans_fname(rcv_ty, e.re.id, fld.is_static)
if fld.is_static:
# access to the static field inside the same class
if mtd and rcv_ty == mtd.clazz.name: buf.write(e.re.id)
# o.w., e.g., static constant in an interface, call the accessor
else: buf.write(new_fname + "()")
else: buf.write('.'.join([curried(e.le), new_fname]))
elif e.kind == C.E.IDX:
buf.write(curried(e.e) + '[' + curried(e.idx) + ']')
elif e.kind == C.E.NEW:
if e.e.kind == C.E.CALL:
ty = typ_of_e(mtd, e.e.f)
cls = class_lookup(ty)
if cls and cls.has_init:
arg_typs = map(partial(typ_of_e, mtd), e.e.a)
mname = trans_mname(cls.name, cls.name, arg_typs)
obj = "alloc@log({})".format(cls.id)
args = [obj] + map(unicode, map(curried, e.e.a))
buf.write("{}({})".format(mname, ", ".join(args)))
else: # collection or Object
buf.write(C.J.NEW + ' ' + trans_ty(ty) + "()")
else: # o.w., array initialization, e.g., new int[] { ... }
buf.write(str(e.init))
elif e.kind == C.E.CALL:
arg_typs = map(partial(typ_of_e, mtd), e.a)
def trans_call(callee, rcv_ty, rcv):
if callee.is_static: rcv = None
logging = None
if not util.is_collection(callee.clazz.name):
logging = str(check_logging(mtd, callee)).lower()
args = util.rm_none([rcv] + map(curried, e.a) + [logging])
mid = trans_mname(rcv_ty, callee.name, arg_typs)
return u"{}({})".format(mid, ", ".join(args))
def dynamic_dispatch(rcv_ty, rcv, acc, callee):
_dispatched = trans_call(callee, callee.clazz.name, rcv)
_guarded = "{}.__cid == {} ? {}".format(rcv, callee.clazz.id, _dispatched)
return "({} : {})".format(_guarded, acc)
if e.f.kind == C.E.DOT: # rcv.mid
rcv_ty = typ_of_e(mtd, e.f.le)
rcv = curried(e.f.le)
mname = e.f.re.id
mtd_callees = find_mtds_by_sig(rcv_ty, mname, arg_typs)
if mtd_callees and 1 < len(mtd_callees): # needs dynamic dispatch
curried_dispatch = partial(dynamic_dispatch, rcv_ty, rcv)
# TODO: use least upper bound?
default_v = util.default_value(mtd_callees[0].typ)
buf.write(reduce(curried_dispatch, mtd_callees, default_v))
elif mtd_callees and 1 == len(mtd_callees):
mtd_callee = mtd_callees[0]
buf.write(trans_call(mtd_callee, rcv_ty, rcv))
else: # unresolved, maybe library method
mid = trans_mname(rcv_ty, mname, arg_typs)
args = util.rm_none([rcv] + map(curried, e.a))
buf.write("{}({})".format(mid, ", ".join(args)))
else: # mid
mname = e.f.id
# pre-defined meta information or Sketch primitive functions
if mname in C.typ_arrays + [u"minimize"]:
mid = mname
rcv = None
args = util.rm_none([rcv] + map(curried, e.a))
buf.write("{}({})".format(mid, ", ".join(args)))
elif mname == C.J.SUP and mtd.is_init: # super(...) inside <init>
sup = class_lookup(mtd.clazz.sup)
mid = trans_mname(sup.name, sup.name, arg_typs)
rcv = C.SK.self
args = util.rm_none([rcv] + map(curried, e.a))
buf.write("{}({})".format(mid, ", ".join(args)))
else: # member methods
mtd_callees = find_mtds_by_sig(mtd.clazz.name, mname, arg_typs)
if mtd_callees and 1 < len(mtd_callees): # needs dynamic dispatch
curried_dispatch = partial(dynamic_dispatch, mtd.clazz.name, C.SK.self)
# TODO: use least upper bound?
default_v = util.default_value(mtd_callees[0].typ)
buf.write(reduce(curried_dispatch, mtd_callees, default_v))
elif mtd_callees and 1 == len(mtd_callees):
mtd_callee = mtd_callees[0]
buf.write(trans_call(mtd_callee, mtd.clazz.name, C.SK.self))
        else: # unresolved, maybe library method
          mid = trans_mname(mtd.clazz.name, mname, arg_typs)
          # NOTE: no receiver is resolved on this path; assume the enclosing
          # instance (C.SK.self) is the receiver, as in the resolved member case
          rcv = C.SK.self
          args = util.rm_none([rcv] + map(curried, e.a))
          buf.write("{}({})".format(mid, ", ".join(args)))
elif e.kind == C.E.CAST:
# since a family of classes is merged, simply ignore the casting
buf.write(curried(e.e))
elif e.kind == C.E.INS_OF:
ty = typ_of_e(mtd, e.ty)
cls = class_lookup(ty)
if cls:
buf.write(curried(e.e) + ".__cid == " + str(cls.id))
else:
logging.debug("unknown type: {}".format(ty))
buf.write("0")
else: buf.write(str(e))
return buf.getvalue()
@takes(Method, Statement)
@returns(str)
def trans_s(mtd, s):
curried_e = partial(trans_e, mtd)
curried_s = partial(trans_s, mtd)
buf = cStringIO.StringIO()
if s.kind == C.S.IF:
e = curried_e(s.e)
t = '\n'.join(map(curried_s, s.t))
f = '\n'.join(map(curried_s, s.f))
buf.write("if (" + e + ") {\n" + t + "\n}")
if f: buf.write("\nelse {\n" + f + "\n}")
elif s.kind == C.S.WHILE:
e = curried_e(s.e)
b = '\n'.join(map(curried_s, s.b))
buf.write("while (" + e + ") {\n" + b + "\n}")
elif s.kind == C.S.REPEAT:
e = curried_e(s.e)
b = '\n'.join(map(curried_s, s.b))
if e == "??": buf.write("minrepeat {\n" + b + "\n}")
else: buf.write("repeat (" + e + ") {\n" + b + "\n}")
elif s.kind == C.S.MINREPEAT:
b = '\n'.join(map(curried_s, s.b))
buf.write("minrepeat {\n" + b + "\n}")
elif s.kind == C.S.FOR:
# assume "for" is used for List<T> and LinkedList<T> only
col = mtd.vars[s.init.id]
if not util.is_collection(col) or \
util.of_collection(col)[0] not in [C.J.LST, C.J.LNK]:
raise Exception("not iterable type", col)
# if this is about observers, let sketch choose iteration direction
is_obs = hasattr(class_lookup(util.of_collection(col)[1]), "obs")
s_init = curried_e(s.init)
if is_obs: init = "{{| 0 | {}.idx - 1 |}}".format(s_init)
else: init = '0'
buf.write(" int idx = {};".format(init))
s_i_typ = trans_ty(s.i.ty)
buf.write("""
while (0 <= idx && idx < S && {s_init}.elts[idx] != null) {{
{s_i_typ} {s.i.id} = {s_init}.elts[idx];
""".format(**locals()))
buf.write('\n'.join(map(curried_s, s.b)))
if is_obs: upd = "{| idx (+ | -) 1 |}"
else: upd = "idx + 1"
buf.write("""
idx = {};
}}
""".format(upd))
elif s.kind == C.S.TRY:
# NOTE: no idea how to handle catch blocks
# at this point, just walk through try/finally blocks
buf.write('\n'.join(map(curried_s, s.b + s.fs)))
else: buf.write(s.__str__(curried_e))
return buf.getvalue()
@takes(tuple_of(unicode))
@returns(tuple_of(unicode))
def log_param( (ty, nm) ):
ty = trans_ty(ty)
if util.is_class_name(ty):
if nm == C.J.N:
return (u'', u'')
else:
nm_hash = nm + u"_hash"
retrival = u"""
int {nm_hash} = 0;
if ({nm} != null) {{ {nm_hash} = {nm}.hash; }}
""".format(**locals())
return (retrival, nm_hash)
elif ty in [C.SK.z] + C.primitives:
return (u'', nm)
else:
return (u'', u'')
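# Illustrative sketch: a class-typed parameter like (u"Object", u"o") yields a
# null-guarded retrieval of o.hash plus the name u"o_hash"; a primitive such as
# (u"int", u"n") (assuming int appears in C.primitives) yields (u'', u"n").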
# Java member method -> C-style function
_mids = set([]) # to maintain which methods are logged
_inits = set([]) # to maintain which <init> are translated
@takes(list_of(sample.Sample), Method)
@returns(str)
def to_func(smpls, mtd):
buf = cStringIO.StringIO()
if C.mod.GN in mtd.mods: buf.write(C.mod.GN + ' ')
elif C.mod.HN in mtd.mods: buf.write(C.mod.HN + ' ')
ret_ty = trans_ty(mtd.typ)
cname = unicode(repr(mtd.clazz))
mname = mtd.name
arg_typs = mtd.param_typs
buf.write(ret_ty + ' ' + trans_mname(cname, mname, arg_typs) + '(')
@takes(tuple_of(unicode))
@returns(unicode)
def trans_param( (ty, nm) ):
return ' '.join([trans_ty(ty), nm])
# for instance methods, add "this" pointer into parameters
if mtd.is_static:
params = mtd.params[:]
else:
self_ty = trans_ty(unicode(repr(mtd.clazz)))
params = [ (self_ty, C.SK.self) ] + mtd.params[:]
# add "logging" flag into parameters
# to check log conformity only if invocations cross the boundary
if not mtd.is_init and not mtd.is_clinit:
params.append( (C.SK.z, u"logging") )
if len(params) > 0:
buf.write(", ".join(map(trans_param, params)))
buf.write(") {\n")
# once function signature is dumped out, remove "logging" flag
if not mtd.is_init and not mtd.is_clinit:
params.pop()
clss = util.flatten_classes([mtd.clazz], "subs")
logged = (not mtd.is_init) and sample.mtd_appears(smpls, clss, mtd.name)
mid = unicode(repr(mtd))
m_ent = mid + "_ent()"
m_ext = mid + "_ext()"
if logged:
global _mids
_mids.add(mid)
if logged: # logging method entry (>)
_log_params = map(log_param, params)
_retrievals, _hashes = util.split([(u'', m_ent)] + _log_params)
ent_retrievals = util.ffilter(_retrievals)
ent_hashes = util.ffilter(_hashes)
buf.write("""{}
int[P] __params = {{ {} }};
if (logging) check_log@log(__params);
""".format(u''.join(ent_retrievals), u", ".join(ent_hashes)))
is_void = C.J.v == mtd.typ
if mtd.body:
if not is_void and not mtd.is_init:
bodies = mtd.body[:-1] # exclude the last 'return' statement
else: bodies = mtd.body
buf.write('\n'.join(map(partial(trans_s, mtd), bodies)))
if logged: # logging method exit (<)
_log_params = []
if mtd.body and not is_void and not mtd.is_init:
ret_v = mtd.body[-1].e
ret_u = unicode(trans_e(mtd, ret_v))
# retrieve the return value to a temporary variable
buf.write(u"""
{} __ret = {};
""".format(ret_ty, ret_u))
# then, try to obtain a hash from that temporary variable
_log_params.append(log_param( (ret_ty, u"__ret") ))
_retrievals, _hashes = util.split([(u'', m_ext)] + _log_params)
ext_retrievals = util.ffilter(_retrievals)
ext_hashes = util.ffilter(_hashes)
buf.write("""{}
__params = {{ {} }};
if (logging) check_log@log(__params);
""".format(u''.join(ext_retrievals), u", ".join(ext_hashes)))
if mtd.body and not is_void and not mtd.is_init:
buf.write(os.linesep)
if logged:
# return the return value stored at the temporary variable
buf.write("return __ret;")
else:
buf.write(trans_s(mtd, mtd.body[-1]))
if mtd.is_init:
evt_srcs = map(util.sanitize_ty, sample.evt_sources(smpls))
cname = unicode(repr(mtd.clazz))
if cname in evt_srcs:
global _inits
_inits.add(cname)
buf.write("\nreturn {};".format(C.SK.self))
buf.write("\n}\n")
return buf.getvalue()
# generate type.sk
@takes(str, list_of(Clazz))
@returns(nothing)
def gen_type_sk(sk_dir, bases):
buf = cStringIO.StringIO()
buf.write("package type;\n")
buf.write(_const)
buf.write(trans_lib())
buf.write('\n')
cols, decls = util.partition(lambda c: util.is_collection(c.name), bases)
decls = filter(lambda c: not util.is_array(c.name), decls)
itfs, clss = util.partition(op.attrgetter("is_itf"), decls)
logging.debug("# interface(s): {}".format(len(itfs)))
logging.debug("# class(es): {}".format(len(clss)))
# convert interfaces first, then usual classes
buf.write('\n'.join(util.ffilter(map(to_struct, itfs))))
buf.write('\n'.join(util.ffilter(map(to_struct, clss))))
# convert collections at last
logging.debug("# collection(s): {}".format(len(cols)))
buf.write('\n'.join(map(col_to_struct, cols)))
# argument number of methods
arg_num = map(lambda mtd: len(mtd.params), methods())
buf.write("""
#define _{0} {{ {1} }}
int {0}(int id) {{
return _{0}[id];
}}
""".format(C.typ.argNum, ", ".join(map(str, arg_num))))
# argument types of methods
def get_args_typ(mtd):
def get_arg_typ(param): return str(class_lookup(param[0]).id)
return '{' + ", ".join(map(get_arg_typ, mtd.params)) + '}'
args_typ = map(get_args_typ, methods())
buf.write("""
#define _{0} {{ {1} }}
int {0}(int id, int idx) {{
return _{0}[id][idx];
}}
""".format(C.typ.argType, ", ".join(args_typ)))
# return type of methods
def get_ret_typ(mtd):
cls = class_lookup(mtd.typ)
if cls: return cls.id
else: return -1
ret_typ = map(get_ret_typ, methods())
buf.write("""
#define _{0} {{ {1} }}
int {0}(int id) {{
return _{0}[id];
}}
""".format(C.typ.retType, ", ".join(map(str, ret_typ))))
# belonging class of methods
belongs = map(lambda mtd: mtd.clazz.id, methods())
buf.write("""
#define _{0} {{ {1} }}
int {0}(int id) {{
return _{0}[id];
}}
""".format(C.typ.belongsTo, ", ".join(map(str, belongs))))
subcls = \
map(lambda cls_i: '{' + ", ".join( \
map(lambda cls_j: str(cls_i <= cls_j).lower(), classes()) \
) + '}', classes())
buf.write("""
#define _{0} {{ {1} }}
bit {0}(int i, int j) {{
return _{0}[i][j];
}}
""".format(C.typ.subcls, ", ".join(subcls)))
## sub type relations
#subcls = []
#for cls_i in classes():
# row = []
# for cls_j in classes():
# row.append(int(cls_i <= cls_j))
# subcls.append(row)
## sub type relations in yale format
#_, IA, JA = util.yale_format(subcls)
#li, lj = len(IA), len(JA)
#si = ", ".join(map(str, IA))
#sj = ", ".join(map(str, JA))
#buf.write("""
# #define _iA {{ {si} }}
# #define _jA {{ {sj} }}
# int iA(int i) {{
# return _iA[i];
# }}
# int jA(int j) {{
# return _jA[j];
# }}
# bit subcls(int i, int j) {{
# int col_i = iA(i);
# int col_j = iA(i+1);
# for (int col = col_i; col < col_j; col++) {{
# if (j == jA(col)) return true;
# }}
# return false;
# }}
#""".format(**locals()))
with open(os.path.join(sk_dir, "type.sk"), 'w') as f:
f.write(buf.getvalue())
logging.info("encoding " + f.name)
buf.close()
# generate cls.sk
@takes(str, list_of(sample.Sample), Clazz)
@returns(optional(unicode))
def gen_cls_sk(sk_dir, smpls, cls):
mtds = collect_decls(cls, "mtds")
flds = collect_decls(cls, "flds")
s_flds = filter(op.attrgetter("is_static"), flds)
if cls.is_class:
if not mtds and not s_flds: return None
else: # cls.is_itf or cls.is_enum
if not s_flds: return None
cname = util.sanitize_ty(cls.name)
buf = cStringIO.StringIO()
buf.write("package {};\n".format(cname))
buf.write(_const)
# static fields
buf.write('\n'.join(map(trans_fld, s_flds)))
if len(s_flds) > 0: buf.write('\n')
# migrating static fields' initialization to <clinit>
for fld in ifilter(op.attrgetter("init"), s_flds):
if not fld.init.has_call and not fld.init.has_str and not fld.is_aliasing: continue
# retrieve (or declare) <clinit>
clinit = fld.clazz.get_or_declare_clinit()
if clinit not in mtds: mtds.append(clinit)
# add assignment
assign = st.gen_S_assign(exp.gen_E_id(fld.name), fld.init)
clinit.body.append(assign)
# accessors for static fields
for fld in ifilterfalse(op.attrgetter("is_private"), s_flds):
fname = fld.name
accessor = trans_fname(fld.clazz.name, fname, True)
buf.write("""
{0} {1}() {{ return {2}; }}
""".format(trans_ty(fld.typ), accessor, fname))
# methods
clinits, mtds = util.partition(lambda m: m.is_clinit, mtds)
inits, mtds = util.partition(lambda m: m.is_init, mtds)
# <init>/<clinit> should be dumped out in any case
buf.write('\n'.join(map(partial(to_func, smpls), clinits)))
buf.write('\n'.join(map(partial(to_func, smpls), inits)))
for mtd in mtds:
# interface won't have method bodies
if mtd.clazz.is_itf: continue
buf.write(to_func(smpls, mtd) + os.linesep)
cls_sk = cname + ".sk"
with open(os.path.join(sk_dir, cls_sk), 'w') as f:
f.write(buf.getvalue())
logging.info("encoding " + f.name)
return cls_sk
# max # of objects in samples
max_objs = 0
# generate sample_x.sk
@takes(str, sample.Sample, Template, Method)
@returns(nothing)
def gen_smpl_sk(sk_path, smpl, tmpl, main):
buf = cStringIO.StringIO()
buf.write("package {};\n".format(smpl.name))
buf.write(_const)
buf.write("harness void {} () {{\n".format(smpl.name))
# insert call-return sequences
buf.write("""
clear_log@log();
int[P] log = { 0 };
""")
global _mids
obj_cnt = 0
objs = { C.J.N: 0, C.J.FALSE: 0, C.J.TRUE: 1, } # { @Obj...aaa : 2, ... }
for i in xrange(10):
objs[str(i)] = i
obj_cnt = obj_cnt + 1
call_stack = []
for io in smpl.IOs:
# ignore <init>
if io.is_init: continue
elif isinstance(io, sample.CallExt):
# ignore method exits whose counterparts are missed
if not call_stack: continue
mid = call_stack.pop()
# ignore methods that are not declared in the template
if not mid: continue
else: # sample.CallEnt
mid = None
# TODO: retrieve arg types
mtd = None # find_mtd_by_sig(io.cls, io.mtd, ...)
if mtd: # found the method that matches the argument types
mid = repr(mtd)
if mid not in _mids: continue
else: # try other possible methods
mtds = find_mtds_by_name(io.cls, io.mtd)
argn = len(io.vals)
min_gap = argn
for mtd in mtds:
_gap = abs((argn - (0 if mtd.is_static else 1)) - len(mtd.params))
if _gap <= min_gap: # eq is needed for zero parameter
min_gap = _gap
mid = repr(mtd)
if mid not in _mids: mid = None
call_stack.append(mid)
# ignore methods that are not declared in the template
if not mid: continue
if isinstance(io, sample.CallEnt):
mid = mid + "_ent()"
else: # sample.CallExt
mid = mid + "_ext()"
vals = []
for val in io.vals:
kind = sample.kind(val)
if type(kind) is type: val = str(val)
# every occurrence of constant string will be uniquely allocated,
# hence different hash => assign unique obj_cnt
# also, primitive value doesn't have hash,
# so we can't compare via obj array; just assign unique obj_cnt
## 1) primitive, including string
# 2) this object never occurs
#if type(kind) is type or val not in objs:
if val not in objs:
obj_cnt = obj_cnt + 1
objs[val] = obj_cnt
vals.append(str(objs[val]))
buf.write("""
log = (int[P]){{ {} }};
write_log@log(log);
""".format(", ".join([mid] + vals)))
buf.write("""
int len_log = get_log_cnt@log();
reset_log_cnt@log();
""")
global max_objs
max_objs = max(max_objs, obj_cnt)
# invoke class initializers
for cls in util.flatten_classes(tmpl.classes, "inners"):
clinit = cls.mtd_by_sig(C.J.CLINIT)
if not clinit: continue
# to only call the base class's <clinit>
if clinit.clazz != cls: continue
buf.write(" {}();\n".format(trans_mname(unicode(repr(cls)), clinit.name)))
# execute template's *main*
cname = unicode(repr(main.clazz))
mname = main.name
arg_typs = main.param_typs
params = main.params + [ (C.J.z, u"logging") ]
args = ", ".join(sig_match(params, []))
buf.write("\n {}({});\n".format(trans_mname(cname, mname, arg_typs), args))
buf.write("assert len_log == get_log_cnt@log();")
buf.write("\n}\n")
with open(sk_path, 'w') as f:
f.write(buf.getvalue())
logging.info("encoding " + f.name)
buf.close()
# generate log.sk
@takes(str, Template)
@returns(nothing)
def gen_log_sk(sk_dir, tmpl):
buf = cStringIO.StringIO()
buf.write("package log;\n")
buf.write(_const)
global max_objs
buf.write("int O = {}; // # of objects\n".format(max_objs + 1))
buf.write("""
int log_cnt = 0;
int[P][N] ev;
int[O] obj;
// to enforce the length of logs
int get_log_cnt() {
return log_cnt;
}
// after writing logs, reset the cursor in order to check logs in order
void reset_log_cnt() {
log_cnt = 0;
}
// to clean up the logs totally
void clear_log() {
reset_log_cnt();
ev = {};
obj = {};
}
// to write the log from samples
void write_log (int[P] params) {
ev[log_cnt++] = params;
}
// to check whether control-flow conforms to the samples
@Native("{ std::cout << \\\"log::check_log::\\\" << params[0] << std::endl; }")
void check_log (int[P] params) {
assert params[0] == ev[log_cnt][0]; // check mid
for (int i = 1; i < P; i++) {
if (ev[log_cnt][i] != 0) {
if (obj[ev[log_cnt][i]] == 0) { // not set yet
obj[ev[log_cnt][i]] = params[i];
}
else { // o.w. check obj eq.
assert obj[ev[log_cnt][i]] == params[i];
}
}
}
log_cnt++; // advance
}
// distinct hash values for runtime objects
int obj_cnt = 0;
int nonce () {
return obj_cnt++;
}
""")
global _inits
reg_codes = []
for ty in _inits:
cls = class_lookup(ty)
if not cls: continue
buf.write("""
int obj_{0}_cnt = 0;
{1}[O] obj_{0};
// to register runtime instances of {0}
void register_{0} ({1} {2}) {{
if (obj_{0}_cnt < O) {{
obj_{0}[obj_{0}_cnt++] = {2};
}}
}}
// to access to a certain instance of {0}
{1} retrieve_{0} (int idx) {{
if (0 <= idx && idx < obj_{0}_cnt) {{
return obj_{0}[idx];
}}
else {{
return null;
}}
}}
""".format(ty, trans_ty(ty), ty.lower()))
reg_code = "if (ty == {0}) register_{1}@log({2});".format(cls.id, repr(cls), C.SK.self)
reg_codes.append(reg_code)
# factory of Object
buf.write("""
// factory of Object
Object alloc(int ty) {{
Object {0} = new Object(hash=nonce(), __cid=ty);
{1}
return {0};
}}
""".format(C.SK.self, "\nelse ".join(reg_codes)))
global _ty;
_clss = []
for ty in _ty.keys():
if util.is_collection(ty): continue
if util.is_array(ty): continue
cls = class_lookup(ty)
if not cls: continue # to avoid None definition
# inner class may appear twice: w/ and w/o outer class name
if cls not in _clss: _clss.append(cls)
buf.write("\n// distinct class IDs\n")
for cls in _clss:
buf.write("int {cls!r} () {{ return {cls.id}; }}\n".format(**locals()))
buf.write("\n// distinct method IDs\n")
for cls in tmpl.classes:
mtds = collect_decls(cls, "mtds")
if not mtds: continue
for mtd in mtds:
mname = sanitize_mname(unicode(repr(mtd)))
buf.write("""
int {mname}_ent () {{ return {mtd.id}; }}
int {mname}_ext () {{ return -{mtd.id}; }}
""".format(**locals()))
with open(os.path.join(sk_dir, "log.sk"), 'w') as f:
f.write(buf.getvalue())
logging.info("encoding " + f.name)
buf.close()
# reset global variables
@takes(nothing)
@returns(nothing)
def reset():
global _ty, _mtds, _flds, _s_flds
global _collections, _mids, _inits
global max_objs
_ty = {}
_mtds = {}
_flds = {}
_s_flds = {}
_collections = set([])
_mids = set([])
_inits = set([])
max_objs = 0
# translate the high-level templates into low-level sketches
# using information at the samples
@takes(str, list_of(sample.Sample), Template, str)
@returns(nothing)
def to_sk(cmd, smpls, tmpl, sk_dir):
# clean up result directory
if os.path.isdir(sk_dir): util.clean_dir(sk_dir)
else: os.makedirs(sk_dir)
# reset global variables so that we can run this encoding phase per demo
reset()
# update global constants
def logged(mtd):
if mtd.is_init: return False
clss = util.flatten_classes([mtd.clazz], "subs")
return sample.mtd_appears(smpls, clss, mtd.name)
mtds = filter(logged, methods())
if mtds:
n_params = 2 + max(map(len, map(op.attrgetter("params"), mtds)))
else: # no meaningful logs in the sample?
n_params = 2
n_evts = sample.max_evts(smpls)
if cmd == "android":
n_views = sample.max_views(smpls)
magic_S = max(3, n_evts + 1, n_views)
else:
magic_S = max(5, n_evts + 1) # at least 5, just in case
n_ios = sample.max_IOs(smpls)
global _const
_const = u"""
int P = {}; // length of parameters (0: (>|<)mid, 1: receiver, 2...)
int S = {}; // length of arrays for Java collections
int N = {}; // length of logs
""".format(n_params, magic_S, n_ios)
# type.sk
logging.info("building class hierarchy")
tmpl.consist()
# merge all classes and interfaces, except for primitive types
clss, _ = util.partition(lambda c: util.is_class_name(c.name), classes())
bases = rm_subs(clss)
gen_type_sk(sk_dir, bases)
# cls.sk
cls_sks = []
for cls in tmpl.classes:
# skip the collections, which will be encoded at type.sk
if repr(cls).split('_')[0] in C.collections: continue
cls_sk = gen_cls_sk(sk_dir, smpls, cls)
if cls_sk: cls_sks.append(cls_sk)
# sample_x.sk
smpl_sks = []
for smpl in smpls:
smpl_sk = "sample_" + smpl.name + ".sk"
smpl_sks.append(smpl_sk)
sk_path = os.path.join(sk_dir, smpl_sk)
gen_smpl_sk(sk_path, smpl, tmpl, tmpl.harness(smpl.name))
# log.sk
gen_log_sk(sk_dir, tmpl)
# sample.sk that imports all the other sketch files
buf = cStringIO.StringIO()
# deprecated as we use regex generator for class/method roles
## --bnd-cbits: the number of bits for integer holes
#bits = max(5, int(math.ceil(math.log(len(methods()), 2))))
#buf.write("pragma options \"--bnd-cbits {}\";\n".format(bits))
# --bnd-unroll-amnt: the unroll amount for loops
unroll_amnt = max(n_params, magic_S)
buf.write("pragma options \"--bnd-unroll-amnt {}\";\n".format(unroll_amnt))
# --bnd-inline-amnt: bounds inlining to n levels of recursion
inline_amnt = None # use a default value if not set
if cmd == "android":
#inline_amnt = 2 # depth of View hierarchy (at findViewByTraversal)
inline_amnt = 1 # no recursion for flat Views
elif cmd == "gui":
# setting it 1 means there is no recursion
inline_amnt = 1
if inline_amnt:
buf.write("pragma options \"--bnd-inline-amnt {}\";\n".format(inline_amnt))
buf.write("pragma options \"--bnd-bound-mode CALLSITE\";\n")
sks = ["log.sk", "type.sk"] + cls_sks + smpl_sks
for sk in sks:
buf.write("include \"{}\";\n".format(sk))
with open(os.path.join(sk_dir, "sample.sk"), 'w') as f:
f.write(buf.getvalue())
logging.info("encoding " + f.name)
buf.close()
|
plum-umd/pasket
|
pasket/encoder.py
|
Python
|
mit
| 48,157
|
[
"VisIt"
] |
4a0c7b5c7407df98fdcd70c61857e78ff917e90094e401e2c385651e4dc5955d
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import os
import re
import json
import warnings
from io import open
from enum import Enum
from pymatgen.core.units import Mass, Length, unitized, FloatWithUnit, Unit, \
SUPPORTED_UNIT_NAMES
from pymatgen.util.string import formula_double_format
from monty.json import MSONable
"""
Module contains classes representing Element and Specie (Element + oxidation
state) and PeriodicTable.
"""
__author__ = "Shyue Ping Ong, Michael Kocher"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
# Loads element data from json file
with open(os.path.join(os.path.dirname(__file__),
"periodic_table.json"), "rt") as f:
_pt_data = json.load(f)
_pt_row_sizes = (2, 8, 8, 18, 18, 32, 32)
class Element(Enum):
"""
Basic immutable element object with all relevant properties.
Only one instance of Element for each symbol is stored after creation,
ensuring that a particular element behaves like a singleton. For all
    attributes, missing data (i.e., data which is not available) is
    represented by None unless otherwise stated.
Args:
symbol (str): Element symbol, e.g., "H", "Fe"
.. attribute:: Z
Atomic number
.. attribute:: symbol
Element symbol
.. attribute:: X
        Pauling electronegativity. Elements without an electronegativity
        number are assigned a value of infinity (a warning is issued).
.. attribute:: number
Alternative attribute for atomic number
.. attribute:: max_oxidation_state
Maximum oxidation state for element
.. attribute:: min_oxidation_state
Minimum oxidation state for element
.. attribute:: oxidation_states
Tuple of all known oxidation states
.. attribute:: common_oxidation_states
Tuple of all common oxidation states
.. attribute:: full_electronic_structure
Full electronic structure as tuple.
E.g., The electronic structure for Fe is represented as:
[(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2), (3, "p", 6),
(3, "d", 6), (4, "s", 2)]
.. attribute:: row
Returns the periodic table row of the element.
.. attribute:: group
Returns the periodic table group of the element.
.. attribute:: block
Return the block character "s,p,d,f"
.. attribute:: is_noble_gas
True if element is noble gas.
.. attribute:: is_transition_metal
True if element is a transition metal.
.. attribute:: is_rare_earth_metal
True if element is a rare earth metal.
.. attribute:: is_metalloid
True if element is a metalloid.
.. attribute:: is_alkali
True if element is an alkali metal.
.. attribute:: is_alkaline
True if element is an alkaline earth metal (group II).
.. attribute:: is_halogen
True if element is a halogen.
.. attribute:: is_lanthanoid
True if element is a lanthanoid.
.. attribute:: is_actinoid
        True if element is an actinoid.
.. attribute:: name
Long name for element. E.g., "Hydrogen".
.. attribute:: atomic_mass
Atomic mass for the element.
.. attribute:: atomic_radius
Atomic radius for the element. This is the empirical value. Data is
obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: atomic_radius_calculated
        Calculated atomic radius for the element (as opposed to the empirical value).
Data is obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: van_der_waals_radius
Van der Waals radius for the element. This is the empirical
value. Data is obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: mendeleev_no
Mendeleev number
.. attribute:: electrical_resistivity
Electrical resistivity
.. attribute:: velocity_of_sound
Velocity of sound
.. attribute:: reflectivity
Reflectivity
.. attribute:: refractive_index
        Refractive index
.. attribute:: poissons_ratio
Poisson's ratio
.. attribute:: molar_volume
Molar volume
.. attribute:: electronic_structure
Electronic structure. Simplified form with HTML formatting.
E.g., The electronic structure for Fe is represented as
[Ar].3d<sup>6</sup>.4s<sup>2</sup>
.. attribute:: thermal_conductivity
Thermal conductivity
.. attribute:: boiling_point
Boiling point
.. attribute:: melting_point
Melting point
.. attribute:: critical_temperature
Critical temperature
.. attribute:: superconduction_temperature
Superconduction temperature
.. attribute:: liquid_range
Liquid range
.. attribute:: bulk_modulus
Bulk modulus
.. attribute:: youngs_modulus
Young's modulus
.. attribute:: brinell_hardness
Brinell hardness
.. attribute:: rigidity_modulus
Rigidity modulus
.. attribute:: mineral_hardness
Mineral hardness
.. attribute:: vickers_hardness
        Vickers hardness
.. attribute:: density_of_solid
Density of solid phase
.. attribute:: coefficient_of_linear_thermal_expansion
Coefficient of linear thermal expansion
.. attribute:: average_ionic_radius
Average ionic radius for element in ang. The average is taken over all
oxidation states of the element for which data is present.
.. attribute:: ionic_radii
All ionic radii of the element as a dict of
{oxidation state: ionic radii}. Radii are given in ang.
"""
# This name = value convention is redundant and dumb, but unfortunately is
    # necessary to preserve backwards compatibility with a time when Element was
    # a regular object constructed with Element(symbol).
H = "H"
He = "He"
Li = "Li"
Be = "Be"
B = "B"
C = "C"
N = "N"
O = "O"
F = "F"
Ne = "Ne"
Na = "Na"
Mg = "Mg"
Al = "Al"
Si = "Si"
P = "P"
S = "S"
Cl = "Cl"
Ar = "Ar"
K = "K"
Ca = "Ca"
Sc = "Sc"
Ti = "Ti"
V = "V"
Cr = "Cr"
Mn = "Mn"
Fe = "Fe"
Co = "Co"
Ni = "Ni"
Cu = "Cu"
Zn = "Zn"
Ga = "Ga"
Ge = "Ge"
As = "As"
Se = "Se"
Br = "Br"
Kr = "Kr"
Rb = "Rb"
Sr = "Sr"
Y = "Y"
Zr = "Zr"
Nb = "Nb"
Mo = "Mo"
Tc = "Tc"
Ru = "Ru"
Rh = "Rh"
Pd = "Pd"
Ag = "Ag"
Cd = "Cd"
In = "In"
Sn = "Sn"
Sb = "Sb"
Te = "Te"
I = "I"
Xe = "Xe"
Cs = "Cs"
Ba = "Ba"
La = "La"
Ce = "Ce"
Pr = "Pr"
Nd = "Nd"
Pm = "Pm"
Sm = "Sm"
Eu = "Eu"
Gd = "Gd"
Tb = "Tb"
Dy = "Dy"
Ho = "Ho"
Er = "Er"
Tm = "Tm"
Yb = "Yb"
Lu = "Lu"
Hf = "Hf"
Ta = "Ta"
W = "W"
Re = "Re"
Os = "Os"
Ir = "Ir"
Pt = "Pt"
Au = "Au"
Hg = "Hg"
Tl = "Tl"
Pb = "Pb"
Bi = "Bi"
Po = "Po"
At = "At"
Rn = "Rn"
Fr = "Fr"
Ra = "Ra"
Ac = "Ac"
Th = "Th"
Pa = "Pa"
U = "U"
Np = "Np"
Pu = "Pu"
Am = "Am"
Cm = "Cm"
Bk = "Bk"
Cf = "Cf"
Es = "Es"
Fm = "Fm"
Md = "Md"
No = "No"
Lr = "Lr"
def __init__(self, symbol):
self.symbol = "%s" % symbol
d = _pt_data[symbol]
# Store key variables for quick access
self.Z = d["Atomic no"]
at_r = d.get("Atomic radius", "no data")
if str(at_r).startswith("no data"):
self.atomic_radius = None
else:
self.atomic_radius = Length(at_r, "ang")
self.atomic_mass = Mass(d["Atomic mass"], "amu")
self._data = d
@property
def X(self):
if "X" in self._data:
return self._data["X"]
else:
warnings.warn("No electronegativity for %s. Setting to infinity. "
"This has no physical meaning, and is mainly done to "
"avoid errors caused by the code expecting a float."
% self.symbol)
return float("inf")
def __getattr__(self, item):
if item in ["mendeleev_no", "electrical_resistivity",
"velocity_of_sound", "reflectivity",
"refractive_index", "poissons_ratio", "molar_volume",
"electronic_structure", "thermal_conductivity",
"boiling_point", "melting_point",
"critical_temperature", "superconduction_temperature",
"liquid_range", "bulk_modulus", "youngs_modulus",
"brinell_hardness", "rigidity_modulus",
"mineral_hardness", "vickers_hardness",
"density_of_solid", "atomic_radius_calculated",
"van_der_waals_radius",
"coefficient_of_linear_thermal_expansion"]:
kstr = item.capitalize().replace("_", " ")
val = self._data.get(kstr, None)
if str(val).startswith("no data"):
val = None
else:
try:
val = float(val)
except ValueError:
nobracket = re.sub(r'\(.*\)', "", val)
toks = nobracket.replace("about", "").strip().split(" ", 1)
if len(toks) == 2:
try:
if "10<sup>" in toks[1]:
base_power = re.findall(r'([+-]?\d+)', toks[1])
factor = "e" + base_power[1]
if toks[0] in [">", "high"]:
toks[0] = "1" # return the border value
toks[0] += factor
if item == "electrical_resistivity":
unit = "ohm m"
elif (
item ==
"coefficient_of_linear_thermal_expansion"
):
unit = "K^-1"
else:
unit = toks[1]
val = FloatWithUnit(toks[0], unit)
else:
unit = toks[1].replace("<sup>", "^").replace(
"</sup>", "").replace("Ω",
"ohm")
units = Unit(unit)
if set(units.keys()).issubset(
SUPPORTED_UNIT_NAMES):
val = FloatWithUnit(toks[0], unit)
except ValueError as ex:
# Ignore error. val will just remain a string.
pass
return val
raise AttributeError
@property
def data(self):
"""
Returns dict of data for element.
"""
return self._data.copy()
@property
@unitized("ang")
def average_ionic_radius(self):
"""
Average ionic radius for element (with units). The average is taken
over all oxidation states of the element for which data is present.
"""
if "Ionic radii" in self._data:
radii = self._data["Ionic radii"]
return sum(radii.values()) / len(radii)
else:
return 0
@property
@unitized("ang")
def ionic_radii(self):
"""
All ionic radii of the element as a dict of
{oxidation state: ionic radii}. Radii are given in ang.
"""
if "Ionic radii" in self._data:
return {int(k): v for k, v in self._data["Ionic radii"].items()}
else:
return {}
@property
def number(self):
"""Alternative attribute for atomic number"""
return self.Z
@property
def max_oxidation_state(self):
"""Maximum oxidation state for element"""
if "Oxidation states" in self._data:
return max(self._data["Oxidation states"])
return 0
@property
def min_oxidation_state(self):
"""Minimum oxidation state for element"""
if "Oxidation states" in self._data:
return min(self._data["Oxidation states"])
return 0
@property
def oxidation_states(self):
"""Tuple of all known oxidation states"""
return tuple(self._data.get("Oxidation states", list()))
@property
def common_oxidation_states(self):
"""Tuple of all known oxidation states"""
return tuple(self._data.get("Common oxidation states", list()))
@property
def icsd_oxidation_states(self):
"""Tuple of all oxidation states with at least 10 instances in
ICSD database AND at least 1% of entries for that element"""
return tuple(self._data.get("ICSD oxidation states", list()))
@property
def full_electronic_structure(self):
"""
Full electronic structure as tuple.
E.g., The electronic structure for Fe is represented as:
[(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2), (3, "p", 6),
(3, "d", 6), (4, "s", 2)]
"""
estr = self._data["Electronic structure"]
def parse_orbital(orbstr):
m = re.match(r"(\d+)([spdfg]+)<sup>(\d+)</sup>", orbstr)
if m:
return int(m.group(1)), m.group(2), int(m.group(3))
return orbstr
data = [parse_orbital(s) for s in estr.split(".")]
if data[0][0] == "[":
sym = data[0].replace("[", "").replace("]", "")
data = Element(sym).full_electronic_structure + data[1:]
return data
def __eq__(self, other):
return isinstance(other, Element) and self.Z == other.Z
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return self.Z
def __repr__(self):
return "Element " + self.symbol
def __str__(self):
return self.symbol
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity. Very
useful for getting correct formulas. For example, FeO4PLi is
automatically sorted into LiFePO4.
"""
if self.X != other.X:
return self.X < other.X
else:
# There are cases where the electronegativity are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
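# Added illustration: because __lt__ compares electronegativities first, a
# plain sorted() call produces the ordering the docstring relies on
# (Li 0.98 < Fe 1.83 < P 2.19 < O 3.44 on the Pauling scale):
#
#   >>> sorted([Element("Fe"), Element("O"), Element("P"), Element("Li")])
#   [Element Li, Element Fe, Element P, Element O]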
@staticmethod
def from_Z(z):
"""
Get an element from an atomic number.
Args:
z (int): Atomic number
Returns:
Element with atomic number z.
"""
for sym, data in _pt_data.items():
if data["Atomic no"] == z:
return Element(sym)
raise ValueError("No element with this atomic number %s" % z)
@staticmethod
def from_row_and_group(row, group):
"""
Returns an element from a row and group number.
Args:
row (int): Row number
group (int): Group number
.. note::
The 18 group number system is used, i.e., Noble gases are group 18.
"""
for sym in _pt_data.keys():
el = Element(sym)
if el.row == row and el.group == group:
return el
raise ValueError("No element with this row and group!")
@staticmethod
def is_valid_symbol(symbol):
"""
Returns true if symbol is a valid element symbol.
Args:
symbol (str): Element symbol
Returns:
True if symbol is a valid element (e.g., "H"). False otherwise
(e.g., "Zebra").
"""
try:
Element(symbol)
return True
except:
return False
@property
def row(self):
"""
Returns the periodic table row of the element.
"""
z = self.Z
total = 0
if 57 <= z <= 71:
return 8
elif 89 <= z <= 103:
return 9
for i in range(len(_pt_row_sizes)):
total += _pt_row_sizes[i]
if total >= z:
return i + 1
return 8
@property
def group(self):
"""
Returns the periodic table group of the element.
"""
z = self.Z
if z == 1:
return 1
if z == 2:
return 18
if 3 <= z <= 18:
if (z - 2) % 8 == 0:
return 18
elif (z - 2) % 8 <= 2:
return (z - 2) % 8
else:
return 10 + (z - 2) % 8
if 19 <= z <= 54:
if (z - 18) % 18 == 0:
return 18
else:
return (z - 18) % 18
if (z - 54) % 32 == 0:
return 18
elif (z - 54) % 32 >= 18:
return (z - 54) % 32 - 14
else:
return (z - 54) % 32
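# Added worked example: for Fe (Z = 26), `row` sums the period lengths in
# _pt_row_sizes (2, 8, 8, 18, ...) until the running total first reaches 26,
# which happens at the fourth entry, giving row 4; `group` falls into the
# 19 <= z <= 54 branch and returns (26 - 18) % 18 = 8, i.e. group 8 in the
# 18-column numbering.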
@property
def block(self):
"""
Return the block character "s,p,d,f"
"""
block = ""
if (self.is_actinoid or self.is_lanthanoid) and \
self.Z not in [71, 103]:
block = "f"
elif self.is_actinoid or self.is_lanthanoid:
block = "d"
elif self.group in [1, 2]:
block = "s"
elif self.group in range(13, 19):
block = "p"
elif self.group in range(3, 13):
block = "d"
else:
raise ValueError("unable to determine block")
return block
@property
def is_noble_gas(self):
"""
True if element is noble gas.
"""
return self.Z in (2, 10, 18, 36, 54, 86, 118)
@property
def is_transition_metal(self):
"""
True if element is a transition metal.
"""
ns = list(range(21, 31))
ns.extend(list(range(39, 49)))
ns.append(57)
ns.extend(list(range(72, 81)))
ns.append(89)
ns.extend(list(range(104, 113)))
return self.Z in ns
@property
def is_rare_earth_metal(self):
"""
True if element is a rare earth metal.
"""
return self.is_lanthanoid or self.is_actinoid
@property
def is_metalloid(self):
"""
True if element is a metalloid.
"""
return self.symbol in ("B", "Si", "Ge", "As", "Sb", "Te", "Po")
@property
def is_alkali(self):
"""
True if element is an alkali metal.
"""
return self.Z in (3, 11, 19, 37, 55, 87)
@property
def is_alkaline(self):
"""
True if element is an alkaline earth metal (group II).
"""
return self.Z in (4, 12, 20, 38, 56, 88)
@property
def is_halogen(self):
"""
True if element is a halogen.
"""
return self.Z in (9, 17, 35, 53, 85)
@property
def is_chalcogen(self):
"""
True if element is a chalcogen.
"""
return self.Z in (8, 16, 34, 52, 84)
@property
def is_lanthanoid(self):
"""
True if element is a lanthanoid.
"""
return 56 < self.Z < 72
@property
def is_actinoid(self):
"""
True if element is an actinoid.
"""
return 88 < self.Z < 104
def __deepcopy__(self, memo):
return Element(self.symbol)
@staticmethod
def from_dict(d):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return Element(d["element"])
def as_dict(self):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol}
@staticmethod
def print_periodic_table(filter_function=None):
"""
A pretty ASCII printer for the periodic table, based on some
filter_function.
Args:
filter_function: A filtering function taking an Element as input
and returning a boolean. For example, setting
filter_function = lambda el: el.X > 2 will print a periodic
table containing only elements with electronegativity > 2.
"""
for row in range(1, 10):
rowstr = []
for group in range(1, 19):
try:
el = Element.from_row_and_group(row, group)
except ValueError:
el = None
if el and ((not filter_function) or filter_function(el)):
rowstr.append("{:3s}".format(el.symbol))
else:
rowstr.append(" ")
print(" ".join(rowstr))
class Specie(MSONable):
"""
An extension of Element with an oxidation state and other optional
properties. Properties associated with Specie should be "idealized"
values, not calculated values. For example, high-spin Fe2+ may be
assigned an idealized spin of +5, but an actual Fe2+ site may be
calculated to have a magmom of +4.5. Calculated properties should be
assigned to Site objects, and not Specie.
Args:
symbol (str): Element symbol, e.g., Fe
oxidation_state (float): Oxidation state of element, e.g., 2 or -2
properties: Properties associated with the Specie, e.g.,
{"spin": 5}. Defaults to None. Properties must be one of the
Specie supported_properties.
.. attribute:: oxi_state
Oxidation state associated with Specie
.. attribute:: ionic_radius
Ionic radius of Specie (with specific oxidation state).
.. versionchanged:: 2.6.7
Properties are now checked when comparing two Species for equality.
"""
cache = {}
def __new__(cls, *args, **kwargs):
key = (cls,) + args + tuple(kwargs.items())
try:
inst = Specie.cache.get(key, None)
except TypeError:
# Can't cache this set of arguments
inst = key = None
if inst is None:
inst = object.__new__(cls)
if key is not None:
Specie.cache[key] = inst
return inst
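# Added note: the cache in __new__ means hashable constructor arguments yield
# a shared instance, e.g. Specie("Fe", 2) is Specie("Fe", 2) evaluates to True,
# while calls whose key cannot be hashed (such as ones carrying a properties
# dict) fall through the TypeError branch and get a fresh object each time.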
supported_properties = ("spin",)
def __init__(self, symbol, oxidation_state=None, properties=None):
self._el = Element(symbol)
self._oxi_state = oxidation_state
self._properties = properties if properties else {}
for k in self._properties.keys():
if k not in Specie.supported_properties:
raise ValueError("{} is not a supported property".format(k))
def __getattr__(self, a):
# overriding getattr doesn't play nice with pickle, so we
# can't use self._properties
p = object.__getattribute__(self, '_properties')
if a in p:
return p[a]
try:
return getattr(self._el, a)
except:
raise AttributeError(a)
def __eq__(self, other):
"""
Specie is equal to other only if element and oxidation states are
exactly the same.
"""
return isinstance(other, Specie) and self.symbol == other.symbol \
and self.oxi_state == other.oxi_state \
and self._properties == other._properties
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
"""
Equal Specie should have the same str representation, hence
should hash equally. Unequal Specie will have different str
representations.
"""
return self.__str__().__hash__()
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity,
followed by oxidation state.
"""
if self.X != other.X:
return self.X < other.X
elif self.symbol != other.symbol:
# There are cases where the electronegativity are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
else:
other_oxi = 0 if isinstance(other, Element) else other.oxi_state
return self.oxi_state < other_oxi
@property
def element(self):
"""
Underlying element object
"""
return self._el
@property
def ionic_radius(self):
"""
Ionic radius of specie. Returns None if data is not present.
"""
if self._oxi_state in self.ionic_radii:
return self.ionic_radii[self._oxi_state]
d = self._el.data
oxstr = str(int(self._oxi_state))
if oxstr in d.get("Ionic radii hs", {}):
warnings.warn("No default ionic radius for %s. Using hs data." %
self)
return d["Ionic radii hs"][oxstr]
elif oxstr in d.get("Ionic radii ls", {}):
warnings.warn("No default ionic radius for %s. Using ls data." %
self)
return d["Ionic radii ls"][oxstr]
warnings.warn("No ionic radius for {}!".format(self))
return None
@property
def oxi_state(self):
"""
Oxidation state of Specie.
"""
return self._oxi_state
@staticmethod
def from_string(species_string):
"""
Returns a Specie from a string representation.
Args:
species_string (str): A typical string representation of a
species, e.g., "Mn2+", "Fe3+", "O2-".
Returns:
A Specie object.
Raises:
ValueError if species_string cannot be interpreted.
"""
m = re.search(r"([A-Z][a-z]*)([0-9\.]*)([\+\-])(.*)", species_string)
if m:
sym = m.group(1)
oxi = 1 if m.group(2) == "" else float(m.group(2))
oxi = -oxi if m.group(3) == "-" else oxi
properties = None
if m.group(4):
toks = m.group(4).replace(",","").split("=")
properties = {toks[0]: float(toks[1])}
return Specie(sym, oxi, properties)
else:
raise ValueError("Invalid Species String")
def __repr__(self):
return "Specie " + self.__str__()
def __str__(self):
output = self.symbol
if self.oxi_state is not None:
if self.oxi_state >= 0:
output += formula_double_format(self.oxi_state) + "+"
else:
output += formula_double_format(-self.oxi_state) + "-"
for p, v in self._properties.items():
output += ",%s=%s" % (p, v)
return output
def get_crystal_field_spin(self, coordination="oct", spin_config="high"):
"""
Calculate the crystal field spin based on coordination and spin
configuration. Only works for transition metal species.
Args:
coordination (str): Only oct and tet are supported at the moment.
spin_config (str): Supported keywords are "high" or "low".
Returns:
Crystal field spin in Bohr magneton.
Raises:
AttributeError if species is not a valid transition metal or has
an invalid oxidation state.
ValueError if invalid coordination or spin_config.
"""
if coordination not in ("oct", "tet") or \
spin_config not in ("high", "low"):
raise ValueError("Invalid coordination or spin config.")
elec = self.full_electronic_structure
if len(elec) < 4 or elec[-1][1] != "s" or elec[-2][1] != "d":
raise AttributeError(
"Invalid element {} for crystal field calculation.".format(
self.symbol))
nelectrons = elec[-1][2] + elec[-2][2] - self.oxi_state
if nelectrons < 0 or nelectrons > 10:
raise AttributeError(
"Invalid oxidation state {} for element {}"
.format(self.oxi_state, self.symbol))
if spin_config == "high":
return nelectrons if nelectrons <= 5 else 10 - nelectrons
elif spin_config == "low":
if coordination == "oct":
if nelectrons <= 3:
return nelectrons
elif nelectrons <= 6:
return 6 - nelectrons
elif nelectrons <= 8:
return nelectrons - 6
else:
return 10 - nelectrons
elif coordination == "tet":
if nelectrons <= 2:
return nelectrons
elif nelectrons <= 4:
return 4 - nelectrons
elif nelectrons <= 7:
return nelectrons - 4
else:
return 10 - nelectrons
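# Added worked example: Fe2+ leaves 2 + 6 - 2 = 6 d electrons (from the
# ...3d6 4s2 neutral configuration), so high-spin octahedral gives 10 - 6 = 4
# unpaired electrons and low-spin octahedral gives 6 - 6 = 0:
#
#   >>> Specie("Fe", 2).get_crystal_field_spin()
#   4
#   >>> Specie("Fe", 2).get_crystal_field_spin(spin_config="low")
#   0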
def __deepcopy__(self, memo):
return Specie(self.symbol, self.oxi_state, self._properties)
def as_dict(self):
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol,
"oxidation_state": self._oxi_state}
if self._properties:
d["properties"] = self._properties
return d
@classmethod
def from_dict(cls, d):
return cls(d["element"], d["oxidation_state"],
d.get("properties", None))
class DummySpecie(Specie):
"""
A special specie for representing non-traditional elements or species. For
example, representation of vacancies (charged or otherwise), or special
sites, etc.
Args:
symbol (str): An assigned symbol for the dummy specie. Strict
rules are applied to the choice of the symbol. The dummy
symbol cannot have any part of first two letters that will
constitute an Element symbol. Otherwise, a composition may
be parsed wrongly. E.g., "X" is fine, but "Vac" is not
because Vac contains V, a valid Element.
oxidation_state (float): Oxidation state for dummy specie.
Defaults to zero.
.. attribute:: symbol
Symbol for the DummySpecie.
.. attribute:: oxi_state
Oxidation state associated with Specie.
.. attribute:: Z
DummySpecie is always assigned an atomic number of 0.
.. attribute:: X
DummySpecie is always assigned an electronegativity of 0.
"""
def __init__(self, symbol="X", oxidation_state=0, properties=None):
for i in range(1, min(2, len(symbol)) + 1):
if Element.is_valid_symbol(symbol[:i]):
raise ValueError("{} contains {}, which is a valid element "
"symbol.".format(symbol, symbol[:i]))
# Set required attributes for DummySpecie to function like a Specie in
# most instances.
self._symbol = symbol
self._oxi_state = oxidation_state
self._properties = properties if properties else {}
for k in self._properties.keys():
if k not in Specie.supported_properties:
raise ValueError("{} is not a supported property".format(k))
def __getattr__(self, a):
# overriding getattr doesn't play nice with pickle, so we
# can't use self._properties
p = object.__getattribute__(self, '_properties')
if a in p:
return p[a]
try:
return getattr(self._el, a)
except:
raise AttributeError(a)
def __hash__(self):
return self.symbol.__hash__()
def __eq__(self, other):
"""
Specie is equal to other only if element and oxidation states are
exactly the same.
"""
if not isinstance(other, DummySpecie):
return False
return self.symbol == other.symbol \
and self._oxi_state == other._oxi_state
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity,
followed by oxidation state.
"""
if self.X != other.X:
return self.X < other.X
elif self.symbol != other.symbol:
# There are cases where the electronegativity are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
else:
other_oxi = 0 if isinstance(other, Element) else other.oxi_state
return self.oxi_state < other_oxi
@property
def Z(self):
"""
DummySpecie is always assigned an atomic number equal to the hash of
the symbol. The expectation is that someone would be an actual dummy
to use atomic numbers for a Dummy specie.
"""
return self.symbol.__hash__()
@property
def oxi_state(self):
"""
Oxidation state associated with DummySpecie
"""
return self._oxi_state
@property
def X(self):
"""
DummySpecie is always assigned an electronegativity of 0. The effect of
this is that DummySpecie are always sorted in front of actual Specie.
"""
return 0
@property
def symbol(self):
return self._symbol
def __deepcopy__(self, memo):
return DummySpecie(self.symbol, self._oxi_state)
@staticmethod
def from_string(species_string):
"""
Returns a Dummy from a string representation.
Args:
species_string (str): A string representation of a dummy
species, e.g., "X2+", "X3+".
Returns:
A DummySpecie object.
Raises:
ValueError if species_string cannot be interpreted.
"""
m = re.search(r"([A-Z][a-z]*)([0-9.]*)([+\-]*)(.*)", species_string)
if m:
sym = m.group(1)
if m.group(2) == "" and m.group(3) == "":
oxi = 0
else:
oxi = 1 if m.group(2) == "" else float(m.group(2))
oxi = -oxi if m.group(3) == "-" else oxi
properties = None
if m.group(4):
toks = m.group(4).split("=")
properties = {toks[0]: float(toks[1])}
return DummySpecie(sym, oxi, properties)
raise ValueError("Invalid DummySpecies String")
@classmethod
def safe_from_composition(cls, comp, oxidation_state=0):
"""
Returns a DummySpecie object that can be safely used
with (i.e. not present in) a given composition
"""
# We don't want to add a DummySpecie with the same
# symbol as anything in the composition, even if the
# oxidation state is different
els = comp.element_composition.elements
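# (Comment added for clarity: 'e' is deliberately missing from the candidate
# letters below, because "Xe" is a real element symbol and would be rejected
# by DummySpecie.__init__.)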
for c in 'abcdfghijklmnopqrstuvwxyz':
if DummySpecie('X' + c) not in els:
return DummySpecie('X' + c, oxidation_state)
raise ValueError("All attempted DummySpecies already "
"present in {}".format(comp))
def as_dict(self):
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol,
"oxidation_state": self._oxi_state}
if self._properties:
d["properties"] = self._properties
return d
@classmethod
def from_dict(cls, d):
return cls(d["element"], d["oxidation_state"],
d.get("properties", None))
def __repr__(self):
return "DummySpecie " + self.__str__()
def __str__(self):
output = self.symbol
if self._oxi_state >= 0:
output += formula_double_format(self._oxi_state) + "+"
else:
output += formula_double_format(-self._oxi_state) + "-"
return output
def get_el_sp(obj):
"""
Utility method to get an Element or Specie from an input obj.
If obj is in itself an element or a specie, it is returned automatically.
If obj is an int or a string representing an integer, the Element
with the atomic number obj is returned.
If obj is a string, Specie parsing will be attempted (e.g., Mn2+), failing
which Element parsing will be attempted (e.g., Mn), failing which
DummyElement parsing will be attempted.
Args:
obj (Element/Specie/str/int): An arbitrary object. Supported objects
are actual Element/Specie objects, integers (representing atomic
numbers) or strings (element symbols or species strings).
Returns:
Specie or Element, with a bias for the maximum number of properties
that can be determined.
Raises:
ValueError if obj cannot be converted into an Element or Specie.
"""
if isinstance(obj, (Element, Specie, DummySpecie)):
return obj
if isinstance(obj, (list, tuple)):
return [get_el_sp(o) for o in obj]
try:
c = float(obj)
i = int(c)
i = i if i == c else None
except (ValueError, TypeError):
i = None
if i is not None:
return Element.from_Z(i)
try:
return Specie.from_string(obj)
except (ValueError, KeyError):
try:
return Element(obj)
except (ValueError, KeyError):
try:
return DummySpecie.from_string(obj)
except:
raise ValueError("Can't parse Element or String from type"
" %s: %s." % (type(obj), obj))
|
matk86/pymatgen
|
pymatgen/core/periodic_table.py
|
Python
|
mit
| 38,356
|
[
"CRYSTAL",
"pymatgen"
] |
5ce0a447ee4d8c8e6542cfd7e15dd8d0dc724810fa25d596f4785017b65310c1
|
#!/usr/bin/env python
'''
Script to convert .xsd file to VASP input files.
'''
import argparse
import os
import re
import sys
import logging
import numpy as np
from vaspy.matstudio import XsdFile
from vaspy.incar import InCar
from vaspy import PY2
if PY2:
import commands as subprocess
else:
import subprocess
_logger = logging.getLogger("vaspy.script")
if "__main__" == __name__:
# Copy INCAR vasp.script
subprocess.getstatusoutput('cp $HOME/example/INCAR $HOME/example/vasp.script ./')
# Set argument parser.
parser = argparse.ArgumentParser()
# Add optional argument.
parser.add_argument("-k", "--kpoints", help="set k-points")
parser.add_argument("--nnode", help="node number used for the job")
parser.add_argument("--ncpu", help="cpu number on each node")
parser.add_argument("-q", "--queue", help="pbs queue type")
# Add all possible arguments in INCAR file.
if os.path.exists("INCAR"):
incar = InCar()
parameters = incar.pnames
for parameter in parameters:
help_info = "Set '{}' in INCAR".format(parameter)
parser.add_argument("--{}".format(parameter), help=help_info)
args = parser.parse_args()
# Create POSCAR
status, output = subprocess.getstatusoutput('ls *.xsd | head -1')
xsd = XsdFile(filename=output)
poscar_content = xsd.get_poscar_content(bases_const=1.0)
with open('POSCAR', 'w') as f:
f.write(poscar_content)
# Create POTCAR
potdir = r'/data/pot/vasp/potpaw_PBE2010/'
# delete old POTCAR
if os.path.exists('./POTCAR'):
os.remove('./POTCAR')
for elem in xsd.atom_types:
# if os.path.exists(potdir + elem + '_new/'):
# potcar = potdir + elem + '_new/POTCAR'
if os.path.exists(potdir + elem):
potcar = potdir + elem + '/POTCAR'
else:
_logger.info('No POTCAR for ' + elem)
sys.exit(1)
subprocess.getstatusoutput('cat ' + potcar + ' >> ./POTCAR')
# Create KPOINTS
if not args.kpoints:
kpoints = []
for base in xsd.bases:
l = np.dot(base, base)**0.5
kpt = int(20/l) + 1
kpoints.append(str(kpt))
else:
kpoints = [i.strip() for i in args.kpoints.split(",")]
_logger.info("Set k-point -> {} {} {}".format(*kpoints))
kpt_str = ' '.join(kpoints)
kpt_content = 'mesh auto\n0\nG\n' + kpt_str + '\n0 0 0\n'
with open('KPOINTS', 'w') as f:
f.write(kpt_content)
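# Worked example of the default mesh above (comment added): a lattice vector of
# length 9.8 A gives int(20 / 9.8) + 1 = 3, so a roughly 10 x 10 x 10 A cell
# ends up with a "3 3 3" Gamma-centred KPOINTS file unless -k/--kpoints is given.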
# Get content line list.
jobname = ".".join(output.split('.')[: -1])
with open('vasp.script', 'r') as f:
content_list = f.readlines()
# Change job name.
content_list[1] = '#PBS -N ' + jobname + '\n'
_logger.info("job name -> {}".format(jobname))
# Change node number and cpu number.
if args.nnode or args.ncpu:
regex = re.compile(r'nodes=(\d):ppn=(\d)')
match = regex.search(content_list[5])
if not match:
msg = "Regular expressioon match error, please check your pbs script."
raise ValueError(msg)
nnode, ncpu = match.groups()
nnode = args.nnode if args.nnode else nnode
ncpu = args.ncpu if args.ncpu else ncpu
content_list[5] = "#PBS -l nodes={}:ppn={}\n".format(nnode, ncpu)
_logger.info("nodes -> {}, ppn -> {}".format(nnode, ncpu))
# Change node type.
if args.queue:
content_list[6] = "#PBS -q {}\n".format(args.queue)
_logger.info("queue type -> {}".format(args.queue))
with open('vasp.script', 'w') as f:
f.writelines(content_list)
# Create fort.188
atom_idxs = []
atom_names = []
for idx, atom_name in enumerate(xsd.atom_names):
if atom_name.endswith('_c'):
atom_idxs.append(idx)
atom_names.append(atom_name)
# If constrained get distance and create fort.188
if atom_idxs:
if len(atom_idxs) > 2:
raise ValueError("More than two atoms end with '_c'")
pt1, pt2 = [xsd.data[idx, :] for idx in atom_idxs]
# Use Ax = b to convert to Cartesian coordinates
diff = pt1 - pt2
A = np.matrix(xsd.bases.T)
x = np.matrix(diff).T
b = A*x
distance = np.linalg.norm(b)
# Create fort.188
content = '1\n3\n6\n4\n0.04\n%-5d%-5d%f\n0\n' % \
(atom_idxs[0]+1, atom_idxs[1]+1, distance)
with open('fort.188', 'w') as f:
f.write(content)
_logger.info("fort.188 has been created.")
_logger.info('-'*20)
_logger.info("atom number: {:<5d}{:<5d}".format(atom_idxs[0]+1, atom_idxs[1]+1))
_logger.info("atom name: {} {}".format(*atom_names))
_logger.info("distance: {:f}".format(distance))
_logger.info('-'*20)
# Set IBRION = 1
incar.set('IBRION', 1)
_logger.info("{} -> {}".format("IBRION", "1"))
if PY2:
pname_value_pairs = args.__dict__.iteritems()
else:
pname_value_pairs = args.__dict__.items()
for pname, value in pname_value_pairs :
if (value is not None) and (pname in incar.pnames):
incar.set(pname, value)
_logger.info("{} -> {}".format(pname, value))
# Generate new INCAR file.
incar.tofile()
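# Added usage sketch (option names come from the parser above; values are
# illustrative):
#
#   python create_inputs.py -k "4,4,1" --nnode 2 --ncpu 12 -q short
#
# This reads the first *.xsd file in the working directory and writes POSCAR,
# POTCAR, KPOINTS, an updated vasp.script/INCAR, and fort.188 when exactly two
# atom names carry the "_c" suffix.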
|
PytLab/VASPy
|
scripts/create_inputs.py
|
Python
|
mit
| 5,291
|
[
"VASP"
] |
9180bc93e4886768ce6d5abbf1e7b34da58122cd564e8c3aeb982a7bd9e724b7
|
# ex01.py
#
# Functions and data useful in exercise 1 (bacterial GC content, etc.) of
# the BS32010 course at the University of Dundee
from Bio import SeqIO # For working with sequence data
from Bio.Graphics.ColorSpiral import get_color_dict # For defining colours
import matplotlib.pyplot as plt # For creating graphics
import pandas as pd # For working with dataframes
import os # For working with local files
bact_datadir = "genome_data/gc_content"
bact_files = {"Mycoplasma genitalium": ("NC_018495.fna",
"NC_018496.fna",
"NC_018497.fna",
"NC_018498.fna"),
"Mycoplasma pneumoniae": ("NC_000912.fna",
"NC_016807.fna",
"NC_017504.fna",
"NC_020076.fna"),
"Nostoc punctiforme": ("NC_010628.fna",),
"Escherichia coli": ("NC_000913.fna",
"NC_002695.fna",
"NC_004431.fna",
"NC_010468.fna"),
"Mycobacterium tuberculosis": ("NC_016934.fna",
"NC_017523.fna",
"NC_022350.fna",
"NC_000962.fna")}
bacteria = bact_files.keys()
unknown = pd.DataFrame([dict(species="Unknown", length=4391174,
GC=0.656209, color=(1, 0.2, 0.2)), ])
def calc_size_gc(*names):
""" When passed names corresponding to the bacteria
listed in bact_files, returns a Pandas dataframe
representing sequence length and GC content for
each chromosome.
"""
# Use a Pandas DataFrame to hold data. Dataframes are
# useful objects/concepts, and support a number of
# operations that we will exploit later.
df = pd.DataFrame(columns=['species', 'length', 'GC', 'color'])
# Get one colour for each species, from Biopython's
# ColorSpiral module
colors = get_color_dict(names, a=6, b=0.2)
# Loop over the passed species names, and collect data
for name in names:
try:
for filename in bact_files[name]:
ch = SeqIO.read(os.path.join(bact_datadir, filename), 'fasta')
ch_size = len(ch.seq)
ch_gc = float(ch.seq.count('C') + ch.seq.count('G')) / ch_size
df = df.append(pd.DataFrame([dict(species=name, length=ch_size,
GC=ch_gc, filename=filename,
description=ch.description,
color=colors[name]), ]),
ignore_index=True)
except KeyError:
print("Did not recognise species: %s" % name)
continue
return df
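# Added usage sketch (assumes the FASTA files under genome_data/gc_content are
# present; species names must be keys of bact_files):
#
#   df = calc_size_gc("Escherichia coli", "Mycoplasma genitalium")
#   df[["species", "length", "GC"]].head()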
# Plot chromosome size and GC data
def plot_data(dataframe, filename=None, return_fig=False):
""" When passed a dataframe corresponding to the output
of calc_size_gc, renders a scatterplot of chromosome length
against GC content.
"""
# One advantage of using a Pandas dataframe is that we can
# operate on the data by the content of the data. Here we're
# treating the dataframe as a series of subsets on the basis
# of named species. This allows us to label our scatterplot
# by species, too.
fig = plt.figure(figsize=(8,4))
ax = fig.add_subplot(111)
ax.set_position([0.15, 0.15, 0.45, 0.75])
for k, sub in dataframe.groupby("species"):
ax.scatter(x=sub.GC, y=sub.length, c=list(sub.color), label=k, s=50)
ax.set_xlabel("GC content/%")
ax.set_ylabel("chromosome length/bp")
ax.set_title("Chr length vs GC%, grouped by species")
leg = ax.legend(bbox_to_anchor=(1.0, 0.5), loc='center left')
if filename is not None:
fig.savefig(filename)
if return_fig:
return fig
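# Added usage sketch, combining the two helpers above:
#
#   df = calc_size_gc("Escherichia coli", "Mycobacterium tuberculosis")
#   plot_data(df, filename="length_vs_gc.png")
#
# This saves a scatterplot of chromosome length against GC content, coloured by
# species; pass return_fig=True to get the matplotlib Figure back as well.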
|
widdowquinn/Teaching-2015-03-17-UoD_compgenvis
|
workshop_1/bs32010/ex01.py
|
Python
|
mit
| 4,059
|
[
"Biopython"
] |
691c352180a197cb6e6d4dfe5ff03523175a900feb458c23b1ad1dc41c92dd81
|
from __future__ import absolute_import
from typing import Any, Dict, List, Set, Tuple, TypeVar, \
    Union, Optional, Sequence, AbstractSet
from typing.re import Match
from zerver.lib.str_utils import NonBinaryStr
from django.db import models
from django.db.models.query import QuerySet
from django.db.models import Manager
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, UserManager, \
PermissionsMixin
from django.dispatch import receiver
from zerver.lib.cache import cache_with_key, flush_user_profile, flush_realm, \
user_profile_by_id_cache_key, user_profile_by_email_cache_key, \
generic_bulk_cached_fetch, cache_set, flush_stream, \
display_recipient_cache_key, cache_delete, \
get_stream_cache_key, active_user_dicts_in_realm_cache_key, \
active_bot_dicts_in_realm_cache_key, active_user_dict_fields, \
active_bot_dict_fields
from zerver.lib.utils import make_safe_digest, generate_random_token
from zerver.lib.str_utils import force_bytes, ModelReprMixin, dict_with_str_keys
from django.db import transaction
from zerver.lib.avatar import gravatar_hash, get_avatar_url
from zerver.lib.camo import get_camo_url
from django.utils import timezone
from django.contrib.sessions.models import Session
from zerver.lib.timestamp import datetime_to_timestamp
from django.db.models.signals import pre_save, post_save, post_delete
from django.core.validators import MinLengthValidator, RegexValidator
from django.utils.translation import ugettext_lazy as _
import zlib
from bitfield import BitField
from bitfield.types import BitHandler
from collections import defaultdict
from datetime import timedelta
import pylibmc
import re
import ujson
import logging
from six import binary_type, text_type
import time
import datetime
# TODO: see #1379 to eliminate bugdown dependencies
bugdown = None # type: Any
MAX_SUBJECT_LENGTH = 60
MAX_MESSAGE_LENGTH = 10000
MAX_LANGUAGE_ID_LENGTH = 50 # type: int
STREAM_NAMES = TypeVar('STREAM_NAMES', Sequence[text_type], AbstractSet[text_type])
# Doing 1000 remote cache requests to get_display_recipient is quite slow,
# so add a local cache as well as the remote cache.
per_request_display_recipient_cache = {} # type: Dict[int, List[Dict[str, Any]]]
def get_display_recipient_by_id(recipient_id, recipient_type, recipient_type_id):
# type: (int, int, int) -> Union[text_type, List[Dict[str, Any]]]
if recipient_id not in per_request_display_recipient_cache:
result = get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id)
per_request_display_recipient_cache[recipient_id] = result
return per_request_display_recipient_cache[recipient_id]
def get_display_recipient(recipient):
# type: (Recipient) -> Union[text_type, List[Dict[str, Any]]]
return get_display_recipient_by_id(
recipient.id,
recipient.type,
recipient.type_id
)
def flush_per_request_caches():
# type: () -> None
global per_request_display_recipient_cache
per_request_display_recipient_cache = {}
global per_request_realm_filters_cache
per_request_realm_filters_cache = {}
@cache_with_key(lambda *args: display_recipient_cache_key(args[0]),
timeout=3600*24*7)
def get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id):
# type: (int, int, int) -> Union[text_type, List[Dict[str, Any]]]
"""
returns: an appropriate object describing the recipient. For a
stream this will be the stream name as a string. For a huddle or
personal, it will be an array of dicts about each recipient.
"""
if recipient_type == Recipient.STREAM:
stream = Stream.objects.get(id=recipient_type_id)
return stream.name
# We don't really care what the ordering is, just that it's deterministic.
user_profile_list = (UserProfile.objects.filter(subscription__recipient_id=recipient_id)
.select_related()
.order_by('email'))
return [{'email': user_profile.email,
'domain': user_profile.realm.domain,
'full_name': user_profile.full_name,
'short_name': user_profile.short_name,
'id': user_profile.id,
'is_mirror_dummy': user_profile.is_mirror_dummy,} for user_profile in user_profile_list]
def completely_open(domain):
# type: (text_type) -> bool
# This domain is completely open to everyone on the internet to
# join. E-mail addresses do not need to match the domain and
# an invite from an existing user is not required.
realm = get_realm(domain)
if not realm:
return False
return not realm.invite_required and not realm.restricted_to_domain
def get_unique_open_realm():
# type: () -> Optional[Realm]
"""We only return a realm if there is a unique non-system-only realm
and it is completely open."""
realms = Realm.objects.filter(deactivated=False)
# On production installations, the (usually "zulip.com") system
# realm is an empty realm just used for system bots, so don't
# include it in this accounting.
realms = realms.exclude(domain__in=settings.SYSTEM_ONLY_REALMS)
if len(realms) != 1:
return None
realm = realms[0]
if realm.invite_required or realm.restricted_to_domain:
return None
return realm
def get_realm_emoji_cache_key(realm):
# type: (Realm) -> text_type
return u'realm_emoji:%s' % (realm.id,)
class Realm(ModelReprMixin, models.Model):
# domain is a domain in the Internet sense. It must be structured like a
# valid email domain. We use it to restrict access, identify bots, etc.
domain = models.CharField(max_length=40, db_index=True, unique=True) # type: text_type
# name is the user-visible identifier for the realm. It has no required
# structure.
name = models.CharField(max_length=40, null=True) # type: Optional[text_type]
restricted_to_domain = models.BooleanField(default=True) # type: bool
invite_required = models.BooleanField(default=False) # type: bool
invite_by_admins_only = models.BooleanField(default=False) # type: bool
create_stream_by_admins_only = models.BooleanField(default=False) # type: bool
mandatory_topics = models.BooleanField(default=False) # type: bool
show_digest_email = models.BooleanField(default=True) # type: bool
name_changes_disabled = models.BooleanField(default=False) # type: bool
allow_message_editing = models.BooleanField(default=True) # type: bool
DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS = 600 # if changed, also change in admin.js
message_content_edit_limit_seconds = models.IntegerField(default=DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS) # type: int
date_created = models.DateTimeField(default=timezone.now) # type: datetime.datetime
notifications_stream = models.ForeignKey('Stream', related_name='+', null=True, blank=True) # type: Optional[Stream]
deactivated = models.BooleanField(default=False) # type: bool
default_language = models.CharField(default=u'en', max_length=MAX_LANGUAGE_ID_LENGTH) # type: text_type
DEFAULT_NOTIFICATION_STREAM_NAME = u'announce'
def __unicode__(self):
# type: () -> text_type
return u"<Realm: %s %s>" % (self.domain, self.id)
@cache_with_key(get_realm_emoji_cache_key, timeout=3600*24*7)
def get_emoji(self):
# type: () -> Dict[text_type, Dict[str, text_type]]
return get_realm_emoji_uncached(self)
@property
def deployment(self):
# type: () -> Any # returns a Deployment from zilencer.models
try:
return self._deployments.all()[0]
except IndexError:
return None
@deployment.setter # type: ignore # https://github.com/python/mypy/issues/220
def set_deployments(self, value):
# type: (Any) -> None
self._deployments = [value] # type: Any
def get_admin_users(self):
# type: () -> Sequence[UserProfile]
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_realm_admin=True,
is_active=True).select_related()
def get_active_users(self):
# type: () -> Sequence[UserProfile]
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_active=True).select_related()
@property
def uri(self):
# type: () -> str
return settings.SERVER_URI
@property
def host(self):
# type: () -> str
return settings.EXTERNAL_HOST
@property
def is_zephyr_mirror_realm(self):
# type: () -> bool
return self.domain == "mit.edu"
@property
def webathena_enabled(self):
# type: () -> bool
return self.is_zephyr_mirror_realm
@property
def presence_disabled(self):
# type: () -> bool
return self.is_zephyr_mirror_realm
class Meta(object):
permissions = (
('administer', "Administer a realm"),
('api_super_user', "Can send messages as other users for mirroring"),
)
post_save.connect(flush_realm, sender=Realm)
class RealmAlias(models.Model):
realm = models.ForeignKey(Realm, null=True) # type: Optional[Realm]
domain = models.CharField(max_length=80, db_index=True, unique=True) # type: text_type
# These functions should only be used on email addresses that have
# been validated via django.core.validators.validate_email
#
# Note that we need to use some care, since you can have multiple @-signs; e.g.
# "tabbott@test"@zulip.com
# is a valid email address
def email_to_username(email):
# type: (text_type) -> text_type
return "@".join(email.split("@")[:-1]).lower()
# Returns the raw domain portion of the desired email address
def split_email_to_domain(email):
# type: (text_type) -> text_type
return email.split("@")[-1].lower()
# Returns the domain, potentially de-aliased, for the realm
# that this user's email is in
def resolve_email_to_domain(email):
# type: (text_type) -> text_type
domain = split_email_to_domain(email)
alias = alias_for_realm(domain)
if alias is not None:
domain = alias.realm.domain
return domain
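# Added illustration of the helpers above (addresses are examples only):
#
#   email_to_username("tabbott@zulip.com")      -> "tabbott"
#   split_email_to_domain("Tabbott@Zulip.COM")  -> "zulip.com"
#   resolve_email_to_domain(email)              -> the alias target's realm domain
#                                                  when a RealmAlias matches,
#                                                  otherwise the raw email domain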
# Is a user with the given email address allowed to be in the given realm?
# (This function does not check whether the user has been invited to the realm.
# So for invite-only realms, this is the test for whether a user can be invited,
# not whether the user can sign up currently.)
def email_allowed_for_realm(email, realm):
# type: (text_type, Realm) -> bool
# Anyone can be in an open realm
if not realm.restricted_to_domain:
return True
# Otherwise, domains must match (case-insensitively)
email_domain = resolve_email_to_domain(email)
return email_domain == realm.domain.lower()
def alias_for_realm(domain):
# type: (text_type) -> Optional[RealmAlias]
try:
return RealmAlias.objects.get(domain=domain)
except RealmAlias.DoesNotExist:
return None
def remote_user_to_email(remote_user):
# type: (text_type) -> text_type
if settings.SSO_APPEND_DOMAIN is not None:
remote_user += "@" + settings.SSO_APPEND_DOMAIN
return remote_user
class RealmEmoji(ModelReprMixin, models.Model):
realm = models.ForeignKey(Realm) # type: Realm
# Second part of the regex (negative lookbehind) disallows names ending with one of the punctuation characters
name = models.TextField(validators=[MinLengthValidator(1),
RegexValidator(regex=r'^[0-9a-zA-Z.\-_]+(?<![.\-_])$',
message=_("Invalid characters in Emoji name"))]) # type: text_type
# URLs start having browser compatibility problems above 2000
# characters, so 1000 seems like a safe limit.
img_url = models.URLField(max_length=1000) # type: text_type
class Meta(object):
unique_together = ("realm", "name")
def __unicode__(self):
# type: () -> text_type
return u"<RealmEmoji(%s): %s %s>" % (self.realm.domain, self.name, self.img_url)
def get_realm_emoji_uncached(realm):
# type: (Realm) -> Dict[text_type, Dict[str, text_type]]
d = {}
for row in RealmEmoji.objects.filter(realm=realm):
d[row.name] = dict(source_url=row.img_url,
display_url=get_camo_url(row.img_url))
return d
def flush_realm_emoji(sender, **kwargs):
# type: (Any, **Any) -> None
realm = kwargs['instance'].realm
cache_set(get_realm_emoji_cache_key(realm),
get_realm_emoji_uncached(realm),
timeout=3600*24*7)
post_save.connect(flush_realm_emoji, sender=RealmEmoji)
post_delete.connect(flush_realm_emoji, sender=RealmEmoji)
class RealmFilter(ModelReprMixin, models.Model):
realm = models.ForeignKey(Realm) # type: Realm
pattern = models.TextField() # type: text_type
url_format_string = models.TextField() # type: text_type
class Meta(object):
unique_together = ("realm", "pattern")
def __unicode__(self):
# type: () -> text_type
return u"<RealmFilter(%s): %s %s>" % (self.realm.domain, self.pattern, self.url_format_string)
def get_realm_filters_cache_key(domain):
# type: (text_type) -> text_type
return u'all_realm_filters:%s' % (domain,)
# We have a per-process cache to avoid doing 1000 remote cache queries during page load
per_request_realm_filters_cache = {} # type: Dict[text_type, List[Tuple[text_type, text_type]]]
def realm_filters_for_domain(domain):
# type: (text_type) -> List[Tuple[text_type, text_type]]
domain = domain.lower()
if domain not in per_request_realm_filters_cache:
per_request_realm_filters_cache[domain] = realm_filters_for_domain_remote_cache(domain)
return per_request_realm_filters_cache[domain]
@cache_with_key(get_realm_filters_cache_key, timeout=3600*24*7)
def realm_filters_for_domain_remote_cache(domain):
# type: (text_type) -> List[Tuple[text_type, text_type]]
filters = []
for realm_filter in RealmFilter.objects.filter(realm=get_realm(domain)):
filters.append((realm_filter.pattern, realm_filter.url_format_string))
return filters
def all_realm_filters():
# type: () -> Dict[text_type, List[Tuple[text_type, text_type]]]
filters = defaultdict(list) # type: Dict[text_type, List[Tuple[text_type, text_type]]]
for realm_filter in RealmFilter.objects.all():
filters[realm_filter.realm.domain].append((realm_filter.pattern, realm_filter.url_format_string))
return filters
def flush_realm_filter(sender, **kwargs):
# type: (Any, **Any) -> None
realm = kwargs['instance'].realm
cache_delete(get_realm_filters_cache_key(realm.domain))
try:
per_request_realm_filters_cache.pop(realm.domain.lower())
except KeyError:
pass
post_save.connect(flush_realm_filter, sender=RealmFilter)
post_delete.connect(flush_realm_filter, sender=RealmFilter)
class UserProfile(ModelReprMixin, AbstractBaseUser, PermissionsMixin):
DEFAULT_BOT = 1
"""
Incoming webhook bots are limited to only sending messages via webhooks.
Thus, it is less of a security risk to expose their API keys to third-party services,
since they can't be used to read messages.
"""
INCOMING_WEBHOOK_BOT = 2
# Fields from models.AbstractUser minus last_name and first_name,
# which we don't use; email is modified to make it indexed and unique.
email = models.EmailField(blank=False, db_index=True, unique=True) # type: text_type
is_staff = models.BooleanField(default=False) # type: bool
is_active = models.BooleanField(default=True, db_index=True) # type: bool
is_realm_admin = models.BooleanField(default=False, db_index=True) # type: bool
is_bot = models.BooleanField(default=False, db_index=True) # type: bool
bot_type = models.PositiveSmallIntegerField(null=True, db_index=True) # type: Optional[int]
is_api_super_user = models.BooleanField(default=False, db_index=True) # type: bool
date_joined = models.DateTimeField(default=timezone.now) # type: datetime.datetime
is_mirror_dummy = models.BooleanField(default=False) # type: bool
bot_owner = models.ForeignKey('self', null=True, on_delete=models.SET_NULL) # type: Optional[UserProfile]
USERNAME_FIELD = 'email'
MAX_NAME_LENGTH = 100
# Our custom site-specific fields
full_name = models.CharField(max_length=MAX_NAME_LENGTH) # type: text_type
short_name = models.CharField(max_length=MAX_NAME_LENGTH) # type: text_type
# pointer points to Message.id, NOT UserMessage.id.
pointer = models.IntegerField() # type: int
last_pointer_updater = models.CharField(max_length=64) # type: text_type
realm = models.ForeignKey(Realm) # type: Realm
api_key = models.CharField(max_length=32) # type: text_type
tos_version = models.CharField(null=True, max_length=10) # type: text_type
### Notifications settings. ###
# Stream notifications.
enable_stream_desktop_notifications = models.BooleanField(default=False) # type: bool
enable_stream_sounds = models.BooleanField(default=False) # type: bool
# PM + @-mention notifications.
enable_desktop_notifications = models.BooleanField(default=True) # type: bool
enable_sounds = models.BooleanField(default=True) # type: bool
enable_offline_email_notifications = models.BooleanField(default=True) # type: bool
enable_offline_push_notifications = models.BooleanField(default=True) # type: bool
enable_digest_emails = models.BooleanField(default=True) # type: bool
# Old notification field superseded by existence of stream notification
# settings.
default_desktop_notifications = models.BooleanField(default=True) # type: bool
###
last_reminder = models.DateTimeField(default=timezone.now, null=True) # type: Optional[datetime.datetime]
rate_limits = models.CharField(default=u"", max_length=100) # type: text_type # comma-separated list of range:max pairs
# Default streams
default_sending_stream = models.ForeignKey('zerver.Stream', null=True, related_name='+') # type: Optional[Stream]
default_events_register_stream = models.ForeignKey('zerver.Stream', null=True, related_name='+') # type: Optional[Stream]
default_all_public_streams = models.BooleanField(default=False) # type: bool
# UI vars
enter_sends = models.NullBooleanField(default=True) # type: Optional[bool]
autoscroll_forever = models.BooleanField(default=False) # type: bool
left_side_userlist = models.BooleanField(default=False) # type: bool
# display settings
twenty_four_hour_time = models.BooleanField(default=False) # type: bool
default_language = models.CharField(default=u'en', max_length=MAX_LANGUAGE_ID_LENGTH) # type: text_type
# Hours to wait before sending another email to a user
EMAIL_REMINDER_WAITPERIOD = 24
# Minutes to wait before warning a bot owner that her bot sent a message
# to a nonexistent stream
BOT_OWNER_STREAM_ALERT_WAITPERIOD = 1
AVATAR_FROM_GRAVATAR = u'G'
AVATAR_FROM_USER = u'U'
AVATAR_FROM_SYSTEM = u'S'
AVATAR_SOURCES = (
(AVATAR_FROM_GRAVATAR, 'Hosted by Gravatar'),
(AVATAR_FROM_USER, 'Uploaded by user'),
(AVATAR_FROM_SYSTEM, 'System generated'),
)
avatar_source = models.CharField(default=AVATAR_FROM_GRAVATAR, choices=AVATAR_SOURCES, max_length=1) # type: text_type
TUTORIAL_WAITING = u'W'
TUTORIAL_STARTED = u'S'
TUTORIAL_FINISHED = u'F'
TUTORIAL_STATES = ((TUTORIAL_WAITING, "Waiting"),
(TUTORIAL_STARTED, "Started"),
(TUTORIAL_FINISHED, "Finished"))
tutorial_status = models.CharField(default=TUTORIAL_WAITING, choices=TUTORIAL_STATES, max_length=1) # type: text_type
# Contains serialized JSON of the form:
# [("step 1", true), ("step 2", false)]
# where the second element of each tuple is if the step has been
# completed.
onboarding_steps = models.TextField(default=u'[]') # type: text_type
invites_granted = models.IntegerField(default=0) # type: int
invites_used = models.IntegerField(default=0) # type: int
alert_words = models.TextField(default=u'[]') # type: text_type # json-serialized list of strings
# Contains serialized JSON of the form:
# [["social", "mit"], ["devel", "ios"]]
muted_topics = models.TextField(default=u'[]') # type: text_type
objects = UserManager() # type: UserManager
def can_admin_user(self, target_user):
# type: (UserProfile) -> bool
"""Returns whether this user has permission to modify target_user"""
if target_user.bot_owner == self:
return True
elif self.is_realm_admin and self.realm == target_user.realm:
return True
else:
return False
def last_reminder_tzaware(self):
# type: () -> Optional[datetime.datetime]
if self.last_reminder is not None and timezone.is_naive(self.last_reminder):
logging.warning(u"Loaded a user_profile.last_reminder for user %s that's not tz-aware: %s"
% (self.email, text_type(self.last_reminder)))
return self.last_reminder.replace(tzinfo=timezone.utc)
return self.last_reminder
def __unicode__(self):
# type: () -> text_type
return u"<UserProfile: %s %s>" % (self.email, self.realm)
@property
def is_incoming_webhook(self):
# type: () -> bool
return self.bot_type == UserProfile.INCOMING_WEBHOOK_BOT
@staticmethod
def emails_from_ids(user_ids):
# type: (Sequence[int]) -> Dict[int, text_type]
rows = UserProfile.objects.filter(id__in=user_ids).values('id', 'email')
return {row['id']: row['email'] for row in rows}
def can_create_streams(self):
# type: () -> bool
if self.is_realm_admin or not self.realm.create_stream_by_admins_only:
return True
else:
return False
def major_tos_version(self):
# type: () -> int
if self.tos_version is not None:
return int(self.tos_version.split('.')[0])
else:
return -1
def receives_offline_notifications(user_profile):
# type: (UserProfile) -> bool
return ((user_profile.enable_offline_email_notifications or
user_profile.enable_offline_push_notifications) and
not user_profile.is_bot)
# Make sure we flush the UserProfile object from our remote cache
# whenever we save it.
post_save.connect(flush_user_profile, sender=UserProfile)
class PreregistrationUser(models.Model):
email = models.EmailField() # type: text_type
referred_by = models.ForeignKey(UserProfile, null=True) # type: Optional[UserProfile]
streams = models.ManyToManyField('Stream') # type: Manager
invited_at = models.DateTimeField(auto_now=True) # type: datetime.datetime
realm_creation = models.BooleanField(default=False)
# status: whether an object has been confirmed.
# if confirmed, set to confirmation.settings.STATUS_ACTIVE
status = models.IntegerField(default=0) # type: int
realm = models.ForeignKey(Realm, null=True) # type: Optional[Realm]
class PushDeviceToken(models.Model):
APNS = 1
GCM = 2
KINDS = (
(APNS, 'apns'),
(GCM, 'gcm'),
)
kind = models.PositiveSmallIntegerField(choices=KINDS) # type: int
# The token is a unique device-specific token that is
# sent to us from each device:
# - APNS token if kind == APNS
# - GCM registration id if kind == GCM
token = models.CharField(max_length=4096, unique=True) # type: text_type
last_updated = models.DateTimeField(auto_now=True) # type: datetime.datetime
# The user whose device this is
user = models.ForeignKey(UserProfile, db_index=True) # type: UserProfile
# [optional] Contains the app id of the device if it is an iOS device
ios_app_id = models.TextField(null=True) # type: Optional[text_type]
def generate_email_token_for_stream():
# type: () -> text_type
return generate_random_token(32)
class Stream(ModelReprMixin, models.Model):
MAX_NAME_LENGTH = 60
name = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True) # type: text_type
realm = models.ForeignKey(Realm, db_index=True) # type: Realm
invite_only = models.NullBooleanField(default=False) # type: Optional[bool]
# Used by the e-mail forwarder. The e-mail RFC specifies a maximum
# e-mail length of 254, and our max stream name length is 60, so we
# have plenty of room for the token.
email_token = models.CharField(
max_length=32, default=generate_email_token_for_stream) # type: text_type
description = models.CharField(max_length=1024, default=u'') # type: text_type
date_created = models.DateTimeField(default=timezone.now) # type: datetime.datetime
deactivated = models.BooleanField(default=False) # type: bool
def __unicode__(self):
# type: () -> text_type
return u"<Stream: %s>" % (self.name,)
def is_public(self):
# type: () -> bool
# All streams are private in Zephyr mirroring realms.
return not self.invite_only and not self.realm.is_zephyr_mirror_realm
class Meta(object):
unique_together = ("name", "realm")
def num_subscribers(self):
# type: () -> int
return Subscription.objects.filter(
recipient__type=Recipient.STREAM,
recipient__type_id=self.id,
user_profile__is_active=True,
active=True
).count()
# This is stream information that is sent to clients
def to_dict(self):
# type: () -> Dict[str, Any]
return dict(name=self.name,
stream_id=self.id,
description=self.description,
invite_only=self.invite_only)
post_save.connect(flush_stream, sender=Stream)
post_delete.connect(flush_stream, sender=Stream)
def valid_stream_name(name):
# type: (text_type) -> bool
return name != ""
# The Recipient table is used to map Messages to the set of users who
# received the message. It is implemented as a set of triples (id,
# type_id, type). We have 3 types of recipients: Huddles (for group
# private messages), UserProfiles (for 1:1 private messages), and
# Streams. The recipient table maps a globally unique recipient id
# (used by the Message table) to the type-specific unique id (the
# stream id, user_profile id, or huddle id).
class Recipient(ModelReprMixin, models.Model):
type_id = models.IntegerField(db_index=True) # type: int
type = models.PositiveSmallIntegerField(db_index=True) # type: int
# Valid types are {personal, stream, huddle}
PERSONAL = 1
STREAM = 2
HUDDLE = 3
class Meta(object):
unique_together = ("type", "type_id")
# N.B. If we used Django's choice=... we would get this for free (kinda)
_type_names = {
PERSONAL: 'personal',
STREAM: 'stream',
HUDDLE: 'huddle' }
def type_name(self):
# type: () -> str
# Raises KeyError if invalid
return self._type_names[self.type]
def __unicode__(self):
# type: () -> text_type
display_recipient = get_display_recipient(self)
return u"<Recipient: %s (%d, %s)>" % (display_recipient, self.type_id, self.type)
class Client(ModelReprMixin, models.Model):
name = models.CharField(max_length=30, db_index=True, unique=True) # type: text_type
def __unicode__(self):
# type: () -> text_type
return u"<Client: %s>" % (self.name,)
get_client_cache = {} # type: Dict[text_type, Client]
def get_client(name):
# type: (text_type) -> Client
if name not in get_client_cache:
result = get_client_remote_cache(name)
get_client_cache[name] = result
return get_client_cache[name]
def get_client_cache_key(name):
# type: (text_type) -> text_type
return u'get_client:%s' % (make_safe_digest(name),)
@cache_with_key(get_client_cache_key, timeout=3600*24*7)
def get_client_remote_cache(name):
# type: (text_type) -> Client
(client, _) = Client.objects.get_or_create(name=name)
return client
# get_stream_backend takes either a realm id or a realm
@cache_with_key(get_stream_cache_key, timeout=3600*24*7)
def get_stream_backend(stream_name, realm):
# type: (text_type, Realm) -> Stream
return Stream.objects.select_related("realm").get(
name__iexact=stream_name.strip(), realm_id=realm.id)
def get_active_streams(realm):
# type: (Realm) -> QuerySet
"""
Return all streams (including invite-only streams) that have not been deactivated.
"""
return Stream.objects.filter(realm=realm, deactivated=False)
def get_stream(stream_name, realm):
# type: (text_type, Realm) -> Optional[Stream]
try:
return get_stream_backend(stream_name, realm)
except Stream.DoesNotExist:
return None
def bulk_get_streams(realm, stream_names):
# type: (Realm, STREAM_NAMES) -> Dict[text_type, Any]
def fetch_streams_by_name(stream_names):
# type: (List[text_type]) -> Sequence[Stream]
#
# This should be just
#
# Stream.objects.select_related("realm").filter(name__iexact__in=stream_names,
# realm_id=realm_id)
#
# But chaining __in and __iexact doesn't work with Django's
# ORM, so we have the following hack to construct the relevant where clause
if len(stream_names) == 0:
return []
upper_list = ", ".join(["UPPER(%s)"] * len(stream_names))
where_clause = "UPPER(zerver_stream.name::text) IN (%s)" % (upper_list,)
return get_active_streams(realm.id).select_related("realm").extra(
where=[where_clause],
params=stream_names)
return generic_bulk_cached_fetch(lambda stream_name: get_stream_cache_key(stream_name, realm),
fetch_streams_by_name,
[stream_name.lower() for stream_name in stream_names],
id_fetcher=lambda stream: stream.name.lower())
def get_recipient_cache_key(type, type_id):
# type: (int, int) -> text_type
return u"get_recipient:%s:%s" % (type, type_id,)
@cache_with_key(get_recipient_cache_key, timeout=3600*24*7)
def get_recipient(type, type_id):
# type: (int, int) -> Recipient
return Recipient.objects.get(type_id=type_id, type=type)
def bulk_get_recipients(type, type_ids):
# type: (int, List[int]) -> Dict[int, Any]
def cache_key_function(type_id):
# type: (int) -> text_type
return get_recipient_cache_key(type, type_id)
def query_function(type_ids):
# type: (List[int]) -> Sequence[Recipient]
# TODO: Change return type to QuerySet[Recipient]
return Recipient.objects.filter(type=type, type_id__in=type_ids)
return generic_bulk_cached_fetch(cache_key_function, query_function, type_ids,
id_fetcher=lambda recipient: recipient.type_id)
def extract_message_dict(message_bytes):
# type: (binary_type) -> Dict[str, Any]
return dict_with_str_keys(ujson.loads(zlib.decompress(message_bytes).decode("utf-8")))
def stringify_message_dict(message_dict):
# type: (Dict[str, Any]) -> binary_type
return zlib.compress(force_bytes(ujson.dumps(message_dict)))
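# Added illustration: for a dict with string keys the two helpers above
# round-trip each other, e.g.
#
#   payload = stringify_message_dict({"id": 42, "content": u"hi"})
#   extract_message_dict(payload) == {"id": 42, "content": u"hi"}   # True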
def to_dict_cache_key_id(message_id, apply_markdown):
# type: (int, bool) -> text_type
return u'message_dict:%d:%d' % (message_id, apply_markdown)
def to_dict_cache_key(message, apply_markdown):
# type: (Message, bool) -> text_type
return to_dict_cache_key_id(message.id, apply_markdown)
class Message(ModelReprMixin, models.Model):
sender = models.ForeignKey(UserProfile) # type: UserProfile
recipient = models.ForeignKey(Recipient) # type: Recipient
subject = models.CharField(max_length=MAX_SUBJECT_LENGTH, db_index=True) # type: text_type
content = models.TextField() # type: text_type
rendered_content = models.TextField(null=True) # type: Optional[text_type]
rendered_content_version = models.IntegerField(null=True) # type: Optional[int]
pub_date = models.DateTimeField('date published', db_index=True) # type: datetime.datetime
sending_client = models.ForeignKey(Client) # type: Client
last_edit_time = models.DateTimeField(null=True) # type: Optional[datetime.datetime]
edit_history = models.TextField(null=True) # type: Optional[text_type]
has_attachment = models.BooleanField(default=False, db_index=True) # type: bool
has_image = models.BooleanField(default=False, db_index=True) # type: bool
has_link = models.BooleanField(default=False, db_index=True) # type: bool
def topic_name(self):
# type: () -> text_type
"""
Please start using this helper to facilitate an
eventual switch over to a separate topic table.
"""
return self.subject
def __unicode__(self):
# type: () -> text_type
display_recipient = get_display_recipient(self.recipient)
return u"<Message: %s / %s / %r>" % (display_recipient, self.subject, self.sender)
def get_realm(self):
# type: () -> Realm
return self.sender.realm
def render_markdown(self, content, domain=None):
# type: (text_type, Optional[text_type]) -> text_type
"""Return HTML for given markdown. Bugdown may add properties to the
message object such as `mentions_user_ids` and `mentions_wildcard`.
These are only on this Django object and are not saved in the
database.
"""
# TODO: see #1379 to eliminate bugdown dependencies
global bugdown
if bugdown is None:
import zerver.lib.bugdown as bugdown
# 'from zerver.lib import bugdown' gives mypy error in python 3 mode.
self.mentions_wildcard = False
self.is_me_message = False
self.mentions_user_ids = set() # type: Set[int]
self.user_ids_with_alert_words = set() # type: Set[int]
if not domain:
domain = self.sender.realm.domain
if self.sending_client.name == "zephyr_mirror" and self.sender.realm.is_zephyr_mirror_realm:
# Use slightly customized Markdown processor for content
# delivered via zephyr_mirror
domain = u"zephyr_mirror"
rendered_content = bugdown.convert(content, domain, self)
self.is_me_message = Message.is_status_message(content, rendered_content)
return rendered_content
def set_rendered_content(self, rendered_content, save = False):
# type: (text_type, bool) -> bool
"""Set the content on the message.
"""
# TODO: see #1379 to eliminate bugdown dependencies
global bugdown
if bugdown is None:
import zerver.lib.bugdown as bugdown
# 'from zerver.lib import bugdown' gives mypy error in python 3 mode.
self.rendered_content = rendered_content
self.rendered_content_version = bugdown.version
if self.rendered_content is not None:
if save:
self.save_rendered_content()
return True
else:
return False
def save_rendered_content(self):
# type: () -> None
self.save(update_fields=["rendered_content", "rendered_content_version"])
def maybe_render_content(self, domain, save = False):
# type: (Optional[text_type], bool) -> bool
"""Render the markdown if there is no existing rendered_content"""
# TODO: see #1379 to eliminate bugdown dependencies
global bugdown
if bugdown is None:
import zerver.lib.bugdown as bugdown
# 'from zerver.lib import bugdown' gives mypy error in python 3 mode.
if Message.need_to_render_content(self.rendered_content, self.rendered_content_version):
return self.set_rendered_content(self.render_markdown(self.content, domain), save)
else:
return True
@staticmethod
def need_to_render_content(rendered_content, rendered_content_version):
# type: (Optional[text_type], int) -> bool
return rendered_content is None or rendered_content_version < bugdown.version
def to_dict(self, apply_markdown):
# type: (bool) -> Dict[str, Any]
return extract_message_dict(self.to_dict_json(apply_markdown))
@cache_with_key(to_dict_cache_key, timeout=3600*24)
def to_dict_json(self, apply_markdown):
# type: (bool) -> binary_type
return self.to_dict_uncached(apply_markdown)
def to_dict_uncached(self, apply_markdown):
# type: (bool) -> binary_type
return stringify_message_dict(self.to_dict_uncached_helper(apply_markdown))
def to_dict_uncached_helper(self, apply_markdown):
# type: (bool) -> Dict[str, Any]
return Message.build_message_dict(
apply_markdown = apply_markdown,
message = self,
message_id = self.id,
last_edit_time = self.last_edit_time,
edit_history = self.edit_history,
content = self.content,
subject = self.subject,
pub_date = self.pub_date,
rendered_content = self.rendered_content,
rendered_content_version = self.rendered_content_version,
sender_id = self.sender.id,
sender_email = self.sender.email,
sender_realm_domain = self.sender.realm.domain,
sender_full_name = self.sender.full_name,
sender_short_name = self.sender.short_name,
sender_avatar_source = self.sender.avatar_source,
sender_is_mirror_dummy = self.sender.is_mirror_dummy,
sending_client_name = self.sending_client.name,
recipient_id = self.recipient.id,
recipient_type = self.recipient.type,
recipient_type_id = self.recipient.type_id,
)
@staticmethod
def build_dict_from_raw_db_row(row, apply_markdown):
# type: (Dict[str, Any], bool) -> Dict[str, Any]
'''
row is a row from a .values() call, and it needs to have
all the relevant fields populated
'''
return Message.build_message_dict(
apply_markdown = apply_markdown,
message = None,
message_id = row['id'],
last_edit_time = row['last_edit_time'],
edit_history = row['edit_history'],
content = row['content'],
subject = row['subject'],
pub_date = row['pub_date'],
rendered_content = row['rendered_content'],
rendered_content_version = row['rendered_content_version'],
sender_id = row['sender_id'],
sender_email = row['sender__email'],
sender_realm_domain = row['sender__realm__domain'],
sender_full_name = row['sender__full_name'],
sender_short_name = row['sender__short_name'],
sender_avatar_source = row['sender__avatar_source'],
sender_is_mirror_dummy = row['sender__is_mirror_dummy'],
sending_client_name = row['sending_client__name'],
recipient_id = row['recipient_id'],
recipient_type = row['recipient__type'],
recipient_type_id = row['recipient__type_id'],
)
@staticmethod
def build_message_dict(
apply_markdown,
message,
message_id,
last_edit_time,
edit_history,
content,
subject,
pub_date,
rendered_content,
rendered_content_version,
sender_id,
sender_email,
sender_realm_domain,
sender_full_name,
sender_short_name,
sender_avatar_source,
sender_is_mirror_dummy,
sending_client_name,
recipient_id,
recipient_type,
recipient_type_id,
):
# type: (bool, Message, int, datetime.datetime, text_type, text_type, text_type, datetime.datetime, text_type, Optional[int], int, text_type, text_type, text_type, text_type, text_type, bool, text_type, int, int, int) -> Dict[str, Any]
# TODO: see #1379 to eliminate bugdown dependencies
global bugdown
if bugdown is None:
import zerver.lib.bugdown as bugdown
# 'from zerver.lib import bugdown' gives mypy error in python 3 mode.
avatar_url = get_avatar_url(sender_avatar_source, sender_email)
display_recipient = get_display_recipient_by_id(
recipient_id,
recipient_type,
recipient_type_id
)
if recipient_type == Recipient.STREAM:
display_type = "stream"
elif recipient_type in (Recipient.HUDDLE, Recipient.PERSONAL):
assert not isinstance(display_recipient, text_type)
display_type = "private"
if len(display_recipient) == 1:
# add the sender in if this isn't a message between
# someone and themselves, preserving ordering
recip = {'email': sender_email,
'domain': sender_realm_domain,
'full_name': sender_full_name,
'short_name': sender_short_name,
'id': sender_id,
'is_mirror_dummy': sender_is_mirror_dummy}
if recip['email'] < display_recipient[0]['email']:
display_recipient = [recip, display_recipient[0]]
elif recip['email'] > display_recipient[0]['email']:
display_recipient = [display_recipient[0], recip]
obj = dict(
id = message_id,
sender_email = sender_email,
sender_full_name = sender_full_name,
sender_short_name = sender_short_name,
sender_domain = sender_realm_domain,
sender_id = sender_id,
type = display_type,
display_recipient = display_recipient,
recipient_id = recipient_id,
subject = subject,
timestamp = datetime_to_timestamp(pub_date),
gravatar_hash = gravatar_hash(sender_email), # Deprecated June 2013
avatar_url = avatar_url,
client = sending_client_name)
obj['subject_links'] = bugdown.subject_links(sender_realm_domain.lower(), subject)
if last_edit_time is not None:
obj['last_edit_timestamp'] = datetime_to_timestamp(last_edit_time)
obj['edit_history'] = ujson.loads(edit_history)
if apply_markdown:
if Message.need_to_render_content(rendered_content, rendered_content_version):
if message is None:
# We really shouldn't be rendering objects in this method, but there is
# a scenario where we upgrade the version of bugdown and fail to run
# management commands to re-render historical messages, and then we
# need to have side effects. This method is optimized to not need full
# blown ORM objects, but the bugdown renderer is unfortunately highly
# coupled to Message, and we also need to persist the new rendered content.
# If we don't have a message object passed in, we get one here. The cost
# of going to the DB here should be overshadowed by the cost of rendering
# and updating the row.
# TODO: see #1379 to eliminate bugdown dependencies
message = Message.objects.select_related().get(id=message_id)
# It's unfortunate that we need to have side effects on the message
# in some cases.
rendered_content = message.render_markdown(content, sender_realm_domain)
message.set_rendered_content(rendered_content, True)
if rendered_content is not None:
obj['content'] = rendered_content
else:
obj['content'] = u'<p>[Zulip note: Sorry, we could not understand the formatting of your message]</p>'
obj['content_type'] = 'text/html'
else:
obj['content'] = content
obj['content_type'] = 'text/x-markdown'
return obj
def to_log_dict(self):
# type: () -> Dict[str, Any]
return dict(
id = self.id,
sender_id = self.sender.id,
sender_email = self.sender.email,
sender_domain = self.sender.realm.domain,
sender_full_name = self.sender.full_name,
sender_short_name = self.sender.short_name,
sending_client = self.sending_client.name,
type = self.recipient.type_name(),
recipient = get_display_recipient(self.recipient),
subject = self.topic_name(),
content = self.content,
timestamp = datetime_to_timestamp(self.pub_date))
@staticmethod
def get_raw_db_rows(needed_ids):
# type: (List[int]) -> List[Dict[str, Any]]
# This is a special purpose function optimized for
# callers like get_old_messages_backend().
fields = [
'id',
'subject',
'pub_date',
'last_edit_time',
'edit_history',
'content',
'rendered_content',
'rendered_content_version',
'recipient_id',
'recipient__type',
'recipient__type_id',
'sender_id',
'sending_client__name',
'sender__email',
'sender__full_name',
'sender__short_name',
'sender__realm__id',
'sender__realm__domain',
'sender__avatar_source',
'sender__is_mirror_dummy',
]
return Message.objects.filter(id__in=needed_ids).values(*fields)
@classmethod
def remove_unreachable(cls):
# type: (Any) -> None
"""Remove all Messages that are not referred to by any UserMessage."""
cls.objects.exclude(id__in = UserMessage.objects.values('message_id')).delete()
def sent_by_human(self):
# type: () -> bool
sending_client = self.sending_client.name.lower()
return (sending_client in ('zulipandroid', 'zulipios', 'zulipdesktop',
'website', 'ios', 'android')) or \
('desktop app' in sending_client)
@staticmethod
def content_has_attachment(content):
# type: (text_type) -> Optional[Match]
return re.search(r'[/\-]user[\-_]uploads[/\.-]', content)
@staticmethod
def content_has_image(content):
# type: (text_type) -> bool
return bool(re.search(r'[/\-]user[\-_]uploads[/\.-]\S+\.(bmp|gif|jpg|jpeg|png|webp)', content, re.IGNORECASE))
@staticmethod
def content_has_link(content):
# type: (text_type) -> bool
return 'http://' in content or 'https://' in content or '/user_uploads' in content
@staticmethod
def is_status_message(content, rendered_content):
# type: (text_type, text_type) -> bool
"""
Returns True if content and rendered_content are from 'me_message'
"""
if content.startswith('/me ') and '\n' not in content:
if rendered_content.startswith('<p>') and rendered_content.endswith('</p>'):
return True
return False
def update_calculated_fields(self):
# type: () -> None
# TODO: rendered_content could also be considered a calculated field
content = self.content
self.has_attachment = bool(Message.content_has_attachment(content))
self.has_image = bool(Message.content_has_image(content))
self.has_link = bool(Message.content_has_link(content))
@receiver(pre_save, sender=Message)
def pre_save_message(sender, **kwargs):
# type: (Any, **Any) -> None
if kwargs['update_fields'] is None or "content" in kwargs['update_fields']:
message = kwargs['instance']
message.update_calculated_fields()
def get_context_for_message(message):
# type: (Message) -> Sequence[Message]
# TODO: Change return type to QuerySet[Message]
return Message.objects.filter(
recipient_id=message.recipient_id,
subject=message.subject,
id__lt=message.id,
pub_date__gt=message.pub_date - timedelta(minutes=15),
).order_by('-id')[:10]
def flush_message(sender, **kwargs):
# type: (Any, **Any) -> None
message = kwargs['instance']
cache_delete(to_dict_cache_key(message, False))
cache_delete(to_dict_cache_key(message, True))
post_save.connect(flush_message, sender=Message)
# Whenever a message is sent, for each user currently subscribed to the
# corresponding Recipient object, we add a row to the UserMessage
# table, which has columns (id, user profile id, message id,
# flags) indicating which messages each user has received. This table
# allows us to quickly query any user's last 1000 messages to generate
# the home view.
#
# Additionally, the flags field stores metadata like whether the user
# has read the message, starred it, collapsed it, was mentioned in
# it, etc.
#
# UserMessage is the largest table in a Zulip installation, even
# though each row is only 4 integers.
class UserMessage(ModelReprMixin, models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
message = models.ForeignKey(Message) # type: Message
# We're not using the archived field for now, but create it anyway
# since this table will be an unpleasant one to do schema changes
# on later
ALL_FLAGS = ['read', 'starred', 'collapsed', 'mentioned', 'wildcard_mentioned',
'summarize_in_home', 'summarize_in_stream', 'force_expand', 'force_collapse',
'has_alert_word', "historical", 'is_me_message']
flags = BitField(flags=ALL_FLAGS, default=0) # type: BitHandler
class Meta(object):
unique_together = ("user_profile", "message")
def __unicode__(self):
# type: () -> text_type
display_recipient = get_display_recipient(self.message.recipient)
return u"<UserMessage: %s / %s (%s)>" % (display_recipient, self.user_profile.email, self.flags_list())
def flags_list(self):
# type: () -> List[str]
return [flag for flag in self.flags.keys() if getattr(self.flags, flag).is_set]
def parse_usermessage_flags(val):
# type: (int) -> List[str]
flags = []
mask = 1
for flag in UserMessage.ALL_FLAGS:
if val & mask:
flags.append(flag)
mask <<= 1
return flags
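# Illustrative example (hypothetical flags value): parse_usermessage_flags
# interprets the integer as a bitmask in ALL_FLAGS order, so a raw value of
# 5 (0b101) sets bit 0 and bit 2:
#
#     parse_usermessage_flags(5)    # -> ['read', 'collapsed']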
class Attachment(ModelReprMixin, models.Model):
MAX_FILENAME_LENGTH = 100
file_name = models.CharField(max_length=MAX_FILENAME_LENGTH, db_index=True) # type: text_type
# path_id is a storage location agnostic representation of the path of the file.
# If the path of a file is http://localhost:9991/user_uploads/a/b/abc/temp_file.py
# then its path_id will be a/b/abc/temp_file.py.
path_id = models.TextField(db_index=True) # type: text_type
owner = models.ForeignKey(UserProfile) # type: UserProfile
realm = models.ForeignKey(Realm, blank=True, null=True) # type: Realm
is_realm_public = models.BooleanField(default=False) # type: bool
messages = models.ManyToManyField(Message) # type: Manager
create_time = models.DateTimeField(default=timezone.now, db_index=True) # type: datetime.datetime
def __unicode__(self):
# type: () -> text_type
return u"<Attachment: %s>" % (self.file_name,)
def is_claimed(self):
# type: () -> bool
return self.messages.count() > 0
def get_url(self):
# type: () -> text_type
return u"/user_uploads/%s" % (self.path_id)
def get_attachments_by_owner_id(uid):
# type: (int) -> Sequence[Attachment]
# TODO: Change return type to QuerySet[Attachment]
return Attachment.objects.filter(owner=uid).select_related('owner')
def get_owners_from_file_name(file_name):
# type: (str) -> Sequence[Attachment]
# TODO: Change return type to QuerySet[Attachment]
# The returned value will be a list of owners, since different users can
# upload the same file with the same filename.
return Attachment.objects.filter(file_name=file_name).select_related('owner')
def get_old_unclaimed_attachments(weeks_ago):
# type: (int) -> Sequence[Attachment]
# TODO: Change return type to QuerySet[Attachment]
delta_weeks_ago = timezone.now() - datetime.timedelta(weeks=weeks_ago)
old_attachments = Attachment.objects.filter(messages=None, create_time__lt=delta_weeks_ago)
return old_attachments
class Subscription(ModelReprMixin, models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
recipient = models.ForeignKey(Recipient) # type: Recipient
active = models.BooleanField(default=True) # type: bool
in_home_view = models.NullBooleanField(default=True) # type: Optional[bool]
DEFAULT_STREAM_COLOR = u"#c2c2c2"
color = models.CharField(max_length=10, default=DEFAULT_STREAM_COLOR) # type: text_type
pin_to_top = models.BooleanField(default=False) # type: bool
desktop_notifications = models.BooleanField(default=True) # type: bool
audible_notifications = models.BooleanField(default=True) # type: bool
# Combination desktop + audible notifications superseded by the
# above.
notifications = models.BooleanField(default=False) # type: bool
class Meta(object):
unique_together = ("user_profile", "recipient")
def __unicode__(self):
# type: () -> text_type
return u"<Subscription: %r -> %s>" % (self.user_profile, self.recipient)
@cache_with_key(user_profile_by_id_cache_key, timeout=3600*24*7)
def get_user_profile_by_id(uid):
# type: (int) -> UserProfile
return UserProfile.objects.select_related().get(id=uid)
@cache_with_key(user_profile_by_email_cache_key, timeout=3600*24*7)
def get_user_profile_by_email(email):
# type: (text_type) -> UserProfile
return UserProfile.objects.select_related().get(email__iexact=email.strip())
@cache_with_key(active_user_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_active_user_dicts_in_realm(realm):
# type: (Realm) -> List[Dict[str, Any]]
return UserProfile.objects.filter(realm=realm, is_active=True) \
.values(*active_user_dict_fields)
@cache_with_key(active_bot_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_active_bot_dicts_in_realm(realm):
# type: (Realm) -> List[Dict[str, Any]]
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=True) \
.values(*active_bot_dict_fields)
def get_owned_bot_dicts(user_profile, include_all_realm_bots_if_admin=True):
# type: (UserProfile, bool) -> List[Dict[str, Any]]
if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
result = get_active_bot_dicts_in_realm(user_profile.realm)
else:
result = UserProfile.objects.filter(realm=user_profile.realm, is_active=True, is_bot=True,
bot_owner=user_profile).values(*active_bot_dict_fields)
return [{'email': botdict['email'],
'full_name': botdict['full_name'],
'api_key': botdict['api_key'],
'default_sending_stream': botdict['default_sending_stream__name'],
'default_events_register_stream': botdict['default_events_register_stream__name'],
'default_all_public_streams': botdict['default_all_public_streams'],
'owner': botdict['bot_owner__email'],
'avatar_url': get_avatar_url(botdict['avatar_source'], botdict['email']),
}
for botdict in result]
def get_prereg_user_by_email(email):
# type: (text_type) -> PreregistrationUser
# A user can be invited many times, so only return the result of the latest
# invite.
return PreregistrationUser.objects.filter(email__iexact=email.strip()).latest("invited_at")
def get_cross_realm_users():
# type: () -> Set[text_type]
return set(settings.CROSS_REALM_BOT_EMAILS)
# The Huddle class represents a group of individuals who have had a
# Group Private Message conversation together. The actual membership
# of the Huddle is stored in the Subscription table just like with
# Streams, and a hash of that list is stored in the huddle_hash field
# below, to support efficiently mapping from a set of users to the
# corresponding Huddle object.
class Huddle(models.Model):
# TODO: We should consider whether using
# CommaSeparatedIntegerField would be better.
huddle_hash = models.CharField(max_length=40, db_index=True, unique=True) # type: text_type
def get_huddle_hash(id_list):
# type: (List[int]) -> text_type
id_list = sorted(set(id_list))
hash_key = ",".join(str(x) for x in id_list)
return make_safe_digest(hash_key)
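# Illustrative example (hypothetical user ids): get_huddle_hash de-duplicates
# and sorts the ids before hashing, so the same set of users always maps to
# the same huddle_hash:
#
#     get_huddle_hash([3, 1, 2])       # digest of "1,2,3"
#     get_huddle_hash([1, 2, 2, 3])    # same digest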
def huddle_hash_cache_key(huddle_hash):
# type: (text_type) -> text_type
return u"huddle_by_hash:%s" % (huddle_hash,)
def get_huddle(id_list):
# type: (List[int]) -> Huddle
huddle_hash = get_huddle_hash(id_list)
return get_huddle_backend(huddle_hash, id_list)
@cache_with_key(lambda huddle_hash, id_list: huddle_hash_cache_key(huddle_hash), timeout=3600*24*7)
def get_huddle_backend(huddle_hash, id_list):
# type: (text_type, List[int]) -> Huddle
(huddle, created) = Huddle.objects.get_or_create(huddle_hash=huddle_hash)
if created:
with transaction.atomic():
recipient = Recipient.objects.create(type_id=huddle.id,
type=Recipient.HUDDLE)
subs_to_create = [Subscription(recipient=recipient,
user_profile=get_user_profile_by_id(user_profile_id))
for user_profile_id in id_list]
Subscription.objects.bulk_create(subs_to_create)
return huddle
def get_realm(domain):
# type: (text_type) -> Optional[Realm]
if not domain:
return None
try:
return Realm.objects.get(domain__iexact=domain.strip())
except Realm.DoesNotExist:
return None
def clear_database():
# type: () -> None
pylibmc.Client(['127.0.0.1']).flush_all()
model = None # type: Any
for model in [Message, Stream, UserProfile, Recipient,
Realm, Subscription, Huddle, UserMessage, Client,
DefaultStream]:
model.objects.all().delete()
Session.objects.all().delete()
class UserActivity(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
client = models.ForeignKey(Client) # type: Client
query = models.CharField(max_length=50, db_index=True) # type: text_type
count = models.IntegerField() # type: int
last_visit = models.DateTimeField('last visit') # type: datetime.datetime
class Meta(object):
unique_together = ("user_profile", "client", "query")
class UserActivityInterval(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
start = models.DateTimeField('start time', db_index=True) # type: datetime.datetime
end = models.DateTimeField('end time', db_index=True) # type: datetime.datetime
class UserPresence(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
client = models.ForeignKey(Client) # type: Client
# Valid statuses
ACTIVE = 1
IDLE = 2
timestamp = models.DateTimeField('presence changed') # type: datetime.datetime
status = models.PositiveSmallIntegerField(default=ACTIVE) # type: int
@staticmethod
def status_to_string(status):
# type: (int) -> str
if status == UserPresence.ACTIVE:
return 'active'
elif status == UserPresence.IDLE:
return 'idle'
@staticmethod
def get_status_dict_by_realm(realm_id):
# type: (int) -> defaultdict[Any, Dict[Any, Any]]
user_statuses = defaultdict(dict) # type: defaultdict[Any, Dict[Any, Any]]
query = UserPresence.objects.filter(
user_profile__realm_id=realm_id,
user_profile__is_active=True,
user_profile__is_bot=False
).values(
'client__name',
'status',
'timestamp',
'user_profile__email',
'user_profile__id',
'user_profile__enable_offline_push_notifications',
'user_profile__is_mirror_dummy',
)
mobile_user_ids = [row['user'] for row in PushDeviceToken.objects.filter(
user__realm_id=realm_id,
user__is_active=True,
user__is_bot=False,
).distinct("user").values("user")]
for row in query:
info = UserPresence.to_presence_dict(
client_name=row['client__name'],
status=row['status'],
dt=row['timestamp'],
push_enabled=row['user_profile__enable_offline_push_notifications'],
has_push_devices=row['user_profile__id'] in mobile_user_ids,
is_mirror_dummy=row['user_profile__is_mirror_dummy'],
)
user_statuses[row['user_profile__email']][row['client__name']] = info
return user_statuses
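# Illustrative sketch of the returned structure (email, client name and
# timestamp are hypothetical): user_statuses is keyed by email, then by
# client name, e.g.
#
#     user_statuses['hamlet@example.com']['website'] ->
#         {'client': 'website', 'status': 'active',
#          'timestamp': 1472222222, 'pushable': False}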
@staticmethod
def to_presence_dict(client_name=None, status=None, dt=None, push_enabled=None,
has_push_devices=None, is_mirror_dummy=None):
# type: (Optional[text_type], Optional[int], Optional[datetime.datetime], Optional[bool], Optional[bool], Optional[bool]) -> Dict[str, Any]
presence_val = UserPresence.status_to_string(status)
timestamp = datetime_to_timestamp(dt)
return dict(
client=client_name,
status=presence_val,
timestamp=timestamp,
pushable=(push_enabled and has_push_devices),
)
def to_dict(self):
# type: () -> Dict[str, Any]
return UserPresence.to_presence_dict(
client_name=self.client.name,
status=self.status,
dt=self.timestamp
)
@staticmethod
def status_from_string(status):
# type: (NonBinaryStr) -> Optional[int]
if status == 'active':
status_val = UserPresence.ACTIVE
elif status == 'idle':
status_val = UserPresence.IDLE
else:
status_val = None
return status_val
class Meta(object):
unique_together = ("user_profile", "client")
class DefaultStream(models.Model):
realm = models.ForeignKey(Realm) # type: Realm
stream = models.ForeignKey(Stream) # type: Stream
class Meta(object):
unique_together = ("realm", "stream")
class Referral(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
email = models.EmailField(blank=False, null=False) # type: text_type
timestamp = models.DateTimeField(auto_now_add=True, null=False) # type: datetime.datetime
# This table only gets used on Zulip Voyager instances
# For reasons of deliverability (and sending from multiple email addresses),
# we will still send from mandrill when we send things from the (staging.)zulip.com install
class ScheduledJob(models.Model):
scheduled_timestamp = models.DateTimeField(auto_now_add=False, null=False) # type: datetime.datetime
type = models.PositiveSmallIntegerField() # type: int
# Valid types are {email}
# for EMAIL, filter_string is recipient_email
EMAIL = 1
# JSON representation of the job's data. Be careful, as we are not relying on Django to do validation
data = models.TextField() # type: text_type
# Kind of like a ForeignKey, but the target table is determined by type.
filter_id = models.IntegerField(null=True) # type: Optional[int]
filter_string = models.CharField(max_length=100) # type: text_type
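# Illustrative sketch (all values hypothetical): an email job scheduled two
# days from now might be constructed roughly as
#
#     ScheduledJob(type=ScheduledJob.EMAIL,
#                  filter_string='invitee@example.com',
#                  data=ujson.dumps({'template': 'invitation_reminder'}),
#                  scheduled_timestamp=timezone.now() + datetime.timedelta(days=2))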
|
ahmadassaf/zulip
|
zerver/models.py
|
Python
|
apache-2.0
| 65,012
|
[
"VisIt"
] |
df45b0f642a61e4be7f3db82c73358e05fd47b7c131c41a4fc48196578d2bc42
|
from __future__ import print_function
import numpy as np
import pdb
import warnings  # needed for the warnings.warn call in tune_proposal_covmatrix
from . import Utils
"""
This module contains definitions for basic proposal distribution
classes to be used by the MetropolisHastings sampler. Note that
these class definitions should contain their own pretune() methods,
rather than the MetropolisHastings class doing the pre-tuning (as
was previously the case).
"""
class mv_gaussian():
"""
Take a random draw from a multivariate normal distribution and
add the perturbations to the corresponding Stoch values. covmatrix
is an MxM covariance matrix where M is the number of Stochs, and
covcols is a list containing the labels of each Stoch in the order
corresponding to the covariance matrix columns.
"""
def __init__( self, covmatrix=None, covcols=[] ):
self.proposal_kwargs = { 'covmatrix':covmatrix, 'covcols':covcols }
def step( self, stochs, **kwargs ):
keys = stochs.keys()
npar = len( keys )
meanvec = np.zeros( npar )
steps = np.random.multivariate_normal( meanvec, kwargs['covmatrix'] )
for i in range( npar ):
key = kwargs['covcols'][i]
stochs[key].value += steps[i]
def pretune( self, mcmc, covcols=None, covinit=None, ntune_min=10000, ntune_max=30000, \
tune_interval=1000, nconsecutive=1, verbose=True, pretune_individ_step_sizes=False ):
npar = len( covcols )
if pretune_individ_step_sizes==True:
step_sizes_init = {}
for i in range( npar ):
step_sizes_init[covcols[i]] = np.sqrt( covinit[i,i] )
step_sizes_tuned = tune_diagonal_gaussian_step_sizes( mcmc, step_sizes_init, \
ntune_iterlim=ntune_max, \
tune_interval=tune_interval, \
rescale_all_together=False, \
verbose=verbose, nconsecutive=nconsecutive )
for i in range( npar ):
covinit[i,i] = step_sizes_tuned[covcols[i]]**2.
self.proposal_kwargs = { 'covmatrix':covinit, 'covcols':covcols }
covtuned = tune_proposal_covmatrix( mcmc, ntune_min=ntune_min, ntune_max=ntune_max, \
tune_interval=tune_interval, nconsecutive=nconsecutive, \
verbose=verbose )
self.proposal_kwargs = { 'covmatrix':covtuned, 'covcols':covcols }
class diagonal_gaussian():
"""
Add a random Gaussian perturbation to each Stoch value, where
step_sizes is a dictionary containing the widths of each
Gaussian perturbation.
"""
def __init__( self, step_sizes={} ):
self.proposal_kwargs = { 'step_sizes':step_sizes }
def step( self, stochs, **kwargs ):
keys = stochs.keys()
for key in keys:
stochs[key].value += Utils.gaussian_random_draw( mu=0.0, sigma=kwargs['step_sizes'][key] )
def pretune( self, mcmc, ntune_iterlim=0, tune_interval=None, verbose=False, nconsecutive=4 ):
keys = mcmc.model.free.keys()
if self.proposal_kwargs['step_sizes'] is None:
self.proposal_kwargs['step_sizes'] = {}
for key in keys:
self.proposal_kwargs['step_sizes'][key] = 1.
untuned_step_sizes = self.proposal_kwargs['step_sizes']
tuned_step_sizes = tune_diagonal_gaussian_step_sizes( mcmc, untuned_step_sizes, \
ntune_iterlim=ntune_iterlim, \
tune_interval=tune_interval, \
rescale_all_together=True, \
verbose=verbose, nconsecutive=nconsecutive )
self.proposal_kwargs['step_sizes'] = tuned_step_sizes
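# Hedged usage sketch (the sampler object `mcmc` and its sample() call are
# assumptions about the surrounding pyhm API, and the parameter names are
# made up): a proposal distribution is attached to a MetropolisHastings step
# method and optionally pre-tuned before sampling, e.g.
#
#     proposal = diagonal_gaussian( step_sizes={ 'a':0.1, 'b':0.05 } )
#     # proposal.pretune( mcmc, ntune_iterlim=20000, tune_interval=500 )
#     # mcmc.sample( nsteps=50000 )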
def tune_proposal_covmatrix( mcmc, ntune_min=0, ntune_max=0, tune_interval=500, nconsecutive=3, verbose=False ):
"""
Tunes a general multivariate normal proposal distribution.
TODO: I find that the rescale_factors seem fairly robust
to different problems/datasets/analyses. However, they're
not based on anything scientific, just guesses. So it
might be worth considering how to make it more principled
in the future...
"""
nsteps = 0
covcols = mcmc.step_method.proposal_distribution.proposal_kwargs['covcols']
npar = len( covcols )
chains = []
accfrac = 0
nsuccess = 0
rescale_factor = ( 2.4**2 )/float( npar )
mcmc.step_method.proposal_distribution.proposal_kwargs['covmatrix'] *= rescale_factor
while ( nsteps<ntune_min )+( ( nsteps<ntune_max )*( nsuccess<nconsecutive ) ):
Utils.mcmc_sampling( mcmc, nsteps=tune_interval, verbose=verbose )
mcmc._overwrite_existing_chains = False
cov1 = mcmc.step_method.proposal_distribution.proposal_kwargs['covmatrix']
nsteps += tune_interval
chains = []
for key in covcols:
chains += [ mcmc.chain[key][-tune_interval:] ]
chains = np.row_stack( chains )
cov2 = np.cov( chains )
covnew = 0.3*cov1 + 0.7*cov2
nacc = mcmc.chain['accepted'][-tune_interval:].sum()
accfrac = float( nacc )/float( tune_interval )
if ( accfrac>=0.2 )*( accfrac<=0.4 ):
nsuccess +=1
else:
nsuccess = 0
rescale_factor *= ( 1./0.25 )*np.min( [ 0.9, np.max( [ 0.1, float( accfrac ) ] ) ] )
covtuned = covnew*rescale_factor
mcmc.step_method.proposal_distribution.proposal_kwargs['covmatrix'] = covtuned
if verbose==True:
print( '\nAcceptance fraction = {0:.2f} (from last {1:.0f} steps)'.format( accfrac, tune_interval ) )
print( 'nconsecutive = {0:.0f} (require {1:.0f})'.format( nsuccess, nconsecutive ) )
print( 'Total tuning steps taken = {0:.0f}'.format( nsteps ) )
print( '(ntune_min={0:.0f}, ntune_max={1:.0f})\n'.format( ntune_min, ntune_max ) )
if nsteps>ntune_max:
warn_str = '\nAborting tuning - maximum step limit reached'
warnings.warn( warn_str )
elif verbose==True:
print( '\nTuning finished with step acceptance rate of {0:.2f}%'.format( accfrac*100 ) )
mcmc.step_method.proposal_distribution.proposal_kwargs['covmatrix'] = cov1
return covtuned
def tune_diagonal_gaussian_step_sizes( mcmc, step_sizes, ntune_iterlim=0, tune_interval=None, \
verbose=False, nconsecutive=4, rescale_all_together=True ):
"""
Tunes a Gaussian proposal distribution with a diagonal covariance matrix.
This is done by holding all parameters fixed except for one, and tuning
the latter. tune_interval steps are taken in the free parameter, the acceptance
rate is calculated, and the step size is rescaled appropriately. The free
parameter is then returned to its starting value, and the process is repeated.
This is continued until nconsecutive tune intervals in a row produce acceptance
rates in the range 20-40%. This process is repeated for each parameter in turn.
"""
unobs_stochs = mcmc.model.free
keys = unobs_stochs.keys()
m = ntune_iterlim
n = tune_interval
npars = len( keys )
# Make a record of the starting values for each parameter:
orig_stoch_values = {}
for key in keys:
orig_stoch_values[key] = unobs_stochs[key].value
# First of all, we will tune the relative step sizes for
# all of the parameters by taking steps one parameter at
# a time. Initialise the arrays that will record the results:
tuning_chains = {}
for key in keys:
tuning_chains[key] = {}
current_values = {}
for key in keys:
tuning_chains[key]['values'] = np.zeros( n, dtype=unobs_stochs[key].dtype )
tuning_chains[key]['logp'] = np.zeros( n, dtype=float )
tuning_chains[key]['accepted'] = np.zeros( n, dtype=int )
current_values[key] = unobs_stochs[key].value
# Define variables that track the total number of tuning
# steps that have been taken and the consecutive number of
# successful tune_intervals:
for j in range( npars ):
i = 0 # iteration counter
nsuccess = 0 # number of consecutive successes
key_j = keys[j]
# Proceed to perturb the current parameter only, carrying
# on until the iteration limit has been reached:
accfrac_j = 0
while i<m+1:
step_size_j = step_sizes[key_j]
# If there have been nconsecutive successful tune intervals
# in a row, break the loop:
if nsuccess>=nconsecutive:
step_sizes[key_j] *= 0.3
break
# If the iteration limit has been reached, return an error:
elif i==m:
err_str = 'Aborting tuning - exceeded {0} steps'.format( m )
err_str += '\n...consider reducing tune_interval'
raise StandardError( err_str )
# Otherwise, proceed with the tuning:
else:
k = i%n # iteration number within current tuning interval
i += 1
# If this is the first iteration in a new tuning interval,
# reset all parameters to their original values to avoid
# drifting into low likelihood regions of parameter space:
if k==0:
for key in keys:
unobs_stochs[key].value = orig_stoch_values[key]
current_logp = mcmc.logp()
# Take a step in the current parameter while holding the
# rest fixed:
step_size_j = step_sizes[key_j]
unobs_stochs[key_j].value += Utils.gaussian_random_draw( mu=0.0, sigma=step_size_j )
# Decide if the step is to be accepted:
new_logp = mcmc.logp()
tuning_chains[key_j]['accepted'][k] = mcmc.step_method.decide( current_logp, new_logp )
# Update the value of the associated stochastic object:
if ( tuning_chains[key_j]['accepted'][k]==True ):
current_logp = new_logp
current_values[key_j] = unobs_stochs[key_j].value
else:
unobs_stochs[key_j].value = current_values[key_j]
# Add the result to the chain:
tuning_chains[key_j]['values'][k] = current_values[key_j]
tuning_chains[key_j]['logp'][k] = current_logp
# If we have reached the end of the current tuning interval,
# adjust the step size of the current parameter based on the
# fraction of steps that were accepted:
if k==n-1:
naccepted_j = np.sum( tuning_chains[key_j]['accepted'] )
accfrac_j = naccepted_j/float( n )
if ( accfrac_j<=0.01 ):
step_sizes[key_j] /= 5.0
elif ( accfrac_j>0.01 )*( accfrac_j<=0.05 ):
step_sizes[key_j] /= 2.0
elif ( accfrac_j>0.05 )*( accfrac_j<=0.10 ):
step_sizes[key_j] /= 1.5
elif ( accfrac_j>0.10 )*( accfrac_j<=0.15 ):
step_sizes[key_j] /= 1.2
elif ( accfrac_j>0.15 )*( accfrac_j<0.2 ):
step_sizes[key_j] /= 1.1
elif ( accfrac_j>0.20 )*( accfrac_j<0.25 ):
step_sizes[key_j] /= 1.01
elif ( accfrac_j>0.35 )*( accfrac_j<=0.40 ):
step_sizes[key_j] *= 1.01
elif ( accfrac_j>0.40 )*( accfrac_j<=0.45 ):
step_sizes[key_j] *= 1.1
elif ( accfrac_j>0.45 )*( accfrac_j<=0.50 ):
step_sizes[key_j] *= 1.2
elif ( accfrac_j>0.50 )*( accfrac_j<=0.55 ):
step_sizes[key_j] *= 1.5
elif ( accfrac_j>0.55 )*( accfrac_j<=0.60 ):
step_sizes[key_j] *= 2.0
elif ( accfrac_j>0.60 ):
step_sizes[key_j] *= 5.0
# If the end of a tune interval has been reached, check
# if all the acceptance rates were in the required range:
if ( k==n-1 ):
if ( accfrac_j>=0.2 )*( accfrac_j<=0.40 ):
nsuccess += 1
else:
nsuccess = 0
if verbose==True:
print( '\nPre-tuning update for parameter {0} ({1} of {2}):'\
.format( key_j, j+1, npars ) )
print( 'Consecutive successes = {0}'.format( nsuccess ) )
print( 'Accepted fraction from last {0} steps = {1}'\
.format( n, accfrac_j ) )
print( '(require {0} consecutive intervals with acceptance rate 0.2-0.4)'\
.format( nconsecutive ) )
print( 'Median value of last {0} steps: median( {1} )={2} '\
.format( n, key_j, np.median( current_values[key_j] ) ) )
print( 'Starting value for comparison: {0}'.format( orig_stoch_values[key_j] ) )
print( 'Current stepsize: {0}'.format( step_sizes[key_j] ) )
# Having tuned the relative step sizes, now rescale them together
# to refine the joint step sizes if requested:
if rescale_all_together==True:
i = 0
nsuccess = 0
rescale_factor = 1.0/np.sqrt( npars )
tuning_chain = np.zeros( n, dtype=int )
if verbose==True:
print( '\n\nNow tuning the step sizes simultaneously...\n' )
while i<m+1:
# If there have been nconsecutive successful tune intervals
# in a row, break the loop:
if nsuccess>=nconsecutive:
break
# If the iteration limit has been reached, return an error:
elif i==m:
err_str = 'Aborting tuning - exceeded {0} steps'.format( m )
err_str += '\n...consider reducing tune_interval'
raise StandardError( err_str )
# Otherwise, proceed with the tuning:
else:
k = i%n # iteration number within current tuning interval
i += 1
# If this is the first iteration in a new tuning interval,
# reset all parameters to their original values to avoid
# drifting into low likelihood regions of parameter space:
if k==0:
for key in keys:
unobs_stochs[key].value = orig_stoch_values[key]
current_logp = mcmc.logp()
# Take a step in all of the parameters simultaneously:
for key in keys:
# If this is the first iteration in a new tuning interval,
# rescale the step sizes by a constant factor before
# taking the step:
if k==0:
step_sizes[key] *= rescale_factor
unobs_stochs[key].value += Utils.gaussian_random_draw( mu=0.0, sigma=step_sizes[key] )
# Decide if the step is to be accepted:
new_logp = mcmc.logp()
tuning_chain[k] = mcmc.step_method.decide( current_logp, new_logp )
if ( tuning_chain[k]==True ):
current_logp = new_logp
for key in keys:
current_values[key] = unobs_stochs[key].value
else:
for key in keys:
unobs_stochs[key].value = current_values[key]
# If we have reached the end of the current tuning interval,
# adjust the step size rescaling factor based on the fraction
# of steps that were accepted:
if k==n-1:
naccepted = np.sum( tuning_chain )
accfrac = naccepted/float( n )
if ( accfrac>=0.2 )*( accfrac<=0.4 ):
nsuccess += 1
rescale_factor = 1.0
else:
nsuccess = 0
if ( accfrac<=0.01 ):
rescale_factor = 1./1.6
elif ( accfrac>0.01 )*( accfrac<=0.05 ):
rescale_factor = 1./1.4
elif ( accfrac>0.05 )*( accfrac<=0.10 ):
rescale_factor = 1./1.2
elif ( accfrac>0.10 )*( accfrac<=0.15 ):
rescale_factor = 1./1.1
elif ( accfrac>0.15 )*( accfrac<0.2 ):
rescale_factor = 1./1.01
elif ( accfrac>0.35 )*( accfrac<=0.45 ):
rescale_factor = 1.01
elif ( accfrac>0.45 )*( accfrac<=0.50 ):
rescale_factor = 1.1
elif ( accfrac>0.50 )*( accfrac<=0.55 ):
rescale_factor = 1.2
elif ( accfrac>0.55 )*( accfrac<=0.60 ):
rescale_factor = 1.4
elif ( accfrac>0.60 ):
rescale_factor = 1.6
if verbose==True:
print( 'Consecutive successes = {0}'.format( nsuccess ) )
print( 'Accepted fraction from last {0} steps = {1}'\
.format( n, accfrac ) )
print( 'Finished tuning with acceptance rate of {0:.1f}%'.format( accfrac*100 ) )
for key in keys:
unobs_stochs[key].value = orig_stoch_values[key]
return step_sizes
|
tomevans/pyhm
|
pyhm/BuiltinProposals.py
|
Python
|
gpl-2.0
| 18,196
|
[
"Gaussian"
] |
bf6b52c8e06bcd29e1d01e53636dd065a8ba68d48a1e14f1411d58ef04f64e18
|
"""
This script tests the cisd module.
"""
import os
import numpy as np
import time
from frankenstein import scf, ci
from frankenstein.tools.lat_utils import get_mol
def test_cisd_ch4():
nocc = 5
basis = "sto-3g"
if __name__ == "__main__":
xyz = "geom/ch4.zmat"
else:
xyz = os.path.dirname(__file__) + "/geom/ch4.zmat"
h, V, e_nuc = get_mol(xyz, basis, True)
mf = scf.RHF(h=h, V=V, nocc=nocc, e_nuc=e_nuc)
mf.kernel(verbose="mute")
mci = ci.CISD(mf)
start = time.time()
mci.kernel()
end = time.time()
dt1 = end - start
print("SCF energy : % .10f" % (mf.e_tot))
print("CISD correlation energy : % .10f" % (mci.e_corr))
print("CISD total energy : % .10f" % (mci.e_tot))
# check with pyscf
try:
from pyscf import gto
from pyscf import scf as pscf
from pyscf import ci as pci
except ImportError:
raise RuntimeError("Failed to import gto, scf and "
"ci from pyscf.")
with open(xyz, "r") as f:
f.readline()
f.readline()
atom = ""
for line in f:
atom += line
mol = gto.M(atom=atom, basis=basis)
pmf = pscf.RHF(mol).run()
pmci = pci.CISD(pmf).run()
assert(np.allclose(mf.e_tot, pmf.e_tot))
assert(np.allclose(mci.e_corr, pmci.e_corr))
assert(np.allclose(mci.e_tot, pmci.e_tot))
# check rdm
mci.kernel(rdm_level=2, ret_ci=True)
e1 = 2. * np.sum(h*mci.rdm1)
e2 = np.einsum("pqrs,pqrs->", V, mci.rdm2, optimize=True)
etot = e1+e2+e_nuc
print("CISD 1e energy : % .10f" % e1)
print("CISD 2e energy : % .10f" % e2)
print("CISD total energy from rdm: % .10f" % etot)
assert(np.allclose(mci.e_tot, etot))
# re-run starting from the previously converged CI vector; should be faster
start = time.time()
mci.kernel(ci0=mci.ci)
end = time.time()
dt2 = end - start
print("dt1: % .3f sec" % dt1)
print("dt2: % .3f sec" % dt2)
assert(dt2 < dt1)
if __name__ == "__main__":
test_cisd_ch4()
|
hongzhouye/frankenstein
|
tests/cisd_test.py
|
Python
|
bsd-3-clause
| 2,057
|
[
"PySCF"
] |
c37ad5e59f404d08bdf964583ef47561589e6a454d2415971ae758a02d73a2c3
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Controller class for the BoosterCube ship.
When you run this file as a script it will spawn a BoosterCube and visit 5
different locations. These locations match those of the platforms created
by 'demo_platform', but the demo will run with other Azrael simulations as
well.
"""
import os
import sys
import time
import demolib
import numpy as np
from IPython import embed as ipshell
import pyazrael
import pyazrael.aztypes as aztypes
from pyazrael.aztypes import Template
def BoostercubeTemplate(scale=1.0):
"""
Return template for BoosterCube.
"""
# Get a Client instance.
client = pyazrael.AzraelClient()
# Load the model.
vert, uv, rgb = demolib.loadBoosterCubeBlender()
frag_cube = {'vert': vert, 'uv': uv, 'rgb': rgb, 'scale': scale,
'pos': (0, 0, 0), 'rot': (0, 0, 0, 1)}
del vert, uv, rgb
# Attach six boosters, two for every axis.
dir_x = np.array([1, 0, 0])
dir_y = np.array([0, 1, 0])
dir_z = np.array([0, 0, 1])
pos = (0, 0, 0)
B = aztypes.Booster
boosters = {
'b_x': B(pos, direction=(1, 0, 0), force=0),
'b_y': B(pos, direction=(0, 1, 0), force=0),
'b_z': B(pos, direction=(0, 0, 1), force=0)
}
del dir_x, dir_y, dir_z, pos, B
# Load sphere and colour it blue(ish). This is going to be the (super
# simple) "flame" that comes out of the (still invisible) boosters.
p = os.path.dirname(os.path.abspath(__file__))
fname = os.path.join(p, 'models', 'sphere', 'sphere.obj')
vert, uv, rgb = demolib.loadModel(fname)
rgb = np.tile([0, 0, 0.8], len(vert) // 3)
rgb += 0.2 * np.random.rand(len(rgb))
rgb = np.array(255 * rgb.clip(0, 1), np.uint8)
frag_flame = {'vert': vert, 'uv': [], 'rgb': rgb,
'pos': (0, 0, 0), 'rot': (0, 0, 0, 1)}
del p, fname, vert, uv, rgb
# Add the template to Azrael.
tID = 'spaceship'
cs = aztypes.CollShapeBox(scale, scale, scale)
cs = aztypes.CollShapeMeta('box', (0, 0, 0), (0, 0, 0, 1), cs)
body = demolib.getRigidBody(cshapes={'0': cs})
frags = {
'frag_1': demolib.getFragMetaRaw(**frag_cube),
'b_x': demolib.getFragMetaRaw(**frag_flame),
'b_y': demolib.getFragMetaRaw(**frag_flame),
'b_z': demolib.getFragMetaRaw(**frag_flame),
}
template = Template(tID, body, frags, boosters, {})
return template
class CtrlBoosterCube():
"""
Controller for BoosterCube.
This class merely wraps the Azrael client to provide a high(er) level
interface to spawn and control the BoosterCube space ship.
The `host` and `port` parameters specify the location of the Azrael API.
"""
def __init__(self, host, port=5555):
self.shipID = None
# Connect to Azrael.
self.client = pyazrael.AzraelClient(addr_clerk=host, port_clerk=port)
# Ping Azrael. This call will block if it cannot connect.
ret = self.client.ping()
if not ret.ok:
print('Could not connect to Azrael')
assert False
print('Connected to Azrael')
print('Adding template for this ship...', flush=True, end='')
template = BoostercubeTemplate(scale=1.0)
self.client.addTemplates([template])
print('done')
def __del__(self):
self.removeShip()
def removeShip(self):
if self.shipID is None:
return
self.client.removeObjects([self.shipID])
self.shipID = None
def spawn(self, pos=(0, 0, 0)):
"""
Spawn the ship at position `pos`.
"""
# Compile the parameters for spawning the ship and send it to Azrael.
ship_init = {
'templateID': 'spaceship',
'rbs': {
'imass': 0.1,
'position': pos,
'rotFactor': [0, 0, 0],
}
}
ret = self.client.spawn([ship_init])
# Verify the call succeeded and record the ID of our space ship.
assert ret.ok
self.shipID = ret.data[0]
print('Spawned spaceship', self.shipID)
def getPosition(self):
"""
Return the current position of the space ship.
:return: the current position, eg [1, 2.5, -3.1]
:rtype: NumPy array
"""
# Query the state variables of the ship.
ret = self.client.getObjectStates([self.shipID])
if not ret.ok:
print('Error getPosAndVel: ', ret.msg)
sys.exit(1)
# Extract the position value from the returned data structure.
try:
pos = ret.data[self.shipID]['rbs']['position']
return np.array(pos, np.float64)
except TypeError:
sys.exit(1)
def setPosition(self, pos):
"""
Place the ship at position `pos`.
"""
return self.setPositionAndVelocity(pos, (0, 0, 0))
def setPositionAndVelocity(self, pos, vel):
"""
Set the ship's position and velocity to ``pos`` and ``vel``.
:param vec3 pos: position (eg. [1, 2, 3])
:param vec3 vel: velocity (eg. [1, -2.5, -0.8])
"""
# Send the update request to Azrael and check for errors.
new = {self.shipID: {'position': pos, 'velocityLin': vel}}
ret = self.client.setRigidBodyData(new)
if not ret.ok:
print('Error setPosAndVel: ', ret.msg)
return ret
def setBoosterForce(self, force):
"""
Apply the constituent ``force`` components to the respective thruster.
The net effect is that the ship will start to accelerate in the
specified direction.
:param vec-3 force: force (in Newton) of each booster.
"""
# The ship has three boosters named 'b_x', 'b_y', and 'b_z'. The names
# are hard coded in the template which the `demo_boostercube` script
# generated (see doc string of this module). The thrusters are located
# at the centre of the cube, even though visually the cube has 6
# boosters, one on each surface. The net effect is the same for this
# simple demo, but it is a tad bit simpler to deal with.
#
# The following command specifies the amount of force to apply at each
# booster.
cmd_b = {
'b_x': aztypes.CmdBooster(force=force[0]),
'b_y': aztypes.CmdBooster(force=force[1]),
'b_z': aztypes.CmdBooster(force=force[2]),
}
# Send the command to Azrael and wait for the reply.
ret = self.client.controlParts(self.shipID, cmd_b, {})
if not ret.ok:
print('Error activating the boosters: ', ret.msg)
return ret
def setBoosterFlame(self, force):
"""
Modify the geometry of the ship to provide visual feedback about the
thrusters.
The "flames" are blue spheres placed next to the thruster and scaled
according to `force`. The flames are already part of the model; all we
have to do is place and scale them.
This method will not activate any forces; it will only modify the
visual appearance of the object to give visual feedback. The
`activateBooster` method will use this method in conjunction
with `setBoosterForce` to combine visual and physical effects of
booster activation.
"""
# Compute the size of the flame that comes out of the thruster, as well
# as the distance of the flame from the cube's centre.
flame_size = 0.1 * np.sqrt(np.abs(force))
flame_pos = -np.sign(force) * (1.3 + flame_size)
# The final position of the flame depends on which thruster was
# activated.
pos_x = np.array([flame_pos[0], 0, 0]).tolist()
pos_y = np.array([0, flame_pos[1], 0]).tolist()
pos_z = np.array([0, 0, flame_pos[2]]).tolist()
flame_size = flame_size.tolist()
# Compile the data for the updated state of the flame fragment.
cmd = {
'b_x': {
'op': 'mod',
'scale': flame_size[0],
'position': pos_x,
},
'b_y': {
'op': 'mod',
'scale': flame_size[1],
'position': pos_y,
},
'b_z': {
'op': 'mod',
'scale': flame_size[2],
'position': pos_z
}
}
cmd = {self.shipID: cmd}
ret = self.client.setFragments(cmd)
if not ret.ok:
print('Error setFragments: ', ret.msg)
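# Worked example (hypothetical force): for force=(100, 0, 0) the flame for
# booster 'b_x' gets scale 0.1*sqrt(100) = 1.0 and is placed at
# x = -sign(100)*(1.3 + 1.0) = -2.3, while 'b_y' and 'b_z' stay at scale 0.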
def activateBooster(self, force: (tuple, list, np.ndarray)):
"""
Wrapper around :func:`setBoosterForce` and :func:`setBoosterFlame` to combine
the physical and visual effects of booster activation.
See :func:`setBoosterForce` for the meaning of ``force``.
"""
# Sanity check: force must be a three element vector.
assert isinstance(force, (tuple, list, np.ndarray))
assert len(force) == 3
# Activate the physics for the boosters.
ret = self.setBoosterForce(force)
if not ret.ok:
return ret
# Update the geometry to give visual feedback about which booster is
# active and how strong.
self.setBoosterFlame(force)
def controller(self, pos_ref, dt, num_steps: int, verbose=False):
"""
Use a simple control algorithm to manoeuvre the ship to `pos_ref`.
The controller will execute ``num_steps`` force updates, one every `dt`
seconds.
"""
# Periodically query the position, compute the error relative to the
# desired reference position, and engage the thrusters accordingly.
time.sleep(dt)
pos_log = [self.getPosition()]
for ii in range(num_steps):
# Wait.
time.sleep(dt)
# Query current position and add it to the log.
p = self.getPosition()
pos_log.append(p)
# Compute the position error and its slope.
err_val = pos_ref - pos_log[ii + 1]
err_slope = (pos_log[ii + 1] - pos_log[ii]) / dt
# Determine the booster output with a Proportional-Differential
# Controller.
force = 10 * err_val - 8 * err_slope
# Engage the boosters with the newly computed force.
self.activateBooster(force=force)
return pos_log
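# Note on the control law above (the gains are the hard coded values from the
# code; the numeric example is hypothetical): the booster command is a
# proportional-differential term,
#
#     force = 10 * (pos_ref - pos) - 8 * d(pos)/dt
#
# so a 1 m position error with zero velocity yields a 10 N command along
# that axis.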
def main():
# Guess Azrael's IP address on the local computer.
host = demolib.azService['clerk'].ip
# Instantiate the controller for a BoosterCube ship. Then spawn it.
c = CtrlBoosterCube(host)
c.spawn((0, 5, 0))
# Successively manoeuvre the ship above each platform. The hard coded
# positions match those of the platforms defined in 'demo_platforms'.
time.sleep(5)
for ii in range(5):
pos_ref = (-10 + ii * 5, -ii * 2 + 2.5, -20)
c.controller(pos_ref, dt=0.1, num_steps=50, verbose=False)
del c
if __name__ == '__main__':
main()
|
olitheolix/azrael
|
demos/ship_boostercube.py
|
Python
|
agpl-3.0
| 11,803
|
[
"VisIt"
] |
e8999e2d052af76ba89b10beaafe6502c5c0984c6df194136c58814361983073
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import logging
import pickle
import numpy as np
import ray
from zoo.common.utils import enable_multi_fs_load, enable_multi_fs_save
from zoo.orca.data.ray_xshards import RayXShards
from zoo.orca.learn.dl_cluster import RayDLCluster
from zoo.orca.learn.tf2.tf_runner import TFRunner
from zoo.orca.learn.ray_estimator import Estimator as OrcaRayEstimator
from zoo.orca.learn.utils import maybe_dataframe_to_xshards, dataframe_to_xshards, \
convert_predict_xshards_to_dataframe, update_predict_xshards, \
process_xshards_of_pandas_dataframe
from zoo.orca.data.utils import process_spark_xshards
from zoo.ray import RayContext
logger = logging.getLogger(__name__)
class Estimator(object):
@staticmethod
def from_keras(*,
model_creator,
config=None,
verbose=False,
workers_per_node=1,
compile_args_creator=None,
backend="tf2",
cpu_binding=False,
):
"""
Create an Estimator for tensorflow 2.
:param model_creator: (dict -> Model) This function takes in the `config`
dict and returns a compiled TF model.
:param config: (dict) configuration passed to 'model_creator',
'data_creator'. Also contains `fit_config`, which is passed
into `model.fit(data, **fit_config)` and
`evaluate_config` which is passed into `model.evaluate`.
:param verbose: (bool) Prints output of one model if true.
:param workers_per_node: (Int) worker number on each node. default: 1.
:param compile_args_creator: (dict -> dict of loss, optimizer and metrics) Only used when
the backend="horovod". This function takes in the `config` dict and returns a
dictionary like {"optimizer": tf.keras.optimizers.SGD(lr), "loss":
"mean_squared_error", "metrics": ["mean_squared_error"]}
:param backend: (string) You can choose "horovod" or "tf2" as backend. Default: `tf2`.
:param cpu_binding: (bool) Whether to bind threads to specific CPUs. Default: False
"""
return TensorFlow2Estimator(model_creator=model_creator, config=config,
verbose=verbose, workers_per_node=workers_per_node,
backend=backend, compile_args_creator=compile_args_creator,
cpu_binding=cpu_binding)
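# Hedged usage sketch (the Keras model below is a placeholder and assumes
# `import tensorflow as tf`; it is not part of this module):
#
#     def model_creator(config):
#         model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(10,))])
#         model.compile(optimizer="sgd", loss="mse")
#         return model
#
#     est = Estimator.from_keras(model_creator=model_creator,
#                                workers_per_node=2, backend="tf2")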
def make_data_creator(refs):
def data_creator(config, batch_size):
return refs
return data_creator
def data_length(data):
x = data["x"]
if isinstance(x, np.ndarray):
return x.shape[0]
else:
return x[0].shape[0]
class TensorFlow2Estimator(OrcaRayEstimator):
def __init__(self,
model_creator,
compile_args_creator=None,
config=None,
verbose=False,
backend="tf2",
workers_per_node=1,
cpu_binding=False):
self.model_creator = model_creator
self.compile_args_creator = compile_args_creator
self.config = {} if config is None else config
self.verbose = verbose
ray_ctx = RayContext.get()
if "batch_size" in self.config:
raise Exception("Please do not specify batch_size in config. Input batch_size in the"
" fit/evaluate function of the estimator instead.")
if "inter_op_parallelism" not in self.config:
self.config["inter_op_parallelism"] = 1
if "intra_op_parallelism" not in self.config:
self.config["intra_op_parallelism"] = ray_ctx.ray_node_cpu_cores // workers_per_node
if backend == "horovod":
assert compile_args_creator is not None, "compile_args_creator should not be None," \
" when backend is set to horovod"
params = {
"model_creator": model_creator,
"compile_args_creator": compile_args_creator,
"config": self.config,
"verbose": self.verbose,
}
if backend == "tf2":
cores_per_node = ray_ctx.ray_node_cpu_cores // workers_per_node
num_nodes = ray_ctx.num_ray_nodes * workers_per_node
self.cluster = RayDLCluster(
num_workers=num_nodes,
worker_cores=cores_per_node,
worker_cls=TFRunner,
worker_param=params,
cpu_binding=cpu_binding
)
self.remote_workers = self.cluster.get_workers()
ips = ray.get(
[worker.get_node_ip.remote() for worker in self.remote_workers])
ports = ray.get(
[worker.find_free_port.remote() for worker in self.remote_workers])
urls = ["{ip}:{port}".format(ip=ips[i], port=ports[i])
for i in range(len(self.remote_workers))]
ray.get([worker.setup.remote() for worker in self.remote_workers])
# Get setup tasks in order to throw errors on failure
ray.get([
worker.setup_distributed.remote(urls, i, len(self.remote_workers))
for i, worker in enumerate(self.remote_workers)])
elif backend == "horovod":
# it is necessary to call self.run first to set horovod environment
from zoo.orca.learn.horovod.horovod_ray_runner import HorovodRayRunner
horovod_runner = HorovodRayRunner(ray_ctx,
worker_cls=TFRunner,
worker_param=params,
workers_per_node=workers_per_node)
horovod_runner.run(lambda: print("worker initialized"))
self.remote_workers = horovod_runner.remote_workers
ray.get([worker.setup.remote() for worker in self.remote_workers])
ray.get([
worker.setup_horovod.remote()
for i, worker in enumerate(self.remote_workers)])
else:
raise Exception("Only \"tf2\" and \"horovod\" are legal "
"values of backend, but got {}".format(backend))
self.num_workers = len(self.remote_workers)
def fit(self, data, epochs=1, batch_size=32, verbose=1,
callbacks=None, validation_data=None, class_weight=None,
steps_per_epoch=None, validation_steps=None, validation_freq=1,
data_config=None, feature_cols=None,
label_cols=None):
"""
Train this tensorflow model with train data.
:param data: train data. It can be XShards, Spark DataFrame or creator function which
returns Iter or DataLoader.
If data is XShards, each partition can be a Pandas DataFrame or a dictionary of
{'x': feature, 'y': label}, where feature(label) is a numpy array or a tuple of
numpy arrays.
:param epochs: Number of epochs to train the model. Default: 1.
:param batch_size: Batch size used for training. Default: 32.
:param verbose: Prints output of one model if true.
:param callbacks: List of Keras compatible callbacks to apply during training.
:param validation_data: validation data. Validation data type should be the same
as train data.
:param class_weight: Optional dictionary mapping class indices (integers) to a weight
(float) value, used for weighting the loss function. This can be useful to tell
the model to "pay more attention" to samples from an under-represented class.
:param steps_per_epoch: Total number of steps (batches of samples) before declaring one
epoch finished and starting the next epoch. If `steps_per_epoch` is `None`, the
epoch will run until the input dataset is exhausted. When passing an infinitely
repeating dataset, you must specify the `steps_per_epoch` argument.
:param validation_steps: Total number of steps (batches of samples) to draw before stopping
when performing validation at the end of every epoch. Default: None.
        :param validation_freq: Only relevant if validation data is provided. Integer or
`collections_abc.Container` instance (e.g. list, tuple, etc.). If an integer,
specifies how many training epochs to run before a new validation run is performed,
e.g. `validation_freq=2` runs validation every 2 epochs. If a Container, specifies
the epochs on which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
validation at the end of the 1st, 2nd, and 10th epochs.
:param data_config: An optional dictionary that can be passed to data creator function.
:param feature_cols: Feature column name(s) of data. Only used when data is a Spark
DataFrame or an XShards of Pandas DataFrame. Default: None.
:param label_cols: Label column name(s) of data. Only used when data is a Spark DataFrame or
an XShards of Pandas DataFrame.
Default: None.
        :return: A dictionary of training statistics, taken from the first worker.
"""
params = dict(
epochs=epochs,
batch_size=batch_size,
verbose=verbose,
callbacks=callbacks,
class_weight=class_weight,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
validation_freq=validation_freq,
data_config=data_config
)
from zoo.orca.data import SparkXShards
data, validation_data = maybe_dataframe_to_xshards(data, validation_data,
feature_cols, label_cols,
mode="fit",
num_workers=self.num_workers,
accept_str_col=True)
if isinstance(data, SparkXShards):
if data._get_class_name() == 'pandas.core.frame.DataFrame':
data, validation_data = process_xshards_of_pandas_dataframe(data, feature_cols,
label_cols,
validation_data, "fit")
ray_xshards = process_spark_xshards(data, self.num_workers)
if validation_data is None:
def transform_func(worker, partition_refs):
params["data_creator"] = make_data_creator(partition_refs)
return worker.step.remote(**params)
worker_stats = ray_xshards.reduce_partitions_for_actors(self.remote_workers,
transform_func)
else:
val_ray_xshards = process_spark_xshards(validation_data, self.num_workers)
def zip_func(worker, this_partition_refs, that_partition_refs):
params["data_creator"] = make_data_creator(this_partition_refs)
params["validation_data_creator"] = \
make_data_creator(that_partition_refs)
return worker.step.remote(**params)
worker_stats = ray_xshards.zip_reduce_shards_with_actors(val_ray_xshards,
self.remote_workers,
zip_func)
else:
params["data_creator"] = data
params["validation_data_creator"] = validation_data
params_list = [params] * self.num_workers
worker_stats = ray.get([self.remote_workers[i].step.remote(**params_list[i])
for i in range(self.num_workers)])
worker_stats = list(itertools.chain.from_iterable(worker_stats))
stats = worker_stats[0].copy()
return stats
def evaluate(self, data, batch_size=32, num_steps=None, verbose=1,
sample_weight=None, callbacks=None, data_config=None,
feature_cols=None, label_cols=None):
"""
Evaluates the model on the validation data set.
:param data: evaluate data. It can be XShards, Spark DataFrame or creator function which
returns Iter or DataLoader.
If data is XShards, each partition can be a Pandas DataFrame or a dictionary of
{'x': feature, 'y': label}, where feature(label) is a numpy array or a tuple of
numpy arrays.
:param batch_size: Batch size used for evaluation. Default: 32.
:param num_steps: Total number of steps (batches of samples) before declaring the evaluation
round finished. Ignored with the default value of `None`.
        :param verbose: Whether to print progress output from one of the workers. Default: 1.
:param sample_weight: Optional Numpy array of weights for the training samples, used for
weighting the loss function. You can either pass a flat (1D) Numpy array with the
same length as the input samples (1:1 mapping between weights and samples), or in
the case of temporal data, you can pass a 2D array with shape (samples,
sequence_length), to apply a different weight to every timestep of every sample.
:param callbacks: List of Keras compatible callbacks to apply during evaluation.
:param data_config: An optional dictionary that can be passed to data creator function.
:param feature_cols: Feature column name(s) of data. Only used when data is a Spark
DataFrame or an XShards of Pandas DataFrame. Default: None.
:param label_cols: Label column name(s) of data. Only used when data is a Spark DataFrame or
an XShards of Pandas DataFrame.
Default: None.
:return: validation result
"""
logger.info("Starting validation step.")
params = dict(
batch_size=batch_size,
verbose=verbose,
sample_weight=sample_weight,
steps=num_steps,
callbacks=callbacks,
data_config=data_config,
)
from zoo.orca.data import SparkXShards
data, _ = maybe_dataframe_to_xshards(data,
validation_data=None,
feature_cols=feature_cols,
label_cols=label_cols,
mode="evaluate",
num_workers=self.num_workers,
accept_str_col=True)
if isinstance(data, SparkXShards):
if data._get_class_name() == 'pandas.core.frame.DataFrame':
data = process_xshards_of_pandas_dataframe(data, feature_cols, label_cols)
if data.num_partitions() != self.num_workers:
data = data.repartition(self.num_workers)
ray_xshards = RayXShards.from_spark_xshards(data)
def transform_func(worker, partition_refs):
params["data_creator"] = make_data_creator(partition_refs)
return worker.validate.remote(**params)
worker_stats = ray_xshards.reduce_partitions_for_actors(self.remote_workers,
transform_func)
else: # data_creator functions; should return Iter or DataLoader
params["data_creator"] = data
params_list = [params] * self.num_workers
worker_stats = ray.get([w.validate.remote(**params_list[i])
for i, w in enumerate(self.remote_workers)])
worker_stats = list(itertools.chain.from_iterable(worker_stats))
stats = worker_stats[0].copy()
return stats
def _predict_spark_xshards(self, xshards, params):
ray_xshards = RayXShards.from_spark_xshards(xshards)
def transform_func(worker, shards_ref):
params["data_creator"] = make_data_creator(shards_ref)
return worker.predict.remote(**params)
pred_shards = ray_xshards.transform_shards_with_actors(self.remote_workers,
transform_func)
spark_xshards = pred_shards.to_spark_xshards()
return spark_xshards
def predict(self, data, batch_size=None, verbose=1,
steps=None, callbacks=None, data_config=None,
feature_cols=None):
"""
Predict the input data
:param data: predict input data. It can be XShards or Spark DataFrame.
If data is XShards, each partition can be a Pandas DataFrame or a dictionary of
{'x': feature}, where feature is a numpy array or a tuple of numpy arrays.
:param batch_size: Batch size used for inference. Default: None.
        :param verbose: Whether to print progress output from one of the workers. Default: 1.
:param steps: Total number of steps (batches of samples) before declaring the prediction
round finished. Ignored with the default value of None.
:param callbacks: List of Keras compatible callbacks to apply during prediction.
:param data_config: An optional dictionary that can be passed to data creator function.
:param feature_cols: Feature column name(s) of data. Only used when data is a Spark
DataFrame or an XShards of Pandas DataFrame. Default: None.
        :return: The prediction results, as a Spark DataFrame or XShards matching the type of the input data.
"""
logger.info("Starting predict step.")
params = dict(
verbose=verbose,
batch_size=batch_size,
steps=steps,
callbacks=callbacks,
data_config=data_config,
)
from zoo.orca.data import SparkXShards
from pyspark.sql import DataFrame
if isinstance(data, DataFrame):
xshards, _ = dataframe_to_xshards(data,
validation_data=None,
feature_cols=feature_cols,
label_cols=None,
mode="predict",
accept_str_col=True)
pred_shards = self._predict_spark_xshards(xshards, params)
result = convert_predict_xshards_to_dataframe(data, pred_shards)
elif isinstance(data, SparkXShards):
if data._get_class_name() == 'pandas.core.frame.DataFrame':
data = process_xshards_of_pandas_dataframe(data, feature_cols)
pred_shards = self._predict_spark_xshards(data, params)
result = update_predict_xshards(data, pred_shards)
else:
raise ValueError("Only xshards or Spark DataFrame is supported for predict")
return result
def get_model(self):
"""
Returns the learned model.
:return: the learned model.
"""
state_refs = [w.get_state.remote() for w in self.remote_workers]
state = ray.get(state_refs[0])
return self._get_model_from_state(state)
@enable_multi_fs_save
def save(self, checkpoint):
"""
Saves the model at the provided checkpoint.
:param checkpoint: (str) Path to the target checkpoint file.
"""
# Some model might need to aggregate variables during checkpointing
# which requires both the chief and workers to participate in the
# allreduce communication protocol.
# So we need to call get_state on every remote workers, otherwise
# it might get stuck
state_refs = [w.get_state.remote() for w in self.remote_workers]
state = ray.get(state_refs[0])
with open(checkpoint, "wb") as f:
pickle.dump(state, f)
return checkpoint
@enable_multi_fs_load
def load(self, checkpoint, **kwargs):
"""
Loads the model from the provided checkpoint.
:param checkpoint: (str) Path to target checkpoint file.
"""
with open(checkpoint, "rb") as f:
state = pickle.load(f)
state_id = ray.put(state)
ray.get([worker.set_state.remote(state_id) for worker in self.remote_workers])
def shutdown(self):
"""
Shuts down workers and releases resources.
"""
for worker in self.remote_workers:
worker.shutdown.remote()
worker.__ray_terminate__.remote()
def _get_model_from_state(self, state):
"""Creates model and load weights from state"""
# keep the same behavior as `set_state` in `load` do
model = self.model_creator(self.config)
model.set_weights(state["weights"])
return model
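# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It shows how the fit/evaluate/save/shutdown flow defined above is
# typically driven end to end. The entry points `init_orca_context`,
# `stop_orca_context` and `Estimator.from_keras`, as well as the data-creator
# signature `(config, batch_size)`, are assumptions about the public Orca API
# rather than a verified reference.
def _example_tf2_estimator_usage():
    import numpy as np
    import tensorflow as tf
    from zoo.orca import init_orca_context, stop_orca_context
    from zoo.orca.learn.tf2 import Estimator  # assumed public entry point

    def model_creator(config):
        # A tiny compiled Keras model; any compiled model would do here.
        model = tf.keras.Sequential([
            tf.keras.layers.Dense(16, activation="relu", input_shape=(4,)),
            tf.keras.layers.Dense(1)])
        model.compile(optimizer="adam", loss="mse")
        return model

    def train_data_creator(config, batch_size):
        # Synthetic data; in practice this would read from storage.
        x = np.random.rand(256, 4).astype("float32")
        y = np.random.rand(256, 1).astype("float32")
        return tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)

    init_orca_context(cluster_mode="local", cores=4)
    est = Estimator.from_keras(model_creator=model_creator,
                               workers_per_node=2, backend="tf2")
    est.fit(data=train_data_creator, epochs=2, batch_size=32)
    stats = est.evaluate(data=train_data_creator, batch_size=32)
    est.save("/tmp/tf2_estimator_ckpt")
    est.shutdown()
    stop_orca_context()
    return stats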
| intel-analytics/analytics-zoo | pyzoo/zoo/orca/learn/tf2/estimator.py | Python | apache-2.0 | 21,813 | ["ORCA"] | fceaef5c382112586489e03685a9d3df4e802e6e743afbc407818c3d1d08e018 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==========================
ELM Classifiers Comparison
==========================
A comparison of several ELMClassifiers with different types of hidden
layer activations.
ELMClassifier is a classifier based on the Extreme Learning Machine,
a single layer feedforward network with random hidden layer components
and least squares fitting of the hidden->output weights by default [1][2].
The point of this example is to illustrate the nature of decision boundaries
with different hidden layer activation types and regressors.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
In particular in high dimensional spaces data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
References
----------
.. [1] http://www.extreme-learning-machines.org
.. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning Machine:
Theory and Applications", Neurocomputing, vol. 70, pp. 489-501,
2006.
===============================================================================
Basis Functions:
gaussian rbf : exp(-gamma * (||x-c||/r)^2)
tanh : np.tanh(a)
sinsq : np.power(np.sin(a), 2.0)
tribas : np.clip(1.0 - np.fabs(a), 0.0, 1.0)
hardlim : np.array(a > 0.0, dtype=float)
where x : input pattern
a : dot_product(x, c) + b
c,r : randomly generated components
Label Legend:
ELM(10,tanh) :10 tanh units
ELM(10,tanh,LR) :10 tanh units, LogisticRegression
ELM(10,sinsq) :10 sin*sin units
ELM(10,tribas) :10 tribas units
ELM(10,hardlim) :10 hardlim units
ELM(20,rbf(0.1)) :20 rbf units gamma=0.1
"""
print(__doc__)
# Code source: Gael Varoquaux
# Andreas Mueller
# Modified for Documentation merge by Jaques Grobler
# Modified for Extreme Learning Machine Classifiers by David Lambert
# License: BSD
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.preprocessing import StandardScaler
try:
    from sklearn.model_selection import train_test_split
except ImportError:  # scikit-learn < 0.18
    from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LogisticRegression
from elm import GenELMClassifier
from random_layer import RBFRandomLayer, MLPRandomLayer
def get_data_bounds(X):
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return (x_min, x_max, y_min, y_max, xx, yy)
def plot_data(ax, X_train, y_train, X_test, y_test, xx, yy):
cm = ListedColormap(['#FF0000', '#0000FF'])
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
def plot_contour(ax, X_train, y_train, X_test, y_test, xx, yy, Z):
cm = pl.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
    # `name` and `score` are module-level globals set in the loop over
    # classifiers at the bottom of this script.
    ax.set_title(name)
    ax.text(xx.max() - 0.3, yy.min() + 0.3, ('%.2f' % score).lstrip('0'),
            size=13, horizontalalignment='right')
def make_datasets():
return [make_moons(n_samples=200, noise=0.3, random_state=0),
make_circles(n_samples=200, noise=0.2, factor=0.5, random_state=1),
make_linearly_separable()]
def make_classifiers():
names = ["ELM(10,tanh)", "ELM(10,tanh,LR)", "ELM(10,sinsq)",
"ELM(10,tribas)", "ELM(hardlim)", "ELM(20,rbf(0.1))"]
nh = 10
# pass user defined transfer func
sinsq = (lambda x: np.power(np.sin(x), 2.0))
srhl_sinsq = MLPRandomLayer(n_hidden=nh, activation_func=sinsq)
# use internal transfer funcs
srhl_tanh = MLPRandomLayer(n_hidden=nh, activation_func='tanh')
srhl_tribas = MLPRandomLayer(n_hidden=nh, activation_func='tribas')
srhl_hardlim = MLPRandomLayer(n_hidden=nh, activation_func='hardlim')
# use gaussian RBF
srhl_rbf = RBFRandomLayer(n_hidden=nh*2, rbf_width=0.1, random_state=0)
log_reg = LogisticRegression()
classifiers = [GenELMClassifier(hidden_layer=srhl_tanh),
GenELMClassifier(hidden_layer=srhl_tanh, regressor=log_reg),
GenELMClassifier(hidden_layer=srhl_sinsq),
GenELMClassifier(hidden_layer=srhl_tribas),
GenELMClassifier(hidden_layer=srhl_hardlim),
GenELMClassifier(hidden_layer=srhl_rbf)]
return names, classifiers
def make_linearly_separable():
X, y = make_classification(n_samples=200, n_features=2, n_redundant=0,
n_informative=2, random_state=1,
n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
return (X, y)
###############################################################################
datasets = make_datasets()
names, classifiers = make_classifiers()
i = 1
figure = pl.figure(figsize=(18, 9))
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4,
random_state=0)
x_min, x_max, y_min, y_max, xx, yy = get_data_bounds(X)
# plot dataset first
ax = pl.subplot(len(datasets), len(classifiers) + 1, i)
plot_data(ax, X_train, y_train, X_test, y_test, xx, yy)
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = pl.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plot_contour(ax, X_train, y_train, X_test, y_test, xx, yy, Z)
i += 1
figure.subplots_adjust(left=.02, right=.98)
pl.show()
| chrinide/PyFV | pyfv/elm/plot_elm_comparison.py | Python | gpl-2.0 | 7,043 | ["Gaussian"] | 6d6629cc1ad1d1a8f0bb3290169eb841836422f2f3fe03c687ade309fe07d3fe |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple statistical algorithms that are
straightforwardly implemented as a single python function (or family of
functions).
This module should generally not be used directly. Everything in
`__all__` is imported into `astropy.stats`, and hence that package
should be used for access.
"""
import math
import numpy as np
import astropy.units as u
from . import _stats
__all__ = ['gaussian_fwhm_to_sigma', 'gaussian_sigma_to_fwhm',
'binom_conf_interval', 'binned_binom_proportion',
'poisson_conf_interval', 'median_absolute_deviation', 'mad_std',
'signal_to_noise_oir_ccd', 'bootstrap', 'kuiper', 'kuiper_two',
'kuiper_false_positive_probability', 'cdf_from_intervals',
'interval_overlap_length', 'histogram_intervals', 'fold_intervals']
__doctest_skip__ = ['binned_binom_proportion']
__doctest_requires__ = {'binom_conf_interval': ['scipy'],
'poisson_conf_interval': ['scipy']}
gaussian_sigma_to_fwhm = 2.0 * math.sqrt(2.0 * math.log(2.0))
"""
Factor with which to multiply Gaussian 1-sigma standard deviation to
convert it to full width at half maximum (FWHM).
"""
gaussian_fwhm_to_sigma = 1. / gaussian_sigma_to_fwhm
"""
Factor with which to multiply Gaussian full width at half maximum (FWHM)
to convert it to 1-sigma standard deviation.
"""
# NUMPY_LT_1_18
def _expand_dims(data, axis):
"""
Expand the shape of an array.
Insert a new axis that will appear at the `axis` position in the
expanded array shape.
This function allows for tuple axis arguments.
``numpy.expand_dims`` currently does not allow that, but it will in
numpy v1.18 (https://github.com/numpy/numpy/pull/14051).
``_expand_dims`` can be replaced with ``numpy.expand_dims`` when the
    minimum supported numpy version is v1.18.
Parameters
----------
data : array-like
Input array.
axis : int or tuple of int
Position in the expanded axes where the new axis (or axes) is
        placed. A tuple of axes is supported. Out-of-range axes are
        forbidden and raise an `AxisError`.
Returns
-------
result : ndarray
View of ``data`` with the number of dimensions increased.
"""
if isinstance(data, np.matrix):
data = np.asarray(data)
else:
data = np.asanyarray(data)
if not isinstance(axis, (tuple, list)):
axis = (axis,)
out_ndim = len(axis) + data.ndim
axis = np.core.numeric.normalize_axis_tuple(axis, out_ndim)
shape_it = iter(data.shape)
shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
return data.reshape(shape)
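# Small illustration (added, not part of the original module): _expand_dims
# accepts a tuple of axes, which is the behavior this backport provides ahead
# of numpy 1.18. For a (2, 3) input and axis=(0, 2), new length-1 axes appear
# at positions 0 and 2 of the output, giving shape (1, 2, 1, 3).
def _example_expand_dims():
    arr = np.arange(6).reshape(2, 3)
    return _expand_dims(arr, axis=(0, 2)).shape   # -> (1, 2, 1, 3)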
def binom_conf_interval(k, n, confidence_level=0.68269, interval='wilson'):
r"""Binomial proportion confidence interval given k successes,
n trials.
Parameters
----------
k : int or numpy.ndarray
Number of successes (0 <= ``k`` <= ``n``).
n : int or numpy.ndarray
Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays,
they must have the same shape.
confidence_level : float, optional
Desired probability content of interval. Default is 0.68269,
corresponding to 1 sigma in a 1-dimensional Gaussian distribution.
Confidence level must be in range [0, 1].
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used for confidence interval. See notes for details. The
``'wilson'`` and ``'jeffreys'`` intervals generally give similar
results, while 'flat' is somewhat different, especially for small
values of ``n``. ``'wilson'`` should be somewhat faster than
``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not
recommended. It is provided for comparison purposes. Default is
``'wilson'``.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``k``, ``n``.
Notes
-----
In situations where a probability of success is not known, it can
be estimated from a number of trials (n) and number of
observed successes (k). For example, this is done in Monte
Carlo experiments designed to estimate a detection efficiency. It
is simple to take the sample proportion of successes (k/n)
as a reasonable best estimate of the true probability
:math:`\epsilon`. However, deriving an accurate confidence
interval on :math:`\epsilon` is non-trivial. There are several
formulas for this interval (see [1]_). Four intervals are implemented
here:
**1. The Wilson Interval.** This interval, attributed to Wilson [2]_,
is given by
.. math::
        CI_{\rm Wilson} = \frac{k + \kappa^2/2}{n + \kappa^2}
        \pm \frac{\kappa n^{1/2}}{n + \kappa^2}
        \left(\hat{\epsilon}(1 - \hat{\epsilon}) + \kappa^2/(4n)\right)^{1/2}
where :math:`\hat{\epsilon} = k / n` and :math:`\kappa` is the
number of standard deviations corresponding to the desired
confidence interval for a *normal* distribution (for example,
1.0 for a confidence interval of 68.269%). For a
confidence interval of 100(1 - :math:`\alpha`)%,
.. math::
\kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha).
**2. The Jeffreys Interval.** This interval is derived by applying
Bayes' theorem to the binomial distribution with the
noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys
prior is the Beta distribution, Beta(1/2, 1/2), which has the density
function
.. math::
f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}.
The justification for this prior is that it is invariant under
reparameterizations of the binomial proportion.
The posterior density function is also a Beta distribution: Beta(k
+ 1/2, n - k + 1/2). The interval is then chosen so that it is
*equal-tailed*: Each tail (outside the interval) contains
:math:`\alpha`/2 of the posterior probability, and the interval
itself contains 1 - :math:`\alpha`. This interval must be
calculated numerically. Additionally, when k = 0 the lower limit
is set to 0 and when k = n the upper limit is set to 1, so that in
these cases, there is only one tail containing :math:`\alpha`/2
and the interval itself contains 1 - :math:`\alpha`/2 rather than
the nominal 1 - :math:`\alpha`.
**3. A Flat prior.** This is similar to the Jeffreys interval,
but uses a flat (uniform) prior on the binomial proportion
over the range 0 to 1 rather than the reparametrization-invariant
Jeffreys prior. The posterior density function is a Beta distribution:
Beta(k + 1, n - k + 1). The same comments about the nature of the
interval (equal-tailed, etc.) also apply to this option.
**4. The Wald Interval.** This interval is given by
.. math::
CI_{\rm Wald} = \hat{\epsilon} \pm
\kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{n}}
    The Wald interval gives acceptable results in some limiting
    cases, particularly when n is very large and the true proportion
    :math:`\epsilon` is not "too close" to 0 or 1. However, as the
    latter is not verifiable when trying to estimate :math:`\epsilon`,
    this is not very helpful. Its use is not recommended, but it is
provided here for comparison purposes due to its prevalence in
everyday practical statistics.
This function requires ``scipy`` for all interval types.
References
----------
.. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001).
"Interval Estimation for a Binomial Proportion". Statistical
Science 16 (2): 101-133. doi:10.1214/ss/1009213286
.. [2] Wilson, E. B. (1927). "Probable inference, the law of
succession, and statistical inference". Journal of the American
Statistical Association 22: 209-212.
.. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior
Probability in Estimation Problems". Proc. R. Soc. Lond.. A 24 186
(1007): 453-461. doi:10.1098/rspa.1946.0056
.. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford
University Press, 3rd edition. ISBN 978-0198503682
Examples
--------
Integer inputs return an array with shape (2,):
>>> binom_conf_interval(4, 5, interval='wilson') # doctest: +FLOAT_CMP
array([0.57921724, 0.92078259])
Arrays of arbitrary dimension are supported. The Wilson and Jeffreys
intervals give similar results, even for small k, n:
>>> binom_conf_interval([1, 2], 5, interval='wilson') # doctest: +FLOAT_CMP
array([[0.07921741, 0.21597328],
[0.42078276, 0.61736012]])
>>> binom_conf_interval([1, 2,], 5, interval='jeffreys') # doctest: +FLOAT_CMP
array([[0.0842525 , 0.21789949],
[0.42218001, 0.61753691]])
>>> binom_conf_interval([1, 2], 5, interval='flat') # doctest: +FLOAT_CMP
array([[0.12139799, 0.24309021],
[0.45401727, 0.61535699]])
In contrast, the Wald interval gives poor results for small k, n.
For k = 0 or k = n, the interval always has zero length.
>>> binom_conf_interval([1, 2], 5, interval='wald') # doctest: +FLOAT_CMP
array([[0.02111437, 0.18091075],
[0.37888563, 0.61908925]])
For confidence intervals approaching 1, the Wald interval for
0 < k < n can give intervals that extend outside [0, 1]:
>>> binom_conf_interval([1, 2], 5, interval='wald', confidence_level=0.99) # doctest: +FLOAT_CMP
array([[-0.26077835, -0.16433593],
[ 0.66077835, 0.96433593]])
""" # noqa
if confidence_level < 0. or confidence_level > 1.:
raise ValueError('confidence_level must be between 0. and 1.')
alpha = 1. - confidence_level
k = np.asarray(k).astype(int)
n = np.asarray(n).astype(int)
if (n <= 0).any():
raise ValueError('n must be positive')
if (k < 0).any() or (k > n).any():
raise ValueError('k must be in {0, 1, .., n}')
if interval == 'wilson' or interval == 'wald':
from scipy.special import erfinv
kappa = np.sqrt(2.) * min(erfinv(confidence_level), 1.e10) # Avoid overflows.
k = k.astype(float)
n = n.astype(float)
p = k / n
if interval == 'wilson':
midpoint = (k + kappa ** 2 / 2.) / (n + kappa ** 2)
halflength = (kappa * np.sqrt(n)) / (n + kappa ** 2) * \
np.sqrt(p * (1 - p) + kappa ** 2 / (4 * n))
conf_interval = np.array([midpoint - halflength,
midpoint + halflength])
# Correct intervals out of range due to floating point errors.
conf_interval[conf_interval < 0.] = 0.
conf_interval[conf_interval > 1.] = 1.
else:
midpoint = p
halflength = kappa * np.sqrt(p * (1. - p) / n)
conf_interval = np.array([midpoint - halflength,
midpoint + halflength])
elif interval == 'jeffreys' or interval == 'flat':
from scipy.special import betaincinv
if interval == 'jeffreys':
lowerbound = betaincinv(k + 0.5, n - k + 0.5, 0.5 * alpha)
upperbound = betaincinv(k + 0.5, n - k + 0.5, 1. - 0.5 * alpha)
else:
lowerbound = betaincinv(k + 1, n - k + 1, 0.5 * alpha)
upperbound = betaincinv(k + 1, n - k + 1, 1. - 0.5 * alpha)
# Set lower or upper bound to k/n when k/n = 0 or 1
# We have to treat the special case of k/n being scalars,
# which is an ugly kludge
if lowerbound.ndim == 0:
if k == 0:
lowerbound = 0.
elif k == n:
upperbound = 1.
else:
lowerbound[k == 0] = 0
upperbound[k == n] = 1
conf_interval = np.array([lowerbound, upperbound])
else:
raise ValueError(f'Unrecognized interval: {interval:s}')
return conf_interval
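# Cross-check sketch (added, not part of the original module): evaluates the
# Wilson formula quoted in the notes of binom_conf_interval directly, so the
# closed form can be compared against the function's output; for k=4, n=5 at
# the default confidence level it should reproduce
# binom_conf_interval(4, 5, interval='wilson'). Requires scipy, like the
# 'wilson' branch above. The helper name is arbitrary.
def _wilson_interval_by_hand(k, n, confidence_level=0.68269):
    from scipy.special import erfinv
    kappa = np.sqrt(2.) * erfinv(confidence_level)
    p = k / n
    midpoint = (k + kappa ** 2 / 2.) / (n + kappa ** 2)
    halflength = (kappa * np.sqrt(n)) / (n + kappa ** 2) * \
        np.sqrt(p * (1 - p) + kappa ** 2 / (4 * n))
    return np.array([midpoint - halflength, midpoint + halflength])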
def binned_binom_proportion(x, success, bins=10, range=None,
confidence_level=0.68269, interval='wilson'):
"""Binomial proportion and confidence interval in bins of a continuous
variable ``x``.
Given a set of datapoint pairs where the ``x`` values are
continuously distributed and the ``success`` values are binomial
("success / failure" or "true / false"), place the pairs into
bins according to ``x`` value and calculate the binomial proportion
(fraction of successes) and confidence interval in each bin.
Parameters
----------
x : sequence
Values.
success : sequence of bool
Success (`True`) or failure (`False`) corresponding to each value
in ``x``. Must be same length as ``x``.
bins : int or sequence of scalar, optional
If bins is an int, it defines the number of equal-width bins
in the given range (10, by default). If bins is a sequence, it
defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths (in this case, 'range' is ignored).
range : (float, float), optional
The lower and upper range of the bins. If `None` (default),
the range is set to ``(x.min(), x.max())``. Values outside the
range are ignored.
confidence_level : float, optional
Must be in range [0, 1].
Desired probability content in the confidence
interval ``(p - perr[0], p + perr[1])`` in each bin. Default is
0.68269.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used to calculate confidence interval on the
binomial proportion in each bin. See `binom_conf_interval` for
definition of the intervals. The 'wilson', 'jeffreys',
and 'flat' intervals generally give similar results. 'wilson'
should be somewhat faster, while 'jeffreys' and 'flat' are
marginally superior, but differ in the assumed prior.
The 'wald' interval is generally not recommended.
It is provided for comparison purposes. Default is 'wilson'.
Returns
-------
bin_ctr : ndarray
Central value of bins. Bins without any entries are not returned.
bin_halfwidth : ndarray
Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and
``bin_ctr + bins_halfwidth`` give the left and right side of each bin,
respectively.
p : ndarray
Efficiency in each bin.
perr : ndarray
2-d array of shape (2, len(p)) representing the upper and lower
uncertainty on p in each bin.
Notes
-----
This function requires ``scipy`` for all interval types.
See Also
--------
binom_conf_interval : Function used to estimate confidence interval in
each bin.
Examples
--------
Suppose we wish to estimate the efficiency of a survey in
detecting astronomical sources as a function of magnitude (i.e.,
the probability of detecting a source given its magnitude). In a
realistic case, we might prepare a large number of sources with
randomly selected magnitudes, inject them into simulated images,
and then record which were detected at the end of the reduction
pipeline. As a toy example, we generate 100 data points with
randomly selected magnitudes between 20 and 30 and "observe" them
with a known detection function (here, the error function, with
50% detection probability at magnitude 25):
>>> from scipy.special import erf
>>> from scipy.stats.distributions import binom
>>> def true_efficiency(x):
... return 0.5 - 0.5 * erf((x - 25.) / 2.)
>>> mag = 20. + 10. * np.random.rand(100)
>>> detected = binom.rvs(1, true_efficiency(mag))
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('Detection efficiency vs magnitude')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
The above example uses the Wilson confidence interval to calculate
the uncertainty ``perr`` in each bin (see the definition of various
confidence intervals in `binom_conf_interval`). A commonly used
alternative is the Wald interval. However, the Wald interval can
give nonsensical uncertainties when the efficiency is near 0 or 1,
and is therefore **not** recommended. As an illustration, the
following example shows the same data as above but uses the Wald
interval rather than the Wilson interval to calculate ``perr``:
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
... interval='wald')
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
interval='wald')
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('The Wald interval can give nonsensical uncertainties')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
"""
x = np.ravel(x)
success = np.ravel(success).astype(bool)
if x.shape != success.shape:
raise ValueError('sizes of x and success must match')
# Put values into a histogram (`n`). Put "successful" values
# into a second histogram (`k`) with identical binning.
n, bin_edges = np.histogram(x, bins=bins, range=range)
k, bin_edges = np.histogram(x[success], bins=bin_edges)
bin_ctr = (bin_edges[:-1] + bin_edges[1:]) / 2.
bin_halfwidth = bin_ctr - bin_edges[:-1]
# Remove bins with zero entries.
valid = n > 0
bin_ctr = bin_ctr[valid]
bin_halfwidth = bin_halfwidth[valid]
n = n[valid]
k = k[valid]
p = k / n
bounds = binom_conf_interval(k, n, confidence_level=confidence_level, interval=interval)
perr = np.abs(bounds - p)
return bin_ctr, bin_halfwidth, p, perr
def _check_poisson_conf_inputs(sigma, background, confidence_level, name):
if sigma != 1:
raise ValueError(f"Only sigma=1 supported for interval {name}")
if background != 0:
raise ValueError(f"background not supported for interval {name}")
if confidence_level is not None:
raise ValueError(f"confidence_level not supported for interval {name}")
def poisson_conf_interval(n, interval='root-n', sigma=1, background=0,
confidence_level=None):
r"""Poisson parameter confidence interval given observed counts
Parameters
----------
n : int or numpy.ndarray
Number of counts (0 <= ``n``).
interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional
Formula used for confidence interval. See notes for details.
Default is ``'root-n'``.
sigma : float, optional
Number of sigma for confidence interval; only supported for
the 'frequentist-confidence' mode.
background : float, optional
Number of counts expected from the background; only supported for
the 'kraft-burrows-nousek' mode. This number is assumed to be determined
from a large region so that the uncertainty on its value is negligible.
confidence_level : float, optional
Confidence level between 0 and 1; only supported for the
'kraft-burrows-nousek' mode.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``n``.
Notes
-----
The "right" confidence interval to use for Poisson data is a
matter of debate. The CDF working group `recommends
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_
using root-n throughout, largely in the interest of
comprehensibility, but discusses other possibilities. The ATLAS
group also `discusses
<http://www.pp.rhul.ac.uk/~cowan/atlas/ErrorBars.pdf>`_ several
possibilities but concludes that no single representation is
suitable for all cases. The suggestion has also been `floated
<https://ui.adsabs.harvard.edu/abs/2012EPJP..127...24A>`_ that error
bars should be attached to theoretical predictions instead of
observed data, which this function will not help with (but it's
easy; then you really should use the square root of the theoretical
prediction).
The intervals implemented here are:
**1. 'root-n'** This is a very widely used standard rule derived
from the maximum-likelihood estimator for the mean of the Poisson
process. While it produces questionable results for small n and
outright wrong results for n=0, it is standard enough that people are
(supposedly) used to interpreting these wonky values. The interval is
.. math::
CI = (n-\sqrt{n}, n+\sqrt{n})
**2. 'root-n-0'** This is identical to the above except that where
n is zero the interval returned is (0,1).
**3. 'pearson'** This is an only-slightly-more-complicated rule
based on Pearson's chi-squared rule (as `explained
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ by
the CDF working group). It also has the nice feature that if your
theory curve touches an endpoint of the interval, then your data
point is indeed one sigma away. The interval is
.. math::
CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25})
**4. 'sherpagehrels'** This rule is used by default in the fitting
package 'sherpa'. The `documentation
<https://cxc.harvard.edu/sherpa4.4/statistics/#chigehrels>`_ claims
it is based on a numerical approximation published in `Gehrels
(1986) <https://ui.adsabs.harvard.edu/abs/1986ApJ...303..336G>`_ but it
does not actually appear there. It is symmetrical, and while the
upper limits are within about 1% of those given by
'frequentist-confidence', the lower limits can be badly wrong. The
interval is
.. math::
CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75})
**5. 'frequentist-confidence'** These are frequentist central
confidence intervals:
.. math::
CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n),
0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1)))
where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square
distribution with the indicated number of degrees of freedom and
:math:`\alpha` is the one-tailed probability of the normal
distribution (at the point given by the parameter 'sigma'). See
`Maxwell (2011)
<https://ui.adsabs.harvard.edu/abs/2011arXiv1102.0822M>`_ for further
details.
**6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows
for the presence of a known background :math:`B` in the source signal
:math:`N`.
For a given confidence level :math:`CL` the confidence interval
:math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by:
.. math::
CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS
where the function :math:`f_{N,B}` is:
.. math::
f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!}
and the normalization constant :math:`C`:
.. math::
C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1}
= \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1}
See `Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ for further
details.
These formulas implement a positive, uniform prior.
`Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ discuss this
choice in more detail and show that the problem is relatively
insensitive to the choice of prior.
This function has an optional dependency: Either `Scipy
<https://www.scipy.org/>`_ or `mpmath <http://mpmath.org/>`_ need
to be available (Scipy works only for N < 100).
    This code is numerically very intensive, which makes it much slower than
the other methods, in particular for large count numbers (above 1000
even with ``mpmath``). Fortunately, some of the other methods or a
Gaussian approximation usually work well in this regime.
Examples
--------
>>> poisson_conf_interval(np.arange(10), interval='root-n').T
array([[ 0. , 0. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='root-n-0').T
array([[ 0. , 1. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='pearson').T
array([[ 0. , 1. ],
[ 0.38196601, 2.61803399],
[ 1. , 4. ],
[ 1.69722436, 5.30277564],
[ 2.43844719, 6.56155281],
[ 3.20871215, 7.79128785],
[ 4. , 9. ],
[ 4.8074176 , 10.1925824 ],
[ 5.62771868, 11.37228132],
[ 6.45861873, 12.54138127]])
>>> poisson_conf_interval(
... np.arange(10), interval='frequentist-confidence').T
array([[ 0. , 1.84102165],
[ 0.17275378, 3.29952656],
[ 0.70818544, 4.63785962],
[ 1.36729531, 5.91818583],
[ 2.08566081, 7.16275317],
[ 2.84030886, 8.38247265],
[ 3.62006862, 9.58364155],
[ 4.41852954, 10.77028072],
[ 5.23161394, 11.94514152],
[ 6.05653896, 13.11020414]])
>>> poisson_conf_interval(
... 7, interval='frequentist-confidence').T
array([ 4.41852954, 10.77028072])
>>> poisson_conf_interval(
... 10, background=1.5, confidence_level=0.95,
... interval='kraft-burrows-nousek').T # doctest: +FLOAT_CMP
array([[ 3.47894005, 16.113329533]])
""" # noqa
if not np.isscalar(n):
n = np.asanyarray(n)
if interval == 'root-n':
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n),
n + np.sqrt(n)])
elif interval == 'root-n-0':
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n),
n + np.sqrt(n)])
if np.isscalar(n):
if n == 0:
conf_interval[1] = 1
else:
conf_interval[1, n == 0] = 1
elif interval == 'pearson':
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n + 0.5 - np.sqrt(n + 0.25),
n + 0.5 + np.sqrt(n + 0.25)])
elif interval == 'sherpagehrels':
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - 1 - np.sqrt(n + 0.75),
n + 1 + np.sqrt(n + 0.75)])
elif interval == 'frequentist-confidence':
_check_poisson_conf_inputs(1., background, confidence_level, interval)
import scipy.stats
alpha = scipy.stats.norm.sf(sigma)
conf_interval = np.array([0.5 * scipy.stats.chi2(2 * n).ppf(alpha),
0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha)])
if np.isscalar(n):
if n == 0:
conf_interval[0] = 0
else:
conf_interval[0, n == 0] = 0
elif interval == 'kraft-burrows-nousek':
# Deprecation warning in Python 3.9 when N is float, so we force int,
# see https://github.com/astropy/astropy/issues/10832
if np.isscalar(n):
if not isinstance(n, int):
raise TypeError('Number of counts must be integer.')
elif not issubclass(n.dtype.type, np.integer):
raise TypeError('Number of counts must be integer.')
if confidence_level is None:
raise ValueError('Set confidence_level for method {}. (sigma is '
'ignored.)'.format(interval))
confidence_level = np.asanyarray(confidence_level)
if np.any(confidence_level <= 0) or np.any(confidence_level >= 1):
raise ValueError('confidence_level must be a number between 0 and 1.')
background = np.asanyarray(background)
if np.any(background < 0):
raise ValueError('Background must be >= 0.')
conf_interval = np.vectorize(_kraft_burrows_nousek,
cache=True)(n, background, confidence_level)
conf_interval = np.vstack(conf_interval)
else:
raise ValueError(f"Invalid method for Poisson confidence intervals: {interval}")
return conf_interval
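# Worked example (added for illustration): the 'pearson' interval above is a
# closed form, so for n = 3 it can be checked by hand:
#   (3 + 0.5 - sqrt(3.25), 3 + 0.5 + sqrt(3.25)) ~= (1.697, 5.303),
# matching the n = 3 row of the 'pearson' doctest output. The helper name is
# arbitrary.
def _example_pearson_interval(n=3):
    return (n + 0.5 - np.sqrt(n + 0.25), n + 0.5 + np.sqrt(n + 0.25))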
def median_absolute_deviation(data, axis=None, func=None, ignore_nan=False):
"""
Calculate the median absolute deviation (MAD).
The MAD is defined as ``median(abs(a - median(a)))``.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the MADs are computed. The default
(`None`) is to compute the MAD of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis==None`` and numpy's version
is >1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad : float or `~numpy.ndarray`
The median absolute deviation of the input array. If ``axis``
is `None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
Generate random variates from a Gaussian distribution and return the
median absolute deviation for that distribution::
>>> import numpy as np
>>> from astropy.stats import median_absolute_deviation
>>> rand = np.random.default_rng(12345)
>>> from numpy.random import randn
>>> mad = median_absolute_deviation(rand.standard_normal(1000))
>>> print(mad) # doctest: +FLOAT_CMP
0.6829504282771885
See Also
--------
mad_std
"""
if func is None:
# Check if the array has a mask and if so use np.ma.median
# See https://github.com/numpy/numpy/issues/7330 why using np.ma.median
# for normal arrays should not be done (summary: np.ma.median always
# returns an masked array even if the result should be scalar). (#4658)
if isinstance(data, np.ma.MaskedArray):
is_masked = True
func = np.ma.median
if ignore_nan:
data = np.ma.masked_where(np.isnan(data), data, copy=True)
elif ignore_nan:
is_masked = False
func = np.nanmedian
else:
is_masked = False
func = np.median # drops units if result is NaN
else:
is_masked = None
data = np.asanyarray(data)
# np.nanmedian has `keepdims`, which is a good option if we're not allowing
# user-passed functions here
data_median = func(data, axis=axis)
# this conditional can be removed after this PR is merged:
# https://github.com/astropy/astropy/issues/12165
if (isinstance(data, u.Quantity) and func is np.median
and data_median.ndim == 0 and np.isnan(data_median)):
data_median = data.__array_wrap__(data_median)
# broadcast the median array before subtraction
if axis is not None:
data_median = _expand_dims(data_median, axis=axis) # NUMPY_LT_1_18
result = func(np.abs(data - data_median), axis=axis, overwrite_input=True)
# this conditional can be removed after this PR is merged:
# https://github.com/astropy/astropy/issues/12165
if (isinstance(data, u.Quantity) and func is np.median
and result.ndim == 0 and np.isnan(result)):
result = data.__array_wrap__(result)
if axis is None and np.ma.isMaskedArray(result):
# return scalar version
result = result.item()
elif np.ma.isMaskedArray(result) and not is_masked:
# if the input array was not a masked array, we don't want to return a
# masked array
result = result.filled(fill_value=np.nan)
return result
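# Equivalence sketch (added, not part of the original module): for plain 1-D
# float data with no NaNs or mask, median_absolute_deviation reduces to the
# definition in its docstring, median(abs(a - median(a))); mad_std below
# simply scales this value by ~1.4826. The helper name and data are arbitrary.
def _example_mad_by_hand():
    a = np.array([1., 2., 3., 4., 100.])
    direct = np.median(np.abs(a - np.median(a)))
    return direct, median_absolute_deviation(a)   # both 1.0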
def mad_std(data, axis=None, func=None, ignore_nan=False):
r"""
Calculate a robust standard deviation using the `median absolute
deviation (MAD)
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_.
The standard deviation estimator is given by:
.. math::
\sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)}
\approx 1.4826 \ \textrm{MAD}
where :math:`\Phi^{-1}(P)` is the normal inverse cumulative
distribution function evaluated at probability :math:`P = 3/4`.
Parameters
----------
data : array-like
Data array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the robust standard deviations are
computed. The default (`None`) is to compute the robust
standard deviation of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is
>1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad_std : float or `~numpy.ndarray`
The robust standard deviation of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import mad_std
>>> rand = np.random.default_rng(12345)
>>> madstd = mad_std(rand.normal(5, 2, (100, 100)))
>>> print(madstd) # doctest: +FLOAT_CMP
1.984147963351707
See Also
--------
biweight_midvariance, biweight_midcovariance, median_absolute_deviation
"""
# NOTE: 1. / scipy.stats.norm.ppf(0.75) = 1.482602218505602
MAD = median_absolute_deviation(
data, axis=axis, func=func, ignore_nan=ignore_nan)
return MAD * 1.482602218505602
def signal_to_noise_oir_ccd(t, source_eps, sky_eps, dark_eps, rd, npix,
gain=1.0):
"""Computes the signal to noise ratio for source being observed in the
optical/IR using a CCD.
Parameters
----------
t : float or numpy.ndarray
CCD integration time in seconds
source_eps : float
Number of electrons (photons) or DN per second in the aperture from the
source. Note that this should already have been scaled by the filter
transmission and the quantum efficiency of the CCD. If the input is in
DN, then be sure to set the gain to the proper value for the CCD.
If the input is in electrons per second, then keep the gain as its
default of 1.0.
sky_eps : float
Number of electrons (photons) or DN per second per pixel from the sky
background. Should already be scaled by filter transmission and QE.
This must be in the same units as source_eps for the calculation to
make sense.
dark_eps : float
Number of thermal electrons per second per pixel. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
rd : float
Read noise of the CCD in electrons. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
npix : float
Size of the aperture in pixels
gain : float, optional
Gain of the CCD. In units of electrons per DN.
Returns
-------
SNR : float or numpy.ndarray
Signal to noise ratio calculated from the inputs
"""
signal = t * source_eps * gain
noise = np.sqrt(t * (source_eps * gain + npix *
(sky_eps * gain + dark_eps)) + npix * rd ** 2)
return signal / noise
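# Worked example (added for illustration): with a 100 s exposure, a source of
# 50 e-/s, sky of 10 e-/s/pixel, dark current of 1 e-/s/pixel, read noise of
# 5 e-, and a 10-pixel aperture, the signal is 5000 e- and the noise is
# sqrt(100*(50 + 10*(10 + 1)) + 10*25) = sqrt(16250) ~= 127.5, so SNR ~= 39.2.
# The helper name and the numbers are arbitrary.
def _example_ccd_snr():
    return signal_to_noise_oir_ccd(t=100., source_eps=50., sky_eps=10.,
                                   dark_eps=1., rd=5., npix=10)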
def bootstrap(data, bootnum=100, samples=None, bootfunc=None):
"""Performs bootstrap resampling on numpy arrays.
Bootstrap resampling is used to understand confidence intervals of sample
estimates. This function returns versions of the dataset resampled with
replacement ("case bootstrapping"). These can all be run through a function
or statistic to produce a distribution of values which can then be used to
find the confidence intervals.
Parameters
----------
data : ndarray
N-D array. The bootstrap resampling will be performed on the first
index, so the first index should access the relevant information
to be bootstrapped.
bootnum : int, optional
Number of bootstrap resamples
samples : int, optional
Number of samples in each resample. The default `None` sets samples to
the number of datapoints
bootfunc : function, optional
Function to reduce the resampled data. Each bootstrap resample will
be put through this function and the results returned. If `None`, the
bootstrapped data will be returned
Returns
-------
boot : ndarray
If bootfunc is None, then each row is a bootstrap resample of the data.
If bootfunc is specified, then the columns will correspond to the
outputs of bootfunc.
Examples
--------
Obtain a twice resampled array:
>>> from astropy.stats import bootstrap
>>> import numpy as np
>>> from astropy.utils import NumpyRNGContext
>>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2)
...
>>> bootresult # doctest: +FLOAT_CMP
array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.],
[3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]])
>>> bootresult.shape
(2, 10)
Obtain a statistic on the array
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean)
...
>>> bootresult # doctest: +FLOAT_CMP
array([4. , 4.6])
Obtain a statistic with two outputs on the array
>>> test_statistic = lambda x: (np.sum(x), np.mean(x))
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic)
>>> bootresult # doctest: +FLOAT_CMP
array([[40. , 4. ],
[46. , 4.6],
[35. , 3.5]])
>>> bootresult.shape
(3, 2)
Obtain a statistic with two outputs on the array, keeping only the first
output
>>> bootfunc = lambda x:test_statistic(x)[0]
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc)
...
>>> bootresult # doctest: +FLOAT_CMP
array([40., 46., 35.])
>>> bootresult.shape
(3,)
"""
if samples is None:
samples = data.shape[0]
# make sure the input is sane
if samples < 1 or bootnum < 1:
raise ValueError("neither 'samples' nor 'bootnum' can be less than 1.")
if bootfunc is None:
resultdims = (bootnum,) + (samples,) + data.shape[1:]
else:
# test number of outputs from bootfunc, avoid single outputs which are
# array-like
try:
resultdims = (bootnum, len(bootfunc(data)))
except TypeError:
resultdims = (bootnum,)
# create empty boot array
boot = np.empty(resultdims)
for i in range(bootnum):
bootarr = np.random.randint(low=0, high=data.shape[0], size=samples)
if bootfunc is None:
boot[i] = data[bootarr]
else:
boot[i] = bootfunc(data[bootarr])
return boot
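# Follow-up sketch (added, not part of the original module): a common way to
# turn the bootstrapped statistics into a confidence interval, as the
# docstring above suggests, is to take percentiles of the bootfunc outputs;
# here a rough 68% interval on the mean. The seed, sample values and helper
# name are arbitrary.
def _example_bootstrap_percentile_ci(seed=0):
    np.random.seed(seed)                  # bootstrap() draws from np.random
    data = np.arange(10, dtype=float)
    means = bootstrap(data, bootnum=1000, bootfunc=np.mean)
    return np.percentile(means, [16., 84.])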
def _scipy_kraft_burrows_nousek(N, B, CL):
'''Upper limit on a poisson count rate
The implementation is based on Kraft, Burrows and Nousek
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires :mod:`~scipy`. This implementation will cause Overflow Errors for
about N > 100 (the exact limit depends on details of how scipy was
compiled). See `~astropy.stats.mpmath_poisson_upper_limit` for an
implementation that is slower, but can deal with arbitrarily high numbers
since it is based on the `mpmath <http://mpmath.org/>`_ library.
'''
from scipy.optimize import brentq
from scipy.integrate import quad
from scipy.special import factorial
from math import exp
def eqn8(N, B):
n = np.arange(N + 1, dtype=np.float64)
return 1. / (exp(-B) * np.sum(np.power(B, n) / factorial(n)))
# The parameters of eqn8 do not vary between calls so we can calculate the
# result once and reuse it. The same is True for the factorial of N.
# eqn7 is called hundred times so "caching" these values yields a
# significant speedup (factor 10).
eqn8_res = eqn8(N, B)
factorial_N = float(math.factorial(N))
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
return quad(eqn7, S_min, S_max, args=(N, B), limit=500)
def find_s_min(S_max, N, B):
'''
Kraft, Burrows and Nousek suggest to integrate from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
        Treat S_max as the optimization parameter in func and then
        calculate the matching s_min such that eqn7(S_min) = eqn7(S_max).
'''
y_S_max = eqn7(S_max, N, B)
if eqn7(0, N, B) >= y_S_max:
return 0.
else:
return brentq(lambda x: eqn7(x, N, B) - y_S_max, 0, N - B)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out[0] - CL
S_max = brentq(func, N - B, 100)
S_min = find_s_min(S_max, N, B)
return S_min, S_max
def _mpmath_kraft_burrows_nousek(N, B, CL):
'''Upper limit on a poisson count rate
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires the `mpmath <http://mpmath.org/>`_ library. See
`~astropy.stats.scipy_poisson_upper_limit` for an implementation
that is based on scipy and evaluates faster, but runs only to about
N = 100.
'''
from mpmath import mpf, factorial, findroot, fsum, power, exp, quad
# We convert these values to float. Because for some reason,
# mpmath.mpf cannot convert from numpy.int64
N = mpf(float(N))
B = mpf(float(B))
CL = mpf(float(CL))
tol = 1e-4
def eqn8(N, B):
sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)]
return 1. / (exp(-B) * fsum(sumterms))
eqn8_res = eqn8(N, B)
factorial_N = factorial(N)
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
def eqn7NB(S):
return eqn7(S, N, B)
return quad(eqn7NB, [S_min, S_max])
def find_s_min(S_max, N, B):
'''
Kraft, Burrows and Nousek suggest to integrate from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
        Treat S_max as the optimization parameter in func and then
        calculate the matching s_min such that eqn7(S_min) = eqn7(S_max).
'''
y_S_max = eqn7(S_max, N, B)
# If B > N, then N-B, the "most probable" values is < 0
# and thus s_min is certainly 0.
# Note: For small N, s_max is also close to 0 and root finding
# might find the wrong root, thus it is important to handle this
# case here and return the analytical answer (s_min = 0).
if (B >= N) or (eqn7(0, N, B) >= y_S_max):
return 0.
else:
def eqn7ysmax(x):
return eqn7(x, N, B) - y_S_max
return findroot(eqn7ysmax, [0., N - B], solver='ridder',
tol=tol)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out - CL
    # Several numerical problems were found to prevent the solvers from
    # finding the roots unless the starting values are very close to the
    # final values. Thus, we use this primitive, brute-force stepping here
    # to obtain an interval that can be fed into the ridder solver.
s_max_guess = max(N - B, 1.)
while func(s_max_guess) < 0:
s_max_guess += 1
S_max = findroot(func, [s_max_guess - 1, s_max_guess], solver='ridder',
tol=tol)
S_min = find_s_min(S_max, N, B)
return float(S_min), float(S_max)
def _kraft_burrows_nousek(N, B, CL):
    '''Upper limit on a Poisson count rate
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
    This function has an optional dependency: either :mod:`scipy` or `mpmath
    <http://mpmath.org/>`_ needs to be available. (Scipy only works for
    N < 100.)
'''
from astropy.utils.compat.optional_deps import HAS_SCIPY, HAS_MPMATH
if HAS_SCIPY and N <= 100:
try:
return _scipy_kraft_burrows_nousek(N, B, CL)
except OverflowError:
if not HAS_MPMATH:
raise ValueError('Need mpmath package for input numbers this '
'large.')
if HAS_MPMATH:
return _mpmath_kraft_burrows_nousek(N, B, CL)
raise ImportError('Either scipy or mpmath are required.')
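# A minimal usage sketch for the Kraft, Burrows & Nousek helpers above (not
# part of the upstream API). It assumes scipy (or mpmath) is installed and
# uses purely illustrative values for N, B and CL.
def _demo_kraft_burrows_nousek():
    # 5 observed counts, an expected background of 2.5 counts, 95% confidence
    S_min, S_max = _kraft_burrows_nousek(N=5, B=2.5, CL=0.95)
    # The confidence interval on the source counts is ordered and non-negative.
    assert 0 <= S_min < S_max
    return S_min, S_max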
def kuiper_false_positive_probability(D, N):
"""Compute the false positive probability for the Kuiper statistic.
Uses the set of four formulas described in Paltani 2004; they report
the resulting function never underestimates the false positive
probability but can be a bit high in the N=40..50 range.
(They quote a factor 1.5 at the 1e-7 level.)
Parameters
----------
D : float
The Kuiper test score.
N : float
The effective sample size.
Returns
-------
fpp : float
The probability of a score this large arising from the null hypothesis.
Notes
-----
Eq 7 of Paltani 2004 appears to incorrectly quote the original formula
(Stephens 1965). This function implements the original formula, as it
produces a result closer to Monte Carlo simulations.
References
----------
.. [1] Paltani, S., "Searching for periods in X-ray observations using
Kuiper's test. Application to the ROSAT PSPC archive",
       Astronomy and Astrophysics, v.420, p.789-790, 2004.
.. [2] Stephens, M. A., "The goodness-of-fit statistic VN: distribution
and significance points", Biometrika, v.52, p.309, 1965.
"""
try:
from scipy.special import factorial, comb
except ImportError:
# Retained for backwards compatibility with older versions of scipy
# (factorial appears to have moved here in 0.14)
from scipy.misc import factorial, comb
if D < 0. or D > 2.:
raise ValueError("Must have 0<=D<=2 by definition of the Kuiper test")
if D < 2. / N:
return 1. - factorial(N) * (D - 1. / N)**(N - 1)
elif D < 3. / N:
k = -(N * D - 1.) / 2.
r = np.sqrt(k**2 - (N * D - 2.)**2 / 2.)
a, b = -k + r, -k - r
return 1 - (factorial(N - 1) * (b**(N - 1) * (1 - a) - a**(N - 1) * (1 - b))
/ N**(N - 2) / (b - a))
elif (D > 0.5 and N % 2 == 0) or (D > (N - 1.) / (2. * N) and N % 2 == 1):
# NOTE: the upper limit of this sum is taken from Stephens 1965
t = np.arange(np.floor(N * (1 - D)) + 1)
y = D + t / N
Tt = y**(t - 3) * (y**3 * N
- y**2 * t * (3 - 2 / N)
+ y * t * (t - 1) * (3 - 2 / N) / N
- t * (t - 1) * (t - 2) / N**2)
term = Tt * comb(N, t) * (1 - D - t / N)**(N - t - 1)
return term.sum()
else:
z = D * np.sqrt(N)
# When m*z>18.82 (sqrt(-log(finfo(double))/2)), exp(-2m**2z**2)
# underflows. Cutting off just before avoids triggering a (pointless)
# underflow warning if `under="warn"`.
ms = np.arange(1, 18.82 / z)
S1 = (2 * (4 * ms**2 * z**2 - 1) * np.exp(-2 * ms**2 * z**2)).sum()
S2 = (ms**2 * (4 * ms**2 * z**2 - 3) * np.exp(-2 * ms**2 * z**2)).sum()
return S1 - 8 * D / 3 * S2
def kuiper(data, cdf=lambda x: x, args=()):
"""Compute the Kuiper statistic.
Use the Kuiper statistic version of the Kolmogorov-Smirnov test to
find the probability that a sample like ``data`` was drawn from the
distribution whose CDF is given as ``cdf``.
.. warning::
This will not work correctly for distributions that are actually
discrete (Poisson, for example).
Parameters
----------
data : array-like
The data values.
cdf : callable
A callable to evaluate the CDF of the distribution being tested
against. Will be called with a vector of all values at once.
The default is a uniform distribution.
args : list-like, optional
Additional arguments to be supplied to cdf.
Returns
-------
D : float
The raw statistic.
fpp : float
The probability of a D this large arising with a sample drawn from
the distribution whose CDF is cdf.
Notes
-----
The Kuiper statistic resembles the Kolmogorov-Smirnov test in that
it is nonparametric and invariant under reparameterizations of the data.
The Kuiper statistic, in addition, is equally sensitive throughout
the domain, and it is also invariant under cyclic permutations (making
it particularly appropriate for analyzing circular data).
    Returns (D, fpp), where D is the Kuiper D number and fpp is the
    probability that a value as large as D would occur if data were
    drawn from cdf.
.. warning::
The fpp is calculated only approximately, and it can be
as much as 1.5 times the true value.
Stephens 1970 claims this is more effective than the KS at detecting
changes in the variance of a distribution; the KS is (he claims) more
sensitive at detecting changes in the mean.
If cdf was obtained from data by fitting, then fpp is not correct and
it will be necessary to do Monte Carlo simulations to interpret D.
D should normally be independent of the shape of CDF.
References
----------
.. [1] Stephens, M. A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises
and Related Statistics Without Extensive Tables", Journal of the
Royal Statistical Society. Series B (Methodological), Vol. 32,
No. 1. (1970), pp. 115-122.
"""
data = np.sort(data)
cdfv = cdf(data, *args)
N = len(data)
D = (np.amax(cdfv - np.arange(N) / float(N)) +
np.amax((np.arange(N) + 1) / float(N) - cdfv))
return D, kuiper_false_positive_probability(D, N)
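# A short, self-contained sketch of exercising ``kuiper`` against the default
# uniform CDF (illustrative only; the seed and sample size are arbitrary, and
# the false positive probability calculation assumes scipy is available).
def _demo_kuiper_uniform():
    rng = np.random.RandomState(0)
    data = rng.uniform(0, 1, 500)
    D, fpp = kuiper(data)
    # For uniform data the false positive probability is typically large,
    # i.e. there is no evidence against uniformity.
    return D, fpp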
def kuiper_two(data1, data2):
"""Compute the Kuiper statistic to compare two samples.
Parameters
----------
data1 : array-like
The first set of data values.
data2 : array-like
The second set of data values.
Returns
-------
D : float
The raw test statistic.
fpp : float
The probability of obtaining two samples this different from
the same distribution.
.. warning::
The fpp is quite approximate, especially for small samples.
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1, = data1.shape
n2, = data2.shape
common_type = np.find_common_type([], [data1.dtype, data2.dtype])
if not (np.issubdtype(common_type, np.number)
and not np.issubdtype(common_type, np.complexfloating)):
raise ValueError('kuiper_two only accepts real inputs')
# nans, if any, are at the end after sorting.
if np.isnan(data1[-1]) or np.isnan(data2[-1]):
raise ValueError('kuiper_two only accepts non-nan inputs')
D = _stats.ks_2samp(np.asarray(data1, common_type),
np.asarray(data2, common_type))
Ne = len(data1) * len(data2) / float(len(data1) + len(data2))
return D, kuiper_false_positive_probability(D, Ne)
def fold_intervals(intervals):
"""Fold the weighted intervals to the interval (0,1).
Convert a list of intervals (ai, bi, wi) to a list of non-overlapping
intervals covering (0,1). Each output interval has a weight equal
to the sum of the wis of all the intervals that include it. All intervals
are interpreted modulo 1, and weights are accumulated counting
multiplicity. This is appropriate, for example, if you have one or more
blocks of observation and you want to determine how much observation
time was spent on different parts of a system's orbit (the blocks
should be converted to units of the orbital period first).
Parameters
----------
intervals : list of (3,) tuple
For each tuple (ai,bi,wi); ai and bi are the limits of the interval,
and wi is the weight to apply to the interval.
Returns
-------
breaks : (N,) array of float
The endpoints of a set of intervals covering [0,1]; breaks[0]=0 and
breaks[-1] = 1
weights : (N-1,) array of float
        The ith element is the sum, over all input intervals, of each
        interval's weight times the number of times the subinterval
        (breaks[i], breaks[i+1]) is covered by that interval.
"""
r = []
breaks = set()
tot = 0
for (a, b, wt) in intervals:
tot += (np.ceil(b) - np.floor(a)) * wt
fa = a % 1
breaks.add(fa)
r.append((0, fa, -wt))
fb = b % 1
breaks.add(fb)
r.append((fb, 1, -wt))
breaks.add(0.)
breaks.add(1.)
breaks = sorted(breaks)
breaks_map = dict([(f, i) for (i, f) in enumerate(breaks)])
totals = np.zeros(len(breaks) - 1)
totals += tot
for (a, b, wt) in r:
totals[breaks_map[a]:breaks_map[b]] += wt
return np.array(breaks), totals
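# A tiny worked example for ``fold_intervals`` (values checked by hand): a
# single observation block covering phases 0.2-0.6 with weight 1 folds to
# breakpoints [0, 0.2, 0.6, 1] with weights [0, 1, 0].
def _demo_fold_intervals():
    breaks, weights = fold_intervals([(0.2, 0.6, 1.0)])
    assert np.allclose(breaks, [0.0, 0.2, 0.6, 1.0])
    assert np.allclose(weights, [0.0, 1.0, 0.0])
    return breaks, weights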
def cdf_from_intervals(breaks, totals):
"""Construct a callable piecewise-linear CDF from a pair of arrays.
Take a pair of arrays in the format returned by fold_intervals and
make a callable cumulative distribution function on the interval
(0,1).
Parameters
----------
breaks : (N,) array of float
The boundaries of successive intervals.
totals : (N-1,) array of float
The weight for each interval.
Returns
-------
f : callable
        A cumulative distribution function corresponding to the
        piecewise-constant probability distribution given by breaks, totals.
"""
if breaks[0] != 0 or breaks[-1] != 1:
raise ValueError("Intervals must be restricted to [0,1]")
if np.any(np.diff(breaks) <= 0):
raise ValueError("Breaks must be strictly increasing")
if np.any(totals < 0):
raise ValueError(
"Total weights in each subinterval must be nonnegative")
if np.all(totals == 0):
raise ValueError("At least one interval must have positive exposure")
b = breaks.copy()
c = np.concatenate(((0,), np.cumsum(totals * np.diff(b))))
c /= c[-1]
return lambda x: np.interp(x, b, c, 0, 1)
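# Sketch of combining ``fold_intervals`` and ``cdf_from_intervals``: with the
# single block above, all the exposure lies in (0.2, 0.6), so the CDF rises
# linearly from 0 at phase 0.2 to 1 at phase 0.6 (hand-checked values).
def _demo_cdf_from_intervals():
    breaks, weights = fold_intervals([(0.2, 0.6, 1.0)])
    cdf = cdf_from_intervals(breaks, weights)
    assert cdf(0.2) == 0.0
    assert np.isclose(cdf(0.4), 0.5)
    assert cdf(0.6) == 1.0
    return cdf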
def interval_overlap_length(i1, i2):
"""Compute the length of overlap of two intervals.
Parameters
----------
i1, i2 : (float, float)
The two intervals, (interval 1, interval 2).
Returns
-------
l : float
The length of the overlap between the two intervals.
"""
(a, b) = i1
(c, d) = i2
if a < c:
if b < c:
return 0.
elif b < d:
return b - c
else:
return d - c
elif a < d:
if b < d:
return b - a
else:
return d - a
else:
return 0
def histogram_intervals(n, breaks, totals):
"""Histogram of a piecewise-constant weight function.
This function takes a piecewise-constant weight function and
computes the average weight in each histogram bin.
Parameters
----------
n : int
The number of bins
breaks : (N,) array of float
Endpoints of the intervals in the PDF
totals : (N-1,) array of float
Probability densities in each bin
Returns
-------
h : array of float
The average weight for each bin
"""
h = np.zeros(n)
start = breaks[0]
for i in range(len(totals)):
end = breaks[i + 1]
for j in range(n):
ol = interval_overlap_length((float(j) / n,
float(j + 1) / n), (start, end))
h[j] += ol / (1. / n) * totals[i]
start = end
return h
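# Sketch of ``histogram_intervals`` on the same folded block: with two bins,
# the exposure in (0.2, 0.6) contributes an average weight of 0.6 to the
# first bin and 0.2 to the second (hand-checked values).
def _demo_histogram_intervals():
    breaks, weights = fold_intervals([(0.2, 0.6, 1.0)])
    h = histogram_intervals(2, breaks, weights)
    assert np.allclose(h, [0.6, 0.2])
    return h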
|
mhvk/astropy
|
astropy/stats/funcs.py
|
Python
|
bsd-3-clause
| 61,721
|
[
"Gaussian"
] |
f048daf3d1066e0664f7ed0e2394fa1863f385d1c4dd04151b1ca8efbded967a
|
# -*- coding: UTF-8 -*-
import random
from neat import config
class NodeGene(object):
def __init__(self, id, nodetype, bias=0, response=4.924273, activation_type=None):
""" A node gene encodes the basic artificial neuron model.
nodetype should be "INPUT", "HIDDEN", or "OUTPUT"
"""
self._id = id
self._type = nodetype
self._bias = bias
self._response = response
self._activation_type = activation_type
assert(self._type in ('INPUT', 'OUTPUT', 'HIDDEN'))
id = property(lambda self: self._id)
type = property(lambda self: self._type)
bias = property(lambda self: self._bias)
response = property(lambda self: self._response)
activation_type = property(lambda self: self._activation_type)
def __str__(self):
return "Node %2d %6s, bias %+2.10s, response %+2.10s" \
%(self._id, self._type, self._bias, self._response)
def get_child(self, other):
""" Creates a new NodeGene ramdonly inheriting its attributes from parents """
assert(self._id == other._id)
ng = NodeGene(self._id, self._type,
random.choice((self._bias, other._bias)),
random.choice((self._response, other._response)),
self._activation_type)
return ng
def __mutate_bias(self):
#self._bias += random.uniform(-1, 1) * config.Config.bias_mutation_power
self._bias += random.gauss(0,1)*config.Config.bias_mutation_power
if self._bias > config.Config.max_weight:
self._bias = config.Config.max_weight
elif self._bias < config.Config.min_weight:
self._bias = config.Config.min_weight
def __mutate_response(self):
""" Mutates the neuron's average firing response. """
#self._response += random.uniform(-0.2, 0.2) * config.Config.bias_mutation_power
self._response += random.gauss(0,1)*config.Config.bias_mutation_power
def copy(self):
return NodeGene(self._id, self._type, self._bias,
self._response, self._activation_type)
def mutate(self):
r = random.random
if r() < config.Config.prob_mutatebias:
self.__mutate_bias()
if r() < config.Config.prob_mutatebias:
self.__mutate_response()
class CTNodeGene(NodeGene):
""" Continuous-time node gene - used in CTRNNs.
The main difference here is the addition of
a decay rate given by the time constant.
"""
def __init__(self, id, nodetype, bias = 1.0, response = 1.0, activation_type = 'exp', time_constant = 1.0):
super(CTNodeGene, self).__init__(id, nodetype, bias, response, activation_type)
self._time_constant = time_constant
time_constant = property(lambda self: self._time_constant)
def mutate(self):
super(CTNodeGene, self).mutate()
# mutating the time constant could bring numerical instability
# do it with caution
#if random.random() < 0.1:
# self.__mutate_time_constant()
def __mutate_time_constant(self):
""" Warning: pertubing the time constant (tau) may result in numerical instability """
self._time_constant += random.gauss(1.0,0.5)*0.001
if self._time_constant > config.Config.max_weight:
self._time_constant = config.Config.max_weight
elif self._time_constant < config.Config.min_weight:
self._time_constant = config.Config.min_weight
return self
def get_child(self, other):
""" Creates a new NodeGene ramdonly inheriting its attributes from parents """
assert(self._id == other._id)
ng = CTNodeGene(self._id, self._type,
random.choice((self._bias, other._bias)),
random.choice((self._response, other._response)),
self._activation_type,
random.choice((self._time_constant, other._time_constant)))
return ng
def __str__(self):
return "Node %2d %6s, bias %+2.10s, response %+2.10s, activation %s, time constant %+2.5s" \
% (self._id, self._type, self._bias, self._response,
self._activation_type, self._time_constant)
def copy(self):
return CTNodeGene(self._id, self._type, self._bias,
self._response, self._activation_type, self._time_constant)
class ConnectionGene(object):
__global_innov_number = 0
__innovations = {} # A list of innovations.
# Should it be global? Reset at every generation? Who knows?
@classmethod
def reset_innovations(cls):
cls.__innovations = {}
def __init__(self, innodeid, outnodeid, weight, enabled, innov = None):
self.__in = innodeid
self.__out = outnodeid
self.__weight = weight
self.__enabled = enabled
if innov is None:
try:
self.__innov_number = self.__innovations[self.key]
except KeyError:
self.__innov_number = self.__get_new_innov_number()
self.__innovations[self.key] = self.__innov_number
else:
self.__innov_number = innov
weight = property(lambda self: self.__weight)
innodeid = property(lambda self: self.__in)
outnodeid = property(lambda self: self.__out)
enabled = property(lambda self: self.__enabled)
# Key for dictionaries, avoids two connections between the same nodes.
key = property(lambda self: (self.__in, self.__out))
def mutate(self):
r = random.random
if r() < config.Config.prob_mutate_weight:
self.__mutate_weight()
if r() < config.Config.prob_togglelink:
self.enable()
#TODO: Remove weight_replaced?
#if r() < 0.001:
# self.__weight_replaced()
def enable(self):
""" Enables a link. """
self.__enabled = True
def __mutate_weight(self):
#self.__weight += random.uniform(-1,1) * Config.weight_mutation_power
self.__weight += random.gauss(0,1)*config.Config.weight_mutation_power
if self.__weight > config.Config.max_weight:
self.__weight = config.Config.max_weight
elif self.__weight < config.Config.min_weight:
self.__weight = config.Config.min_weight
def __weight_replaced(self):
#self.__weight = random.uniform(-config.Config.random_range, Config.random_range)
self.__weight = random.gauss(0, config.Config.weight_stdev)
@classmethod
def __get_new_innov_number(cls):
cls.__global_innov_number += 1
return cls.__global_innov_number
def __str__(self):
s = "In %2d, Out %2d, Weight %+3.5f, " % (self.__in, self.__out, self.__weight)
if self.__enabled:
s += "Enabled, "
else:
s += "Disabled, "
return s + "Innov %d" % (self.__innov_number,)
def __lt__(self, other):
return(self.__innov_number < other.__innov_number)
def split(self, node_id):
""" Splits a connection, creating two new connections and disabling this one """
self.__enabled = False
new_conn1 = ConnectionGene(self.__in, node_id, 1.0, True)
new_conn2 = ConnectionGene(node_id, self.__out, self.__weight, True)
return new_conn1, new_conn2
def copy(self):
return ConnectionGene(self.__in, self.__out, self.__weight,
self.__enabled, self.__innov_number)
def is_same_innov(self, cg):
return self.__innov_number == cg.__innov_number
def get_child(self, cg):
# TODO: average both weights (Stanley, p. 38)
return random.choice((self, cg)).copy()
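# A minimal sketch of how these gene classes fit together (illustrative only;
# the ids, weights and node types below are arbitrary, and no NEAT config is
# needed for construction, crossover or splitting).
def _demo_genes():
    parent_a = NodeGene(1, 'HIDDEN', bias=0.1)
    parent_b = NodeGene(1, 'HIDDEN', bias=-0.3)
    child = parent_a.get_child(parent_b)  # inherits bias/response at random
    conn = ConnectionGene(1, 2, weight=0.5, enabled=True)
    # Splitting a connection disables it and returns the two new connections
    # that route through the freshly inserted node.
    new_in, new_out = conn.split(node_id=3)
    return child, conn, new_in, new_out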
|
davidmfinol/py3NEAT
|
neat/genome.py
|
Python
|
gpl-3.0
| 7,772
|
[
"NEURON"
] |
70b3ec03a6e03a6613854c5c015b96251aefb6d1dcc5071387f18a1aebfd6125
|
'''
--------------------------------------------------------
Authors:
- Brin Rosenthal (sbrosenthal@ucsd.edu)
- Julia Len (jlen@ucsd.edu)
- Mikayla Webster (13webstermj@gmail.com)
--------------------------------------------------------
'''
from __future__ import print_function
import json
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
#import visJS_module # use this for local testing
import visJS2jupyter.visJS_module as visJS_module
import scipy_heatKernel
def draw_graph_overlap(G1, G2,
edge_cmap=plt.cm.coolwarm,
export_file='graph_overlap.json',
export_network=False,
highlight_nodes=None,
k=None,
node_cmap=plt.cm.autumn,
node_name_1='graph 1',
node_name_2='graph 2',
node_size=10,
physics_enabled=False,
**kwargs):
'''
Takes two networkX graphs and displays their overlap, where intersecting
nodes are triangles. Additional kwargs are passed to visjs_module.
Inputs:
- G1: a networkX graph
- G2: a networkX graph
- edge_cmap: matplotlib colormap for edges, default: matplotlib.cm.coolwarm
- export_file: JSON file to export graph data, default: 'graph_overlap.json'
- export_network: export network to Cytoscape, default: False
- highlight_nodes: list of nodes to place borders around, default: None
- k: float, optimal distance between nodes for nx.spring_layout(), default: None
- node_cmap: matplotlib colormap for nodes, default: matplotlib.cm.autumn
- node_name_1: string to name first graph's nodes, default: 'graph 1'
- node_name_2: string to name second graph's nodes, default: 'graph 2'
- node_size: size of nodes, default: 10
- physics_enabled: enable physics simulation, default: False
Returns:
- VisJS html network plot (iframe) of the graph overlap.
'''
G_overlap = create_graph_overlap(G1, G2, node_name_1, node_name_2)
# create nodes dict and edges dict for input to visjs
nodes = list(G_overlap.nodes())
edges = list(G_overlap.edges())
# set the position of each node
if k is None:
pos = nx.spring_layout(G_overlap)
else:
pos = nx.spring_layout(G_overlap,k=k)
xpos,ypos=zip(*pos.values())
nx.set_node_attributes(G_overlap, name = 'xpos', values = dict(zip(pos.keys(),[x*1000 for x in xpos])))
nx.set_node_attributes(G_overlap, name = 'ypos', values = dict(zip(pos.keys(),[y*1000 for y in ypos])))
# set the border width of nodes
if 'node_border_width' not in kwargs.keys():
kwargs['node_border_width'] = 2
border_width = {}
for n in nodes:
if highlight_nodes is not None and n in highlight_nodes:
border_width[n] = kwargs['node_border_width']
else:
border_width[n] = 0
nx.set_node_attributes(G_overlap, name = 'nodeOutline', values = border_width)
# set the shape of each node
nodes_shape=[]
for node in G_overlap.nodes(data=True):
if node[1]['node_overlap']==0:
nodes_shape.append('dot')
elif node[1]['node_overlap']==2:
nodes_shape.append('square')
elif node[1]['node_overlap']==1:
nodes_shape.append('triangle')
node_to_shape=dict(zip(G_overlap.nodes(),nodes_shape))
nx.set_node_attributes(G_overlap, name = 'nodeShape', values = node_to_shape)
# set the node label of each node
if highlight_nodes:
node_labels = {}
for node in nodes:
if node in highlight_nodes:
node_labels[node] = str(node)
else:
node_labels[node] = ''
else:
node_labels = {n:str(n) for n in nodes}
nx.set_node_attributes(G_overlap, name = 'nodeLabel', values = node_labels)
# set the node title of each node
node_titles = [ node[1]['node_name_membership'] + '<br/>' + str(node[0])
for node in G_overlap.nodes(data=True) ]
node_titles = dict(zip(G_overlap.nodes(),node_titles))
nx.set_node_attributes(G_overlap, name = 'nodeTitle', values = node_titles)
# set color of each node
node_to_color = visJS_module.return_node_to_color(G_overlap,
field_to_map='node_overlap',
cmap=node_cmap,
color_max_frac=.9,
color_min_frac=.1)
# set color of each edge
edge_to_color = visJS_module.return_edge_to_color(G_overlap,
field_to_map='edge_weight',
cmap=edge_cmap,
alpha=.3)
# create the nodes_dict with all relevant fields
nodes_dict = [{'id':str(n),
'border_width':border_width[n],
'color':node_to_color[n],
'degree':G_overlap.degree(n),
'node_label':node_labels[n],
'node_shape':node_to_shape[n],
'node_size':node_size,
'title':node_titles[n],
'x':np.float64(pos[n][0]).item()*1000,
'y':np.float64(pos[n][1]).item()*1000}
for n in nodes]
# map nodes to indices for source/target in edges
node_map = dict(zip(nodes,range(len(nodes))))
# create the edges_dict with all relevant fields
edges_dict = [{'source':node_map[edges[i][0]],
'target':node_map[edges[i][1]],
'color':edge_to_color[edges[i]]}
for i in range(len(edges))]
# set node_size_multiplier to increase node size as graph gets smaller
if 'node_size_multiplier' not in kwargs.keys():
if len(nodes) > 500:
kwargs['node_size_multiplier'] = 3
elif len(nodes) > 200:
kwargs['node_size_multiplier'] = 5
else:
kwargs['node_size_multiplier'] = 7
kwargs['physics_enabled'] = physics_enabled
# if node hovering color not set, set default to black
if 'node_color_hover_background' not in kwargs.keys():
kwargs['node_color_hover_background'] = 'black'
# node size determined by size in nodes_dict, not by id
if 'node_size_field' not in kwargs.keys():
kwargs['node_size_field'] = 'node_size'
# node label determined by value in nodes_dict
if 'node_label_field' not in kwargs.keys():
kwargs['node_label_field'] = 'node_label'
# export the network to JSON for Cytoscape
if export_network:
node_colors = map_node_to_color(G_overlap,'node_overlap',False)
nx.set_node_attributes(G_overlap, name = 'nodeColor', values = node_colors)
edge_colors = map_edge_to_color(G_overlap,'edge_weight',False)
nx.set_edge_attributes(G_overlap, name = 'edgeColor', values = edge_colors)
visJS_module.export_to_cytoscape(G = G_overlap, export_file = export_file)
return visJS_module.visjs_network(nodes_dict,edges_dict,**kwargs)
def create_graph_overlap(G1,G2,node_name_1,node_name_2):
'''
Create and return the overlap of two graphs.
Inputs:
- G1: a networkX graph
- G2: a networkX graph
- node_name_1: string to name first graph's nodes
- node_name_2: string to name second graph's nodes
Returns:
- A networkX graph that is the overlap of G1 and G2.
'''
overlap_graph = nx.Graph()
node_union = list(np.union1d(list(G1.nodes()),list(G2.nodes())))
node_intersect = list(np.intersect1d(list(G1.nodes()),list(G2.nodes())))
nodes_1only = np.setdiff1d(list(G1.nodes()),node_intersect)
nodes_2only = np.setdiff1d(list(G2.nodes()),node_intersect)
edges_total = list(G1.edges())
edges_total.extend(list(G2.edges()))
overlap_graph.add_nodes_from(node_union)
# set node attributes to distinguish which graph the node belongs to
node_overlap=[]
node_name_membership=[]
for node in node_union:
if node in nodes_1only:
node_overlap.append(0)
node_name_membership.append(node_name_1)
elif node in nodes_2only:
node_overlap.append(2)
node_name_membership.append(node_name_2)
else:
node_overlap.append(1)
node_name_membership.append(node_name_1+' + '+node_name_2)
nx.set_node_attributes(overlap_graph,
name = 'node_overlap',
values = dict(zip(node_union,node_overlap)))
nx.set_node_attributes(overlap_graph,
name = 'node_name_membership',
values = dict(zip(node_union,node_name_membership)))
nodes_total = list(overlap_graph.nodes())
intersecting_edge_val = int(math.floor(math.log10(len(nodes_total)))) * 10
# set the edge weights
edge_weights = {}
for e in edges_total:
eflip = (e[1],e[0])
if (e in edge_weights.keys()):
edge_weights[e]+=intersecting_edge_val
elif (eflip in edge_weights.keys()):
edge_weights[eflip]+=intersecting_edge_val
else:
edge_weights[e]=1
v1,v2 = zip(*edge_weights.keys())
weights = edge_weights.values()
edges = zip(v1,v2,weights)
overlap_graph.add_weighted_edges_from(edges)
nx.set_edge_attributes(overlap_graph, name = 'edge_weight', values = edge_weights)
return overlap_graph
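# A small sketch of ``create_graph_overlap`` on two toy graphs (illustrative
# only): nodes 2 and 3 appear in both graphs and therefore receive
# node_overlap == 1, while nodes unique to one graph get 0 or 2.
def _demo_create_graph_overlap():
    G1 = nx.Graph([(1, 2), (2, 3)])
    G2 = nx.Graph([(2, 3), (3, 4)])
    G_overlap = create_graph_overlap(G1, G2, 'first', 'second')
    overlap = nx.get_node_attributes(G_overlap, 'node_overlap')
    return G_overlap, overlap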
def draw_heat_prop(G, seed_nodes, random_walk = True,
edge_cmap=plt.cm.autumn_r,
export_file='heat_prop.json',
export_network=False,
highlight_nodes=None,
k=None,
largest_connected_component=False,
node_cmap=plt.cm.autumn_r,
node_size=10,
num_nodes=None,
physics_enabled=False,
Wprime=None,
**kwargs):
'''
Implements and displays the network propagation for a given graph and seed
nodes. Additional kwargs are passed to visJS_module.
Inputs:
- G: a networkX graph
- seed_nodes: nodes on which to initialize the simulation (must be a dict if random_walk = False)
- random_walk: True to perform a random walk style heat propagation, False to perform a diffusion style one.
- edge_cmap: matplotlib colormap for edges, default: matplotlib.cm.autumn_r
- export_file: JSON file to export graph data, default: 'graph_overlap.json'
- export_network: export network to Cytoscape, default: False
- highlight_nodes: list of nodes to place borders around, default: None
- k: float, optimal distance between nodes for nx.spring_layout(), default: None
- largest_connected_component: boolean, whether or not to display largest_connected_component,
default: False
- node_cmap: matplotlib colormap for nodes, default: matplotlib.cm.autumn_r
- node_size: size of nodes, default: 10
- num_nodes: the number of the hottest nodes to graph, default: None (all nodes will be graphed)
- physics_enabled: enable physics simulation, default: False
- Wprime: normalized adjacency matrix (from function normalized_adj_matrix())
Returns:
- VisJS html network plot (iframe) of the heat propagation.
'''
# check for invalid nodes in seed_nodes
invalid_nodes = [node for node in seed_nodes if node not in G.nodes()]
for node in invalid_nodes:
print ('Node {} not in graph'.format(node))
if invalid_nodes:
return
# perform the network propagation
if random_walk == True: # perform random walk style heat propagation
if Wprime is None:
Wprime = normalized_adj_matrix(G)
prop_graph = network_propagation(G, Wprime, seed_nodes).to_dict()
nx.set_node_attributes(G, name = 'node_heat', values = prop_graph)
else: # perform diffusion style heat propagation
if (type(seed_nodes) == list): # if the user supplies a list, convert to dict
one_list = [1]*len(seed_nodes) # all seed nodes get start value of 1
seed_nodes = dict(zip(seed_nodes, one_list))
elif (type(seed_nodes) != dict):
print('seed_nodes must be a list or a dict')
return -1
heat_kernel = scipy_heatKernel.SciPYKernel(G) # need a graph
diffused_heats = heat_kernel.diffuse(seed_nodes) # need seed_to_heat mapping
nx.set_node_attributes(G, name = 'node_heat', values = dict(diffused_heats))
# find top num_nodes hottest nodes and connected component if requested
G = set_num_nodes(G,num_nodes)
if largest_connected_component:
G = max(nx.connected_component_subgraphs(G), key=len)
nodes = list(G.nodes())
edges = list(G.edges())
# check for empty nodes and edges after getting subgraph of G
if not nodes:
print ('There are no nodes in the graph. Try increasing num_nodes.')
return
if not edges:
print ('There are no edges in the graph. Try increasing num_nodes.')
return
# set the position of each node
if k is None:
pos = nx.spring_layout(G)
else:
pos = nx.spring_layout(G,k=k)
xpos,ypos=zip(*pos.values())
nx.set_node_attributes(G, name = 'xpos', values = dict(zip(pos.keys(),[x*1000 for x in xpos])))
nx.set_node_attributes(G, name = 'ypos', values = dict(zip(pos.keys(),[y*1000 for y in ypos])))
# set the border width of nodes
if 'node_border_width' not in kwargs.keys():
kwargs['node_border_width'] = 2
border_width = {}
for n in nodes:
if n in seed_nodes:
border_width[n] = kwargs['node_border_width']
elif highlight_nodes is not None and n in highlight_nodes:
border_width[n] = kwargs['node_border_width']
else:
border_width[n] = 0
nx.set_node_attributes(G, name = 'nodeOutline', values = border_width)
# set the shape of each node
nodes_shape=[]
for node in G.nodes():
if node in seed_nodes:
nodes_shape.append('triangle')
else:
nodes_shape.append('dot')
node_to_shape=dict(zip(G.nodes(),nodes_shape))
nx.set_node_attributes(G, name = 'nodeShape', values = node_to_shape)
# add a field for node labels
if highlight_nodes:
node_labels = {}
for node in nodes:
if node in seed_nodes:
node_labels[node] = str(node)
elif node in highlight_nodes:
node_labels[node] = str(node)
else:
node_labels[node] = ''
else:
node_labels = {n:str(n) for n in nodes}
nx.set_node_attributes(G, name = 'nodeLabel', values = node_labels)
# set title for each node
node_titles = [str(node[0]) + '<br/>heat = ' + str(round(node[1]['node_heat'],5))
for node in G.nodes(data=True)]
node_titles = dict(zip(G.nodes(),node_titles))
nx.set_node_attributes(G, name = 'nodeTitle', values = node_titles)
# set color of each node
node_to_color = visJS_module.return_node_to_color(G,
field_to_map='node_heat',
cmap=node_cmap,
color_vals_transform='log')
# set heat value of edge based off hottest connecting node's value
node_attr = nx.get_node_attributes(G,'node_heat')
edge_weights = {}
for e in edges:
if node_attr[e[0]] > node_attr[e[1]]:
edge_weights[e] = node_attr[e[0]]
else:
edge_weights[e] = node_attr[e[1]]
nx.set_edge_attributes(G, name = 'edge_weight', values = edge_weights)
# set color of each edge
edge_to_color = visJS_module.return_edge_to_color(G,
field_to_map='edge_weight',
cmap=edge_cmap,
color_vals_transform='log')
# create the nodes_dict with all relevant fields
nodes_dict = [{'id':str(n),
'border_width':border_width[n],
'degree':G.degree(n),
'color':node_to_color[n],
'node_label':node_labels[n],
'node_size':node_size,
'node_shape':node_to_shape[n],
'title':node_titles[n],
'x':np.float64(pos[n][0]).item()*1000,
'y':np.float64(pos[n][1]).item()*1000} for n in nodes]
# map nodes to indices for source/target in edges
node_map = dict(zip(nodes,range(len(nodes))))
# create the edges_dict with all relevant fields
edges_dict = [{'source':node_map[edges[i][0]],
'target':node_map[edges[i][1]],
'color':edge_to_color[edges[i]]} for i in range(len(edges))]
# set node_size_multiplier to increase node size as graph gets smaller
if 'node_size_multiplier' not in kwargs.keys():
if len(nodes) > 500:
kwargs['node_size_multiplier'] = 3
elif len(nodes) > 200:
kwargs['node_size_multiplier'] = 5
else:
kwargs['node_size_multiplier'] = 7
kwargs['physics_enabled'] = physics_enabled
# if node hovering color not set, set default to black
if 'node_color_hover_background' not in kwargs.keys():
kwargs['node_color_hover_background'] = 'black'
# node size determined by size in nodes_dict, not by id
if 'node_size_field' not in kwargs.keys():
kwargs['node_size_field'] = 'node_size'
# node label determined by value in nodes_dict
if 'node_label_field' not in kwargs.keys():
kwargs['node_label_field'] = 'node_label'
# export the network to JSON for Cytoscape
if export_network:
node_colors = map_node_to_color(G,'node_heat',True)
nx.set_node_attributes(G, name = 'nodeColor', values = node_colors)
edge_colors = map_edge_to_color(G,'edge_weight',True)
nx.set_edge_attributes(G, name = 'edgeColor', values = edge_colors)
visJS_module.export_to_cytoscape(G = G,export_file = export_file)
return visJS_module.visjs_network(nodes_dict,edges_dict,**kwargs)
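# A hedged usage sketch for ``draw_heat_prop`` inside a Jupyter notebook
# (illustrative only): the barbell graph, seed nodes and num_nodes value are
# arbitrary, and the return value is the visJS iframe rendered by
# visJS_module.
def _demo_draw_heat_prop():
    G = nx.barbell_graph(10, 2)
    seed_nodes = [0, 1]
    # random_walk=True (the default) computes Wprime internally via
    # normalized_adj_matrix(G) before propagating heat from the seeds.
    return draw_heat_prop(G, seed_nodes, num_nodes=15)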
def draw_colocalization(G, seed_nodes_1, seed_nodes_2,
edge_cmap=plt.cm.autumn_r,
export_file='colocalization.json',
export_network=False,
highlight_nodes=None,
k=None,
largest_connected_component=False,
node_cmap=plt.cm.autumn_r,
node_size=10,
num_nodes=None,
physics_enabled=False,
Wprime=None,
**kwargs):
'''
Implements and displays the network propagation for a given graph and two
sets of seed nodes. Additional kwargs are passed to visJS_module.
Inputs:
- G: a networkX graph
- seed_nodes_1: first set of nodes on which to initialize the simulation
- seed_nodes_2: second set of nodes on which to initialize the simulation
- edge_cmap: matplotlib colormap for edges, optional, default: matplotlib.cm.autumn_r
- export_file: JSON file to export graph data, default: 'colocalization.json'
- export_network: export network to Cytoscape, default: False
- highlight_nodes: list of nodes to place borders around, default: None
- k: float, optional, optimal distance between nodes for nx.spring_layout(), default: None
- largest_connected_component: boolean, optional, whether or not to display largest_connected_component,
default: False
- node_cmap: matplotlib colormap for nodes, optional, default: matplotlib.cm.autumn_r
- node_size: size of nodes, default: 10
- num_nodes: the number of the hottest nodes to graph, default: None (all nodes will be graphed)
- physics_enabled: enable physics simulation, default: False
- Wprime: Normalized adjacency matrix (from normalized_adj_matrix)
Returns:
- VisJS html network plot (iframe) of the colocalization.
'''
# check for invalid nodes in seed_nodes
invalid_nodes = [(node,'seed_nodes_1') for node in seed_nodes_1 if node not in G.nodes()]
invalid_nodes.extend([(node,'seed_nodes_2') for node in seed_nodes_2 if node not in G.nodes()])
for node in invalid_nodes:
print ('Node {} in {} not in graph'.format(node[0], node[1]))
if invalid_nodes:
return
# perform the colocalization
if Wprime is None:
Wprime = normalized_adj_matrix(G)
prop_graph_1 = network_propagation(G, Wprime, seed_nodes_1).to_dict()
prop_graph_2 = network_propagation(G, Wprime, seed_nodes_2).to_dict()
prop_graph = {node:(prop_graph_1[node]*prop_graph_2[node]) for node in prop_graph_1}
nx.set_node_attributes(G, name = 'node_heat', values = prop_graph)
# find top num_nodes hottest nodes and connected component if requested
G = set_num_nodes(G,num_nodes)
if largest_connected_component:
G = max(nx.connected_component_subgraphs(G), key=len)
nodes = list(G.nodes())
edges = list(G.edges())
# check for empty nodes and edges after getting subgraph of G
if not nodes:
print ('There are no nodes in the graph. Try increasing num_nodes.')
return
if not edges:
print ('There are no edges in the graph. Try increasing num_nodes.')
return
# set position of each node
if k is None:
pos = nx.spring_layout(G)
else:
pos = nx.spring_layout(G,k=k)
xpos,ypos=zip(*pos.values())
nx.set_node_attributes(G, name = 'xpos', values = dict(zip(pos.keys(),[x*1000 for x in xpos])))
nx.set_node_attributes(G, name = 'ypos', values = dict(zip(pos.keys(),[y*1000 for y in ypos])))
# set the border width of nodes
if 'node_border_width' not in kwargs.keys():
kwargs['node_border_width'] = 2
border_width = {}
for n in nodes:
if n in seed_nodes_1 or n in seed_nodes_2:
border_width[n] = kwargs['node_border_width']
elif highlight_nodes is not None and n in highlight_nodes:
border_width[n] = kwargs['node_border_width']
else:
border_width[n] = 0
nx.set_node_attributes(G, name = 'nodeOutline', values = border_width)
# set the shape of each node
nodes_shape=[]
for node in G.nodes():
if node in seed_nodes_1:
nodes_shape.append('triangle')
elif node in seed_nodes_2:
nodes_shape.append('square')
else:
nodes_shape.append('dot')
node_to_shape=dict(zip(G.nodes(),nodes_shape))
nx.set_node_attributes(G, name = 'nodeShape', values = node_to_shape)
# add a field for node labels
if highlight_nodes:
node_labels = {}
for node in nodes:
            if node in seed_nodes_1 or node in seed_nodes_2:
node_labels[node] = str(node)
elif node in highlight_nodes:
node_labels[node] = str(node)
else:
node_labels[node] = ''
else:
node_labels = {n:str(n) for n in nodes}
nx.set_node_attributes(G, name = 'nodeLabel', values = node_labels)
# set the title of each node
node_titles = [str(node[0]) + '<br/>heat = ' + str(round(node[1]['node_heat'],10))
for node in G.nodes(data=True)]
node_titles = dict(zip(nodes,node_titles))
nx.set_node_attributes(G, name = 'nodeTitle', values = node_titles)
# set the color of each node
node_to_color = visJS_module.return_node_to_color(G,
field_to_map='node_heat',
cmap=node_cmap,
color_vals_transform='log')
# set heat value of edge based off hottest connecting node's value
node_attr = nx.get_node_attributes(G,'node_heat')
edge_weights = {}
for e in edges:
if node_attr[e[0]] > node_attr[e[1]]:
edge_weights[e] = node_attr[e[0]]
else:
edge_weights[e] = node_attr[e[1]]
nx.set_edge_attributes(G, name = 'edge_weight', values = edge_weights)
# set the color of each edge
edge_to_color = visJS_module.return_edge_to_color(G,
field_to_map = 'edge_weight',
cmap=edge_cmap,
color_vals_transform = 'log')
# create the nodes_dict with all relevant fields
nodes_dict = [{'id':str(n),
'border_width':border_width[n],
'degree':G.degree(n),
'color':node_to_color[n],
'node_label':node_labels[n],
'node_size':node_size,
'node_shape':node_to_shape[n],
'title':node_titles[n],
'x':np.float64(pos[n][0]).item()*1000,
'y':np.float64(pos[n][1]).item()*1000} for n in nodes]
# map nodes to indices for source/target in edges
node_map = dict(zip(nodes, range(len(nodes))))
# create the edges_dict with all relevant fields
edges_dict = [{'source':node_map[edges[i][0]],
'target':node_map[edges[i][1]],
'color':edge_to_color[edges[i]]} for i in range(len(edges))]
# set node_size_multiplier to increase node size as graph gets smaller
if 'node_size_multiplier' not in kwargs.keys():
if len(nodes) > 500:
kwargs['node_size_multiplier'] = 1
elif len(nodes) > 200:
kwargs['node_size_multiplier'] = 3
else:
kwargs['node_size_multiplier'] = 5
kwargs['physics_enabled'] = physics_enabled
# if node hovering color not set, set default to black
if 'node_color_hover_background' not in kwargs.keys():
kwargs['node_color_hover_background'] = 'black'
# node size determined by size in nodes_dict, not by id
if 'node_size_field' not in kwargs.keys():
kwargs['node_size_field'] = 'node_size'
# node label determined by value in nodes_dict
if 'node_label_field' not in kwargs.keys():
kwargs['node_label_field'] = 'node_label'
# export the network to JSON for Cytoscape
if export_network:
node_colors = map_node_to_color(G,'node_heat',True)
nx.set_node_attributes(G, name = 'nodeColor', values = node_colors)
edge_colors = map_edge_to_color(G,'edge_weight',True)
nx.set_edge_attributes(G, name = 'edgeColor', values = edge_colors)
visJS_module.export_to_cytoscape(G = G,export_file = export_file)
return visJS_module.visjs_network(nodes_dict,edges_dict,**kwargs)
def normalized_adj_matrix(G,conserve_heat=True,weighted=False):
'''
    This function returns the normalized adjacency matrix of G.
Inputs:
- G: NetworkX graph from which to calculate normalized adjacency matrix
- conserve_heat:
        - True: heat will be conserved (the heat vector sums to 1); the resulting graph is asymmetric (directed).
        - False: heat will not be conserved; the resulting graph is symmetric.
Returns:
- numpy array of the normalized adjacency matrix.
'''
wvec=[]
for e in G.edges(data=True):
v1 = e[0]
v2 = e[1]
deg1 = G.degree(v1)
deg2 = G.degree(v2)
if weighted:
weight = e[2]['weight']
else:
weight=1
if conserve_heat:
wvec.append((v1,v2,weight/float(deg2))) #np.sqrt(deg1*deg2)))
wvec.append((v2,v1,weight/float(deg1)))
else:
wvec.append((v1,v2,weight/np.sqrt(deg1*deg2)))
if conserve_heat:
# if conserving heat, make G_weighted a di-graph (not symmetric)
G_weighted= nx.DiGraph()
else:
# if not conserving heat, make G_weighted a simple graph (symmetric)
G_weighted = nx.Graph()
G_weighted.add_weighted_edges_from(wvec)
Wprime = nx.to_numpy_matrix(G_weighted,nodelist=list(G.nodes()))
Wprime = np.array(Wprime)
return Wprime
def network_propagation(G,Wprime,seed_nodes,alpha=.5, num_its=20):
'''
This function implements network propagation, as detailed in:
Vanunu, Oron, et al. 'Associating genes and protein complexes with disease
via network propagation.'
Inputs:
- G: NetworkX graph on which to run simulation
- Wprime: Normalized adjacency matrix (from normalized_adj_matrix)
- seed_nodes: Genes on which to initialize the simulation.
- alpha: Heat dissipation coefficient. Default = 0.5
- num_its: Number of iterations (Default = 20. Convergence usually happens within 10)
Returns:
- Fnew: heat vector after propagation
'''
nodes = list(G.nodes())
numnodes = len(nodes)
edges= list(G.edges())
numedges = len(edges)
Fold = np.zeros(numnodes)
Fold = pd.Series(Fold,index=list(G.nodes()))
Y = np.zeros(numnodes)
Y = pd.Series(Y,index=list(G.nodes()))
for g in seed_nodes:
# normalize total amount of heat added, allow for replacement
Y[g] = Y[g]+1/float(len(seed_nodes))
Fold = Y.copy(deep=True)
for t in range(num_its):
Fnew = alpha*np.dot(Wprime,Fold) + np.multiply(1-alpha,Y)
Fold=Fnew
return Fnew
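# Sketch of running the propagation by hand on a small graph (illustrative
# only; the path graph and seed choice are arbitrary). Note that
# normalized_adj_matrix relies on nx.to_numpy_matrix, so this assumes a
# networkx version that still provides it.
def _demo_network_propagation():
    G = nx.path_graph(5)
    Wprime = normalized_adj_matrix(G)
    heat = network_propagation(G, Wprime, seed_nodes=[0])
    # heat is a pandas Series indexed by node; the seed node ends up hottest
    # on this simple chain.
    return heat.sort_values(ascending=False)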
def set_num_nodes(G, num_nodes):
'''
    Restricts the graph to its num_nodes hottest nodes, ranked by the
    'node_heat' attribute.
Inputs:
- G: a networkX graph
- num_nodes: the number of the hottest nodes to graph
Returns:
- networkX graph that is the subgraph of G with the num_nodes hottest
nodes
'''
if num_nodes != None and num_nodes < len(G.nodes()):
node_heat = [(node[0], node[1]['node_heat']) for node in G.nodes(data=True)]
nodes_sorted = sorted(node_heat, key=lambda x: x[1], reverse=True)
top_hottest_nodes = [nodes_sorted[i][0] for i in range(num_nodes)]
return G.subgraph(top_hottest_nodes)
return G
def map_node_to_color(G,field_to_map,color_vals_transform):
'''
Maps node to color value between 0 and 1 based on the given field.
Inputs:
- G: networkX graph
- field_to_map: node attribute to map color to
- color_vals_transform: to calculate color vals with log (boolean)
Returns:
- Dictionary that maps node to color value.
'''
node_to_field = [(n[0], max(n[1][field_to_map], 10**-18))
for n in G.nodes(data=True)]
nodes,data = zip(*node_to_field)
if color_vals_transform:
nonzero_list = [d for d in data if d>(10**-18)]
if not nonzero_list:
data = [1 for d in data]
else:
min_val = min(nonzero_list)
data = [np.log(max(d,min_val)) for d in data] #set 0 vals to min val
data = [(d-np.min(data)) for d in data] #shift so we don't have neg vals
min_val = np.min(data)
max_val = np.max(data) - min_val
color_list = [float(d-min_val)/max_val for d in data]
return dict(zip(G.nodes(),color_list))
def map_edge_to_color(G,field_to_map,color_vals_transform):
'''
Maps edge to color value between 0 and 1 based on the given field.
Inputs:
- G: networkX graph
- field_to_map: edge attribute to map color to
- color_vals_transform: to calculate color vals with log (boolean)
Returns:
- Dictionary that maps edge to color value.
'''
edges_data = [(e[0],e[1],e[2][field_to_map])
for e in G.edges(data=True)]
edges1,edges2,data = zip(*edges_data)
if color_vals_transform:
nonzero_list = [d for d in data if d>(10**-18)]
if not nonzero_list:
data = [1 for d in data]
else:
min_dn0 = min([d for d in data if d>(10**-18)])
data = [np.log(max(d,min_dn0)) for d in data] #set 0 vals to min val
data = [(d-np.min(data)) for d in data] #shift so we don't have neg vals
edges_data = zip(zip(edges1,edges2),data)
edge_to_field = dict(edges_data)
min_val = np.min(list(edge_to_field.values()))
max_val = np.max(list(edge_to_field.values())) - min_val
color_list = [float(edge_to_field[e]-min_val)/max_val for e in G.edges()]
return dict(zip(G.edges(),color_list))
|
ucsd-ccbb/visJS2jupyter
|
visJS2jupyter/visualizations.py
|
Python
|
mit
| 33,209
|
[
"Cytoscape"
] |
acbcada08afdfad47b9b5d081e7562a46bfe8302022ce524a989b1fa44b4e38a
|
#!/usr/bin/python
"""
# Created on Aug 12, 2016
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com) GitHub ID: grastogi23
#
# module_check: not supported
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_api
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Avi API Module
description:
- This module can be used for calling any resources defined in Avi REST API. U(https://avinetworks.com/)
    - This module is useful for invoking HTTP Patch methods and accessing resources that do not have a REST object associated with them.
version_added: 2.3
requirements: [ avisdk ]
options:
http_method:
description:
- Allowed HTTP methods for RESTful services and are supported by Avi Controller.
choices: ["get", "put", "post", "patch", "delete"]
required: true
data:
description:
- HTTP body in YAML or JSON format.
params:
description:
- Query parameters passed to the HTTP API.
path:
description:
            - 'Path for Avi API resource. For example, C(path: virtualservice) will translate to C(api/virtualservice).'
timeout:
description:
- Timeout (in seconds) for Avi API calls.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Get Pool Information using avi_api_session
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: get
path: pool
params:
name: "{{ pool_name }}"
register: pool_results
- name: Patch Pool with list of servers
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: patch
path: "{{ pool_path }}"
data:
add:
servers:
- ip:
addr: 10.10.10.10
type: V4
- ip:
addr: 20.20.20.20
type: V4
register: updated_pool
- name: Fetch Pool metrics bandwidth and connections rate
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: get
path: analytics/metrics/pool
params:
name: "{{ pool_name }}"
metric_id: l4_server.avg_bandwidth,l4_server.avg_complete_conns
step: 300
limit: 10
register: pool_metrics
'''
RETURN = '''
obj:
description: Avi REST resource
returned: success, changed
type: dict
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.avi import avi_common_argument_spec, ansible_return
from copy import deepcopy
HAS_AVI = True
try:
from avi.sdk.avi_api import ApiSession
from avi.sdk.utils.ansible_utils import avi_obj_cmp, cleanup_absent_fields
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
http_method=dict(required=True,
choices=['get', 'put', 'post', 'patch',
'delete']),
path=dict(type='str', required=True),
params=dict(type='dict'),
data=dict(type='jsonarg'),
timeout=dict(type='int', default=60)
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(argument_spec=argument_specs)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
tenant_uuid = module.params.get('tenant_uuid', None)
api = ApiSession.get_session(
module.params['controller'], module.params['username'],
module.params['password'], tenant=module.params['tenant'],
tenant_uuid=tenant_uuid)
tenant = module.params.get('tenant', '')
timeout = int(module.params.get('timeout'))
# path is a required argument
path = module.params.get('path', '')
params = module.params.get('params', None)
data = module.params.get('data', None)
if data is not None:
data = json.loads(data)
method = module.params['http_method']
existing_obj = None
changed = method != 'get'
gparams = deepcopy(params) if params else {}
gparams.update({'include_refs': '', 'include_name': ''})
if method == 'post':
# need to check if object already exists. In that case
# change the method to be put
gparams['name'] = data['name']
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams)
try:
existing_obj = rsp.json()['results'][0]
except IndexError:
# object is not found
pass
else:
# object is present
method = 'put'
path += '/' + existing_obj['uuid']
if method == 'put':
        # put can happen when the full path is specified, or as put + post
if existing_obj is None:
using_collection = False
if (len(path.split('/')) == 1) and ('name' in data):
gparams['name'] = data['name']
using_collection = True
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams)
rsp_data = rsp.json()
if using_collection:
if rsp_data['results']:
existing_obj = rsp_data['results'][0]
path += '/' + existing_obj['uuid']
else:
method = 'post'
else:
if rsp.status_code == 404:
method = 'post'
else:
existing_obj = rsp_data
if existing_obj:
changed = not avi_obj_cmp(data, existing_obj)
cleanup_absent_fields(data)
if method == 'patch':
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams)
existing_obj = rsp.json()
if (method == 'put' and changed) or (method != 'put'):
fn = getattr(api, method)
        rsp = fn(path, tenant=tenant, tenant_uuid=tenant_uuid, timeout=timeout,
params=params, data=data)
else:
rsp = None
if method == 'delete' and rsp.status_code == 404:
changed = False
rsp.status_code = 200
if method == 'patch' and existing_obj and rsp.status_code < 299:
        # Ideally the comparison should happen with the return values
        # from the patch API call. However, the Avi API currently returns a
        # different hostname when GET is used vs. PATCH.
        # Tracked as AV-12561.
if path.startswith('pool'):
time.sleep(1)
gparams = deepcopy(params) if params else {}
gparams.update({'include_refs': '', 'include_name': ''})
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams)
new_obj = rsp.json()
changed = not avi_obj_cmp(new_obj, existing_obj)
if rsp is None:
return module.exit_json(changed=changed, obj=existing_obj)
return ansible_return(module, rsp, changed, req=data)
if __name__ == '__main__':
main()
|
civisanalytics/ansible
|
lib/ansible/modules/network/avi/avi_api_session.py
|
Python
|
gpl-3.0
| 8,020
|
[
"VisIt"
] |
5a07eaaa1aa37e56ace988fb6be6e05f02719a7b55234b929869602c5a21eb14
|
"""Get the error of a model. This tool supports multiple error measures."""
# Core Library modules
import csv
import itertools
import logging
import os
import subprocess
import sys
import tempfile
import time
from collections import OrderedDict
from collections.abc import Callable
# Third party modules
import pkg_resources
import yaml
# First party modules
import hwrt.utils as utils
logger = logging.getLogger(__name__)
class DefaultOrderedDict(OrderedDict):
# Source: http://stackoverflow.com/a/6190500/562769
def __init__(self, default_factory=None, *a, **kw):
if default_factory is not None and not isinstance(default_factory, Callable):
raise TypeError("first argument must be callable")
OrderedDict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return OrderedDict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = ()
else:
args = (self.default_factory,)
return type(self), args, None, None, list(self.items())
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
# Core Library modules
import copy
return type(self)(self.default_factory, copy.deepcopy(list(self.items())))
def __repr__(self):
return (
f"OrderedDefaultDict({self.default_factory}, "
f"{OrderedDict.__repr__(self)})"
)
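# A short usage sketch for DefaultOrderedDict (illustrative only): it behaves
# like collections.defaultdict but preserves the insertion order of keys.
def _demo_default_ordered_dict():
    groups = DefaultOrderedDict(list)
    groups["alpha"].append(1)  # a missing key is created via the factory
    groups["beta"].append(2)
    assert list(groups.keys()) == ["alpha", "beta"]
    return groups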
def get_test_results(model_folder, basename, test_file):
model_src = utils.get_latest_model(model_folder, basename)
if model_src is None:
logger.error(f"No model with basename '{basename}' found in '{model_folder}'.")
else:
_, model_use = tempfile.mkstemp(suffix=".json", text=True)
utils.create_adjusted_model_for_percentages(model_src, model_use)
# Start evaluation
project_root = utils.get_project_root()
time_prefix = time.strftime("%Y-%m-%d-%H-%M")
logger.info(f"Evaluate '{model_src}' with '{test_file}'...")
logfile = os.path.join(
project_root, "logs/%s-error-evaluation.log" % time_prefix
)
logger.info(f"Write log to {logfile}...")
with open(logfile, "w") as log, open(model_use) as model_src_p:
p = subprocess.Popen(
[
utils.get_nntoolkit(),
"run",
"--batch-size",
"1",
"-f%0.4f",
test_file,
],
stdin=model_src_p,
stdout=log,
)
ret = p.wait()
if ret != 0:
logger.error(f"nntoolkit finished with ret code {ret}")
sys.exit(-1)
os.remove(model_use)
return logfile
def make_all(tuplelist):
t = []
for confusiongroup in tuplelist:
for x, y in itertools.permutations(confusiongroup, 2):
t.append((x, y))
return t
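# Sketch of what ``make_all`` produces (illustrative only): every ordered pair
# within each confusion group, so a group of two symbols yields two pairs and
# a group of three yields six.
def _demo_make_all():
    pairs = make_all([["\\sum", "\\Sigma"], ["O", "0", "o"]])
    assert ("\\sum", "\\Sigma") in pairs and ("\\Sigma", "\\sum") in pairs
    assert len(pairs) == 2 + 6
    return pairs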
def create_report(true_data, eval_data, index2latex, n, merge=True):
r"""
Parameters
----------
true_data : list
Labels
eval_data : list
Predicted labels
index2latex : dict
Maps the output neurons index to LaTeX
n : TODO?
merge : bool
        If set to True, some symbols like \sum and \Sigma will not be counted as
errors when confused.
"""
# Gather data
correct = []
wrong = []
# Get MER classes
merge_cfg_path = pkg_resources.resource_filename(__name__, "misc/")
merge_cfg_file = os.path.join(merge_cfg_path, "merge.yml")
with open(merge_cfg_file) as fp:
merge_data = yaml.safe_load(fp)
# Make classes
confusing = make_all(merge_data)
if not merge:
confusing = []
# Get false/true negative/positive for each symbol
statistical = {}
possible_keys = []
assert len(true_data) > 0, "true_data was empty"
assert len(true_data) == len(eval_data), "len(true_data)=%i, len(eval_data)=%i" % (
len(true_data),
len(eval_data),
)
for known, evaluated in zip(true_data, eval_data):
evaluated_t1 = list(evaluated.keys())[0]
if known["index"] not in statistical:
statistical[known["index"]] = {
"FP": 0,
"TP": 0,
"FN": 0,
"TN": 0,
"latex": index2latex[known["index"]],
}
possible_keys.append(known["index"])
for key in list(evaluated.keys()):
if key not in statistical:
if key not in index2latex:
                    logger.error(
                        f"Key '{key}' is not in index2latex. Did you "
                        "perhaps define too small a number of "
                        "output nodes?"
                    )
logger.error(f"index2latex.keys(): {index2latex.keys()}")
sys.exit(-1)
statistical[key] = {
"FP": 0,
"TP": 0,
"FN": 0,
"TN": 0,
"latex": index2latex[key],
}
possible_keys.append(key)
if known["index"] in list(evaluated.keys())[:n]:
statistical[known["index"]]["TP"] += 1
correct.append(known)
for key in possible_keys:
if key != known["index"]:
statistical[key]["TN"] += 1
elif (index2latex[known["index"]], index2latex[evaluated_t1]) in confusing:
# Some confusions are ok!
statistical[known["index"]]["TP"] += 1
correct.append(known)
for key in possible_keys:
if key != known["index"]:
statistical[key]["TN"] += 1
else:
for key in possible_keys:
if key != known["index"]:
if key not in list(evaluated.keys())[:n]:
statistical[key]["TN"] += 1
else:
statistical[key]["FP"] += 1
else:
statistical[key]["FN"] += 1
formula_id = index2latex[evaluated_t1]
known["confused"] = formula_id # That's an index!
wrong.append(known)
classification_error = len(wrong) / float(len(wrong) + len(correct))
logger.info(
f"Classification error (n={n}, MER={merge}): "
f"{classification_error:0.4f} ({len(wrong)} of {len(eval_data)} wrong)",
)
# Get the data
errors_by_correct_classification = DefaultOrderedDict(list)
errors_by_wrong_classification = DefaultOrderedDict(list)
for el in wrong:
errors_by_correct_classification[el["latex"]].append(el)
errors_by_wrong_classification[el["confused"]].append(el)
# Sort errors_by_correct_classification
tmp = sorted(
iter(errors_by_correct_classification.items()),
key=lambda n: len(n[1]),
reverse=True,
)
errors_by_correct_classification = OrderedDict(tmp)
for key in errors_by_correct_classification:
tmp = sorted(errors_by_correct_classification[key], key=lambda n: n["confused"])
errors_by_correct_classification[key] = tmp
# Sort errors_by_wrong_classification
tmp = sorted(
iter(errors_by_wrong_classification.items()),
key=lambda n: len(n[1]),
reverse=True,
)
errors_by_wrong_classification = OrderedDict(tmp)
for key in errors_by_wrong_classification:
tmp = sorted(errors_by_wrong_classification[key], key=lambda n: n["latex"])
errors_by_wrong_classification[key] = tmp
    # Get the template
project_root = utils.get_project_root()
template_path = pkg_resources.resource_filename("hwrt", "templates/")
template = os.path.join(template_path, "classification-error-report.html")
with open(template) as f:
template = f.read()
# Find right place for report file
time_prefix = time.strftime("%Y-%m-%d-%H-%M")
directory = os.path.join(project_root, "reports")
if not os.path.exists(directory):
os.makedirs(directory)
target = os.path.join(
project_root, f"reports/{time_prefix}-classification-error-report.html"
)
# Fill the template
# Third party modules
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
env = Environment()
env.loader = FileSystemLoader(template_path)
t = env.get_template("classification-error-report.html")
rendered = t.render(
wrong=wrong,
correct=correct,
classification_error=classification_error,
errors_by_correct_classification=errors_by_correct_classification,
errors_by_wrong_classification=errors_by_wrong_classification,
statistical=statistical,
)
with open(target, "w") as f:
f.write(rendered)
def analyze_results(
translation_csv, what_evaluated_file, evaluation_file, n, merge=True
):
r"""
Parameters
----------
translation_csv : string
Path to a CSV file which translates the output neuron into semantics.
what_evaluated_file : string
Path to a CSV file which translates testing data to LaTeX labels
(and more?)
evaluation_file : string
Path to a file which has the test data.
    n : int
        Number of highest-ranked predictions considered when judging a
        classification as correct (top-n evaluation).
    merge : bool
        If set to True, some symbols like \sum and \Sigma will not be counted
        as errors when confused.
"""
index2latex = {}
with open(translation_csv) as csvfile:
spamreader = csv.DictReader(csvfile, delimiter=",", quotechar='"')
for row in spamreader:
index2latex[int(row["index"])] = row["latex"]
with open(evaluation_file) as f:
eval_data = f.readlines() # Has no heading
# Get probability array (index is class)
for i in range(len(eval_data)):
        eval_data[i] = eval_data[i].strip()
        if not eval_data[i]:
            continue  # Skip empty lines
        splitted = eval_data[i].split(" ")
        eval_data[i] = list(map(float, splitted))
# index -> probability dictionary
d = OrderedDict()
for index, prob in enumerate(eval_data[i]):
d[index] = prob
# Sort descending by probability
d = OrderedDict(sorted(iter(d.items()), key=lambda n: n[1], reverse=True))
eval_data[i] = d
true_data = []
with open(what_evaluated_file) as csvfile:
spamreader = csv.DictReader(csvfile, delimiter=",", quotechar='"')
for row in spamreader:
row["index"] = int(row["index"])
true_data.append(row)
create_report(true_data, eval_data, index2latex, n, merge)
def main(model_folder, aset="test", n=3, merge=True):
"""Main part of the test script."""
project_root = utils.get_project_root()
if aset == "test":
key_model, key_file = "testing", "testdata"
elif aset == "valid":
key_model, key_file = "validating", "validdata"
else:
key_model, key_file = "training", "traindata"
# Get model description
model_description_file = os.path.join(model_folder, "info.yml")
# Read the model description file
with open(model_description_file) as ymlfile:
model_description = yaml.safe_load(ymlfile)
# Get the data paths (hdf5)
project_root = utils.get_project_root()
data = {}
data["training"] = os.path.join(
project_root, model_description["data-source"], "traindata.hdf5"
)
data["testing"] = os.path.join(
project_root, model_description["data-source"], "testdata.hdf5"
)
data["validating"] = os.path.join(
project_root, model_description["data-source"], "validdata.hdf5"
)
test_data_path = os.path.join(model_folder, data[key_model])
evaluation_file = get_test_results(model_folder, "model", test_data_path)
translation_csv = os.path.join(
project_root, model_description["data-source"], "index2formula_id.csv"
)
what_evaluated_file = os.path.join(
project_root, model_description["data-source"], "translation-%s.csv" % key_file
)
analyze_results(translation_csv, what_evaluated_file, evaluation_file, n, merge)
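# Hedged usage sketch; the model folder path is only an assumed example:
#   main("path/to/model-folder", aset="test", n=3, merge=True)
# evaluates the test set with top-3 scoring and MER merging enabled.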
|
MartinThoma/hwrt
|
hwrt/test.py
|
Python
|
mit
| 12,629
|
[
"NEURON"
] |
b2defa7ef11d501f681138e48a9b93a982b89a2830226b7f4e5b05dda75f2f93
|
# -*- coding: utf-8 -*-
import re
from numbers import Integral
from collections import namedtuple
__all__ = ["countries"]
try:
basestring
except NameError:
basestring = str
Country = namedtuple('Country',
'name alpha2 alpha3 numeric apolitical_name')
_records = [
Country(u"Afghanistan", "AF", "AFG", "004", u"Afghanistan"),
Country(u"Åland Islands", "AX", "ALA", "248", u"Åland Islands"),
Country(u"Albania", "AL", "ALB", "008", u"Albania"),
Country(u"Algeria", "DZ", "DZA", "012", u"Algeria"),
Country(u"American Samoa", "AS", "ASM", "016", u"American Samoa"),
Country(u"Andorra", "AD", "AND", "020", u"Andorra"),
Country(u"Angola", "AO", "AGO", "024", u"Angola"),
Country(u"Anguilla", "AI", "AIA", "660", u"Anguilla"),
Country(u"Antarctica", "AQ", "ATA", "010", u"Antarctica"),
Country(u"Antigua and Barbuda", "AG", "ATG", "028",
u"Antigua and Barbuda"),
Country(u"Argentina", "AR", "ARG", "032", u"Argentina"),
Country(u"Armenia", "AM", "ARM", "051", u"Armenia"),
Country(u"Aruba", "AW", "ABW", "533", u"Aruba"),
Country(u"Australia", "AU", "AUS", "036", u"Australia"),
Country(u"Austria", "AT", "AUT", "040", u"Austria"),
Country(u"Azerbaijan", "AZ", "AZE", "031", u"Azerbaijan"),
Country(u"Bahamas", "BS", "BHS", "044", u"Bahamas"),
Country(u"Bahrain", "BH", "BHR", "048", u"Bahrain"),
Country(u"Bangladesh", "BD", "BGD", "050", u"Bangladesh"),
Country(u"Barbados", "BB", "BRB", "052", u"Barbados"),
Country(u"Belarus", "BY", "BLR", "112", u"Belarus"),
Country(u"Belgium", "BE", "BEL", "056", u"Belgium"),
Country(u"Belize", "BZ", "BLZ", "084", u"Belize"),
Country(u"Benin", "BJ", "BEN", "204", u"Benin"),
Country(u"Bermuda", "BM", "BMU", "060", u"Bermuda"),
Country(u"Bhutan", "BT", "BTN", "064", u"Bhutan"),
Country(u"Bolivia, Plurinational State of", "BO", "BOL", "068",
u"Bolivia, Plurinational State of"),
Country(u"Bonaire, Sint Eustatius and Saba", "BQ", "BES", "535",
u"Bonaire, Sint Eustatius and Saba"),
Country(u"Bosnia and Herzegovina", "BA", "BIH", "070",
u"Bosnia and Herzegovina"),
Country(u"Botswana", "BW", "BWA", "072", u"Botswana"),
Country(u"Bouvet Island", "BV", "BVT", "074", u"Bouvet Island"),
Country(u"Brazil", "BR", "BRA", "076", u"Brazil"),
Country(u"British Indian Ocean Territory", "IO", "IOT", "086",
u"British Indian Ocean Territory"),
Country(u"Brunei Darussalam", "BN", "BRN", "096",
u"Brunei Darussalam"),
Country(u"Bulgaria", "BG", "BGR", "100", u"Bulgaria"),
Country(u"Burkina Faso", "BF", "BFA", "854", u"Burkina Faso"),
Country(u"Burundi", "BI", "BDI", "108", u"Burundi"),
Country(u"Cambodia", "KH", "KHM", "116", u"Cambodia"),
Country(u"Cameroon", "CM", "CMR", "120", u"Cameroon"),
Country(u"Canada", "CA", "CAN", "124", u"Canada"),
Country(u"Cabo Verde", "CV", "CPV", "132", u"Cabo Verde"),
Country(u"Cayman Islands", "KY", "CYM", "136", u"Cayman Islands"),
Country(u"Central African Republic", "CF", "CAF", "140",
u"Central African Republic"),
Country(u"Chad", "TD", "TCD", "148", u"Chad"),
Country(u"Chile", "CL", "CHL", "152", u"Chile"),
Country(u"China", "CN", "CHN", "156", u"China"),
Country(u"Christmas Island", "CX", "CXR", "162", u"Christmas Island"),
Country(u"Cocos (Keeling) Islands", "CC", "CCK", "166",
u"Cocos (Keeling) Islands"),
Country(u"Colombia", "CO", "COL", "170", u"Colombia"),
Country(u"Comoros", "KM", "COM", "174", u"Comoros"),
Country(u"Congo", "CG", "COG", "178", u"Congo"),
Country(u"Congo, Democratic Republic of the", "CD", "COD", "180",
u"Congo, Democratic Republic of the"),
Country(u"Cook Islands", "CK", "COK", "184", u"Cook Islands"),
Country(u"Costa Rica", "CR", "CRI", "188", u"Costa Rica"),
Country(u"Côte d'Ivoire", "CI", "CIV", "384", u"Côte d'Ivoire"),
Country(u"Croatia", "HR", "HRV", "191", u"Croatia"),
Country(u"Cuba", "CU", "CUB", "192", u"Cuba"),
Country(u"Curaçao", "CW", "CUW", "531", u"Curaçao"),
Country(u"Cyprus", "CY", "CYP", "196", u"Cyprus"),
Country(u"Czechia", "CZ", "CZE", "203", u"Czechia"),
Country(u"Denmark", "DK", "DNK", "208", u"Denmark"),
Country(u"Djibouti", "DJ", "DJI", "262", u"Djibouti"),
Country(u"Dominica", "DM", "DMA", "212", u"Dominica"),
Country(u"Dominican Republic", "DO", "DOM", "214", u"Dominican Republic"),
Country(u"Ecuador", "EC", "ECU", "218", u"Ecuador"),
Country(u"Egypt", "EG", "EGY", "818", u"Egypt"),
Country(u"El Salvador", "SV", "SLV", "222", u"El Salvador"),
Country(u"Equatorial Guinea", "GQ", "GNQ", "226", u"Equatorial Guinea"),
Country(u"Eritrea", "ER", "ERI", "232", u"Eritrea"),
Country(u"Estonia", "EE", "EST", "233", u"Estonia"),
Country(u"Ethiopia", "ET", "ETH", "231", u"Ethiopia"),
Country(u"Falkland Islands (Malvinas)", "FK", "FLK", "238",
u"Falkland Islands (Malvinas)"),
Country(u"Faroe Islands", "FO", "FRO", "234", u"Faroe Islands"),
Country(u"Fiji", "FJ", "FJI", "242", u"Fiji"),
Country(u"Finland", "FI", "FIN", "246", u"Finland"),
Country(u"France", "FR", "FRA", "250", u"France"),
Country(u"French Guiana", "GF", "GUF", "254", u"French Guiana"),
Country(u"French Polynesia", "PF", "PYF", "258", u"French Polynesia"),
Country(u"French Southern Territories", "TF", "ATF", "260",
u"French Southern Territories"),
Country(u"Gabon", "GA", "GAB", "266", u"Gabon"),
Country(u"Gambia", "GM", "GMB", "270", u"Gambia"),
Country(u"Georgia", "GE", "GEO", "268", u"Georgia"),
Country(u"Germany", "DE", "DEU", "276", u"Germany"),
Country(u"Ghana", "GH", "GHA", "288", u"Ghana"),
Country(u"Gibraltar", "GI", "GIB", "292", u"Gibraltar"),
Country(u"Greece", "GR", "GRC", "300", u"Greece"),
Country(u"Greenland", "GL", "GRL", "304", u"Greenland"),
Country(u"Grenada", "GD", "GRD", "308", u"Grenada"),
Country(u"Guadeloupe", "GP", "GLP", "312", u"Guadeloupe"),
Country(u"Guam", "GU", "GUM", "316", u"Guam"),
Country(u"Guatemala", "GT", "GTM", "320", u"Guatemala"),
Country(u"Guernsey", "GG", "GGY", "831", u"Guernsey"),
Country(u"Guinea", "GN", "GIN", "324", u"Guinea"),
Country(u"Guinea-Bissau", "GW", "GNB", "624", u"Guinea-Bissau"),
Country(u"Guyana", "GY", "GUY", "328", u"Guyana"),
Country(u"Haiti", "HT", "HTI", "332", u"Haiti"),
Country(u"Heard Island and McDonald Islands", "HM", "HMD", "334",
u"Heard Island and McDonald Islands"),
Country(u"Holy See", "VA", "VAT", "336", u"Holy See"),
Country(u"Honduras", "HN", "HND", "340", u"Honduras"),
Country(u"Hong Kong", "HK", "HKG", "344", u"Hong Kong"),
Country(u"Hungary", "HU", "HUN", "348", u"Hungary"),
Country(u"Iceland", "IS", "ISL", "352", u"Iceland"),
Country(u"India", "IN", "IND", "356", u"India"),
Country(u"Indonesia", "ID", "IDN", "360", u"Indonesia"),
Country(u"Iran, Islamic Republic of", "IR", "IRN", "364",
u"Iran, Islamic Republic of"),
Country(u"Iraq", "IQ", "IRQ", "368", u"Iraq"),
Country(u"Ireland", "IE", "IRL", "372", u"Ireland"),
Country(u"Isle of Man", "IM", "IMN", "833", u"Isle of Man"),
Country(u"Israel", "IL", "ISR", "376", u"Israel"),
Country(u"Italy", "IT", "ITA", "380", u"Italy"),
Country(u"Jamaica", "JM", "JAM", "388", u"Jamaica"),
Country(u"Japan", "JP", "JPN", "392", u"Japan"),
Country(u"Jersey", "JE", "JEY", "832", u"Jersey"),
Country(u"Jordan", "JO", "JOR", "400", u"Jordan"),
Country(u"Kazakhstan", "KZ", "KAZ", "398", u"Kazakhstan"),
Country(u"Kenya", "KE", "KEN", "404", u"Kenya"),
Country(u"Kiribati", "KI", "KIR", "296", u"Kiribati"),
Country(u"Korea, Democratic People's Republic of", "KP", "PRK", "408",
u"Korea, Democratic People's Republic of"),
Country(u"Korea, Republic of", "KR", "KOR", "410", u"Korea, Republic of"),
Country(u"Kuwait", "KW", "KWT", "414", u"Kuwait"),
Country(u"Kyrgyzstan", "KG", "KGZ", "417", u"Kyrgyzstan"),
Country(u"Lao People's Democratic Republic", "LA", "LAO", "418",
u"Lao People's Democratic Republic"),
Country(u"Latvia", "LV", "LVA", "428", u"Latvia"),
Country(u"Lebanon", "LB", "LBN", "422", u"Lebanon"),
Country(u"Lesotho", "LS", "LSO", "426", u"Lesotho"),
Country(u"Liberia", "LR", "LBR", "430", u"Liberia"),
Country(u"Libya", "LY", "LBY", "434", u"Libya"),
Country(u"Liechtenstein", "LI", "LIE", "438", u"Liechtenstein"),
Country(u"Lithuania", "LT", "LTU", "440", u"Lithuania"),
Country(u"Luxembourg", "LU", "LUX", "442", u"Luxembourg"),
Country(u"Macao", "MO", "MAC", "446", u"Macao"),
Country(u"Macedonia, the former Yugoslav Republic of", "MK", "MKD", "807",
u"Macedonia, the former Yugoslav Republic of"),
Country(u"Madagascar", "MG", "MDG", "450", u"Madagascar"),
Country(u"Malawi", "MW", "MWI", "454", u"Malawi"),
Country(u"Malaysia", "MY", "MYS", "458", u"Malaysia"),
Country(u"Maldives", "MV", "MDV", "462", u"Maldives"),
Country(u"Mali", "ML", "MLI", "466", u"Mali"),
Country(u"Malta", "MT", "MLT", "470", u"Malta"),
Country(u"Marshall Islands", "MH", "MHL", "584", u"Marshall Islands"),
Country(u"Martinique", "MQ", "MTQ", "474", u"Martinique"),
Country(u"Mauritania", "MR", "MRT", "478", u"Mauritania"),
Country(u"Mauritius", "MU", "MUS", "480", u"Mauritius"),
Country(u"Mayotte", "YT", "MYT", "175", u"Mayotte"),
Country(u"Mexico", "MX", "MEX", "484", u"Mexico"),
Country(u"Micronesia, Federated States of", "FM", "FSM", "583",
u"Micronesia, Federated States of"),
Country(u"Moldova, Republic of", "MD", "MDA", "498",
u"Moldova, Republic of"),
Country(u"Monaco", "MC", "MCO", "492", u"Monaco"),
Country(u"Mongolia", "MN", "MNG", "496", u"Mongolia"),
Country(u"Montenegro", "ME", "MNE", "499", u"Montenegro"),
Country(u"Montserrat", "MS", "MSR", "500", u"Montserrat"),
Country(u"Morocco", "MA", "MAR", "504", u"Morocco"),
Country(u"Mozambique", "MZ", "MOZ", "508", u"Mozambique"),
Country(u"Myanmar", "MM", "MMR", "104", u"Myanmar"),
Country(u"Namibia", "NA", "NAM", "516", u"Namibia"),
Country(u"Nauru", "NR", "NRU", "520", u"Nauru"),
Country(u"Nepal", "NP", "NPL", "524", u"Nepal"),
Country(u"Netherlands", "NL", "NLD", "528", u"Netherlands"),
Country(u"New Caledonia", "NC", "NCL", "540", u"New Caledonia"),
Country(u"New Zealand", "NZ", "NZL", "554", u"New Zealand"),
Country(u"Nicaragua", "NI", "NIC", "558", u"Nicaragua"),
Country(u"Niger", "NE", "NER", "562", u"Niger"),
Country(u"Nigeria", "NG", "NGA", "566", u"Nigeria"),
Country(u"Niue", "NU", "NIU", "570", u"Niue"),
Country(u"Norfolk Island", "NF", "NFK", "574", u"Norfolk Island"),
Country(u"Northern Mariana Islands", "MP", "MNP", "580",
u"Northern Mariana Islands"),
Country(u"Norway", "NO", "NOR", "578", u"Norway"),
Country(u"Oman", "OM", "OMN", "512", u"Oman"),
Country(u"Pakistan", "PK", "PAK", "586", u"Pakistan"),
Country(u"Palau", "PW", "PLW", "585", u"Palau"),
Country(u"Palestine, State of", "PS", "PSE", "275",
u"Palestine"),
Country(u"Panama", "PA", "PAN", "591", u"Panama"),
Country(u"Papua New Guinea", "PG", "PNG", "598",
u"Papua New Guinea"),
Country(u"Paraguay", "PY", "PRY", "600", u"Paraguay"),
Country(u"Peru", "PE", "PER", "604", u"Peru"),
Country(u"Philippines", "PH", "PHL", "608", u"Philippines"),
Country(u"Pitcairn", "PN", "PCN", "612", u"Pitcairn"),
Country(u"Poland", "PL", "POL", "616", u"Poland"),
Country(u"Portugal", "PT", "PRT", "620", u"Portugal"),
Country(u"Puerto Rico", "PR", "PRI", "630", u"Puerto Rico"),
Country(u"Qatar", "QA", "QAT", "634", u"Qatar"),
Country(u"Réunion", "RE", "REU", "638", u"Réunion"),
Country(u"Romania", "RO", "ROU", "642", u"Romania"),
Country(u"Russian Federation", "RU", "RUS", "643",
u"Russian Federation"),
Country(u"Rwanda", "RW", "RWA", "646", u"Rwanda"),
Country(u"Saint Barthélemy", "BL", "BLM", "652",
u"Saint Barthélemy"),
Country(u"Saint Helena, Ascension and Tristan da Cunha",
"SH", "SHN", "654",
u"Saint Helena, Ascension and Tristan da Cunha"),
Country(u"Saint Kitts and Nevis", "KN", "KNA", "659",
u"Saint Kitts and Nevis"),
Country(u"Saint Lucia", "LC", "LCA", "662", u"Saint Lucia"),
Country(u"Saint Martin (French part)", "MF", "MAF", "663",
u"Saint Martin (French part)"),
Country(u"Saint Pierre and Miquelon", "PM", "SPM", "666",
u"Saint Pierre and Miquelon"),
Country(u"Saint Vincent and the Grenadines", "VC", "VCT", "670",
u"Saint Vincent and the Grenadines"),
Country(u"Samoa", "WS", "WSM", "882", u"Samoa"),
Country(u"San Marino", "SM", "SMR", "674", u"San Marino"),
Country(u"Sao Tome and Principe", "ST", "STP", "678",
u"Sao Tome and Principe"),
Country(u"Saudi Arabia", "SA", "SAU", "682", u"Saudi Arabia"),
Country(u"Senegal", "SN", "SEN", "686", u"Senegal"),
Country(u"Serbia", "RS", "SRB", "688", u"Serbia"),
Country(u"Seychelles", "SC", "SYC", "690", u"Seychelles"),
Country(u"Sierra Leone", "SL", "SLE", "694", u"Sierra Leone"),
Country(u"Singapore", "SG", "SGP", "702", u"Singapore"),
Country(u"Sint Maarten (Dutch part)", "SX", "SXM", "534",
u"Sint Maarten (Dutch part)"),
Country(u"Slovakia", "SK", "SVK", "703", u"Slovakia"),
Country(u"Slovenia", "SI", "SVN", "705", u"Slovenia"),
Country(u"Solomon Islands", "SB", "SLB", "090", u"Solomon Islands"),
Country(u"Somalia", "SO", "SOM", "706", u"Somalia"),
Country(u"South Africa", "ZA", "ZAF", "710", u"South Africa"),
Country(u"South Georgia and the South Sandwich Islands",
"GS", "SGS", "239",
u"South Georgia and the South Sandwich Islands",),
Country(u"South Sudan", "SS", "SSD", "728", u"South Sudan"),
Country(u"Spain", "ES", "ESP", "724", u"Spain"),
Country(u"Sri Lanka", "LK", "LKA", "144", u"Sri Lanka"),
Country(u"Sudan", "SD", "SDN", "729", u"Sudan"),
Country(u"Suriname", "SR", "SUR", "740", u"Suriname"),
Country(u"Svalbard and Jan Mayen", "SJ", "SJM", "744",
u"Svalbard and Jan Mayen"),
Country(u"Swaziland", "SZ", "SWZ", "748", u"Swaziland"),
Country(u"Sweden", "SE", "SWE", "752", u"Sweden"),
Country(u"Switzerland", "CH", "CHE", "756", u"Switzerland"),
Country(u"Syrian Arab Republic", "SY", "SYR", "760",
u"Syrian Arab Republic"),
Country(u"Taiwan, Province of China", "TW", "TWN", "158",
u"Taiwan"),
Country(u"Tajikistan", "TJ", "TJK", "762", u"Tajikistan"),
Country(u"Tanzania, United Republic of", "TZ", "TZA", "834",
u"Tanzania, United Republic of"),
Country(u"Thailand", "TH", "THA", "764", u"Thailand"),
Country(u"Timor-Leste", "TL", "TLS", "626", u"Timor-Leste"),
Country(u"Togo", "TG", "TGO", "768", u"Togo"),
Country(u"Tokelau", "TK", "TKL", "772", u"Tokelau"),
Country(u"Tonga", "TO", "TON", "776", u"Tonga"),
Country(u"Trinidad and Tobago", "TT", "TTO", "780",
u"Trinidad and Tobago"),
Country(u"Tunisia", "TN", "TUN", "788", u"Tunisia"),
Country(u"Turkey", "TR", "TUR", "792", u"Turkey"),
Country(u"Turkmenistan", "TM", "TKM", "795", u"Turkmenistan"),
Country(u"Turks and Caicos Islands", "TC", "TCA", "796",
u"Turks and Caicos Islands"),
Country(u"Tuvalu", "TV", "TUV", "798", u"Tuvalu"),
Country(u"Uganda", "UG", "UGA", "800", u"Uganda"),
Country(u"Ukraine", "UA", "UKR", "804", u"Ukraine"),
Country(u"United Arab Emirates", "AE", "ARE", "784",
u"United Arab Emirates"),
Country(u"United Kingdom",
"GB", "GBR", "826",
u"United Kingdom"),
Country(u"United States", "US", "USA", "840", u"United States"),
Country(u"United States Minor Outlying Islands", "UM", "UMI", "581",
u"United States Minor Outlying Islands"),
Country(u"Uruguay", "UY", "URY", "858", u"Uruguay"),
Country(u"Uzbekistan", "UZ", "UZB", "860", u"Uzbekistan"),
Country(u"Vanuatu", "VU", "VUT", "548", u"Vanuatu"),
Country(u"Venezuela, Bolivarian Republic of", "VE", "VEN", "862",
u"Venezuela, Bolivarian Republic of"),
Country(u"Viet Nam", "VN", "VNM", "704", u"Viet Nam"),
Country(u"Virgin Islands, British", "VG", "VGB", "092",
u"Virgin Islands, British"),
Country(u"Virgin Islands, U.S.", "VI", "VIR", "850",
u"Virgin Islands, U.S."),
Country(u"Wallis and Futuna", "WF", "WLF", "876", u"Wallis and Futuna"),
Country(u"Western Sahara", "EH", "ESH", "732", u"Western Sahara"),
Country(u"Yemen", "YE", "YEM", "887", u"Yemen"),
Country(u"Zambia", "ZM", "ZMB", "894", u"Zambia"),
Country(u"Zimbabwe", "ZW", "ZWE", "716", u"Zimbabwe")]
def _build_index(idx):
return dict((r[idx].upper(), r) for r in _records)
# Internal country indexes
_by_alpha2 = _build_index(1)
_by_alpha3 = _build_index(2)
_by_numeric = _build_index(3)
_by_name = _build_index(0)
_by_apolitical_name = _build_index(4)
# Documented accessors for the country indexes
countries_by_alpha2 = _by_alpha2
countries_by_alpha3 = _by_alpha3
countries_by_numeric = _by_numeric
countries_by_name = _by_name
countries_by_apolitical_name = _by_apolitical_name
NOT_FOUND = object()
class _CountryLookup(object):
def get(self, key, default=NOT_FOUND):
if isinstance(key, Integral):
r = _by_numeric.get("%03d" % key, default)
elif isinstance(key, basestring):
k = key.upper()
if len(k) == 2:
r = _by_alpha2.get(k, default)
elif len(k) == 3 and re.match(r"[0-9]{3}", k):
r = _by_numeric.get(k, default)
elif len(k) == 3:
r = _by_alpha3.get(k, default)
elif k in _by_name:
r = _by_name.get(k, default)
else:
r = _by_apolitical_name.get(k, default)
else:
r = default
if r == NOT_FOUND:
raise KeyError(key)
return r
__getitem__ = get
def __len__(self):
return len(_records)
def __iter__(self):
return iter(_records)
def __contains__(self, item):
try:
self.get(item)
return True
except KeyError:
return False
countries = _CountryLookup()
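# Hedged usage sketch of the lookup object defined above; the keys illustrate
# the index types accepted by _CountryLookup.get (alpha-2, alpha-3, numeric,
# and name):
# countries["US"].name -> "United States"
# countries.get("DEU").alpha2 -> "DE"
# countries.get(840) -> Country record for numeric code "840"
# "France" in countries -> True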
|
schleichdi2/OpenNfr_E2_Gui-6.0
|
lib/python/Plugins/Extensions/MediaPortal/resources/iso3166/__init__.py
|
Python
|
gpl-2.0
| 18,719
|
[
"BWA"
] |
d6cd65a9921d113b10a1e1edcaaaf36906206d1b7986f6e491cdafb5f08264c5
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import numpy as np
from psi4 import core
from psi4.driver import p4util
from psi4.driver.p4util.exceptions import ValidationError
from psi4.driver import qcdb
# CONVENTIONS:
# n_ at the start of a variable name is short for "number of."
# _pi at the end of a variable name is short for "per irrep."
# h is the index of an irrep.
array_format = {"precision": 10}
def _displace_cart(mol, geom, salc_list, i_m, step_size):
"""Displace a geometry along the specified displacement SALCs.
Parameters
----------
mol : qcdb.molecule or :py:class:`~psi4.core.Molecule`
The molecule to displace
geom : ndarray
(nat, 3) reference geometry [a0] of the molecule (const).
salc_list : :py:class:`~psi4.core.CdSalcList`
A list of Cartesian displacement SALCs
i_m : iterator of tuples
An iterator containing tuples. Each tuple has the index of a salc in
salc_list and the number of steps (positive or negative) to displace
the salc at that index.
step_size : float
The size of a single "step," i.e., the stencil size.
Returns
    -------
label : str
Displacement label for the metadata dictionary.
"""
label = ""
# This for loop and tuple unpacking is why the function can handle
# an arbitrary number of SALCs.
for salc_index, disp_steps in i_m:
        # NOTE: index into salc_list here; iterating over it directly raises a Python error.
for i in range(len(salc_list[salc_index])):
component = salc_list[salc_index][i]
geom[component.atom, component.xyz] += (
disp_steps * step_size * component.coef / np.sqrt(mol.mass(component.atom)))
# salc_index is in descending order. We want the label in ascending order, so...
# ...add the new label part from the left of the string, not the right.
label = "{:d}: {:d}".format(salc_index, disp_steps) + (", " if label else "") + label
return label
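# Hedged illustration of the label built above: displacing SALC 2 by -1 step and
# SALC 0 by +1 step (i_m is processed in descending SALC order) produces the
# label "0: 1, 2: -1", which matches the ordering convention documented in
# _geom_generator.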
def _initialize_findif(mol, freq_irrep_only, mode, initialize_string, verbose=0):
"""Perform initialization tasks needed by all primary functions.
Parameters
----------
mol : qcdb.molecule or :py:class:`~psi4.core.Molecule`
The molecule to displace
freq_irrep_only : int
The Cotton ordered irrep to get frequencies for. Choose -1 for all
irreps.
mode : {"1_0", "2_0", "2_1"}
The first number specifies the derivative level determined from
displacements, and the second number is the level determined at.
initialize_string : function
A function that returns the string to print to show the caller was entered.
The string is both caller-specific and dependent on values determined
in this function.
verbose : int
Set to 0 to silence extra print information, regardless of the print level.
Used so the information is printed only during geometry generation, and not
during the derivative computation as well.
Returns
-------
data : dict
Miscellaneous information required by callers.
"""
core.print_out("\n ----------------------------------------------------------\n")
core.print_out(" FINDIF\n")
core.print_out(" R. A. King and Jonathon Misiewicz\n")
core.print_out(" ---------------------------------------------------------\n\n")
print_lvl = core.get_option("FINDIF", "PRINT")
num_pts = core.get_option("FINDIF", "POINTS")
disp_size = core.get_option("FINDIF", "DISP_SIZE")
data = {"print_lvl": print_lvl, "num_pts": num_pts, "disp_size": disp_size}
if print_lvl:
core.print_out(initialize_string(data))
# Get settings for CdSalcList, then get the CdSalcList.
method_allowed_irreps = 0x1 if mode == "1_0" else 0xFF
t_project = not core.get_global_option("EXTERN") and (not core.get_global_option("PERTURB_H"))
# core.get_option returns an int, but CdSalcList expect a bool, so re-cast
r_project = t_project and bool(core.get_option("FINDIF", "FD_PROJECT"))
salc_list = core.CdSalcList(mol, method_allowed_irreps, t_project, r_project)
n_atom = mol.natom()
n_irrep = salc_list.nirrep()
n_salc = salc_list.ncd()
if print_lvl and verbose:
core.print_out(f" Number of atoms is {n_atom}.\n")
if method_allowed_irreps != 0x1:
core.print_out(f" Number of irreps is {n_irrep}.\n")
core.print_out(" Number of {!s}SALCs is {:d}.\n".format(
"" if method_allowed_irreps != 0x1 else "symmetric ", n_salc))
core.print_out(" Translations projected? {:d}. Rotations projected? {:d}.\n".format(t_project, r_project))
# TODO: Replace with a generator from a stencil to a set of points.
# Diagonal displacements differ between the totally symmetric irrep, compared to all others.
# Off-diagonal displacements are the same for both.
pts_dict = {
3: {
"sym_irr": ((-1, ), (1, )),
"asym_irr": ((-1, ), ),
"off": ((1, 1), (-1, -1))
},
5: {
"sym_irr": ((-2, ), (-1, ), (1, ), (2, )),
"asym_irr": ((-2, ), (-1, )),
"off": ((-1, -2), (-2, -1), (-1, -1), (1, -1), (-1, 1), (1, 1), (2, 1), (1, 2))
}
}
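    # Hedged reading of the table above: each inner tuple lists step multiples
    # of DISP_SIZE, either along a single SALC ("sym_irr"/"asym_irr") or along a
    # pair of SALCs ("off"). For the 3-point stencil, for example, a totally
    # symmetric SALC is displaced by -1 and +1 steps, while SALCs of the other
    # irreps need only the -1 step (the +1 point is recovered by symmetry).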
if num_pts not in pts_dict:
raise ValidationError("FINDIF: Invalid number of points!")
# Convention: x_pi means x_per_irrep. The ith element is x for irrep i, with Cotton ordering.
salc_indices_pi = [[] for h in range(n_irrep)]
# Validate that we have an irrep matching the user-specified irrep, if any.
try:
salc_indices_pi[freq_irrep_only]
except (TypeError, IndexError):
if freq_irrep_only != -1:
raise ValidationError("FINDIF: Irrep value not in valid range.")
# Populate salc_indices_pi for all irreps.
    # NOTE: index into salc_list here; iterating over it directly raises a Python error.
for i in range(len(salc_list)):
salc_indices_pi[salc_list[i].irrep_index()].append(i)
# If the method allows more than one irrep, print how the irreps partition the SALCS.
if print_lvl and method_allowed_irreps != 0x1 and verbose:
core.print_out(" Index of SALCs per irrep:\n")
for h in range(n_irrep):
if print_lvl > 1 or freq_irrep_only in {h, -1}:
tmp = (" {:d} " * len(salc_indices_pi[h])).format(*salc_indices_pi[h])
core.print_out(" {:d} : ".format(h + 1) + tmp + "\n")
core.print_out(" Number of SALCs per irrep:\n")
for h in range(n_irrep):
if print_lvl > 1 or freq_irrep_only in {h, -1}:
core.print_out(" Irrep {:d}: {:d}\n".format(h + 1, len(salc_indices_pi[h])))
# Now that we've printed the SALCs, clear any that are not of user-specified symmetry.
if freq_irrep_only != -1:
for h in range(n_irrep):
if h != freq_irrep_only:
salc_indices_pi[h].clear()
n_disp_pi = []
disps = pts_dict[num_pts] # We previously validated num_pts in pts_dict.
for irrep, indices in enumerate(salc_indices_pi):
n_disp = len(indices) * len(disps["asym_irr" if irrep != 0 else "sym_irr"])
if mode == "2_0":
# Either len(indices) or len(indices)-1 is even, so dividing by two is safe.
n_disp += len(indices) * (len(indices) - 1) // 2 * len(disps["off"])
n_disp_pi.append(n_disp)
# Let's print out the number of geometries, the displacement multiplicity, and the CdSALCs!
if print_lvl and verbose:
core.print_out(" Number of geometries (including reference) is {:d}.\n".format(sum(n_disp_pi) + 1))
if method_allowed_irreps != 0x1:
core.print_out(" Number of displacements per irrep:\n")
for i, ndisp in enumerate(n_disp_pi, start=1):
core.print_out(f" Irrep {i}: {ndisp}\n")
if print_lvl > 1 and verbose:
for i in range(len(salc_list)):
salc_list[i].print_out()
data.update({
"n_disp_pi": n_disp_pi,
"n_irrep": n_irrep,
"n_salc": n_salc,
"n_atom": n_atom,
"salc_list": salc_list,
"salc_indices_pi": salc_indices_pi,
"disps": disps,
"project_translations": t_project,
"project_rotations": r_project
})
return data
def _geom_generator(mol, freq_irrep_only, mode):
"""
Generate geometries for the specified molecule and derivative levels.
You probably want to instead use one of the convenience functions:
gradient_from_energies_geometries, hessian_from_energies_geometries,
hessian_from_gradients_geometries.
Parameters
----------
mol : qcdb.molecule or :py:class:`~psi4.core.Molecule`
The molecule on which to perform a finite difference calculation.
freq_irrep_only : int
The Cotton ordered irrep to get frequencies for. Choose -1 for all
irreps.
mode : {"1_0", "2_0", "2_1"}
The first number specifies the targeted derivative level. The
second number is the compute derivative level. E.g., "2_0"
is hessian from energies.
Returns
-------
findifrec : dict
Dictionary of finite difference data, specified below.
The dictionary makes findifrec _extensible_. If you need a new field
in the record, just add it.
All fields should be present at all times, with two exceptions:
1. Fields for computed quantities will not be available until
after they are computed.
2. Displacement specific overrides for globals will not be
available unless the user specified the overrides.
(Such overrides are not implemented at time of writing. An example
is giving a displacement its own step dict.)
step : dict
A descriptor for the finite difference step.
        In future, this can be overridden by step fields for individual displacements.
units : {'Bohr'}
The units for the displacement. The code currently assumes "bohr," per MolSSI standards.
size : float
The step size for the displacement.
stencil_size : {3, 5}
Number of points to evaluate at for each displacement basis vector. Count
includes the central reference point.
displacement_space : {'CdSalc'}
A string specifying the vector space in which displacements are performed.
Currently, only CdSalc is supported.
project_translations : bool
Whether translations are to be projected out of the displacements.
project_rotations : bool
Whether rotations are to be projected out of the displacements.
molecule : dict
The reference molecule, in MolSSI schema. See
https://molssi-qc-schema.readthedocs.io/en/latest/auto_topology.html
displacements : dict
A dictionary mapping labels specifying the displacement to data about
the geometry. Labels are of the form "A: a, B: b" where A and B index the
basis vector in displacement space and A < B, and a and b index the step
magnitude. For instance, "0: 1, 1: -1" specifies displacing +1 in
displacement vector 0 and -1 in displacement vector 1. "1: -1, 0: 1" is
        forbidden because it breaks this ordering. Generalizes to arbitrary numbers of
simultaneous displacements in the obvious way.
The possible geometry data is as follows:
geometry: list of floats
(3 * nat) The molecular geometry as a flat list in bohr. All coordinates
are given for one atom before proceeding to the next atom.
        energy: float
The last computed electronic energy at the geometry.
gradient: list of floats
(3 * nat) The last computed gradient of energy with respect to changes in
geometry at the geometry, as a flat list. All coordinates are given for
displacing one atom before proceeding to the next atom.
reference : dict
A geometry data dict, as described above, for the reference geometry.
"""
msg_dict = {
"1_0":
"energies to determine gradients",
"2_1":
"gradients to determine vibrational frequencies and \n"
" normal modes. Resulting frequencies are only valid at stationary points",
"2_0":
"gradients to determine vibrational frequencies and \n"
" normal modes. Resulting frequencies are only valid at stationary points"
}
try:
print_msg = msg_dict[mode]
except KeyError:
raise ValidationError("FINDIF: Mode {} not recognized.".format(mode))
def init_string(data):
return (" Using finite-differences of {:s}.\n"
" Generating geometries for use with {:d}-point formula.\n"
" Displacement size will be {:6.2e}.\n".format(print_msg, data["num_pts"], data["disp_size"]))
# Genuine support for qcdb molecules would be nice. But that requires qcdb CdSalc tech.
# Until then, silently swap the qcdb molecule out for a psi4.core.molecule.
if isinstance(mol, qcdb.Molecule):
mol = core.Molecule.from_dict(mol.to_dict())
data = _initialize_findif(mol, freq_irrep_only, mode, init_string, 1)
# We can finally start generating displacements.
ref_geom = mol.geometry().clone()
# Now we generate the metadata...
findifrec = {
"step": {
"units": "bohr",
"size": data["disp_size"]
},
"stencil_size": data["num_pts"],
"displacement_space": "CdSALC",
"project_translations": data["project_translations"],
"project_rotations": data["project_rotations"],
"molecule": mol.to_schema(dtype=1, units='Bohr'),
"displacements": {},
"reference": {}
}
def append_geoms(indices, steps):
"""Given a list of indices and a list of steps to displace each, append the corresponding geometry to the list."""
new_geom = ref_geom.clone().np
# Next, to make this salc/magnitude composite.
index_steps = zip(indices, steps)
label = _displace_cart(mol, new_geom, data["salc_list"], index_steps, data["disp_size"])
if data["print_lvl"] > 2:
core.print_out("\nDisplacement '{}'\n{}\n".format(label, np.array_str(new_geom, **array_format)))
findifrec["displacements"][label] = {"geometry": new_geom.ravel().tolist()}
for h in range(data["n_irrep"]):
active_indices = data["salc_indices_pi"][h]
for index in active_indices:
# Displace along the diagonal.
# Remember that the totally symmetric irrep has special displacements.
for val in data["disps"]["sym_irr" if h == 0 else "asym_irr"]:
append_geoms((index, ), val)
# Hessian from energies? We have off-diagonal displacements to worry about.
if mode == "2_0":
# i indexes SALC indices of the current irrep.
for i, index in enumerate(active_indices):
for index2 in active_indices[:i]:
for val in data["disps"]["off"]:
append_geoms((index, index2), val)
if data["print_lvl"] > 2:
core.print_out("\nReference\n{}\n".format(np.array_str(ref_geom.np, **array_format)))
findifrec["reference"]["geometry"] = ref_geom.np.ravel().tolist()
if data["print_lvl"] > 1:
core.print_out("\n-------------------------------------------------------------\n")
return findifrec
def assemble_gradient_from_energies(findifrec):
"""Compute the gradient by finite difference of energies.
Parameters
----------
findifrec : dict
Dictionary of finite difference data, specified in _geom_generator docstring.
Returns
-------
gradient : ndarray
(nat, 3) Cartesian gradient [Eh/a0].
"""
# This *must* be a Psi molecule at present - CdSalcList generation panics otherwise
mol = core.Molecule.from_schema(findifrec["molecule"], nonphysical=True, verbose=0)
def init_string(data):
return (" Computing gradient from energies.\n"
" Using {:d}-point formula.\n"
" Energy without displacement: {:15.10f}\n"
" Check energies below for precision!\n"
" Forces are for mass-weighted, symmetry-adapted cartesians (in au).\n".format(
findifrec["stencil_size"], findifrec["reference"]["energy"]))
data = _initialize_findif(mol, -1, "1_0", init_string)
salc_indices = data["salc_indices_pi"][0]
# Extract the energies, and turn then into an ndarray for easy manipulating
# E(i, j) := Energy on displacing the ith SALC we care about in the jth step
# Steps are ordered, for example, -2, -1, 1, 2
max_disp = (findifrec["stencil_size"] - 1) // 2 # The numerator had better be divisible by two.
e_per_salc = 2 * max_disp
E = np.zeros((len(salc_indices), e_per_salc))
for i, salc_index in enumerate(salc_indices):
for j in range(1, max_disp + 1):
E[i, max_disp - j] = findifrec["displacements"][f"{salc_index}: {-j}"]["energy"]
E[i, max_disp + j - 1] = findifrec["displacements"][f"{salc_index}: {j}"]["energy"]
# Perform the finite difference.
if findifrec["stencil_size"] == 3:
g_q = (E[:, 1] - E[:, 0]) / (2.0 * findifrec["step"]["size"])
elif findifrec["stencil_size"] == 5:
g_q = (E[:, 0] - 8.0 * E[:, 1] + 8.0 * E[:, 2] - E[:, 3]) / (12.0 * findifrec["step"]["size"])
else: # This error SHOULD have already been caught, but just in case...
raise ValidationError("FINDIF: {} is an invalid number of points.".format(findifrec["stencil_size"]))
g_q = np.asarray(g_q)
if data["print_lvl"]:
energy_string = ""
for i in range(1, max_disp + 1):
energy_string = f"Energy(-{i}) " + energy_string + f"Energy(+{i}) "
core.print_out("\n Coord " + energy_string + " Force\n")
for salc in range(data["n_salc"]):
print_str = " {:5d}" + " {:17.10f}" * (e_per_salc) + " {force:17.10f}" + "\n"
energies = E[salc]
core.print_out(print_str.format(salc, force=g_q[salc], *energies))
core.print_out("\n")
# Transform the gradient from mass-weighted SALCs to non-mass-weighted Cartesians
B = data["salc_list"].matrix()
g_cart = np.dot(g_q, B)
g_cart = g_cart.reshape(data["n_atom"], 3)
massweighter = np.array([mol.mass(a) for a in range(data["n_atom"])])**(0.5)
g_cart = (g_cart.T * massweighter).T
if data["print_lvl"]:
core.print_out("\n-------------------------------------------------------------\n")
return g_cart
def _process_hessian_symmetry_block(H_block, B_block, massweighter, irrep, print_lvl):
"""Perform post-construction processing for a symmetry block of the Hessian.
Statements need to be printed, and the Hessian must be made orthogonal.
Parameters
---------
H_block : ndarray
A block of the Hessian for an irrep, in mass-weighted salcs.
Dimensions # cdsalcs by # cdsalcs.
B_block : ndarray
A block of the B matrix for an irrep, which transforms CdSalcs to Cartesians.
Dimensions # cdsalcs by # cartesians.
massweighter : ndarray
The mass associated with each atomic coordinate.
Dimension # cartesians. Due to x, y, z, values appear in groups of three.
irrep : str
A string identifying the irrep H_block and B_block are of.
print_lvl : int
The level of printing information requested by the user.
Returns
-------
H_block : ndarray
H_block, but made into an orthogonal array.
"""
# Symmetrize our Hessian block.
    # The exact symmetry is lost to numerical error in the computation.
H_block = (H_block + H_block.T) / 2.0
if print_lvl >= 3:
core.print_out("\n Force Constants for irrep {} in mass-weighted, ".format(irrep))
core.print_out("symmetry-adapted cartesian coordinates.\n")
core.print_out("\n{}\n".format(np.array_str(H_block, **array_format)))
evals, evects = np.linalg.eigh(H_block)
# Get our eigenvalues and eigenvectors in descending order.
idx = evals.argsort()[::-1]
evals = evals[idx]
evects = evects[:, idx]
normal_irr = np.dot((B_block * massweighter).T, evects)
if print_lvl >= 2:
core.print_out("\n Normal coordinates (non-mass-weighted) for irrep {}:\n".format(irrep))
core.print_out("\n{}\n".format(np.array_str(normal_irr, **array_format)))
return H_block
def _process_hessian(H_blocks, B_blocks, massweighter, print_lvl):
"""Perform post-construction processing for the Hessian.
Statements need to be printed, and the Hessian must be transformed.
Parameters
----------
H_blocks : list of ndarray
A list of blocks of the Hessian per irrep, in mass-weighted salcs.
Each is dimension # cdsalcs-in-irrep by # cdsalcs-in-irrep.
B_blocks : list of ndarray
A block of the B matrix per irrep, which transforms CdSalcs to Cartesians.
Each is dimensions # cdsalcs-in-irrep by # cartesians.
massweighter : ndarray
The mass associated with each atomic coordinate.
Dimension 3 * natom. Due to x, y, z, values appear in groups of three.
print_lvl : int
The level of printing information requested by the user.
Returns
-------
Hx : ndarray
The Hessian in non-mass weighted cartesians.
"""
# We have the Hessian in each irrep! The final task is to perform coordinate transforms.
H = p4util.block_diagonal_array(*H_blocks)
B = np.vstack(B_blocks)
if print_lvl >= 3:
core.print_out("\n Force constant matrix for all computed irreps in mass-weighted SALCS.\n")
core.print_out("\n{}\n".format(np.array_str(H, **array_format)))
# Transform the massweighted Hessian from the CdSalc basis to Cartesians.
# The Hessian is the matrix not of a linear transformation, but of a (symmetric) bilinear form
# As such, the change of basis is formula A' = Xt A X, no inverses!
# More conceptually, it's A'_kl = A_ij X_ik X_jl; Each index transforms linearly.
Hx = np.dot(np.dot(B.T, H), B)
if print_lvl >= 3:
core.print_out("\n Force constants in mass-weighted Cartesian coordinates.\n")
core.print_out("\n{}\n".format(np.array_str(Hx, **array_format)))
# Un-massweight the Hessian.
Hx = np.transpose(Hx / massweighter) / massweighter
if print_lvl >= 3:
core.print_out("\n Force constants in Cartesian coordinates.\n")
core.print_out("\n{}\n".format(np.array_str(Hx, **array_format)))
if print_lvl:
core.print_out("\n-------------------------------------------------------------\n")
return Hx
def assemble_hessian_from_gradients(findifrec, freq_irrep_only):
"""Compute the Hessian by finite difference of gradients.
Parameters
----------
findifrec : dict
Dictionary of finite difference data, specified in _geom_generator docstring.
freq_irrep_only : int
The Cotton ordered irrep to get frequencies for. Choose -1 for all
irreps.
Returns
-------
hessian : ndarray
(3 * nat, 3 * nat) Cartesian Hessian [Eh/a0^2]
"""
# This *must* be a Psi molecule at present - CdSalcList generation panics otherwise
mol = core.Molecule.from_schema(findifrec["molecule"], nonphysical=True, verbose=0)
displacements = findifrec["displacements"]
def init_string(data):
return (" Computing second-derivative from gradients using projected, \n"
" symmetry-adapted, cartesian coordinates.\n\n"
" {:d} gradients passed in, including the reference geometry.\n".format(len(displacements) + 1))
data = _initialize_findif(mol, freq_irrep_only, "2_1", init_string)
# For non-totally symmetric CdSALCs, a symmetry operation can convert + and - displacements.
    # Good News: By taking advantage of that, we (potentially) ran fewer computations.
# Bad News: We need to find the - displacements from the + computations now.
# The next ~80 lines of code are dedicated to that task.
if data["print_lvl"]:
core.print_out(" Generating complete list of displacements from unique ones.\n\n")
pg = mol.point_group()
ct = pg.char_table()
order = pg.order()
# Determine what atoms map to what other atoms under the point group operations.
# The py-side compute_atom_map will work whether mol is a Py-side or C-side object.
atom_map = qcdb.compute_atom_map(mol)
if data["print_lvl"] >= 3:
core.print_out(" The atom map:\n")
for atom, sym_image_list in enumerate(atom_map):
core.print_out(f" {atom + 1:d} : ")
for image_atom in sym_image_list:
core.print_out(f"{image_atom + 1:4d}")
core.print_out("\n")
core.print_out("\n")
# A list of lists of gradients, per irrep
gradients_pi = [[]]
# Extract and print the symmetric gradients. These need no additional processing.
max_disp = (findifrec["stencil_size"] - 1) // 2 # The numerator had better be divisible by two.
for i in data["salc_indices_pi"][0]:
for n in range(-max_disp, 0):
grad_raw = displacements[f"{i}: {n}"]["gradient"]
gradients_pi[0].append(np.reshape(grad_raw, (-1, 3)))
for n in range(1, max_disp + 1):
grad_raw = displacements[f"{i}: {n}"]["gradient"]
gradients_pi[0].append(np.reshape(grad_raw, (-1, 3)))
if data["print_lvl"] >= 3:
core.print_out(" Symmetric gradients\n")
for gradient in gradients_pi[0]:
core.print_out("\n{}\n".format(np.array_str(gradient, **array_format)))
# Asymmetric gradient. There's always SOME operation that transforms a positive
    # into a negative displacement. By applying that operation here, we can find the
# gradients at the positive displacements.
for h in range(1, data["n_irrep"]):
# If there are no CdSALCs in this irrep, let's skip it.
if not data["n_disp_pi"][h]:
gradients_pi.append([])
continue
gamma = ct.gamma(h)
if data["print_lvl"] >= 3:
core.print_out(f"Characters for irrep {h}\n")
for group_op in range(order):
core.print_out(" {:5.1f}".format(gamma.character(group_op)))
core.print_out("\n")
# Find the group operation that converts + to - displacements.
for group_op in range(order):
if gamma.character(group_op) == -1:
break
else:
raise ValidationError("A symmetric gradient passed for a non-symmetric one.")
if data["print_lvl"]:
core.print_out(" Operation {} takes plus displacements of irrep {} to minus ones.\n".format(
group_op + 1, gamma.symbol()))
sym_op = np.array(ct.symm_operation(group_op).matrix())
gradients = []
def recursive_gradients(i, n):
"""Populate gradients, with step -n, -n+1, ... -1, 1, ... n. Positive displacements are computed."""
grad_raw = displacements[f"{i}: {-n}"]["gradient"]
gradients.append(np.reshape(grad_raw, (-1, 3)))
new_grad = np.zeros((data["n_atom"], 3))
for atom, image in enumerate(atom_map):
atom2 = image[group_op]
new_grad[atom2] = np.einsum("xy,y->x", sym_op, gradients[-1][atom])
if n > 1:
recursive_gradients(i, n - 1)
gradients.append(new_grad)
for i in data["salc_indices_pi"][h]:
recursive_gradients(i, max_disp)
gradients_pi.append(gradients)
# Massweight all gradients.
# Remember, the atom currently corresponds to our 0 axis, hence these transpose tricks.
massweighter = np.asarray([mol.mass(a) for a in range(data["n_atom"])])**(-0.5)
gradients_pi = [[(grad.T * massweighter).T for grad in gradients] for gradients in gradients_pi]
if data["print_lvl"] >= 3:
core.print_out(" All mass-weighted gradients\n")
for gradients in gradients_pi:
for grad in gradients:
core.print_out("\n{}\n".format(np.array_str(grad, **array_format)))
# We have all our gradients generated now!
# Next, time to get our Hessian.
H_pi = []
B_pi = []
irrep_lbls = mol.irrep_labels()
massweighter = np.repeat(massweighter, 3)
for h in range(data["n_irrep"]):
n_disp = data["n_disp_pi"][h]
Nindices = len(data["salc_indices_pi"][h])
gradients = gradients_pi[h]
if not Nindices:
continue
# Flatten each gradient, and turn it into a COLUMN of the matrix.
gradient_matrix = np.array([grad.flatten() for grad in gradients]).T
# Transform disps from Cartesian to CdSalc coordinates.
# For future convenience, we transpose.
# Rows are gradients and columns are coordinates with respect to a particular CdSALC.
B_pi.append(data["salc_list"].matrix_irrep(h))
grads_adapted = np.dot(B_pi[-1], gradient_matrix).T
if data["print_lvl"] >= 3:
core.print_out("Gradients in B-matrix coordinates\n")
for disp in range(n_disp):
core.print_out(f" disp {disp}: ")
for salc in grads_adapted[disp]:
core.print_out(f"{salc:15.10f}")
core.print_out("\n")
H_pi.append(np.empty([Nindices, Nindices]))
if findifrec["stencil_size"] == 3:
H_pi[-1] = (grads_adapted[1::2] - grads_adapted[::2]) / (2.0 * findifrec["step"]["size"])
elif findifrec["stencil_size"] == 5:
H_pi[-1] = (grads_adapted[::4] - 8 * grads_adapted[1::4] + 8 * grads_adapted[2::4] -
grads_adapted[3::4]) / (12.0 * findifrec["step"]["size"])
H_pi[-1] = _process_hessian_symmetry_block(H_pi[-1], B_pi[-1], massweighter, irrep_lbls[h], data["print_lvl"])
# All blocks of the Hessian are now constructed!
return _process_hessian(H_pi, B_pi, massweighter, data["print_lvl"])
def assemble_hessian_from_energies(findifrec, freq_irrep_only):
"""Compute the Hessian by finite difference of energies.
Parameters
----------
findifrec : dict
Dictionary of finite difference data, specified in _geom_generator docstring.
freq_irrep_only : int
The 0-indexed Cotton ordered irrep to get frequencies for. Choose -1 for all irreps.
Returns
-------
hessian : ndarray
(3 * nat, 3 * nat) Cartesian Hessian [Eh/a0^2].
"""
# This *must* be a Psi molecule at present - CdSalcList generation panics otherwise
mol = core.Molecule.from_schema(findifrec["molecule"], nonphysical=True, verbose=0)
displacements = findifrec["displacements"]
ref_energy = findifrec["reference"]["energy"]
def init_string(data):
max_label_len = str(max([len(label) for label in displacements]))
out_str = ""
for label, disp_data in displacements.items():
out_str += (" {:" + max_label_len + "s} : {:20.10f}\n").format(label, disp_data["energy"])
return (" Computing second-derivative from energies using projected, \n"
" symmetry-adapted, cartesian coordinates.\n\n"
" {:d} energies passed in, including the reference geometry.\n"
" Using {:d}-point formula.\n"
" Energy without displacement: {:15.10f}\n"
" Check energies below for precision!\n{}".format(
len(displacements) + 1, findifrec["stencil_size"], ref_energy, out_str))
data = _initialize_findif(mol, freq_irrep_only, "2_0", init_string)
massweighter = np.repeat([mol.mass(a) for a in range(data["n_atom"])], 3)**(-0.5)
B_pi = []
H_pi = []
irrep_lbls = mol.irrep_labels()
max_disp = (findifrec["stencil_size"] - 1) // 2
e_per_diag = 2 * max_disp
# Unlike in the gradient case, we have no symmetry transformations to worry about.
# We get to the task directly: assembling the force constants in each irrep block.
for h in range(data["n_irrep"]):
salc_indices = data["salc_indices_pi"][h]
if not salc_indices: continue
n_salcs = len(salc_indices)
E = np.zeros((len(salc_indices), e_per_diag))
# Step One: Diagonals
# For asymmetric irreps, the energy at a + disp is the same as at a - disp
# Just reuse the - disp energy for the + disp energy
for i, salc_index in enumerate(salc_indices):
for j in range(1, max_disp + 1):
E[i, max_disp - j] = displacements[f"{salc_index}: {-j}"]["energy"]
k = -j if h else j # Because of the +- displacement trick
E[i, max_disp + j - 1] = displacements[f"{salc_index}: {k}"]["energy"]
# Now determine all diagonal force constants for this irrep.
if findifrec["stencil_size"] == 3:
diag_fcs = E[:, 0] + E[:, 1]
diag_fcs -= 2 * ref_energy
diag_fcs /= (findifrec["step"]["size"]**2)
elif findifrec["stencil_size"] == 5:
diag_fcs = -E[:, 0] + 16 * E[:, 1] + 16 * E[:, 2] - E[:, 3]
diag_fcs -= 30 * ref_energy
diag_fcs /= (12 * findifrec["step"]["size"]**2)
H_irr = np.diag(diag_fcs)
# TODO: It's a bit ugly to use the salc indices to grab the off-diagonals but the indices
# within the irrep to grab the diagonals. Is there a better way to do this?
# Step Two: Off-diagonals
# We need off-diagonal energies, diagonal energies, AND the reference energy
# Grabbing off-diagonal energies is a pain, so once we know our SALCs...
# ...define offdiag_en to do that for us.
for i, salc in enumerate(salc_indices):
for j, salc2 in enumerate(salc_indices[:i]):
offdiag_en = lambda index: displacements["{l}: {}, {k}: {}".format(k=salc, l=salc2, *data["disps"]["off"][index])]["energy"]
if findifrec["stencil_size"] == 3:
fc = (+offdiag_en(0) + offdiag_en(1) + 2 * ref_energy - E[i][0] - E[i][1] - E[j][0] - E[j][1]) / (
2 * findifrec["step"]["size"]**2)
elif findifrec["stencil_size"] == 5:
fc = (-offdiag_en(0) - offdiag_en(1) + 9 * offdiag_en(2) - offdiag_en(3) - offdiag_en(4) +
9 * offdiag_en(5) - offdiag_en(6) - offdiag_en(7) + E[i][0] - 7 * E[i][1] - 7 * E[i][2] +
E[i][3] + E[j][0] - 7 * E[j][1] - 7 * E[j][2] + E[j][3] + 12 * ref_energy) / (
12 * findifrec["step"]["size"]**2)
H_irr[i, j] = fc
H_irr[j, i] = fc
B_pi.append(data["salc_list"].matrix_irrep(h))
H_pi.append(_process_hessian_symmetry_block(H_irr, B_pi[-1], massweighter, irrep_lbls[h], data["print_lvl"]))
# All blocks of the Hessian are now constructed!
return _process_hessian(H_pi, B_pi, massweighter, data["print_lvl"])
def gradient_from_energies_geometries(molecule):
"""
Generate geometries for a gradient by finite difference of energies.
Parameters
----------
molecule : qcdb.molecule or :py:class:`~psi4.core.Molecule`
The molecule to compute the gradient of.
Returns
-------
findifrec : dict
Dictionary of finite difference data, specified in _geom_generator docstring.
Notes
-----
Only symmetric displacements are necessary, so user specification of
symmetry is disabled.
"""
return _geom_generator(molecule, -1, "1_0")
def hessian_from_gradients_geometries(molecule, irrep):
"""
    Generate geometries for a hessian by finite difference of gradients.
Parameters
----------
molecule : qcdb.molecule or :py:class:`~psi4.core.Molecule`
The molecule to compute the frequencies of.
irrep : int
The Cotton ordered irrep to get frequencies for. Choose -1 for all
irreps.
Returns
-------
findifrec : dict
Dictionary of finite difference data, specified in _geom_generator docstring.
"""
return _geom_generator(molecule, irrep, "2_1")
def hessian_from_energies_geometries(molecule, irrep):
"""
Generate geometries for a hessian by finite difference of energies.
Parameters
----------
molecule : qcdb.molecule or :py:class:`~psi4.core.Molecule`
The molecule to compute the frequencies of.
irrep : int
The Cotton ordered irrep to get frequencies for. Choose -1 for all
irreps.
Returns
-------
findifrec : dict
Dictionary of finite difference data, specified in _geom_generator docstring.
"""
return _geom_generator(molecule, irrep, "2_0")
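# Hedged end-to-end sketch of how the pieces above fit together; the energies
# and the compute_energy_at helper are assumptions supplied by the caller, not
# by this module:
#   findifrec = gradient_from_energies_geometries(mol)
#   for label, disp in findifrec["displacements"].items():
#       disp["energy"] = compute_energy_at(disp["geometry"])  # hypothetical
#   findifrec["reference"]["energy"] = compute_energy_at(
#       findifrec["reference"]["geometry"])                   # hypothetical
#   gradient = assemble_gradient_from_energies(findifrec)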
|
psi4/psi4
|
psi4/driver/driver_findif.py
|
Python
|
lgpl-3.0
| 38,143
|
[
"Psi4"
] |
6ffb0b9b538f6f84241c277b4069a62129e93b0508d10638b21d4b02ed74faba
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Execute computations asynchronously using threads or processes."""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
from ._base import (FIRST_COMPLETED,
FIRST_EXCEPTION,
ALL_COMPLETED,
CancelledError,
TimeoutError,
Future,
Executor,
wait,
as_completed)
from .thread import ThreadPoolExecutor
|
blopker/PCLite
|
pclite/lib/concurrent/futures/__init__.py
|
Python
|
mit
| 706
|
[
"Brian"
] |
f7f443694b0580da0fb97d0fc249f550e635fbc7e7acf238233bbad126c77dab
|
"""Source code analyzer for chalice app.
The main point of this module is to analyze your source code
and track which AWS API calls you make.
We can then use this information to create IAM policies
automatically for you.
How it Works
============
This is basically a simplified abstract interpreter.
The type inference is greatly simplified because
we're only interested in boto3 client types.
In a nutshell:
* Create an AST and symbol table from the source code.
* Interpret the AST and track boto3 types. This is governed
by a few simple rules.
* Propagate inferred boto3 types as much as possible. Most of
the basic stuff is handled, for example:
* ``x = y`` if y is a boto3 type, so is x.
    * ``a :: (x -> y)``, where y is a boto3 type, then given ``b = a()``,
b is of type y.
* Map inferred types across function params and return types.
At the end of the analysis, a final walk is performed to collect any
node of type ``Boto3ClientMethodCallType``. This represents an
API call being made. This also lets you be selective about which
API calls you care about. For example, if you only want to see
which API calls happen in a particular function, only walk that
particular ``FunctionDef`` node.
"""
import ast
import symtable
from typing import Dict, Set, Any, Optional, List, Union, cast # noqa
APICallT = Dict[str, Set[str]]
OptASTSet = Optional[Set[ast.AST]]
ComprehensionNode = Union[ast.DictComp, ast.GeneratorExp, ast.ListComp]
def get_client_calls(source_code):
# type: (str) -> APICallT
"""Return all clients calls made in provided source code.
:returns: A dict of service_name -> set([client calls]).
Example: {"s3": set(["list_objects", "create_bucket"]),
"dynamodb": set(["describe_table"])}
"""
parsed = parse_code(source_code)
t = SymbolTableTypeInfer(parsed)
binder = t.bind_types()
collector = APICallCollector(binder)
api_calls = collector.collect_api_calls(parsed.parsed_ast)
return api_calls
def get_client_calls_for_app(source_code):
# type: (str) -> APICallT
"""Return client calls for a chalice app.
This is similar to ``get_client_calls`` except it will
automatically traverse into chalice views with the assumption
that they will be called.
"""
parsed = parse_code(source_code)
parsed.parsed_ast = AppViewTransformer().visit(parsed.parsed_ast)
ast.fix_missing_locations(parsed.parsed_ast)
t = SymbolTableTypeInfer(parsed)
binder = t.bind_types()
collector = APICallCollector(binder)
api_calls = collector.collect_api_calls(parsed.parsed_ast)
return api_calls
def parse_code(source_code, filename='app.py'):
# type: (str, str) -> ParsedCode
parsed = ast.parse(source_code, filename)
table = symtable.symtable(source_code, filename, 'exec')
return ParsedCode(parsed, ChainedSymbolTable(table, table))
class BaseType(object):
def __repr__(self):
# type: () -> str
return "%s()" % self.__class__.__name__
def __eq__(self, other):
# type: (Any) -> bool
return isinstance(other, self.__class__)
# The next 5 classes are used to track the
# components needed to create a boto3 client.
# While we really only care about boto3 clients we need
# to track all the types it takes to get there:
#
# import boto3 <--- bind "boto3" as the boto3 module type
# c = boto.client <--- bind "c" as the boto3 create client type
# s3 = c('s3') <--- bind 's3' as the boto3 client type, subtype 's3'.
# m = s3.list_objects <--- bind as API call 's3', 'list_objects'
# r = m() <--- bind as API call invoked (what we care about).
#
# That way we can handle (in addition to the case above) things like:
# import boto3; boto3.client('s3').list_objects()
# import boto3; s3 = boto3.client('s3'); s3.list_objects()
class Boto3ModuleType(BaseType):
pass
class Boto3CreateClientType(BaseType):
pass
class Boto3ClientType(BaseType):
def __init__(self, service_name):
# type: (str) -> None
#: The name of the AWS service, e.g. 's3'.
self.service_name = service_name
def __eq__(self, other):
# type: (Any) -> bool
# NOTE: We can't use self.__class__ because of a mypy bug:
# https://github.com/python/mypy/issues/3061
# We can change this back once that bug is fixed.
if not isinstance(other, Boto3ClientType):
return False
return self.service_name == other.service_name
def __repr__(self):
# type: () -> str
return "%s(%s)" % (self.__class__.__name__, self.service_name)
class Boto3ClientMethodType(BaseType):
def __init__(self, service_name, method_name):
# type: (str, str) -> None
self.service_name = service_name
self.method_name = method_name
def __eq__(self, other):
# type: (Any) -> bool
if self.__class__ != other.__class__:
return False
return (
self.service_name == other.service_name and
self.method_name == other.method_name)
def __repr__(self):
# type: () -> str
return "%s(%s, %s)" % (
self.__class__.__name__,
self.service_name,
self.method_name
)
class Boto3ClientMethodCallType(Boto3ClientMethodType):
pass
class TypedSymbol(symtable.Symbol):
inferred_type = None # type: Any
ast_node = None # type: ast.AST
class FunctionType(BaseType):
def __init__(self, return_type):
# type: (Any) -> None
self.return_type = return_type
def __eq__(self, other):
# type: (Any) -> bool
if self.__class__ != other.__class__:
return False
return self.return_type == other.return_type
def __repr__(self):
# type: () -> str
return "%s(%s)" % (
self.__class__.__name__,
self.return_type,
)
class StringLiteral(object):
def __init__(self, value):
# type: (str) -> None
self.value = value
class ParsedCode(object):
def __init__(self, parsed_ast, symbol_table):
# type: (ast.AST, ChainedSymbolTable) -> None
self.parsed_ast = parsed_ast
self.symbol_table = symbol_table
class APICallCollector(ast.NodeVisitor):
"""Traverse a given AST and look for any inferred API call types.
    This visitor assumes you've run type inference on the AST.
It will search through the AST and collect any API calls.
"""
def __init__(self, binder):
# type: (TypeBinder) -> None
self.api_calls = {} # type: APICallT
self._binder = binder
def collect_api_calls(self, node):
# type: (ast.AST) -> APICallT
self.visit(node)
return self.api_calls
def visit(self, node):
# type: (ast.AST) -> None
inferred_type = self._binder.get_type_for_node(node)
if isinstance(inferred_type, Boto3ClientMethodCallType):
self.api_calls.setdefault(inferred_type.service_name, set()).add(
inferred_type.method_name)
ast.NodeVisitor.visit(self, node)
class ChainedSymbolTable(object):
def __init__(self, local_table, global_table):
# type: (symtable.SymbolTable, symtable.SymbolTable) -> None
# If you're in the module scope, then pass in
# the same symbol table for local and global.
self._local_table = local_table
self._global_table = global_table
def new_sub_table(self, local_table):
# type: (symtable.SymbolTable) -> ChainedSymbolTable
        # Create a new symbol table using this instance's
# local table as the new global table and the passed
# in local table as the new local table.
return self.__class__(local_table, self._local_table)
def get_inferred_type(self, name):
# type: (str) -> Any
# Given a symbol name, check whether a type
# has been inferred.
# The stdlib symtable will already fall back to
# global scope if necessary.
symbol = self._local_table.lookup(name)
if symbol.is_global():
try:
global_symbol = self._global_table.lookup(name)
except KeyError:
# It's not an error if a symbol.is_global()
# but is not in our "_global_table", because
# we're not considering the builtin scope.
# In this case we just say that there is no
# type we've inferred.
return None
return getattr(global_symbol, 'inferred_type', None)
return getattr(symbol, 'inferred_type', None)
def set_inferred_type(self, name, inferred_type):
# type: (str, Any) -> None
symbol = cast(TypedSymbol, self._local_table.lookup(name))
symbol.inferred_type = inferred_type
def lookup_sub_namespace(self, name):
# type: (str) -> ChainedSymbolTable
for child in self._local_table.get_children():
if child.get_name() == name:
return self.__class__(child, self._local_table)
for child in self._global_table.get_children():
if child.get_name() == name:
return self.__class__(child, self._global_table)
raise ValueError("Unknown symbol name: %s" % name)
def get_sub_namespaces(self):
# type: () -> List[symtable.SymbolTable]
return self._local_table.get_children()
def get_name(self):
# type: () -> str
return self._local_table.get_name()
def get_symbols(self):
# type: () -> List[symtable.Symbol]
return self._local_table.get_symbols()
def register_ast_node_for_symbol(self, name, node):
# type: (str, ast.AST) -> None
symbol = cast(TypedSymbol, self._local_table.lookup(name))
symbol.ast_node = node
def lookup_ast_node_for_symbol(self, name):
# type: (str) -> ast.AST
symbol = self._local_table.lookup(name)
if symbol.is_global():
symbol = self._global_table.lookup(name)
try:
return cast(TypedSymbol, symbol).ast_node
except AttributeError:
raise ValueError(
"No AST node registered for symbol: %s" % name)
def has_ast_node_for_symbol(self, name):
# type: (str) -> bool
try:
self.lookup_ast_node_for_symbol(name)
return True
except (ValueError, KeyError):
return False
class TypeBinder(object):
def __init__(self):
# type: () -> None
self._node_to_type = {} # type: Dict[ast.AST, Any]
def get_type_for_node(self, node):
# type: (Any) -> Any
return self._node_to_type.get(node)
def set_type_for_node(self, node, inferred_type):
# type: (Any, Any) -> None
self._node_to_type[node] = inferred_type
class SymbolTableTypeInfer(ast.NodeVisitor):
_SDK_PACKAGE = 'boto3'
_CREATE_CLIENT = 'client'
def __init__(self, parsed_code, binder=None, visited=None):
# type: (ParsedCode, Optional[TypeBinder], OptASTSet) -> None
self._symbol_table = parsed_code.symbol_table
self._current_ast_namespace = parsed_code.parsed_ast
self._node_inference = {} # type: Dict[ast.AST, Any]
if binder is None:
binder = TypeBinder()
if visited is None:
visited = set()
self._binder = binder
self._visited = visited
def bind_types(self):
# type: () -> TypeBinder
self.visit(self._current_ast_namespace)
return self._binder
def known_types(self, scope_name=None):
# type: (Optional[str]) -> Dict[str, Any]
table = None
if scope_name is None:
table = self._symbol_table
else:
table = self._symbol_table.lookup_sub_namespace(scope_name)
return {
s.get_name(): cast(TypedSymbol, s).inferred_type
for s in table.get_symbols()
if hasattr(s, 'inferred_type') and
cast(TypedSymbol, s).inferred_type is not None and
s.is_local()
}
def _set_inferred_type_for_name(self, name, inferred_type):
# type: (str, Any) -> None
self._symbol_table.set_inferred_type(name, inferred_type)
def _set_inferred_type_for_node(self, node, inferred_type):
# type: (Any, Any) -> None
self._binder.set_type_for_node(node, inferred_type)
def _get_inferred_type_for_node(self, node):
# type: (Any) -> Any
return self._binder.get_type_for_node(node)
def _new_inference_scope(self, parsed_code, binder, visited):
# type: (ParsedCode, TypeBinder, Set[ast.AST]) -> SymbolTableTypeInfer
instance = self.__class__(parsed_code, binder, visited)
return instance
def visit_Import(self, node):
# type: (ast.Import) -> None
for child in node.names:
if isinstance(child, ast.alias):
import_name = child.name
if import_name == self._SDK_PACKAGE:
self._set_inferred_type_for_name(
import_name, Boto3ModuleType())
self.generic_visit(node)
def visit_Name(self, node):
# type: (ast.Name) -> None
self._set_inferred_type_for_node(
node,
self._symbol_table.get_inferred_type(node.id)
)
self.generic_visit(node)
def visit_Assign(self, node):
# type: (ast.Assign) -> None
# The LHS gets the inferred type of the RHS.
# We do this post-traversal to let the type inference
# run on the children first.
self.generic_visit(node)
rhs_inferred_type = self._get_inferred_type_for_node(node.value)
if rhs_inferred_type is None:
# Special casing assignment to a string literal.
if isinstance(node.value, ast.Str):
rhs_inferred_type = StringLiteral(node.value.s)
self._set_inferred_type_for_node(node.value, rhs_inferred_type)
for t in node.targets:
if isinstance(t, ast.Name):
self._symbol_table.set_inferred_type(t.id, rhs_inferred_type)
self._set_inferred_type_for_node(node, rhs_inferred_type)
def visit_Attribute(self, node):
# type: (ast.Attribute) -> None
self.generic_visit(node)
lhs_inferred_type = self._get_inferred_type_for_node(node.value)
if lhs_inferred_type is None:
return
elif lhs_inferred_type == Boto3ModuleType():
# Check for attributes such as boto3.client.
if node.attr == self._CREATE_CLIENT:
# This is a "boto3.client" attribute.
self._set_inferred_type_for_node(node, Boto3CreateClientType())
elif isinstance(lhs_inferred_type, Boto3ClientType):
self._set_inferred_type_for_node(
node,
Boto3ClientMethodType(
lhs_inferred_type.service_name,
node.attr
)
)
def visit_Call(self, node):
# type: (ast.Call) -> None
self.generic_visit(node)
# func -> Node that's being called
# args -> Arguments being passed.
inferred_func_type = self._get_inferred_type_for_node(node.func)
if inferred_func_type == Boto3CreateClientType():
# e_0 : B3CCT -> B3CT[S]
# e_1 : S str which is a service name
# e_0(e_1) : B3CT[e_1]
if len(node.args) >= 1:
service_arg = node.args[0]
if isinstance(service_arg, ast.Str):
self._set_inferred_type_for_node(
node, Boto3ClientType(service_arg.s))
elif isinstance(self._get_inferred_type_for_node(service_arg),
StringLiteral):
sub_type = self._get_inferred_type_for_node(service_arg)
inferred_type = Boto3ClientType(sub_type.value)
self._set_inferred_type_for_node(node, inferred_type)
elif isinstance(inferred_func_type, Boto3ClientMethodType):
self._set_inferred_type_for_node(
node,
Boto3ClientMethodCallType(
inferred_func_type.service_name,
inferred_func_type.method_name
)
)
elif isinstance(inferred_func_type, FunctionType):
self._set_inferred_type_for_node(
node, inferred_func_type.return_type)
elif isinstance(node.func, ast.Name) and \
self._symbol_table.has_ast_node_for_symbol(node.func.id):
if node not in self._visited:
self._visited.add(node)
self._infer_function_call(node)
def visit_Lambda(self, node):
# type: (ast.Lambda) -> None
# Lambda is going to be a bit tricky because
# there's a new child namespace (via .get_children()),
# but it's not something that will show up in the
# current symbol table via .lookup().
# For now, we're going to ignore lambda expressions.
pass
def _infer_function_call(self, node):
# type: (Any) -> None
# Here we're calling a function we haven't analyzed
# yet. We're first going to analyze the function.
# This will set the inferred_type on the FunctionDef
# node.
# If we get a FunctionType as the inferred type of the
# function, then we know that the inferred type for
# calling the function is the .return_type type.
function_name = node.func.id
sub_table = self._symbol_table.lookup_sub_namespace(function_name)
ast_node = self._symbol_table.lookup_ast_node_for_symbol(
function_name)
self._map_function_params(sub_table, node, ast_node)
child_infer = self._new_inference_scope(
ParsedCode(ast_node, sub_table), self._binder, self._visited)
child_infer.bind_types()
inferred_func_type = self._get_inferred_type_for_node(ast_node)
self._symbol_table.set_inferred_type(function_name, inferred_func_type)
# And finally the result of this Call() node will be
# the return type from the function we just analyzed.
if isinstance(inferred_func_type, FunctionType):
self._set_inferred_type_for_node(
node, inferred_func_type.return_type)
def _map_function_params(self, sub_table, node, def_node):
# type: (ChainedSymbolTable, Any, Any) -> None
# TODO: Handle the full calling syntax, kwargs, stargs, etc.
# Right now we just handle positional args.
defined_args = def_node.args
for arg, defined in zip(node.args, defined_args.args):
inferred_type = self._get_inferred_type_for_node(arg)
if inferred_type is not None:
name = self._get_name(defined)
sub_table.set_inferred_type(name, inferred_type)
def _get_name(self, node):
# type: (Any) -> str
try:
return getattr(node, 'id')
except AttributeError:
return getattr(node, 'arg')
def visit_FunctionDef(self, node):
# type: (ast.FunctionDef) -> None
if node.name == self._symbol_table.get_name():
# Not using generic_visit() because we don't want to
# visit the decorator_list attr.
for child in node.body:
self.visit(child)
else:
self._symbol_table.register_ast_node_for_symbol(node.name, node)
def visit_AsyncFunctionDef(self, node):
# type: (ast.FunctionDef) -> None
# this type is actually wrong but we can't use the actual type as it's
# not available in python 2
self.visit_FunctionDef(node)
def visit_ClassDef(self, node):
# type: (ast.ClassDef) -> None
# Not implemented yet. We want to ensure we don't
# traverse into the class body for now.
return
def visit_DictComp(self, node):
# type: (ast.DictComp) -> None
self._handle_comprehension(node, 'dictcomp')
def visit_Return(self, node):
# type: (Any) -> None
self.generic_visit(node)
inferred_type = self._get_inferred_type_for_node(node.value)
if inferred_type is not None:
self._set_inferred_type_for_node(node, inferred_type)
# We're making a pretty big assumption there's one return
# type per function. Will likely need to come back to this.
inferred_func_type = FunctionType(inferred_type)
self._set_inferred_type_for_node(self._current_ast_namespace,
inferred_func_type)
def visit_ListComp(self, node):
# type: (ast.ListComp) -> None
# 'listcomp' is the string literal used by python
        # to create the SymbolTable for the corresponding
# list comp function.
self._handle_comprehension(node, 'listcomp')
def visit_GeneratorExp(self, node):
# type: (ast.GeneratorExp) -> None
# Generator expressions are an interesting case.
# They create a new sub scope, but they're not
# explicitly named. Python just creates a table
# with the name "genexpr".
self._handle_comprehension(node, 'genexpr')
def _visit_first_comprehension_generator(self, node):
# type: (ComprehensionNode) -> None
if node.generators:
# first generator's iterator is visited in the current scope
first_generator = node.generators[0]
self.visit(first_generator.iter)
def _collect_comprehension_children(self, node):
# type: (ComprehensionNode) -> List[ast.expr]
if isinstance(node, ast.DictComp):
# dict comprehensions have two values to be checked
child_nodes = [node.key, node.value]
else:
child_nodes = [node.elt]
if node.generators:
first_generator = node.generators[0]
child_nodes.append(first_generator.target)
for if_expr in first_generator.ifs:
child_nodes.append(if_expr)
for generator in node.generators[1:]:
# rest need to be visited in the child scope
child_nodes.append(generator.iter)
child_nodes.append(generator.target)
for if_expr in generator.ifs:
child_nodes.append(if_expr)
return child_nodes
def _visit_comprehension_children(self, node, comprehension_type):
# type: (ComprehensionNode, str) -> None
child_nodes = self._collect_comprehension_children(node)
child_scope = self._get_matching_sub_namespace(comprehension_type,
node.lineno)
if child_scope is None:
            # Either there is no child scope for the comprehension (Python 2
            # list comps), or we failed to locate the child scope. The latter
            # happens in Python 2 when there are multiple comprehensions of
            # the same type in the same scope: the line number trick doesn't
            # work because Python 2 always reports line number 0, so we make
            # a best effort.
for child_node in child_nodes:
try:
self.visit(child_node)
except KeyError:
pass
return
for child_node in child_nodes:
# visit sub expressions in the child scope
child_table = self._symbol_table.new_sub_table(child_scope)
child_infer = self._new_inference_scope(
ParsedCode(child_node, child_table),
self._binder, self._visited)
child_infer.bind_types()
def _handle_comprehension(self, node, comprehension_type):
# type: (ComprehensionNode, str) -> None
self._visit_first_comprehension_generator(node)
self._visit_comprehension_children(node, comprehension_type)
def _get_matching_sub_namespace(self, name, lineno):
# type: (str, int) -> Optional[symtable.SymbolTable]
namespaces = [t for t in self._symbol_table.get_sub_namespaces()
if t.get_name() == name]
if len(namespaces) == 1:
# if there's only one match for the name, return it
return namespaces[0]
for namespace in namespaces:
# otherwise disambiguate by using the line number
if namespace.get_lineno() == lineno:
return namespace
return None
def visit(self, node):
# type: (Any) -> None
return ast.NodeVisitor.visit(self, node)
class AppViewTransformer(ast.NodeTransformer):
_CHALICE_DECORATORS = [
'route', 'authorizer', 'lambda_function',
'schedule', 'on_s3_event', 'on_sns_message',
'on_sqs_message', 'on_ws_connect', 'on_ws_message',
'on_ws_disconnect',
]
def visit_FunctionDef(self, node):
# type: (ast.FunctionDef) -> Any
if self._is_chalice_view(node):
return self._auto_invoke_view(node)
return node
def _is_chalice_view(self, node):
# type: (ast.FunctionDef) -> bool
# We can certainly improve on this, but this check is more
# of a heuristic for the time being. The ideal way to do this
# is to infer the Chalice type and ensure the function is
# decorated with the Chalice type's route() method.
decorator_list = node.decorator_list
if not decorator_list:
return False
for decorator in decorator_list:
if isinstance(decorator, ast.Call) and \
isinstance(decorator.func, ast.Attribute):
if decorator.func.attr in self._CHALICE_DECORATORS:
return True
return False
def _auto_invoke_view(self, node):
# type: (ast.FunctionDef) -> List[ast.AST]
auto_invoke = ast.Expr(
value=ast.Call(
func=ast.Name(id=node.name, ctx=ast.Load()),
args=[], keywords=[], starargs=None, kwargs=None
)
)
return [node, auto_invoke]
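# A minimal, illustrative usage sketch (not part of the original module). The
# example source string below is hypothetical; the expected output follows the
# service_name -> set(method_names) layout described in get_client_calls.
if __name__ == '__main__':
    _example_app = (
        "import boto3\n"
        "s3 = boto3.client('s3')\n"
        "s3.list_objects()\n"
    )
    # Expected, per the docstring's format: {'s3': {'list_objects'}}
    print(get_client_calls(_example_app))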
|
awslabs/chalice
|
chalice/analyzer.py
|
Python
|
apache-2.0
| 26,421
|
[
"VisIt"
] |
704b8ea01d384c4eb5a13fec2884e1e1a3f529b74ee616c848edeb2195985b79
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('viconToGps')
import rospy
import conversion as c
from sensor_msgs.msg import NavSatFix
from sensor_msgs.msg import NavSatStatus
from geometry_msgs.msg import TransformStamped
latestViconMsg = TransformStamped()
latestViconMsg.transform.translation.x = 30
parentFrameLat = rospy.get_param('reference_latitude', 20)
parentFrameLong = rospy.get_param('reference_longitude', 30)
parentFrameAlt = rospy.get_param('reference_altitude', 40)
parentFrameAngle = rospy.get_param('Angle', 0)
def callback(viconData):
''' Saves the data when it gets a Vicon packet
'''
global latestViconMsg
latestViconMsg = viconData
#rospy.loginfo("Vicon position is %s.",[viconData.transform.translation.x, viconData.transform.translation.y, viconData.transform.translation.z])
def talker():
# Subscribe to Vicon messages
viconTopic = rospy.get_param('topic')
rospy.Subscriber(viconTopic, TransformStamped, callback)
# Start a publisher for the GPS messages
pub = rospy.Publisher('GPS/position', NavSatFix) # FIXME
# Start the node
rospy.init_node('talker')
# Populate the NavSatFix message from the parameter server
statusMsg = NavSatStatus()
statusMsg.status = rospy.get_param('status', -1)
statusMsg.service = rospy.get_param('service', 1)
fixMsg = NavSatFix()
fixMsg.header.stamp = rospy.Time.now()
fixMsg.header.frame_id = "/world"
fixMsg.status = statusMsg
fixMsg.position_covariance = [0, 0, 0, 0, 0, 0, 0, 0, 0]
    # position could be modified by adding some Gaussian noise to it, and the
    # covariance matrix then calculated from that noise.
    fixMsg.position_covariance_type = rospy.get_param('position_covariance_type', 0)
while not rospy.is_shutdown():
[fixMsg.longitude, fixMsg.latitude, fixMsg.altitude] = c.xyz2gps([parentFrameLong, parentFrameLat, parentFrameAlt], latestViconMsg.transform.translation.x, latestViconMsg.transform.translation.y, latestViconMsg.transform.translation.z, parentFrameAngle)
statusMsg.status = rospy.get_param('status', statusMsg.status)
statusMsg.service = rospy.get_param('service', statusMsg.service)
# put the sigma and calculate the cov matrix here
#rospy.loginfo([fixMsg.longitude, fixMsg.latitude, fixMsg.altitude])
pub.publish(fixMsg)
rospy.sleep(0.1)
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException: pass
|
kdhansen/roburoc4
|
viconToGps/nodes/talker.py
|
Python
|
gpl-3.0
| 2,492
|
[
"Gaussian"
] |
ff345c284f9e464b0d6a650615588ce5aba1cd00b3154191e010057e09a3844d
|
"""Tests for parse.py"""
import unittest
from qcl import parse
import numpy as np
class ParseTest(unittest.TestCase):
"""Tests for parse.py"""
def test_xyzfile(self):
"""Test for parse.xyzfile()"""
ccdata = parse.xyzfile('../data/xyz/water.xyz')
self.assertEqual(ccdata.charge, 0)
self.assertEqual(ccdata.mult, 1)
self.assertEqual(len(ccdata.atomnos), 3)
atomnos = np.array([8, 1, 1])
self.assertTrue(np.array_equal(ccdata.atomnos, atomnos))
atomcoords = np.array([
[-5.35740, 2.09256, 0.00000],
[-4.38740, 2.09256, 0.00000],
[-5.68073, 1.66625, -0.80909]])
self.assertTrue(np.array_equal(ccdata.atomcoords, atomcoords))
def test_xyzfile_ccdata_xyz(self):
"""Test for parse.xyzfile()"""
ccdata = parse.xyzfile('../data/xyz/S+MeO.xyz', ccxyz=True)
self.assertEqual(ccdata.charge, 0)
self.assertEqual(ccdata.mult, 2)
self.assertEqual(len(ccdata.atomnos), 13)
atomnos = np.array([6, 8, 8, 6, 14, 1, 1, 1, 1, 6, 1, 1, 1])
self.assertTrue(np.array_equal(ccdata.atomnos, atomnos))
atomcoords = np.array([
[-3.77578, 1.41262, -0.21447],
[-2.58788, 2.06725, -0.31292],
[-4.74823, 1.86911, -0.79440],
[-3.94794, 0.18513, 0.64310],
[-3.26032, -1.33907, -0.21050],
[-1.82613, -1.25295, -0.50911],
[-3.98211, -1.54172, -1.47550],
[-3.48170, -2.50778, 0.65363],
[-5.02928, -0.02357, 0.79011],
[-1.46784, 1.77800, 0.43859],
[-0.66178, 2.49212, 0.17276],
[-1.10077, 0.75603, 0.23026],
[-1.69461, 1.88618, 1.52073]])
self.assertTrue(np.array_equal(ccdata.atomcoords, atomcoords))
elements = ['C', 'O', 'O', 'C', 'Si', 'H', 'H',
'H', 'H', 'C', 'H', 'H', 'H']
self.assertListEqual(ccdata.elements, elements)
comment = '0 2\n'
self.assertEqual(ccdata.comment, comment)
filename = 'S+MeO.xyz'
self.assertEqual(ccdata.filename, filename)
def test_multixyzfile(self):
"""Test for parse.multixyzfile()"""
ccdatas = parse.multixyzfile('../data/xyz/multi.xyz')
self.assertEqual(ccdatas[0].charge, 1)
self.assertEqual(ccdatas[0].mult, 2)
self.assertEqual(len(ccdatas[0].atomnos), 13)
atomnos = np.array([6, 8, 8, 6, 14, 1, 1, 1, 1, 6, 1, 1, 1])
self.assertTrue(np.array_equal(ccdatas[0].atomnos, atomnos))
atomcoords = np.array([
[-3.77578, 1.41262, -0.21447],
[-2.58788, 2.06725, -0.31292],
[-4.74823, 1.86911, -0.79440],
[-3.94794, 0.18513, 0.64310],
[-3.26032, -1.33907, -0.21050],
[-1.82613, -1.25295, -0.50911],
[-3.98211, -1.54172, -1.47550],
[-3.48170, -2.50778, 0.65363],
[-5.02928, -0.02357, 0.79011],
[-1.46784, 1.77800, 0.43859],
[-0.66178, 2.49212, 0.17276],
[-1.10077, 0.75603, 0.23026],
[-1.69461, 1.88618, 1.52073]])
self.assertTrue(np.array_equal(ccdatas[0].atomcoords, atomcoords))
self.assertEqual(ccdatas[1].charge, 0)
self.assertEqual(ccdatas[1].mult, 1)
self.assertEqual(len(ccdatas[1].atomnos), 3)
atomnos = np.array([8, 1, 1])
self.assertTrue(np.array_equal(ccdatas[1].atomnos, atomnos))
atomcoords = np.array([
[-5.35740, 2.09256, 0.00000],
[-4.38740, 2.09256, 0.00000],
[-5.68073, 1.66625, -0.80909]])
self.assertTrue(np.array_equal(ccdatas[1].atomcoords, atomcoords))
def test_mopacoutputfile(self):
"""Test for prase.mopacoutputfile"""
ccdata = parse.mopacoutputfile('../data/mop/mndo.out', nogeometry=True)
# Calculated using 23.060548867 kcal/mol per eV (cclib conversion factor)
scf_kcalmol = -44257.25147116261
print(ccdata.scfenergies[0])
print(scf_kcalmol)
self.assertAlmostEqual(ccdata.scfenergies[0], scf_kcalmol, places=10)
if __name__ == '__main__':
unittest.main()
|
ben-albrecht/qcl
|
test/test_parse.py
|
Python
|
mit
| 4,238
|
[
"cclib"
] |
c1c088990de63f002ee1dda5652fff60d4e11cdef139d78ff3644f73a38e69f2
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2019 Manas.Tech
# License granted by Canonical Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The Crystal plugin can be used for Crystal projects using `shards`.
This plugin uses the common plugin keywords as well as those for "sources".
For more information check the 'plugins' topic for the former and the
'sources' topic for the latter.
Additionally, this plugin uses the following plugin-specific keywords:
- crystal-channel:
(string, default: latest/stable)
The Snap Store channel to install Crystal from.
    - crystal-build-options:
(list of strings, default: '[]')
These options are passed to `shards build`.
"""
import os
import shutil
from snapcraft import file_utils
from snapcraft.internal import common, elf, errors
from snapcraft.plugins.v1 import PluginV1
_CRYSTAL_CHANNEL = "latest/stable"
class CrystalPlugin(PluginV1):
@classmethod
def schema(cls):
schema = super().schema()
schema["properties"]["crystal-channel"] = {
"type": "string",
"default": _CRYSTAL_CHANNEL,
}
schema["properties"]["crystal-build-options"] = {
"type": "array",
"minitems": 1,
"uniqueItems": True,
"items": {"type": "string"},
"default": [],
}
schema["required"] = ["source"]
return schema
@classmethod
def get_build_properties(cls):
return ["crystal-build-options"]
@classmethod
def get_pull_properties(cls):
return ["crystal-channel"]
def __init__(self, name, options, project):
super().__init__(name, options, project)
self.build_snaps.append("crystal/{}".format(self.options.crystal_channel))
# See https://github.com/crystal-lang/distribution-scripts/blob/8bc01e26291dc518390129e15df8f757d687871c/docker/ubuntu.Dockerfile#L9
self.build_packages.extend(
[
"git",
"make",
"gcc",
"pkg-config",
"libssl-dev",
"libxml2-dev",
"libyaml-dev",
"libgmp-dev",
"libpcre3-dev",
"libevent-dev",
"libz-dev",
]
)
def build(self):
super().build()
self.run(
["shards", "build", "--without-development"]
+ self.options.crystal_build_options,
self.builddir,
)
output_bin = os.path.join(self.builddir, "bin")
if not os.path.exists(output_bin):
raise errors.SnapcraftEnvironmentError(
"No binaries were built. Ensure the shards.yaml contains valid targets."
)
install_bin_path = os.path.join(self.installdir, "bin")
bin_paths = (os.path.join(output_bin, b) for b in os.listdir(output_bin))
elf_files = (elf.ElfFile(path=b) for b in bin_paths if elf.ElfFile.is_elf(b))
os.makedirs(install_bin_path, exist_ok=True)
for elf_file in elf_files:
shutil.copy2(
elf_file.path,
os.path.join(install_bin_path, os.path.basename(elf_file.path)),
)
elf_dependencies_path = elf_file.load_dependencies(
root_path=self.installdir,
core_base_path=common.get_installed_snap_path(
self.project._get_build_base()
),
arch_triplet=self.project.arch_triplet,
content_dirs=self.project._get_provider_content_dirs(),
)
for elf_dependency_path in elf_dependencies_path:
lib_install_path = os.path.join(
self.installdir, elf_dependency_path[1:]
)
os.makedirs(os.path.dirname(lib_install_path), exist_ok=True)
if not os.path.exists(lib_install_path):
file_utils.link_or_copy(
elf_dependency_path, lib_install_path, follow_symlinks=True
)
|
snapcore/snapcraft
|
snapcraft/plugins/v1/crystal.py
|
Python
|
gpl-3.0
| 4,670
|
[
"CRYSTAL"
] |
ef1f163ff1226da6e333b5d42f5cf095806a881a658711cffe9c3b13799bf296
|
"""
Image Pyramids
functions: cv2.pyrUp(), cv2.pyrDown()
sometimes we need to work with the same image at different resolutions
create images at different resolutions and search for the object in all of them
image pyramid = {images of different resolutions}
pyramid types
Gaussian pyramid
Laplacian pyramid
"""
# Higher level(Low resolution) in Gaussian
# remove consecutive rows and cols in lower level (higher res) image
# each pixel in higher level formed by contribution from 5 pixels in underlying lower level with Gaussian weights
# thus, MxN image becomes M/2 x N/2 image
# so area reduced to 1/4 of original area -- called an Octave
# expanding, area becomes 4x in each level
# Gaussian pyramids: cv2.pyrDown() and cv2.pyrUp()
import cv2
higher_reso = cv2.imread('messi5.jpg')
lower_reso = cv2.pyrDown(higher_reso)
# go back up the image pyramid with the cv2.pyrUp() function
higher_reso2 = cv2.pyrUp(lower_reso)
# NOTE: higher_reso2 != higher_reso; once you decrease the resolution, you lose information
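# A quick illustrative check of the size relationship described above
# (assumes 'messi5.jpg' was loaded successfully):
print(higher_reso.shape, lower_reso.shape, higher_reso2.shape)
# pyrDown roughly halves each dimension (MxN -> ~M/2 x N/2), and pyrUp brings
# the size back up, but not the original pixel values.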
# Laplacian Pyramids
# formed from Gaussian pyramids
# Laplacian pyramid images are like edge images only
# most elements are zeros
# used in image compression
# level is formed by diff btwn lvl in Gaussian pyramid and expanded version of its upper level in Gaussian pyramid
# Image Blending using Pyramids
# in image stitching, need to stack 2 images together; may not look good due to image discontinuities
# image blending gives seamless blending without leaving much data
# ex. blend apple and orange
# load apple and orange images
# find Gaussian pyramids for apple and orange
# from G.pyramids, find Laplacian pyramids
# join left half of apple and right half of orange in each level of the Laplacian pyramids
# from joint image pyramids, reconstruct original image
import cv2
import numpy as np, sys
A = cv2.imread('apple.jpg')
B = cv2.imread('orange.jpg')
# generate Gaussian pyramid for A
G = A.copy()
gpA = [G]
for i in xrange(6):
G = cv2.pyrDown(G)
gpA.append(G)
# generate Gaussian pyramid for B
G = B.copy()
gpB = [G]
for i in xrange(6):
G = cv2.pyrDown(G)
gpB.append(G)
# generate Laplacian pyramid for A
lpA = [gpA[5]]
for i in xrange(5,0,-1):
GE = cv2.pyrUp(gpA[i])
L = cv2.subtract(gpA[i-1], GE)
lpA.append(L)
# generate Laplacian pyramid for B
lpB = [gpB[5]]
for i in xrange(5,0,-1):
GE = cv2.pyrUp(gpB[i])
L = cv2.subtract(gpB[i-1], GE)
lpB.append(L)
# Add left and right halves of images in each level
LS = []
for la, lb in zip(lpA, lpB):
rows, cols, dpt = la.shape
ls = np.hstack((la[:, 0:cols/2], lb[:, cols/2:]))
LS.append(ls)
# now reconstruct
ls_ = LS[0]
for i in xrange(1,6):
ls_ = cv2.pyrUp(ls_)
ls_ = cv2.add(ls_, LS[i])
# image with direct connecting each half
real = np.hstack((A[:,:cols/2], B[:, cols/2:]))
cv2.imwrite('Pyramid_blending2.jpg', ls_)
cv2.imwrite('Direct_blending.jpg', real)
|
SSG-DRD-IOT/commercial-iot-security-system
|
opencv/tutorials/imageProcessing/pyramids/pyramids.py
|
Python
|
mit
| 2,980
|
[
"Gaussian"
] |
6d60eb5fefdf19af5ddcb50fedef735dd547a6a39489c1a4c7a77d6751209f11
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1beta1.services.endpoint_service import (
EndpointServiceAsyncClient,
)
from google.cloud.aiplatform_v1beta1.services.endpoint_service import (
EndpointServiceClient,
)
from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers
from google.cloud.aiplatform_v1beta1.services.endpoint_service import transports
from google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.aiplatform_v1beta1.types import accelerator_type
from google.cloud.aiplatform_v1beta1.types import encryption_spec
from google.cloud.aiplatform_v1beta1.types import endpoint
from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint
from google.cloud.aiplatform_v1beta1.types import endpoint_service
from google.cloud.aiplatform_v1beta1.types import explanation
from google.cloud.aiplatform_v1beta1.types import explanation_metadata
from google.cloud.aiplatform_v1beta1.types import machine_resources
from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
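# Illustrative note: a client whose DEFAULT_ENDPOINT contains "localhost" gets
# the placeholder "foo.googleapis.com", while a real endpoint such as
# "aiplatform.googleapis.com" is returned unchanged, so the mTLS tests below
# can exercise both cases.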
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert EndpointServiceClient._get_default_mtls_endpoint(None) is None
assert (
EndpointServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
EndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
EndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
EndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
EndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize(
"client_class", [EndpointServiceClient, EndpointServiceAsyncClient,]
)
def test_endpoint_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.EndpointServiceGrpcTransport, "grpc"),
(transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_endpoint_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [EndpointServiceClient, EndpointServiceAsyncClient,]
)
def test_endpoint_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_endpoint_service_client_get_transport_class():
transport = EndpointServiceClient.get_transport_class()
available_transports = [
transports.EndpointServiceGrpcTransport,
]
assert transport in available_transports
transport = EndpointServiceClient.get_transport_class("grpc")
assert transport == transports.EndpointServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"),
(
EndpointServiceAsyncClient,
transports.EndpointServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
EndpointServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EndpointServiceClient),
)
@mock.patch.object(
EndpointServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EndpointServiceAsyncClient),
)
def test_endpoint_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(EndpointServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(EndpointServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
EndpointServiceClient,
transports.EndpointServiceGrpcTransport,
"grpc",
"true",
),
(
EndpointServiceAsyncClient,
transports.EndpointServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
EndpointServiceClient,
transports.EndpointServiceGrpcTransport,
"grpc",
"false",
),
(
EndpointServiceAsyncClient,
transports.EndpointServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
EndpointServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EndpointServiceClient),
)
@mock.patch.object(
EndpointServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EndpointServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_endpoint_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"),
(
EndpointServiceAsyncClient,
transports.EndpointServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_endpoint_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"),
(
EndpointServiceAsyncClient,
transports.EndpointServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_endpoint_service_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_endpoint_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = EndpointServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_create_endpoint(
transport: str = "grpc", request_type=endpoint_service.CreateEndpointRequest
):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.CreateEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_endpoint_from_dict():
test_create_endpoint(request_type=dict)
def test_create_endpoint_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call:
client.create_endpoint()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.CreateEndpointRequest()
@pytest.mark.asyncio
async def test_create_endpoint_async(
transport: str = "grpc_asyncio", request_type=endpoint_service.CreateEndpointRequest
):
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.CreateEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_endpoint_async_from_dict():
await test_create_endpoint_async(request_type=dict)
def test_create_endpoint_field_headers():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.CreateEndpointRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_endpoint_field_headers_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.CreateEndpointRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_endpoint_flattened():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_endpoint(
parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].endpoint == gca_endpoint.Endpoint(name="name_value")
def test_create_endpoint_flattened_error():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_endpoint(
endpoint_service.CreateEndpointRequest(),
parent="parent_value",
endpoint=gca_endpoint.Endpoint(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_endpoint_flattened_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_endpoint(
parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].endpoint == gca_endpoint.Endpoint(name="name_value")
@pytest.mark.asyncio
async def test_create_endpoint_flattened_error_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_endpoint(
endpoint_service.CreateEndpointRequest(),
parent="parent_value",
endpoint=gca_endpoint.Endpoint(name="name_value"),
)
def test_get_endpoint(
transport: str = "grpc", request_type=endpoint_service.GetEndpointRequest
):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = endpoint.Endpoint(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
network="network_value",
)
response = client.get_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.GetEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, endpoint.Endpoint)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
assert response.network == "network_value"
def test_get_endpoint_from_dict():
test_get_endpoint(request_type=dict)
def test_get_endpoint_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call:
client.get_endpoint()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.GetEndpointRequest()
@pytest.mark.asyncio
async def test_get_endpoint_async(
transport: str = "grpc_asyncio", request_type=endpoint_service.GetEndpointRequest
):
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
endpoint.Endpoint(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
network="network_value",
)
)
response = await client.get_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.GetEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, endpoint.Endpoint)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
assert response.network == "network_value"
@pytest.mark.asyncio
async def test_get_endpoint_async_from_dict():
await test_get_endpoint_async(request_type=dict)
def test_get_endpoint_field_headers():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.GetEndpointRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call:
call.return_value = endpoint.Endpoint()
client.get_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_endpoint_field_headers_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.GetEndpointRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint())
await client.get_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_endpoint_flattened():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = endpoint.Endpoint()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_endpoint(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_endpoint_flattened_error():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_endpoint(
endpoint_service.GetEndpointRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_endpoint_flattened_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_endpoint(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_endpoint_flattened_error_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_endpoint(
endpoint_service.GetEndpointRequest(), name="name_value",
)
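# NOTE: Illustrative sketch, not collected by pytest. get_endpoint is a simple unary
# call: the flattened "name" field is the full endpoint resource name, and the client
# forwards it in the "x-goog-request-params" routing header (asserted in the
# field-header tests above). The resource name below is a hypothetical placeholder
# and the sketch assumes Application Default Credentials are configured.
def _example_get_endpoint_usage():
    client = EndpointServiceClient()
    name = "projects/my-project/locations/us-central1/endpoints/123"  # hypothetical
    ep = client.get_endpoint(name=name)
    return ep.display_name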
def test_list_endpoints(
transport: str = "grpc", request_type=endpoint_service.ListEndpointsRequest
):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = endpoint_service.ListEndpointsResponse(
next_page_token="next_page_token_value",
)
response = client.list_endpoints(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.ListEndpointsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListEndpointsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_endpoints_from_dict():
test_list_endpoints(request_type=dict)
def test_list_endpoints_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
client.list_endpoints()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.ListEndpointsRequest()
@pytest.mark.asyncio
async def test_list_endpoints_async(
transport: str = "grpc_asyncio", request_type=endpoint_service.ListEndpointsRequest
):
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
endpoint_service.ListEndpointsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_endpoints(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.ListEndpointsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListEndpointsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_endpoints_async_from_dict():
await test_list_endpoints_async(request_type=dict)
def test_list_endpoints_field_headers():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.ListEndpointsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
call.return_value = endpoint_service.ListEndpointsResponse()
client.list_endpoints(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_endpoints_field_headers_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.ListEndpointsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
endpoint_service.ListEndpointsResponse()
)
await client.list_endpoints(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_endpoints_flattened():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = endpoint_service.ListEndpointsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_endpoints(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_list_endpoints_flattened_error():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_endpoints(
endpoint_service.ListEndpointsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_endpoints_flattened_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            endpoint_service.ListEndpointsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_endpoints(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_endpoints_flattened_error_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_endpoints(
endpoint_service.ListEndpointsRequest(), parent="parent_value",
)
def test_list_endpoints_pager():
    client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
endpoint_service.ListEndpointsResponse(
endpoints=[
endpoint.Endpoint(),
endpoint.Endpoint(),
endpoint.Endpoint(),
],
next_page_token="abc",
),
endpoint_service.ListEndpointsResponse(
endpoints=[], next_page_token="def",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(),], next_page_token="ghi",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(), endpoint.Endpoint(),],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
pager = client.list_endpoints(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, endpoint.Endpoint) for i in results)
def test_list_endpoints_pages():
    client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
endpoint_service.ListEndpointsResponse(
endpoints=[
endpoint.Endpoint(),
endpoint.Endpoint(),
endpoint.Endpoint(),
],
next_page_token="abc",
),
endpoint_service.ListEndpointsResponse(
endpoints=[], next_page_token="def",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(),], next_page_token="ghi",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(), endpoint.Endpoint(),],
),
RuntimeError,
)
pages = list(client.list_endpoints(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
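# NOTE: Illustrative sketch, not collected by pytest. It shows how the
# ListEndpointsPager returned by list_endpoints is normally consumed: iterating the
# pager yields Endpoint messages across all pages transparently, while .pages yields
# one ListEndpointsResponse per RPC page (as asserted in the pager/pages tests
# above). The parent value is a hypothetical placeholder and real credentials are
# assumed to be configured.
def _example_list_endpoints_pager_usage():
    client = EndpointServiceClient()
    parent = "projects/my-project/locations/us-central1"  # hypothetical
    # Iterating the pager yields Endpoint messages across all pages.
    all_names = [ep.name for ep in client.list_endpoints(parent=parent)]
    # Alternatively, .pages yields one ListEndpointsResponse per page.
    page_sizes = [len(page.endpoints) for page in client.list_endpoints(parent=parent).pages]
    return all_names, page_sizes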
@pytest.mark.asyncio
async def test_list_endpoints_async_pager():
client = EndpointServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_endpoints), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
endpoint_service.ListEndpointsResponse(
endpoints=[
endpoint.Endpoint(),
endpoint.Endpoint(),
endpoint.Endpoint(),
],
next_page_token="abc",
),
endpoint_service.ListEndpointsResponse(
endpoints=[], next_page_token="def",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(),], next_page_token="ghi",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(), endpoint.Endpoint(),],
),
RuntimeError,
)
async_pager = await client.list_endpoints(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, endpoint.Endpoint) for i in responses)
@pytest.mark.asyncio
async def test_list_endpoints_async_pages():
client = EndpointServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_endpoints), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
endpoint_service.ListEndpointsResponse(
endpoints=[
endpoint.Endpoint(),
endpoint.Endpoint(),
endpoint.Endpoint(),
],
next_page_token="abc",
),
endpoint_service.ListEndpointsResponse(
endpoints=[], next_page_token="def",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(),], next_page_token="ghi",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(), endpoint.Endpoint(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_endpoints(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
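# NOTE: Illustrative sketch, not collected by pytest. The async client returns a
# ListEndpointsAsyncPager that must first be awaited and then consumed with
# "async for" (mirroring the async pager tests above). The parent value is a
# hypothetical placeholder, and the coroutine must be driven by an event loop the
# caller provides.
async def _example_list_endpoints_async_pager_usage():
    client = EndpointServiceAsyncClient()
    parent = "projects/my-project/locations/us-central1"  # hypothetical
    async_pager = await client.list_endpoints(parent=parent)
    names = []
    async for ep in async_pager:  # transparently fetches subsequent pages
        names.append(ep.name)
    return names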
def test_update_endpoint(
transport: str = "grpc", request_type=endpoint_service.UpdateEndpointRequest
):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_endpoint.Endpoint(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
network="network_value",
)
response = client.update_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.UpdateEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_endpoint.Endpoint)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
assert response.network == "network_value"
def test_update_endpoint_from_dict():
test_update_endpoint(request_type=dict)
def test_update_endpoint_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call:
client.update_endpoint()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.UpdateEndpointRequest()
@pytest.mark.asyncio
async def test_update_endpoint_async(
transport: str = "grpc_asyncio", request_type=endpoint_service.UpdateEndpointRequest
):
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_endpoint.Endpoint(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
network="network_value",
)
)
response = await client.update_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.UpdateEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_endpoint.Endpoint)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
assert response.network == "network_value"
@pytest.mark.asyncio
async def test_update_endpoint_async_from_dict():
await test_update_endpoint_async(request_type=dict)
def test_update_endpoint_field_headers():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.UpdateEndpointRequest()
request.endpoint.name = "endpoint.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call:
call.return_value = gca_endpoint.Endpoint()
client.update_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint.name=endpoint.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_endpoint_field_headers_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.UpdateEndpointRequest()
request.endpoint.name = "endpoint.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_endpoint.Endpoint()
)
await client.update_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint.name=endpoint.name/value",) in kw[
"metadata"
]
def test_update_endpoint_flattened():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_endpoint.Endpoint()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_endpoint(
endpoint=gca_endpoint.Endpoint(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].endpoint == gca_endpoint.Endpoint(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_endpoint_flattened_error():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_endpoint(
endpoint_service.UpdateEndpointRequest(),
endpoint=gca_endpoint.Endpoint(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_endpoint_flattened_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_endpoint.Endpoint()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_endpoint(
endpoint=gca_endpoint.Endpoint(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].endpoint == gca_endpoint.Endpoint(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_endpoint_flattened_error_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_endpoint(
endpoint_service.UpdateEndpointRequest(),
endpoint=gca_endpoint.Endpoint(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
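# NOTE: Illustrative sketch, not collected by pytest. update_endpoint is a plain
# unary call (no long-running operation): it takes the new Endpoint message plus a
# FieldMask naming the fields to change, and the routing header is derived from
# endpoint.name (see the field-header tests above). The resource name and masked
# field below are hypothetical placeholders.
def _example_update_endpoint_usage():
    client = EndpointServiceClient()
    updated = gca_endpoint.Endpoint(
        name="projects/my-project/locations/us-central1/endpoints/123",  # hypothetical
        display_name="renamed-endpoint",
    )
    return client.update_endpoint(
        endpoint=updated,
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )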
def test_delete_endpoint(
transport: str = "grpc", request_type=endpoint_service.DeleteEndpointRequest
):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.DeleteEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_endpoint_from_dict():
test_delete_endpoint(request_type=dict)
def test_delete_endpoint_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call:
client.delete_endpoint()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.DeleteEndpointRequest()
@pytest.mark.asyncio
async def test_delete_endpoint_async(
transport: str = "grpc_asyncio", request_type=endpoint_service.DeleteEndpointRequest
):
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.DeleteEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_endpoint_async_from_dict():
await test_delete_endpoint_async(request_type=dict)
def test_delete_endpoint_field_headers():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.DeleteEndpointRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_endpoint_field_headers_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.DeleteEndpointRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_endpoint_flattened():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_endpoint(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_delete_endpoint_flattened_error():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_endpoint(
endpoint_service.DeleteEndpointRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_endpoint_flattened_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_endpoint(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_endpoint_flattened_error_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_endpoint(
endpoint_service.DeleteEndpointRequest(), name="name_value",
)
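# NOTE: Illustrative sketch, not collected by pytest. delete_endpoint returns a
# long-running operation (asserted as a future.Future above); waiting on it with
# result() simply blocks until the deletion finishes. The resource name is a
# hypothetical placeholder.
def _example_delete_endpoint_usage():
    client = EndpointServiceClient()
    operation = client.delete_endpoint(
        name="projects/my-project/locations/us-central1/endpoints/123",  # hypothetical
    )
    operation.result()  # blocks until deletion completes; the operation's response is empty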
def test_deploy_model(
transport: str = "grpc", request_type=endpoint_service.DeployModelRequest
):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.DeployModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_deploy_model_from_dict():
test_deploy_model(request_type=dict)
def test_deploy_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
client.deploy_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.DeployModelRequest()
@pytest.mark.asyncio
async def test_deploy_model_async(
transport: str = "grpc_asyncio", request_type=endpoint_service.DeployModelRequest
):
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.DeployModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_deploy_model_async_from_dict():
await test_deploy_model_async(request_type=dict)
def test_deploy_model_field_headers():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.DeployModelRequest()
request.endpoint = "endpoint/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_deploy_model_field_headers_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.DeployModelRequest()
request.endpoint = "endpoint/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"]
def test_deploy_model_flattened():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.deploy_model(
endpoint="endpoint_value",
deployed_model=gca_endpoint.DeployedModel(
dedicated_resources=machine_resources.DedicatedResources(
machine_spec=machine_resources.MachineSpec(
machine_type="machine_type_value"
)
)
),
traffic_split={"key_value": 541},
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].endpoint == "endpoint_value"
assert args[0].deployed_model == gca_endpoint.DeployedModel(
dedicated_resources=machine_resources.DedicatedResources(
machine_spec=machine_resources.MachineSpec(
machine_type="machine_type_value"
)
)
)
assert args[0].traffic_split == {"key_value": 541}
def test_deploy_model_flattened_error():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.deploy_model(
endpoint_service.DeployModelRequest(),
endpoint="endpoint_value",
deployed_model=gca_endpoint.DeployedModel(
dedicated_resources=machine_resources.DedicatedResources(
machine_spec=machine_resources.MachineSpec(
machine_type="machine_type_value"
)
)
),
traffic_split={"key_value": 541},
)
@pytest.mark.asyncio
async def test_deploy_model_flattened_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.deploy_model(
endpoint="endpoint_value",
deployed_model=gca_endpoint.DeployedModel(
dedicated_resources=machine_resources.DedicatedResources(
machine_spec=machine_resources.MachineSpec(
machine_type="machine_type_value"
)
)
),
traffic_split={"key_value": 541},
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].endpoint == "endpoint_value"
assert args[0].deployed_model == gca_endpoint.DeployedModel(
dedicated_resources=machine_resources.DedicatedResources(
machine_spec=machine_resources.MachineSpec(
machine_type="machine_type_value"
)
)
)
assert args[0].traffic_split == {"key_value": 541}
@pytest.mark.asyncio
async def test_deploy_model_flattened_error_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.deploy_model(
endpoint_service.DeployModelRequest(),
endpoint="endpoint_value",
deployed_model=gca_endpoint.DeployedModel(
dedicated_resources=machine_resources.DedicatedResources(
machine_spec=machine_resources.MachineSpec(
machine_type="machine_type_value"
)
)
),
traffic_split={"key_value": 541},
)
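# NOTE: Illustrative sketch, not collected by pytest. deploy_model starts a
# long-running operation; the flattened form mirrors the tests above: the target
# endpoint, a DeployedModel describing the model and its machine resources, and an
# optional traffic_split mapping deployed-model IDs to traffic percentages. The
# resource names and machine type are hypothetical placeholders, and fields beyond
# those exercised in the tests (model, min_replica_count, the "0" traffic key) are
# assumptions about the v1beta1 API surface rather than something verified here.
def _example_deploy_model_usage():
    client = EndpointServiceClient()
    operation = client.deploy_model(
        endpoint="projects/my-project/locations/us-central1/endpoints/123",  # hypothetical
        deployed_model=gca_endpoint.DeployedModel(
            model="projects/my-project/locations/us-central1/models/456",  # hypothetical
            dedicated_resources=machine_resources.DedicatedResources(
                machine_spec=machine_resources.MachineSpec(
                    machine_type="n1-standard-4"  # hypothetical machine type
                ),
                min_replica_count=1,  # assumed field, not exercised by the tests
            ),
        ),
        traffic_split={"0": 100},  # assumption: "0" refers to the model deployed in this request
    )
    return operation.result()  # waits for the LRO and returns the deploy response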
def test_undeploy_model(
transport: str = "grpc", request_type=endpoint_service.UndeployModelRequest
):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.UndeployModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_undeploy_model_from_dict():
test_undeploy_model(request_type=dict)
def test_undeploy_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
client.undeploy_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.UndeployModelRequest()
@pytest.mark.asyncio
async def test_undeploy_model_async(
transport: str = "grpc_asyncio", request_type=endpoint_service.UndeployModelRequest
):
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.UndeployModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_undeploy_model_async_from_dict():
await test_undeploy_model_async(request_type=dict)
def test_undeploy_model_field_headers():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.UndeployModelRequest()
request.endpoint = "endpoint/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_undeploy_model_field_headers_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.UndeployModelRequest()
request.endpoint = "endpoint/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"]
def test_undeploy_model_flattened():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.undeploy_model(
endpoint="endpoint_value",
deployed_model_id="deployed_model_id_value",
traffic_split={"key_value": 541},
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].endpoint == "endpoint_value"
assert args[0].deployed_model_id == "deployed_model_id_value"
assert args[0].traffic_split == {"key_value": 541}
def test_undeploy_model_flattened_error():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.undeploy_model(
endpoint_service.UndeployModelRequest(),
endpoint="endpoint_value",
deployed_model_id="deployed_model_id_value",
traffic_split={"key_value": 541},
)
@pytest.mark.asyncio
async def test_undeploy_model_flattened_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.undeploy_model(
endpoint="endpoint_value",
deployed_model_id="deployed_model_id_value",
traffic_split={"key_value": 541},
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].endpoint == "endpoint_value"
assert args[0].deployed_model_id == "deployed_model_id_value"
assert args[0].traffic_split == {"key_value": 541}
@pytest.mark.asyncio
async def test_undeploy_model_flattened_error_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.undeploy_model(
endpoint_service.UndeployModelRequest(),
endpoint="endpoint_value",
deployed_model_id="deployed_model_id_value",
traffic_split={"key_value": 541},
)
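# NOTE: Illustrative sketch, not collected by pytest. undeploy_model is also a
# long-running operation: it takes the endpoint, the ID of the DeployedModel to
# remove, and optionally a new traffic_split for the deployments that remain (as the
# flattened tests above show). The values below are hypothetical placeholders.
def _example_undeploy_model_usage():
    client = EndpointServiceClient()
    operation = client.undeploy_model(
        endpoint="projects/my-project/locations/us-central1/endpoints/123",  # hypothetical
        deployed_model_id="456",  # hypothetical deployed model ID
    )
    operation.result()  # blocks until the model has been undeployed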
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.EndpointServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.EndpointServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EndpointServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.EndpointServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EndpointServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.EndpointServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = EndpointServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.EndpointServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.EndpointServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
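# NOTE: Illustrative sketch, not collected by pytest. A client can be given
# credentials directly, or a fully constructed transport instance -- but not both,
# and not a transport combined with credentials_file or scopes client options, as
# the error tests above assert. Anonymous credentials are used here purely as a
# stand-in for real credentials.
def _example_transport_configuration():
    # Option A: let the client build its own gRPC transport from credentials.
    client_a = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials())
    # Option B: build the transport yourself and hand it to the client.
    transport = transports.EndpointServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client_b = EndpointServiceClient(transport=transport)
    return client_a, client_b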
@pytest.mark.parametrize(
"transport_class",
[
transports.EndpointServiceGrpcTransport,
transports.EndpointServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.EndpointServiceGrpcTransport,)
def test_endpoint_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.EndpointServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_endpoint_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.EndpointServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_endpoint",
"get_endpoint",
"list_endpoints",
"update_endpoint",
"delete_endpoint",
"deploy_model",
"undeploy_model",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
@requires_google_auth_gte_1_25_0
def test_endpoint_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.EndpointServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_endpoint_service_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.EndpointServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_endpoint_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.EndpointServiceTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_endpoint_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
EndpointServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_endpoint_service_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
EndpointServiceClient()
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.EndpointServiceGrpcTransport,
transports.EndpointServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_endpoint_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.EndpointServiceGrpcTransport,
transports.EndpointServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_endpoint_service_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.EndpointServiceGrpcTransport, grpc_helpers),
(transports.EndpointServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_endpoint_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.EndpointServiceGrpcTransport,
transports.EndpointServiceGrpcAsyncIOTransport,
],
)
def test_endpoint_service_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_endpoint_service_host_no_port():
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com"
),
)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_endpoint_service_host_with_port():
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com:8000"
),
)
assert client.transport._host == "aiplatform.googleapis.com:8000"
def test_endpoint_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.EndpointServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_endpoint_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.EndpointServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.EndpointServiceGrpcTransport,
transports.EndpointServiceGrpcAsyncIOTransport,
],
)
def test_endpoint_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.EndpointServiceGrpcTransport,
transports.EndpointServiceGrpcAsyncIOTransport,
],
)
def test_endpoint_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_endpoint_service_grpc_lro_client():
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_endpoint_service_grpc_lro_async_client():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_endpoint_path():
project = "squid"
location = "clam"
endpoint = "whelk"
expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
project=project, location=location, endpoint=endpoint,
)
actual = EndpointServiceClient.endpoint_path(project, location, endpoint)
assert expected == actual
def test_parse_endpoint_path():
expected = {
"project": "octopus",
"location": "oyster",
"endpoint": "nudibranch",
}
path = EndpointServiceClient.endpoint_path(**expected)
# Check that the path construction is reversible.
actual = EndpointServiceClient.parse_endpoint_path(path)
assert expected == actual
def test_model_path():
project = "cuttlefish"
location = "mussel"
model = "winkle"
expected = "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
actual = EndpointServiceClient.model_path(project, location, model)
assert expected == actual
def test_parse_model_path():
expected = {
"project": "nautilus",
"location": "scallop",
"model": "abalone",
}
path = EndpointServiceClient.model_path(**expected)
# Check that the path construction is reversible.
actual = EndpointServiceClient.parse_model_path(path)
assert expected == actual
def test_network_path():
project = "squid"
network = "clam"
expected = "projects/{project}/global/networks/{network}".format(
project=project, network=network,
)
actual = EndpointServiceClient.network_path(project, network)
assert expected == actual
def test_parse_network_path():
expected = {
"project": "whelk",
"network": "octopus",
}
path = EndpointServiceClient.network_path(**expected)
# Check that the path construction is reversible.
actual = EndpointServiceClient.parse_network_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = EndpointServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = EndpointServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = EndpointServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder,)
actual = EndpointServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = EndpointServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = EndpointServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization,)
actual = EndpointServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = EndpointServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = EndpointServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project,)
actual = EndpointServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = EndpointServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = EndpointServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = EndpointServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = EndpointServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = EndpointServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.EndpointServiceTransport, "_prep_wrapped_messages"
) as prep:
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.EndpointServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = EndpointServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
| sasha-gitg/python-aiplatform | tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py | Python | apache-2.0 | 110,793 | ["Octopus"] | ba8e1c944ddce0685d631ef5aa6dbc0428e53668695e69218407e656aadcdb8c |
import glob
import hashlib
import sys
import math
import time
# 'nwchem.py' is an intrinsic module
import nwchem
#______________________________________________________
#
#________________ PHYSICAL CONSTANTS _________________
#______________________________________________________
kCalPerHartree = 627.509451
Boltzmann = 1.3806488E-23
Avogadro = 6.02214129E+23
JoulePerKcal = 4.184E+03
T298 = 298.15
AUKCAL = kCalPerHartree
Rgas = 1.9872041 / 1000.0 / AUKCAL # atomic units
kT_298_perMol = (Boltzmann * T298 * Avogadro) / JoulePerKcal / kCalPerHartree
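# A small sanity check of the unit chain above (illustrative only, not part of
# the original constants block): Boltzmann * T298 * Avogadro is J/mol, dividing
# by JoulePerKcal gives kcal/mol (~0.592 at 298.15 K), and dividing again by
# kCalPerHartree expresses kT per mole in Hartree (~9.44e-4), which is what
# kT_298_perMol holds.
def _kT_unit_check():
    kT_kcal_per_mol = Boltzmann * T298 * Avogadro / JoulePerKcal
    kT_hartree = kT_kcal_per_mol / kCalPerHartree
    assert abs(kT_hartree - kT_298_perMol) < 1.0e-12
    return kT_kcal_per_mol, kT_hartree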
class Gn_common(object):
def __init__(self, charge=0, multiplicity="singlet", tracing=False,
debug=False, integral_memory_cache=3500000000,
integral_disk_cache=0, force_c1_symmetry=False,
noautoz=False):
self.dhf298 = 0.0
self.dhf0 = 0.0
self.force_c1_symmetry = force_c1_symmetry
self.noautoz = noautoz
multiplets = ["(null)", "singlet", "doublet", "triplet", "quartet",
"quintet", "hextet","septet", "octet"]
self.integral_memory_cache = integral_memory_cache
self.integral_disk_cache = integral_disk_cache
self.nOpen = None
self.multiplicity_numeric = multiplets.index(multiplicity.lower())
self.charge = charge
self.multiplicity = multiplicity
if multiplicity != "singlet":
self.hftype = "uhf"
else:
self.hftype = "rhf"
self.tracing = tracing
self.debug_flag = debug
self.geohash = self.geometry_hash()
self.atoms = []
def say(self, s):
"""Write to stderr console. No implicit newline "\n".
:param s: message to write
:type s : str
"""
if nwchem.ga_nodeid() == 0:
sys.stderr.write(s)
def log(self, s):
"""Write to stdout console.
:param s: message to write
:type s : str
"""
if nwchem.ga_nodeid() == 0:
sys.stdout.write(s + "\n")
def report(self, s):
"""Write to stderr, stdout.
Add newline to stderr.
:param s: message to write
:type s : str
"""
#self.say(s + '\n')
self.log(s)
def debug(self, s):
"""Write message to stderr if debug_flag is on.
:param s: message to write
:type s : str
"""
if self.debug_flag:
self.say("DEBUG: {0}\n".format(s))
def quick_optimize(self):
'''
HF/3-21G optimization
'''
if self.is_atom():
return
self.say('quick optimize.')
self.send_nwchem_cmd("scf; maxiter 99; end")
self.send_nwchem_cmd("driver; maxiter 99; end".format(self.geohash))
self.send_nwchem_cmd("basis noprint ; * library 3-21G ; end")
scfcmd = self.build_SCF_cmd()
self.send_nwchem_cmd(scfcmd)
# optimize the geometry, ignore energy and gradient results
en, grad = nwchem.task_optimize("scf")
def vib_thermo(self, vibs):
"""Handroll the ZPE because NWChem's zpe accumulates
truncation error from 3 sigfig physical constants.
"""
AUKCAL = 627.5093314
c = 2.99792458E+10
h = 6.62606957E-27
kgas = 1.3806488E-16 # cgs units
Rgas = 1.9872041/1000.0/AUKCAL # atomic units
temperature = 298.15
vibsum = 0.0
for freq in vibs:
if (freq > 0.1):
vibsum += freq
cm2Ha = 219474.6 # cm-1 to Hartree conversion
self.Ezpe = vibsum / (2.0 * cm2Ha)
# shamelessly swipe code from NWCHEM/src/vib_wrtFreq.F
eth = 0.0
hth = 0.0
xdum = 0.0
for freq in vibs:
if (freq > 0.1):
thetav = freq * (h * c / kgas) #freqency temperature in Kelvin from cm-1
if (temperature > 0.0):
xdum = math.exp(-thetav/temperature)
else:
xdum = 0.0
xdum = xdum / (1.0 - xdum)
eth = eth + thetav * (0.5 + xdum)
eth = eth * Rgas
# linear boolean is available only after task_freq('scf') runs
# NWChem only writes the flag if molecule is linear
try:
is_linear = nwchem.rtdb_get("vib:linear")
except:
is_linear = False
if (is_linear):
# translational(3/2RT) and rotation(2/2RT) thermal corrections
eth = eth + 2.5 * Rgas * temperature
else:
# translational(3/2RT) and rotation(3/2RT) thermal corrections
eth = eth + 3.0 * Rgas * temperature
# Hthermal = eth+pV=eth+RT, since pV=RT
hth = eth + Rgas * temperature
self.debug("Handrolled E,H thermal= %.6f, %.6f\n" % (eth,hth))
self.Ethermal = eth
self.Hthermal = hth
def geometry_hash(self):
"""Produce a hashed geometry identifier from the geometry in the
RTDB. This is useful to generate file names for writing and reading
geometry to/from disk.
:return: sha1 hex digest of geometry
:rtype : str
"""
keys = [nwchem.rtdb_first()]
while True:
try:
keys.append(nwchem.rtdb_next())
except nwchem.NWChemError:
break
ckey = [k for k in keys if "coords" in k and "geometry" in k][0]
tkey = [k for k in keys if "tags" in k and "geometry" in k][0]
coords = nwchem.rtdb_get(ckey)
tags = nwchem.rtdb_get(tkey)
fused = " ".join([str(c) for c in coords]) + " ".join([str(t) for t in tags])
result = hashlib.sha1(fused).hexdigest()
return result
def is_molecule(self):
"""Determine if this is a molecular system (more than 1 atom)
:return: True if more than 1 atom, else False
:rtype : bool
"""
return len(self.atoms) > 1
def is_atom(self):
"""Determine if this is an atomic system (just 1 atom)
:return: True if exactly 1 atom, else False
:rtype : bool
"""
return len(self.atoms) == 1
def send_nwchem_cmd(self, s):
"""Send a command to be parsed as NWChem job input language.
:param s: command to sent
:type s : str
"""
nwchem.input_parse(s)
self.debug("cmd: [%s]" % s)
def set_charge(self, charge=0):
"""Set NWChem system charge
:param charge: total system charge
:type charge : int
"""
self.charge = charge
self.send_nwchem_cmd("charge %s" % charge)
def element_number(self, element):
"""Get the atomic number associated with a full element name,
like "lithium". Return 0 if lookup fails.
:param element: element name
:type element : str
:return: atomic number
:rtype : int
"""
elementNames = [ 'zero', # placeholder
'HYDROGEN','HELIUM','LITHIUM','BERYLLIUM','BORON','CARBON',
'NITROGEN','OXYGEN','FLUORINE','NEON','SODIUM','MAGNESIUM',
'ALUMINIUM','SILICON','PHOSPHORUS','SULFUR','CHLORINE','ARGON',
'POTASSIUM','CALCIUM','SCANDIUM','TITANIUM','VANADIUM','CHROMIUM',
'MANGANESE','IRON','COBALT','NICKEL','COPPER','ZINC','GALLIUM',
'GERMANIUM','ARSENIC','SELENIUM','BROMINE','KRYPTON'
]
        try:
            number = elementNames.index(element.upper())
        except ValueError:
            number = 0
        return number
def symbol_number(self, symbol):
"""Get the atomic number associated with an element symbol, like "Li".
Return 0 if lookup fails.
:param symbol: element symbol
:type symbol : str
:return: atomic number
:rtype : int
"""
atomicSymbols = [ 'zero', # placeholder
'H', 'HE',
'LI','BE','B' ,'C' ,'N' ,'O' ,'F' ,'NE',
'NA','MG','AL','SI','P' ,'S' ,'CL','AR',
'K' ,'CA',
'SC','TI','V' ,'CR','MN','FE','CO','NI','CU','ZN',
'GA','GE','AS','SE','BR','KR'
]
        try:
            number = atomicSymbols.index(symbol.upper())
        except ValueError:
            number = 0
        return number
def atomic_number(self, s):
"""Get the atomic number of an element symbol or name. Try to treat
the input as a symbol first, then as an element if that fails.
:param s: element symbol or name
:type s : str
:return: atomic number
:rtype : int
"""
return self.symbol_number(s) or self.element_number(s)
def basis_prepare(self, basis, input="", output="",
coordinates="spherical", context="scf"):
"""Set up commands to store vectors to a file and/or project or
load stored vectors for use as initial guess. Also handles
switching between basis sets.
:param basis: name of current basis set
:type basis :str
:param input: optional name of basis set for vector input
:type input : str
:param output: optional name of basis set for vector output
:type output : str
:param coordinates: "cartesian" or "spherical", for current basis
:type coordinates : str
:param context: "scf" or "dft" vectors setup context
:type context : str
"""
def simplename(basis_name):
name = basis_name[:]
t = {"-" : "_", "*" : "star", "+" : "plus",
"(" : "", ")" : "", "," : "_"}
for key, value in t.items():
name = name.replace(key, value)
return name
sn = simplename(basis)
sn_input = simplename(input)
sn_output = simplename(output)
basis_cmd = "basis {0} {1} ; * library {2} ; end".format(sn, coordinates, basis)
self.send_nwchem_cmd(basis_cmd)
self.send_nwchem_cmd('set "ao basis" {0}'.format(sn))
if input and output:
t = "{context}; vectors input project {small} {small}.movecs output {large}.movecs; end"
vectors = t.format(context=context, small=sn_input, large=sn_output)
elif input:
t = "{context}; vectors input {small}.movecs; end"
vectors = t.format(context=context, small=sn_input)
elif output:
t = "{context}; vectors input atomic output {large}.movecs; end"
vectors = t.format(context=context, large=sn_output)
#no vector projection or storage; just wanted to set a different basis
else:
vectors = ""
if vectors:
self.send_nwchem_cmd(vectors)
def initialize_atoms_list(self):
"""Use NWChem's RTDB geometry to initialize self.atoms,
        e.g. CH3OH gives ['C','H','H','H','O','H']
"""
# rtdb_get() returns a string if only one atom
# or a list of atoms (i.e., tags).
# Absent type handling,
# len('CU') is the same as len(['C','U'])
tags = nwchem.rtdb_get("geometry:geometry:tags")
if type(tags) == str:
self.atoms.append(tags)
self.debug('AtomsList: %s\n' % tags)
else:
self.atoms.extend(tags)
if self.debug_flag:
atmstr=''
for atm in tags:
atmstr += ' '+atm
self.debug('AtomsList: %s\n' % atmstr)
self.debug('NumAtoms={0}\n'.format(len(self.atoms)))
def build_SCF_cmd(self):
"""Prepare SCF block with multiplicity-appropriate choice of
HF form.
:return: SCF control block
:rtype : str
"""
memory_cache_words = self.integral_memory_cache / 8
disk_cache_words = self.integral_disk_cache / 8
tpl = "scf ; semidirect memsize {0} filesize {1}; {2} ; {3} ; end"
block = tpl.format(memory_cache_words, disk_cache_words,
self.multiplicity, self.hftype)
return block
def reset_symmetry(self):
"""Reload geometry and force symmetry down if TCE must be used.
"""
if self.force_c1_symmetry:
return
xyzs = glob.glob(self.geohash + "*.xyz")
xyzs.sort()
geofile = xyzs[-1]
#TODO: use largest Abelian subgroup symmetries instead of c1
if self.multiplicity != "singlet" or getattr(self, "highest_correlated", "") == "qcisd(t)":
symmetry_block = "symmetry c1;"
geoblock = "geometry units angstroms print xyz {0}; {1} load {2}; end"
autoz = {True : "noautoz", False : ""}[self.noautoz]
self.send_nwchem_cmd(geoblock.format(autoz, symmetry_block,
geofile))
def report_dHf(self):
"""Report change in heat of formation going from 0 K to 298 K.
"""
heatsOfFormation = [
#("\n"),
(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"),
(" HEAT OF FORMATION (0K): % 10.2f kCal/mol" % self.dhf0),
(" HEAT OF FORMATION (298K): % 10.2f kCal/mol" % self.dhf298),
(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
]
for line in heatsOfFormation:
self.report(line)
def spin_orbit_energy(self):
"""Get spin orbit energy correction according to nature of system and charge.
:return: spin orbit energy correction
:rtype : float
"""
if self.is_molecule(): # no spin orbit corrections for molecules
correction = 0.0
else: # It's an atom
atom = self.atoms[0]
correction = self.E_spin_orbit(self.atomic_number(atom),
self.charge)
return correction
def E_spin_orbit(self, atomic_number, charge):
"""EspinOrbit tabulates the spin orbit energies
of atomic species in the first three rows as listed in
Gaussian-4 theory
Larry A. Curtiss,Paul C. Redfern,Krishnan Raghavachari
JOURNAL OF CHEMICAL PHYSICS 126, 084108 (2007)
DOI: 10.1063/1.2436888
This table contains lists of spin orbit energies for
[neutral,positive,negative] species.
When Curtiss lists no values, 0.0 is returned.
Values may not agree with current NIST listings.
Although table values are in milli-Hartrees,
function E_spin_orbit returns values in Hartrees.
:param atomic_number: atomic number of atomic species
:type atomic_number : int
:param charge: charge on atomic species
:type charge : int
:return: energy in Hartrees
:rtype : float
"""
# [neutral, Z+, Z- ]
ESpinOrbit = [
[ 0.0, 0.0, 0.0 ], # 00 zero index place holder
[ 0.0, 0.0, 0.0 ], # 01 H Hydrogen
[ 0.0, 0.0, 0.0 ], # 02 He Helium
[ 0.0, 0.0, 0.0 ], # 03 Li Lithium
[ 0.0, 0.0, 0.0 ], # 04 Be Beryllium
[-0.05, 0.0, -0.03], # 05 B Boron
[-0.14, -0.2, 0.0 ], # 06 C Carbon
[ 0.0, -0.43, 0.0 ], # 07 N Nitrogen
[-0.36, 0.0, -0.26], # 08 O Oxygen
[-0.61, -0.67, 0.0 ], # 09 F Fluorine
[ 0.0, -1.19, 0.0 ], # 10 Ne Neon
[ 0.0, 0.0, 0.0 ], # 11 Na Sodium
[ 0.0, 0.0, 0.0 ], # 12 Mg Magnesium
[-0.34, 0.0, -0.28], # 13 Al Aluminum
[-0.68, -0.93, 0.0 ], # 14 Si Silicon
[ 0.0, -1.43, -0.45], # 15 P Phosphorus
[-0.89, 0.0, -0.88], # 16 S Sulfur
[-1.34, -1.68, 0.0 ], # 17 Cl Chlorine
[ 0.0, -2.18, 0.0 ], # 18 Ar Argon
[ 0.0, 0.0, 0.0 ], # 19 K Potassium
[ 0.0, 0.0, 0.0 ], # 20 Ca Calcium
[ 0.0, 0.0, 0.0 ], # 21 Sc Scandium
[ 0.0, 0.0, 0.0 ], # 22 Ti Titanium
[ 0.0, 0.0, 0.0 ], # 23 V Vanadium
[ 0.0, 0.0, 0.0 ], # 24 Cr Chromium
[ 0.0, 0.0, 0.0 ], # 25 Mn Manganese
[ 0.0, 0.0, 0.0 ], # 26 Fe Iron
[ 0.0, 0.0, 0.0 ], # 27 Co Cobalt
[ 0.0, 0.0, 0.0 ], # 28 Ni Nickel
[ 0.0, 0.0, 0.0 ], # 29 Cu Copper
[ 0.0, 0.0, 0.0 ], # 30 Zn Zinc
[-2.51, 0.0, 0.0 ], # 31 Ga Gallium
[-4.41, -5.37, 0.0 ], # 32 Ge Germanium
[ 0.0, -8.04, 0.0 ], # 33 As Arsenic
[-4.3, 0.0, 0.0 ], # 34 Se Selenium
[-5.6, -6.71, 0.0 ], # 35 Br Bromine
[ 0.0, -8.16, 0.0 ] # 36 Kr Krypton
]
        if atomic_number not in range(len(ESpinOrbit)):
return 0.0
if charge > 0:
ion = 1
elif charge < 0:
ion = 2
else:
ion = 0
milliHa_to_Ha = 0.001
espin = ESpinOrbit[atomic_number][ion] * milliHa_to_Ha
return espin
def atomic_DHF (self, elementNum):
"""Get atomic heats of formation at 0 K and 298 K, in
kcal/mol.
:param elementNum: atomic number
:type elementNum : int
:return: atomic heats of formation
:rtype : tuple
"""
#atom [dHf(0), dHf(298)] in kcal/mol
atomDHF = [
[0.0,0.0], # 00 zero placeholder
[51.63, 52.103 ], # 01 Hydrogen
[0.00, 0.00 ], # 02 Helium
[37.70, 38.07 ], # 03 Lithium
[76.40, 77.40 ], # 04 Beryllium
[135.10, 136.30 ], # 05 Boron
[169.98, 171.29 ], # 06 Carbon
[112.53, 112.97 ], # 07 Nitrogen
[58.99, 59.56 ], # 08 Oxygen
[18.47, 18.97 ], # 09 Fluorine
[0.00, 0.00 ], # 10 Neon
[25.76, 25.69 ], # 11 Sodium
[34.87, 35.16 ], # 12 Magnesium
[80.20, 80.80 ], # 13 Aluminum
[107.20, 108.20 ], # 14 Silicon
[75.45, 75.65 ], # 15 Phosphorus
[65.71, 66.25 ], # 16 Sulfur
[28.59, 28.99 ], # 17 Chlorine
[0.00, 0.00 ], # 18 Argon
[21.27, 21.49 ], # 19 Potassium
[42.50, 42.29 ], # 20 Calcium
[0.0,0.0],[0.0,0.0], # transition elements 21-30 Sc-Zn
[0.0,0.0],[0.0,0.0],
[0.0,0.0],[0.0,0.0],
[0.0,0.0],[0.0,0.0],
[0.0,0.0],[0.0,0.0],
[65.00, 65.00 ], # 31 Gallium
[88.91, 88.91 ], # 32 Germanium
[73.90, 72.42 ], # 33 Arsenic
[55.76, 54.27 ], # 34 Selenium
[26.74, 28.18 ], # 35 Bromine
[0.0, 0.0 ], # 36 Krypton
]
self.debug('atomic_DHF: elementNum=%d' % elementNum)
try:
result = atomDHF[elementNum]
self.debug('atomic_DHF: E,H=%.2f,%.2f' % (result[0], result[1]))
except IndexError:
self.debug('atomic_DHF: error: element %d not in table?' % elementNum)
result = (0.0, 0.0)
return result
def atom_core_orbitals(self, atomicNumber, convention="gamess"):
"""This replicates the core electron pair lookup table in
src/geom/geom_core.F
:param atomicNumber: atomic number of an atom
:type atomicNumber : int
:param convention: "gamess" or "nwchem" table
:type convention : str
:return: number of core orbitals for atom
:rtype : int
"""
#NWChem version 6.5
nwchemCoreOrbitals = [0, # zero index place holder
0, 0,
1, 1, 1, 1, 1, 1, 1, 1,
5, 5, 5, 5, 5, 5, 5, 5,
9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,
27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,
27,27,27,27,27,27,27,27,27,27,27,27,27,27,43,43,43,43,
43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,
43,43,43,43,]
# GAMESS version 12
gamessCoreOrbitals = [0, # zero index place holder
0, 0,
1, 1, 1, 1, 1, 1, 1, 1,
5, 5, 5, 5, 5, 5, 5, 5,
9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,14,14,14,14,14,14,
23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,
27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,
39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,
34,34,34,34,34,34,34,34,34,34,39,39,39,39,39,39,
43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,50]
cmap = {"gamess" : gamessCoreOrbitals, "nwchem" : nwchemCoreOrbitals}
try:
nCoreOrbitals = cmap[convention.lower()]
except KeyError:
raise ValueError("Uknown core orbital convention " + repr(convention))
if atomicNumber <= len(nCoreOrbitals):
n = nCoreOrbitals[atomicNumber]
else:
n = 0
return n
def sum_core_orbitals(self, convention="gamess"):
"""Sum the total number of frozen core orbitals in a system.
nFrozen isn't consistently logged to RTDB by
Tensor Contraction Engine methods, so do the work ourselves.
:param convention: orbital freeze convention, "gamess" or "nwchem"
:type convention : str
:return: sum of frozen core orbitals
:rtype : int
"""
total = sum([self.atom_core_orbitals(self.atomic_number(a),
convention=convention)
for a in self.atoms])
return total
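# Illustrative, standalone sketch (not part of the original classes) of how the
# frozen-core count produced by Gn_common.sum_core_orbitals comes together:
# every atom contributes its core-orbital count from the lookup table and the
# per-atom counts are summed. The mini-table below repeats only the H-Ar values
# of the GAMESS convention used above.
def _example_frozen_core_count(symbols):
    core_orbitals = {"H": 0, "HE": 0,
                     "LI": 1, "BE": 1, "B": 1, "C": 1, "N": 1, "O": 1, "F": 1, "NE": 1,
                     "NA": 5, "MG": 5, "AL": 5, "SI": 5, "P": 5, "S": 5, "CL": 5, "AR": 5}
    return sum(core_orbitals[s.upper()] for s in symbols)
# e.g. CO2 freezes its three 1s cores: _example_frozen_core_count(["C", "O", "O"]) == 3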
class G4_mp2(Gn_common):
"""
G4(MP2) composite method for Python under NWChem 6.5
Implementation by Matt B. Ernst and Daniel R. Haney
7/18/2015
Gaussian-4 theory using reduced order perturbation theory
Larry A. Curtiss,Paul C. Redfern,Krishnan Raghavachari
THE JOURNAL OF CHEMICAL PHYSICS 127, 124105 2007
1 optimize @ B3LYP/6-31G(2df,p)
2 Ezpe = zpe at B3LYP/6-31G(2df,p)
3 E(MP2) = MP2(fc)/6-31G(d)
4 E(ccsd(t)) = CCSD(fc,T)/6-31G(d)
5 E(HF/G3LXP) = HF/G3LargeXP
6 E(G3LargeXP) = MP2(fc)/G3LargeXP
7 E(HF1) = HF/g4mp2-aug-cc-pVTZ
8 E(HF2) = HF/g4mp2-aug-cc-pVQZ
E(HFlimit) = extrapolated HF limit, =CBS
delta(HF) = E(HFlimit) - E(HF/G3LargeXP)
E(SO) = spin orbit energy
Ehlc = High Level Correction
E(G4(MP2)) = E(CCSD(T)) +
E(G3LargeXP) - E(MP2) +
Delta(HFlimit) +
E(SO) +
E(HLC) +
Ezpe * scale_factor
"""
def __init__(self, *args, **kw):
super(G4_mp2, self).__init__(*args, **kw)
self.correlated_basis = [("6-31G*", "cartesian"),
("g3mp2largexp", "spherical")]
self.cbs_basis = [("g4mp2-aug-cc-pvtz", "spherical"),
("g4mp2-aug-cc-pvqz", "spherical")]
#Zero Point Energy scale factor for B3LYP/6-31G(2df,p)
self.ZPEScaleFactor = 0.9854 # Curtiss scale factor for Gaussian 09
#self.ZPEScaleFactor = 0.9798 # Truhlar scale Factor for NWChem 6.5
self.Ezpe = 0.0
self.Emp2 = 0.0
self.Eccsdt = 0.0
self.Ehfg3lxp = 0.0
self.Emp2g3lxp = 0.0
self.Ehf1 = 0.0
self.Ehf2 = 0.0
self.Ecbs = 0.0
self.Ehlc = 0.0
self.Ethermal = 0.0
self.Hthermal = 0.0
self.E0 = 0.0
self.E298 = 0.0
self.H298 = 0.0
# valence electron variables
self.nAlpha = 0
self.nBeta = 0
self.nFrozen = 0
def report_summary(self):
"""Report results in GAMESS G3(MP2) output format for easy comparison.
"""
Szpe = self.Ezpe * self.ZPEScaleFactor
dMP2 = self.Emp2g3lxp - self.Emp2
dHF = self.Ecbs - self.Ehfg3lxp
summary = [
("\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~NWChem6.5"),
(" SUMMARY OF G4(MP2) CALCULATIONS"),
(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"),
(" B3LYP/6-31G(2df,p)= % 12.6f HF/maug-cc-p(T+d)Z= % 12.6f" % (self.Eb3lyp, self.Ehf1)),
(" HF/CBS = % 12.6f HF/maug-cc-p(Q+d)Z= % 12.6f" % (self.Ecbs, self.Ehf2)),
(" MP2/6-31G(d) = % 12.6f CCSD(T)/6-31G(d) = % 12.6f" % (self.Emp2, self.Eccsdt)),
(" HF/G3MP2LARGEXP = % 12.6f MP2/G3MP2LARGEXP = % 12.6f" % (self.Ehfg3lxp, self.Emp2g3lxp)),
(" DE(MP2) = % 12.6f DE(HF) = % 12.6f" % (dMP2, dHF)),
(" ZPE(B3LYP) = % 12.6f ZPE SCALE FACTOR = % 12.6f" % (Szpe, self.ZPEScaleFactor)),
(" HLC = % 12.6f FREE ENERGY = % 12.6f" % (self.Ehlc, 0.0)),
(" THERMAL ENERGY = % 12.6f THERMAL ENTHALPY = % 12.6f" % (self.Ethermal, self.Hthermal)),
(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"),
(" E(G4(MP2)) @ 0K = % 12.6f E(G4(MP2)) @298K = % 12.6f" % (self.E0, self.E298)),
(" H(G4(MP2)) = % 12.6f G(G4(MP2)) = % 12.6f" % (self.H298, 0.0)),
(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"),
]
for line in summary:
self.report(line)
def report_all(self):
self.report_summary ()
self.report_dHf ()
def E0_atom(self, elementNum):
"""List of precalculated atomic G4(MP2) energies at 0K
Returns E(0K), E(298.15K) tuple.
:param elementNum: atomic number
:type elementNum : int
:return: energy at 0 K and 298 K
:rtype : tuple
"""
e0_g4mp2 = [ 0.0 , # 00 zero index place holder
-0.502094 , # 01 H Hydrogen
-2.892437 , # 02 He Helium
-7.434837 , # 03 Li Lithium
-14.618701 , # 04 Be Beryllium
-24.610037 , # 05 B Boron
-37.794204 , # 06 C Carbon
-54.532825 , # 07 N Nitrogen
-75.002483 , # 08 O Oxygen
-99.659686 , # 09 F Fluorine
-128.854769 , # 10 Ne Neon
-161.860999 , # 11 Na Sodium
-199.646948 , # 12 Mg Magnesium
-241.944728 , # 13 Al Aluminum
-288.947800 , # 14 Si Silicon
-340.837016 , # 15 P Phosphorus
-397.676523 , # 16 S Sulfur
-459.703691 , # 17 Cl Chlorine
-527.083295 , # 18 Ar Argon
-599.166975 , # 19 K Potassium
-676.784184 , # 20 Ca Calcium
0.0,0.0,0.0, # 21-23 transition metals
0.0,0.0,0.0, # 24-26 transition metals
0.0,0.0,0.0, # 27-29 transition metals
0.0, # 30 transition metals
-1923.601298 , # 31 Ga Gallium
-2075.700329 , # 32 Ge Germanium
-2234.578295 , # 33 As Arsenic
-2400.243694 , # 34 Se Selenium
-2572.850476 , # 35 Br Bromine
-2752.487773 , # 36 Kr Krypton
]
# Ideal gas kinetic energy contribution
eth = (5.0/2) * kT_298_perMol
try:
e0 = e0_g4mp2[elementNum]
e298 = e0 + eth
result = (e0, e298)
except IndexError:
result = (0.0, 0.0)
return result
def calc_deltaHf(self):
"""Calculate heat of formation at 0K and 298K.
"""
sum_atoms_E0 = 0.0
sum_atoms_E298 = 0.0
sum_atoms_dhf0 = 0.0
sum_atoms_dhf298 = 0.0
if self.is_molecule():
for atom in self.atoms:
e0, e298 = self.E0_atom(self.atomic_number(atom))
sum_atoms_E0 += e0
sum_atoms_E298 += e298
d0, d298 = self.atomic_DHF(self.atomic_number(atom))
sum_atoms_dhf0 += d0
sum_atoms_dhf298 += d298
self.debug('sumDHF0,sumDHF298 = %.2f,%.2f' % (sum_atoms_dhf0, sum_atoms_dhf298))
else:
return False
self.dhf0 = (self.E0 - sum_atoms_E0) * kCalPerHartree + sum_atoms_dhf0
self.dhf298 = (self.H298 - sum_atoms_E298) * kCalPerHartree + sum_atoms_dhf298
self.debug('dhf0,dhf298 = %.2f,%.2f' % (self.dhf0, self.dhf298))
def init_g4mp2(self):
"""Say hello.
"""
title = nwchem.rtdb_get("title")
self.say(" %s -- NWChem G4(MP2) Composite Method\n" % (title))
def prepare_scf_vectors(self):
"""Set up converged scf vectors before first correlated steps so they
can be used to initialize later calculations.
"""
self.basis_prepare(self.correlated_basis[0][0],
output=self.correlated_basis[0][0],
coordinates=self.correlated_basis[0][1])
self.send_nwchem_cmd(self.build_SCF_cmd())
nwchem.task_energy("scf")
def optimize(self):
"""# 1 optimize B3LYP/6-31G(2df,p)
"""
self.say('optimize.')
self.send_nwchem_cmd("basis noprint ; * library 6-31G(2df,p) ; end")
scfcmd = self.build_SCF_cmd()
self.send_nwchem_cmd(scfcmd)
# canonical B3LYP spec
# GAMESS-US b3lyp uses VWN_5
# Gaussian uses VWN_3
# NWChem uses something entirely different
b3lyp_GAMESS = 'xc HFexch 0.2 slater 0.8 becke88 nonlocal 0.72 vwn_5 0.19 lyp 0.81'
b3lyp_Gaussian = 'xc HFexch 0.2 slater 0.8 becke88 nonlocal 0.72 vwn_3 0.19 lyp 0.81'
b3lyp_NWChem = 'xc b3lyp'
blips = b3lyp_Gaussian
memory_cache_words = self.integral_memory_cache / 8
disk_cache_words = self.integral_disk_cache / 8
mem = "semidirect memsize {0} filesize {1}".format(memory_cache_words,
disk_cache_words)
if self.multiplicity != "singlet":
self.send_nwchem_cmd('dft ; odft ; mult %d ; %s ; %s ; end' % (self.multiplicity_numeric, blips, mem))
else:
self.send_nwchem_cmd('dft ; %s ; %s ; end' % (blips, mem))
# fetch and copy atom names list (tags) which enumerates atoms.
# only available _after_ SCF statement
self.initialize_atoms_list()
self.send_nwchem_cmd("driver; maxiter 99; xyz {0}; end".format(self.geohash))
self.send_nwchem_cmd("scf; maxiter 99; end")
# optimize the geometry, ignore energy and gradient results
if self.is_atom():
en = nwchem.task_energy("dft")
else:
self.debug("task_optimize(dft)")
en, grad = nwchem.task_optimize("dft")
self.Eb3lyp = en
#self.report("debug: HF/6-31G(2df,p) SCF:energy = %f Ha" % (en))
def E_zpe(self):
"""Run hessian on equilibrium geometry, get zero point energy.
Note: linear tri-atomics and larger give high ZPE values in
NWChem @ HF/6-31G*
"""
self.say('ZPE.')
temperature = T298
if self.is_atom():
self.Ezpe = 0.0
self.Ethermal = 1.5 * Rgas * temperature # 3/2 * RT
self.Hthermal = self.Ethermal + (Rgas * temperature)
return False
# run hessian on equilibrium geometry
# ignore ZPE, calculate it from vibrations list
zpe, vibs, intens = nwchem.task_freq("dft")
self.vib_thermo(vibs)
def E_mp2(self):
"""Calculate the MP2 energy at the B3LYP-optimized geometry.
# 3 E_(MP2) = MP2(fc)/6-31G(d)//B3LYP/6-31G(2df,p)
:return: failure code (True for failure, False for success)
:rtype : bool
"""
self.say('MP2(fc).')
self.basis_prepare(self.correlated_basis[0][0],
input=self.correlated_basis[0][0],
coordinates=self.correlated_basis[0][1])
scfcmd = self.build_SCF_cmd()
self.send_nwchem_cmd(scfcmd)
self.send_nwchem_cmd("unset mp2:*")
self.send_nwchem_cmd("mp2 ; freeze atomic ; end")
try:
en = nwchem.task_energy("mp2")
self.debug('MP2 frozen: en=%.6f\n' % en)
except:
self.report("FAILED: MP2(fc)/6-31G(2df,p) energy")
return True
else:
self.Emp2 = en
return False
def E_ccsdt(self):
"""# 4 E_(ccsd(t)) = CCSD(fc,T)/6-31G(d)
Get CCSDT(fc)/6-31G(d) energy
"""
self.say("CCSD(T).")
if self.multiplicity != "singlet" or self.is_atom():
self.send_nwchem_cmd("unset tce:*")
self.send_nwchem_cmd("tce ; ccsd(t) ; freeze atomic ; end")
en = nwchem.task_energy("tce")
else:
self.send_nwchem_cmd("ccsd ; freeze atomic ; end")
en = nwchem.task_energy("ccsd(t)")
self.debug('CCSD(T): en=%.6f\n' % en)
self.Eccsdt = en
def E_hf_g3lxp(self):
"""# 5 E_(HF/G3LXP) = HF/G3LargeXP
"""
self.basis_prepare(self.correlated_basis[1][0],
input=self.correlated_basis[0][0],
output=self.correlated_basis[1][0],
coordinates=self.correlated_basis[1][1])
en = nwchem.task_energy("scf")
self.Ehfg3lxp = en
self.debug("HF/G3LargeXP SCF:energy = %f Ha" % (en))
def E_mp2_g3lxp(self):
"""# 5 E_(HF/G3LXP) = MP2fc/G3LargeXP
"""
self.basis_prepare(self.correlated_basis[1][0],
coordinates=self.correlated_basis[1][1])
self.send_nwchem_cmd("unset mp2:*")
self.send_nwchem_cmd("mp2 ; freeze atomic ; end")
en = nwchem.task_energy("mp2")
self.debug('MP(2,fc)/g3mp2large: en=%.6f\n' % en)
self.Emp2g3lxp = en
def E_hf1(self):
"""Use g4mp2-aug-cc-pvtz basis set to get first HF energy.
"""
self.say("HF1.")
self.basis_prepare(self.cbs_basis[0][0],
input=self.correlated_basis[0][0],
output=self.cbs_basis[0][0],
coordinates=self.cbs_basis[0][1])
en = nwchem.task_energy("scf")
self.Ehf1 = en
self.debug("HF/%s: energy = %f Ha" % (self.cbs_basis[0][0], en))
def E_hf2(self):
"""Use g4mp2-aug-cc-pvqz to get second
HF energy.
:return: failure code (True for failure, False for success)
:rtype : bool
"""
self.say("HF2.")
self.basis_prepare(self.cbs_basis[1][0],
input=self.cbs_basis[0][0],
output=self.cbs_basis[1][0],
coordinates=self.cbs_basis[1][1])
en = nwchem.task_energy("scf")
self.Ehf2 = en
self.debug("HF/%s: energy = %f Ha" % (self.cbs_basis[0][0], en))
def E_cbs(self):
"""E_(HFlimit) = extrapolated HF limit
:return: failure code (True for failure, False for success)
:rtype : bool
"""
self.say('CBS.')
use_Petersen = True # use Petersen CBS extrapolation
#TODO: why abs() here?
if abs(self.Ehf1) > abs(self.Ehf2):
self.Ehf1, self.Ehf2 = self.Ehf2, self.Ehf1
if use_Petersen: # petersen CBS extrapolation
a = -1.63
cbs = (self.Ehf2 - self.Ehf1 * math.exp(a)) / (1 - math.exp(a))
else: # Truhlar CBS extrapolation
a = 3.4
k1 = math.pow(3,a) / (math.pow(3,a) - math.pow(2,a))
k2 = math.pow(2,a) / (math.pow(3,a) - math.pow(2,a))
cbs = k1 * self.Ehf2 - k2 * self.Ehf1
self.Ecbs = cbs
self.debug('CBS energy = %.6f' % cbs)
return False
def E_hlc(self):
"""E_hlc = High Level Correction
"""
        # Correction coefficients, in Hartrees (the G4(MP2) paper quotes them in milli-Hartrees)
# closed shell
A = 0.009472
# open shell
Ap = 0.009769
B = 0.003179
# atoms and atom ions
C = 0.009741
D = 0.002115
# single electron pair species
# e.g.: Li2, Na2, LiNa, BeH2, BeH+
E = 0.002379
self.say('HLC.')
nClosed = nwchem.rtdb_get("scf:nclosed")
nOpen = nwchem.rtdb_get("scf:nopen")
nElec = nwchem.rtdb_get("scf:nelec")
self.nFrozen = self.sum_core_orbitals()
# According to Curtiss,
# nBeta = num valence pairs
# nAlpha = num unpaired or remaining valence electrons
# subject to the constraint that nAlpha >= nBeta
self.nBeta = nClosed - self.nFrozen
self.nAlpha = nElec - self.nFrozen - nClosed
self.debug('\nclosed=%d open=%d frozen=%d nAlpha=%d nBeta=%d\n' % \
(nClosed, nOpen, self.nFrozen, self.nAlpha, self.nBeta))
if self.nAlpha < self.nBeta:
self.nAlpha, self.nBeta = self.nBeta, self.nAlpha
#self.say('** a<->b swap: nAlpha=%d nBeta=%d\n' % (self.nAlpha,self.nBeta))
# test for single (valence) electron pair species
if (nOpen == 0) and (nClosed - self.nFrozen) == 1 and \
(self.nAlpha == 1) and (self.nBeta == 1):
hlc = E
elif self.is_atom():
hlc = -C * (self.nBeta) - D * (self.nAlpha - self.nBeta)
elif self.multiplicity != "singlet":
hlc = -Ap * (self.nBeta) - B * (self.nAlpha - self.nBeta)
else: # USUAL CASE: singlet, closed shell
hlc = -A * (self.nBeta)
self.Ehlc = hlc
def E_g4mp2(self):
"""
E_(G3(MP2)) = E_(CCSD(T)) +
E_(G3LargeXP) - E_(MP2) +
Delta(HFlimit) +
E_(SO) +
E_(HLC) +
E_zpe * scale_factor
:return: failure code (True for failure, False for success)
:rtype : bool
"""
scaled_ZPE = self.Ezpe * self.ZPEScaleFactor
self.E0 = self.Eccsdt + \
(self.Emp2g3lxp - self.Emp2) + \
(self.Ecbs - self.Ehfg3lxp) + \
self.spin_orbit_energy() + \
self.Ehlc + \
scaled_ZPE
self.E298 = self.E0 + (self.Ethermal - self.Ezpe)
self.H298 = self.E0 + (self.Hthermal - self.Ezpe)
return False
def run(self):
"""Calculate G4MP2 energy for a system that has already been prepared.
"""
self.Edone = False
g4mp2_function = [
self.quick_optimize,
self.optimize,
self.E_zpe,
self.prepare_scf_vectors,
self.E_hf_g3lxp,
self.E_hf1,
self.E_hf2,
self.reset_symmetry,
self.E_mp2,
self.E_ccsdt,
self.E_mp2_g3lxp,
self.E_cbs,
self.E_hlc,
self.spin_orbit_energy,
self.E_g4mp2,
self.calc_deltaHf
]
self.init_g4mp2()
t0=time.time()
for i in range(len(g4mp2_function)):
g4mp2_function[i]()
et=time.time()-t0
self.report("\nWall: %.2f seconds" % et)
self.report_all()
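# A standalone sketch of the two-point HF/CBS extrapolation used in
# G4_mp2.E_cbs, with hypothetical triple- and quadruple-zeta HF energies chosen
# only to show the arithmetic (not results from either method):
#     E_CBS = (E_QZ - E_TZ * exp(a)) / (1 - exp(a)),  a = -1.63
def _example_cbs_extrapolation(e_tz=-76.060, e_qz=-76.066, a=-1.63):
    damp = math.exp(a)
    return (e_qz - e_tz * damp) / (1.0 - damp)
# The result lies slightly below e_qz, as expected for a monotonically
# converging HF series.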
class G3_mp2(Gn_common):
"""
G3(MP2) THERMOCHEMICAL METHOD FOR NWCHEM
Daniel R. Haney 2015
g3mp2.py implements the G3(MP2) composite thermochemical method
in Python 2.7 for inclusion in an NWChem6.5 input file.
It requires:
NWChem6.5 Zero Point Energy fix
g3mp2large basis set
revised 6-31gs basis set, if 3rd row element energies are desired
The original G3(MP2) method is described in:
"Gaussian-3 theory using reduced Moller-Plesset order"
Curtiss,Redfern,Raghavachari,Rassolov, and Pople
J.Chem.Phys. 110 4703 (1999)
As in the GAMESS ab initio package, it has been modified to
permit CCSD(T) instead of QCISD(T) in the base energy term.
This results in slightly higher total energies than reported by
Curtiss in the G2/97 test set, faster run times, and lower mean
average deviations in isodesmic reaction energy calculations.
"""
def __init__(self, *args, **kw):
if kw.pop("use_qcisdt", False):
self.highest_correlated = "qcisd(t)"
else:
self.highest_correlated = "ccsd(t)"
super(G3_mp2, self).__init__(*args, **kw)
self.ZPEScaleFactor = 0.8929
self.Ezpe = 0.0
self.Emp2full = 0.0
self.Emp2frozen = 0.0
self.Ecc = 0.0
self.Eg3mp2large = 0.0
self.Ehlc = 0.0
self.Ethermal = 0.0
self.Hthermal = 0.0
self.E0 = 0.0
self.E298 = 0.0
self.H298 = 0.0
# valence electron variables
self.nAlpha = 0
self.nBeta = 0
self.nFrozen = 0
def HF_optimize(self):
'''
HF/6-31G(d) optimization
'''
self.say('optimize.')
self.send_nwchem_cmd("scf; maxiter 99; end")
self.send_nwchem_cmd("driver; maxiter 99; xyz {0}; end".format(self.geohash))
self.basis_prepare("6-31G*", output="6-31G*", coordinates="cartesian")
scfcmd = self.build_SCF_cmd()
self.send_nwchem_cmd(scfcmd)
# fetch and copy atom names list (tags) which enumerates atoms.
# only available _after_ SCF statement
self.initialize_atoms_list()
# optimize the geometry, ignore energy and gradient results
if self.is_atom():
en = nwchem.task_energy("scf")
else:
en, grad = nwchem.task_optimize("scf")
def HF_zpe(self):
''' run hessian on equilibrium geometry
get zero point energy.
note: linear tri-atomics and larger
give high ZPE values in NWChem @ HF/6-31G*
'''
temperature = T298
self.say("zpe.")
if self.is_atom():
self.Ezpe = 0.0
self.Ethermal = 1.5 * Rgas * temperature
self.Hthermal = self.Ethermal + (Rgas * temperature)
return
# run hessian on equilibrium geometry
# ignore ZPE, calculate it from vibrations list
zpe, vibs, intens = nwchem.task_freq("scf")
self.vib_thermo(vibs)
def MP2_optimize(self):
'''Optimize geometry at MP2(full)/6-31G(d)
'''
self.say('MP2 optimize.')
if self.is_atom():
en = nwchem.task_energy("mp2")
else:
en, grad = nwchem.task_optimize("mp2")
self.debug('optimize: MP(2,full)/6-31G*= %.6f\n' % en)
self.Emp2full = en
def MP2_frozen(self):
# MP2(fc)/6-31G* single point energy
self.say('MP2(frozen).')
scfcmd = self.build_SCF_cmd()
self.send_nwchem_cmd(scfcmd)
self.send_nwchem_cmd("unset mp2:*")
self.send_nwchem_cmd("mp2 ; freeze atomic ; end")
en = nwchem.task_energy("mp2")
self.debug('MP2 frozen: en=%.6f\n' % en)
self.Emp2frozen = en
def ccsdt_qcisdt_frozen(self):
#highest level correlated calculation
if self.highest_correlated == "qcisd(t)":
self.say("QCISD(T).")
tce = "tce ; qcisd(t) ; freeze atomic ; end"
nwchem.input_parse(tce)
en = nwchem.task_energy("tce")
self.Ecc = en
elif self.highest_correlated == "ccsd(t)":
self.say("CCSD(T).")
if self.multiplicity != "singlet" or self.is_atom():
self.send_nwchem_cmd("unset tce:*")
self.send_nwchem_cmd("tce ; ccsd(t) ; freeze atomic ; end")
en = nwchem.task_energy("tce")
else:
self.send_nwchem_cmd("ccsd ; freeze atomic ; end")
en = nwchem.task_energy("ccsd(t)")
self.debug(' CCSD(T): en=%.6f\n' % en)
self.Ecc = en
else:
raise ValueError("Unknown correlation treatment {}".format(repr(self.highest_correlated)))
def MP2_g3mp2large(self):
'''get MP2(fc)/G3MP2large single point energy
'''
self.say('GMP2large.')
self.basis_prepare("g3mp2large", input="6-31G*", coordinates="spherical")
self.send_nwchem_cmd("unset mp2:*")
self.send_nwchem_cmd("mp2; freeze atomic; end")
en = nwchem.task_energy("mp2")
self.debug(' g3mp2large: en=%.6f\n' % en)
self.Eg3mp2large = en
def HLC_generic(self, A, B, C, D):
'''calculate High Level Correction term
from alpha and beta VALENCE electron count.
'''
self.say('HLC.')
nClosed = nwchem.rtdb_get("scf:nclosed")
nOpen = nwchem.rtdb_get("scf:nopen")
nElec = nwchem.rtdb_get("scf:nelec")
nFrozen = self.sum_core_orbitals()
# According to Curtiss,
# nBeta = num valence pairs
# nAlpha = num unpaired or remaining valence electrons
# subject to the constraint that nAlpha >= nBeta
nBeta = nClosed - nFrozen
nAlpha = nElec - (nFrozen * 2) - nBeta
self.debug('\nclosed=%d open=%d frozen=%d nAlpha=%d nBeta=%d\n' %
(nClosed,nOpen,nFrozen,nAlpha,nBeta))
if nAlpha < nBeta:
nAlpha, nBeta = nBeta, nAlpha
if self.is_molecule():
self.Ehlc = -(A * nBeta) - B * (nAlpha - nBeta)
else: # it's an atom, calc is different
self.Ehlc = -(C * nBeta) - D * (nAlpha - nBeta)
def HLC_qcisdt(self):
''' empirical correction coefficients
for Curtiss original G3(MP2) method
'''
A = 0.009279
B = 0.004471
C = 0.009345
D = 0.002021
return self.HLC_generic(A, B, C, D)
def HLC_ccsdt(self):
''' empirical correction coefficients
for later G3(MP2,CCSDT) method
'''
A = 0.009170
B = 0.004455
C = 0.009155
D = 0.001947
return self.HLC_generic(A, B, C, D)
def choose_correlated(self, key, map):
#return setting based on correlation scheme, alert on mismatch
try:
return map[key]
except KeyError:
raise ValueError("Unknown correlation treatment {}".format(repr(key)))
def HLC(self):
hlc = self.choose_correlated(self.highest_correlated,
{"qcisd(t)" : self.HLC_qcisdt,
"ccsd(t)" : self.HLC_ccsdt})
v = hlc()
return v
def init_g3mp2(self):
'''say hello
'''
ccstring = self.choose_correlated(self.highest_correlated,
{"qcisd(t)" : "QCISD(T)",
"ccsd(t)" : "CCSD(T)"})
title = nwchem.rtdb_get("title")
self.say(" %s -- NWChem G3(MP2,%s) Composite Method\n" % (title, ccstring))
def calc_total_energies(self):
'''G3MP2 step 6
calculate the E(G3MP2)@0K, @298K energies
'''
self.E0 = self.Ecc + \
(self.Eg3mp2large - self.Emp2frozen) + \
(self.Ezpe * self.ZPEScaleFactor) + \
self.Ehlc
if self.is_atom():
self.E0 += self.spin_orbit_energy()
self.E298 = self.E0 + (self.Ethermal - self.Ezpe)
self.H298 = self.E0 + (self.Hthermal - self.Ezpe) # kT_298_perMol
def E0_atom_qcisdt(self, elementNum):
e0_qcisdt = [
0.0, # 00 zero index place holder
-0.501839, # 01 H Hydrogen
-2.902543, # 02 He Helium
-7.434048, # 03 Li Lithium
-14.629262, # 04 Be Beryllium
-24.607093, # 05 B Boron
-37.789349, # 06 C Carbon
-54.525198, # 07 N Nitrogen
-74.989850, # 08 O Oxygen
-99.641120, # 09 F Fluorine
-128.828970, # 10 Ne Neon
-161.848004, # 11 Na Sodium
-199.650845, # 12 Mg Magnesium
-241.936973, # 13 Al Aluminum
-288.939460, # 14 Si Silicon
-340.826670, # 15 P Phosphorus
-397.663794, # 16 S Sulfur
-459.687272, # 17 Cl Chlorine
-527.060963, # 18 Ar Argon
-599.160512, # 19 K Potassium
-676.789424, # 20 Ca Calcium
# elements 21-30 Sc-Zn
0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
-1923.538354, # 31 Ga Gallium
-2075.639002, # 32 Ge Germanium
-2234.516874, # 33 As Arsenic
-2400.180197, # 34 Se Selenium
-2572.784022, # 35 Br Bromine
-2752.417979 # 36 Kr Krypton
]
# Ideal gas kinetic energy contribution
Ethermal = (5.0 / 2) * kT_298_perMol
if elementNum < len(e0_qcisdt) and e0_qcisdt[elementNum] < 0.0:
e0 = e0_qcisdt[elementNum]
e298 = e0 + Ethermal
return (e0, e298)
else:
return (0.0, 0.0)
def E0_atom_ccsdt(self, elementNum):
e0_ccsdt = [
0.0, # 00 zero index place holder
-0.501765, # 01 H Hydrogen
-2.902353, # 02 He Helium
-7.433974, # 03 Li Lithium
-14.629072, # 04 Be Beryllium
-24.606789, # 05 B Boron
-37.788989, # 06 C Carbon
-54.524779, # 07 N Nitrogen
-74.989201, # 08 O Oxygen
-99.640198, # 09 F Fluorine
-128.827752, # 10 Ne Neon
-161.847930, # 11 Na Sodium
-199.650655, # 12 Mg Magnesium
-241.936660, # 13 Al Aluminum
-288.939067, # 14 Si Silicon
-340.826225, # 15 P Phosphorus
-397.663215, # 16 S Sulfur
-459.686583, # 17 Cl Chlorine
-527.060194, # 18 Ar Argon
-599.160438, # 19 K Potassium
-676.789234, # 20 Ca Calcium
# elements 21-30 Sc-Zn
0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
-1923.536619, # 31 Ga
-2075.637891, # 32 Ge
-2234.516031, # 33 As
-2400.179359, # 34 Se
-2572.783154, # 35 Br
-2752.417075 # 36 Kr
]
# Ideal gas kinetic energy contribution
eth = (5.0 / 2) * kT_298_perMol
if elementNum < len(e0_ccsdt) and e0_ccsdt[elementNum] < 0.0:
e0 = e0_ccsdt[elementNum]
e298 = e0 + eth
return (e0, e298)
else:
return (0.0, 0.0)
#_______________________________________________________
def E0_atom(self, elementNum):
fn = self.choose_correlated(self.highest_correlated,
{"qcisd(t)" : self.E0_atom_qcisdt,
"ccsd(t)" : self.E0_atom_ccsdt})
return fn(elementNum)
def calc_deltaHf(self):
"""calculate heat of formation at 0K and 298K
"""
sum_atoms_E0 = 0.0
sum_atoms_H298 = 0.0
sum_atoms_dhf0 = 0.0
sum_atoms_dhf298 = 0.0
for atom in self.atoms:
e0, h298 = self.E0_atom(self.atomic_number(atom))
sum_atoms_E0 += e0
sum_atoms_H298 += h298
d0, d298 = self.atomic_DHF(self.atomic_number(atom))
sum_atoms_dhf0 += d0
sum_atoms_dhf298 += d298
self.debug('sumDHF0,sumDHF298 = %.2f,%.2f' %
(sum_atoms_dhf0, sum_atoms_dhf298))
self.dhf0 = (self.E0 - sum_atoms_E0) * kCalPerHartree + sum_atoms_dhf0
self.dhf298 = (self.H298 - sum_atoms_H298) * kCalPerHartree + sum_atoms_dhf298
self.debug('dhf0,dhf298 = %.2f,%.2f' % (self.dhf0, self.dhf298))
def report_summary(self):
'''Report results in GAMESS G3(MP2) output format
for easy comparison.
log() normally redirects to log file
say() appears in terminal session
'''
Szpe = self.Ezpe * self.ZPEScaleFactor
deltaMP2 = self.Eg3mp2large - self.Emp2frozen
ccstring = self.choose_correlated(self.highest_correlated,
{"qcisd(t)" : "QCISDT ",
"ccsd(t)" : "CCSD(T)"})
summary = [
("\n"),
(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~NWChem6.5"),
(" SUMMARY OF G3(MP2,%s) CALCULATIONS " %
ccstring),
(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"),
(" MP2/6-31G(d) = % 12.6f %s/6-31G(d) = % 12.6f" %
(self.Emp2frozen, ccstring, self.Ecc)),
(" MP2/G3MP2large = % 12.6f delta(MP2) = % 12.6f" %
(self.Eg3mp2large, deltaMP2)),
(" ZPE(HF/6-31G(d))= % 12.6f ZPE Scale Factor = % 12.6f" %
(Szpe, self.ZPEScaleFactor)),
(" HLC = % 12.6f Free Energy = % 12.6f" %
(self.Ehlc, 0.0)),
(" Thermal Energy = % 12.6f Thermal Enthalpy = % 12.6f" %
(self.Ethermal, self.Hthermal)),
(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"),
(" E(G3(MP2)) @ 0K = % 12.6f E(G3(MP2)) @298K = % 12.6f" %
(self.E0, self.E298)),
(" H(G3(MP2)) = % 12.6f G(G3(MP2)) = % 12.6f" %
(self.H298, 0.0)),
(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
]
for line in summary:
self.report(line)
def reportAll(self):
self.report_summary()
self.report_dHf()
def run(self):
g3mp2_function = [
self.quick_optimize,
self.HF_optimize,
self.HF_zpe,
self.reset_symmetry,
self.MP2_optimize,
self.MP2_frozen,
self.ccsdt_qcisdt_frozen,
self.MP2_g3mp2large,
self.HLC,
self.calc_total_energies,
self.calc_deltaHf
]
self.init_g3mp2()
t0=time.time()
for i in range(len(g3mp2_function)):
g3mp2_function[i]()
et=time.time()-t0
self.report("\nWall: %.2f seconds" % et)
self.reportAll()
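# Illustrative arithmetic behind calc_deltaHf in both classes (the numbers a
# caller would pass are hypothetical, not outputs of either method): the
# molecular heat of formation at 0 K is the sum of tabulated atomic heats of
# formation minus the computed atomization energy converted to kcal/mol,
#     dHf0(M) = sum_A dHf0(A) - [sum_A E0(A) - E0(M)] * kCalPerHartree
def _example_delta_hf0(e0_molecule, atom_e0_list, atom_dhf0_list):
    atomization = sum(atom_e0_list) - e0_molecule  # Hartree; positive for a bound molecule
    return sum(atom_dhf0_list) - atomization * kCalPerHartree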
| mattbernst/composite-thermochemistry-nwchem | Gn.py | Python | mit | 56,608 | ["Avogadro", "GAMESS", "Gaussian", "NWChem"] | 56e1e834dd8669b19b2caed4ed252d064d9a9ae998ec257a068abdd3c5dfe549 |
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
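# A minimal illustration of the rule above (in-memory dense arrays only, for
# clarity): float32 survives only when both X and Y are float32; any other
# combination falls back to the default float dtype.
def _example_return_float_dtype():
    a32 = np.ones((2, 2), dtype=np.float32)
    a64 = np.ones((2, 2), dtype=np.float64)
    assert _return_float_dtype(a32, a32)[2] == np.float32
    assert _return_float_dtype(a32, a64)[2] == np.float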
def check_pairwise_arrays(X, Y, precomputed=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances between pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
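# A minimal usage sketch (toy inputs assumed): when Y stays fixed across many
# calls, its squared row norms can be precomputed once and passed in via
# ``Y_norm_squared`` so that ``dot(y, y)`` is not recomputed on every call:
#
#     >>> import numpy as np
#     >>> Y = np.array([[0., 0.], [1., 1.]])
#     >>> YY = (Y ** 2).sum(axis=1)
#     >>> euclidean_distances([[1., 0.]], Y, Y_norm_squared=YY)
#     array([[ 1.,  1.]])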
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes, for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
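# A small sketch with assumed toy data; the result matches taking the argmin
# and min over the full pairwise distance matrix, but the computation is done
# in memory-bounded chunks:
#
#     >>> import numpy as np
#     >>> X = np.array([[0., 0.], [1., 1.]])
#     >>> Y = np.array([[0., 1.], [2., 2.]])
#     >>> pairwise_distances_argmin_min(X, Y)
#     (array([0, 0]), array([ 1.,  1.]))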
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes, for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (i.e. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
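# An illustrative example (toy inputs assumed): orthogonal unit vectors have
# cosine similarity 0 and therefore cosine distance 1:
#
#     >>> cosine_distances([[1., 0.], [0., 1.]])
#     array([[ 0.,  1.],
#            [ 1.,  0.]])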
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
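# An illustrative example (toy inputs assumed): the i-th output is the L1
# distance between X[i] and Y[i]:
#
#     >>> paired_manhattan_distances([[1., 2.], [3., 4.]], [[1., 1.], [4., 4.]])
#     array([ 1.,  1.])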
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
-----
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
degree : int, default 3
gamma : float, default None
if None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
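# A worked example (toy inputs assumed): with gamma=1 and coef0=1, two
# identical vectors [1, 1] give (1 * 2 + 1) ** 2 = 9 for degree=2:
#
#     >>> polynomial_kernel([[1., 1.]], [[1., 1.]], degree=2, gamma=1., coef0=1)
#     array([[ 9.]])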
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
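# A worked example (toy inputs assumed): identical rows give 1.0 on the
# diagonal, and rows at squared distance 2 give exp(-2 * gamma) off-diagonal:
#
#     >>> rbf_kernel([[0., 0.], [1., 1.]], gamma=1.)
#     array([[ 1.        ,  0.13533528],
#            [ 0.13533528,  1.        ]])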
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter *dense_output* for sparse output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
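# A worked example (toy inputs assumed): the rows [1, 3] and [3, 1] differ by
# (2 ** 2) / 4 + (2 ** 2) / 4 = 2, so the kernel value is exp(-2):
#
#     >>> chi2_kernel([[1., 3.]], [[3., 1.]], gamma=1.)
#     array([[ 0.13533528]])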
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
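# A minimal usage sketch (toy inputs assumed): with Y omitted, the symmetric
# distance matrix of X is returned, here for a 3-4-5 right triangle:
#
#     >>> pairwise_distances([[0., 0.], [3., 4.]], metric="euclidean")
#     array([[ 0.,  5.],
#            [ 5.,  0.]])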
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
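# A minimal usage sketch (toy inputs assumed): the string "linear" dispatches
# to linear_kernel, i.e. the plain dot product of the rows:
#
#     >>> pairwise_kernels([[0., 1.]], [[1., 1.]], metric="linear")
#     array([[ 1.]])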
|
kashif/scikit-learn
|
sklearn/metrics/pairwise.py
|
Python
|
bsd-3-clause
| 45,532
|
[
"Gaussian"
] |
e0473eda66378cb1852459324d0138c683d4ec1620cb1db3a6021c677e26dbc1
|
"""
Upload class
"""
from galaxy import jobs, util, datatypes, web
import common
import logging, urllib
log = logging.getLogger( __name__ )
class ASync(common.Root):
@web.expose
def default(self, trans, tool_id=None, data_id=None, **kwd):
"""Catches the tool id and redirects as needed"""
return self.index( trans, tool_id=tool_id, data_id=data_id, **kwd)
@web.expose
def index(self, trans, tool_id=None, **kwd):
"""Manages ascynchronous connections"""
if tool_id is None:
return "tool_id argument is required"
#log.debug('async params -> %s' % kwd)
# redirect to main when getting no parameters
if not kwd:
return trans.response.send_redirect( "/index" )
history = trans.get_history()
params = util.Params(kwd, safe=False)
STATUS = params.STATUS
URL = params.URL
data_id = params.data_id
log.debug('async dataid -> %s' % data_id)
trans.log_event( 'Async dataid -> %s' % data_id )
# initialize the tool
toolbox = self.get_toolbox()
tool = toolbox.tools_by_id.get(tool_id, '')
if not tool:
return "Tool with id %s not found" % tool_id
#
# we have an incoming data_id
#
if data_id:
if not URL:
return "No URL parameter was submitted for data %s" % data_id
data = trans.model.Dataset.get( data_id )
if not data:
return "Data %s does not exist or has already been deleted" % data_id
if STATUS == 'OK':
# push the job into the queue
data.state = data.blurb = data.states.RUNNING
log.debug('executing tool %s' % tool.id)
trans.log_event( 'Async executing tool %s' % tool.id )
params = dict(url=URL, dataid=data.id, output=data.file_name)
#tool.execute( app=self.app, history=history, incoming=params )
tool.execute( trans, incoming=params )
else:
log.debug('async error -> %s' % STATUS)
trans.log_event( 'Async error -> %s' % STATUS )
data.state = data.blurb = jobs.JOB_ERROR
data.info = "Error -> %s" % STATUS
trans.model.flush()
return "Data %s with status %s received. OK" % (data_id, STATUS)
#
# no data_id, so this must be a parameter submission
#
if not data_id and len(params)>3:
if params.galaxyFileFormat == 'wig':
GALAXY_TYPE = 'wig'
else:
GALAXY_TYPE = params.GALAXY_TYPE or 'interval'
GALAXY_NAME = params.GALAXY_NAME or '%s query' % tool.name
GALAXY_INFO = params.GALAXY_INFO or params.galaxyDescription or ''
GALAXY_BUILD = params.GALAXY_BUILD or params.galaxyFreeze or 'hg17'
#data = datatypes.factory(ext=GALAXY_TYPE)()
#data.ext = GALAXY_TYPE
#data.name = GALAXY_NAME
#data.info = GALAXY_INFO
#data.dbkey = GALAXY_BUILD
#data.state = jobs.JOB_OK
#history.datasets.add_dataset( data )
data = trans.app.model.Dataset()
data.name = GALAXY_NAME
data.extension = GALAXY_TYPE
data.dbkey = GALAXY_BUILD
data.info = GALAXY_INFO
data.state = data.states.NEW
trans.history.add_dataset( data )
trans.model.flush()
try:
galaxy_url = trans.request.base + '/async/%s/%s' % ( tool_id, data.id )
params.update( { 'GALAXY_URL' :galaxy_url } )
url = tool.action + '?' + urllib.urlencode( params.flatten() )
log.debug("connecting to -> %s" % url)
trans.log_event( "Async connecting to -> %s" % url )
text = urllib.urlopen(url).read(-1)
text = text.strip()
if not text.endswith('OK'):
raise Exception, text
data.state = data.blurb = data.states.RUNNING
except Exception, e:
data.info = str(e)
data.state = data.blurb = data.states.ERROR
trans.model.flush()
return trans.fill_template('tool_executed.tmpl', out_data={}, tool=tool, config=self.app.config )
|
jmchilton/galaxy-central
|
galaxy/interfaces/async.py
|
Python
|
mit
| 4,565
|
[
"Galaxy"
] |
f78dee9bb475264fefda1af82b2854d6c2face9bce5ce0cb3d4881b996ae59a1
|
import os
import time
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit as u
import mdtraj.reporters
import sys
code = "3DMV"
ff_name = "amber99sbnmr"
water_name = 'tip3p-fb'
which_forcefield = "%s.xml" % ff_name
which_water = '%s.xml' % water_name
platform_name = "CUDA"
timestep = 2.0 * u.femtoseconds
cutoff = 0.95 * u.nanometers
output_frequency = 5000
n_steps = 2500000
temperature = 300.
pressure = 1.0 * u.atmospheres
rank = int(sys.argv[1])
time.sleep(rank) # This makes sure that no two jobs run at the same time for RNG purposes.
pdb_filename = "./equil_box/%s.pdb" % code
dcd_filename = "./equil_box2/%s_%d.dcd" % (code, rank)
log_filename = "./equil_box2/%s_%d.log" % (code, rank)
traj = mdtraj.load(pdb_filename)
top, bonds = traj.top.to_dataframe()
atom_indices = top.index[top.chainID == 0].values
pdb = app.PDBFile(pdb_filename)
topology = pdb.topology
positions = pdb.positions
ff = app.ForceField(which_forcefield, which_water)
platform = mm.Platform.getPlatformByName(platform_name)
system = ff.createSystem(topology, nonbondedMethod=app.PME, nonbondedCutoff=cutoff, constraints=app.HBonds)
integrator = mm.LangevinIntegrator(temperature, 1.0 / u.picoseconds, timestep)
simulation = app.Simulation(topology, system, integrator, platform=platform)
simulation.context.setPositions(positions)
simulation.minimizeEnergy()
simulation.context.setVelocitiesToTemperature(temperature)
print("Using platform %s" % simulation.context.getPlatform().getName())
simulation.reporters.append(mdtraj.reporters.DCDReporter(dcd_filename, output_frequency))
simulation.reporters.append(app.StateDataReporter(open(log_filename, 'w'), output_frequency, step=True, time=True, speed=True))
simulation.step(n_steps)
|
kyleabeauchamp/fah-projects
|
old/equilibrate_boxes2.py
|
Python
|
gpl-2.0
| 1,762
|
[
"MDTraj",
"OpenMM"
] |
87524127cea5df88e9bfd17238deb88ffb0d8fb8ace8fb3128c98cc0a7fdb90f
|
# -*- coding: utf-8 -*-
"""
pysteps.nowcasts.anvil
======================
Implementation of the autoregressive nowcasting using VIL (ANVIL) nowcasting
method developed in :cite:`PCLH2020`. Compared to S-PROG, the main improvements
are using an autoregressive integrated (ARI) model and the option to use
vertically integrated liquid (VIL) as the input variable. Using the ARI model
avoids biasedness and loss of small-scale features in the forecast field, and
no statistical post-processing is needed. In addition, the model allows
localization of parameter estimates. It was shown in :cite:`PCLH2020` that due
to the above improvements, ANVIL produces more reliable deterministic nowcasts
than S-PROG.
.. autosummary::
:toctree: ../generated/
forecast
"""
import time
import numpy as np
from scipy.ndimage import gaussian_filter
from pysteps import cascade, extrapolation
from pysteps.nowcasts import utils as nowcast_utils
from pysteps.timeseries import autoregression
from pysteps import utils
try:
import dask
DASK_IMPORTED = True
except ImportError:
DASK_IMPORTED = False
def forecast(
vil,
velocity,
timesteps,
rainrate=None,
n_cascade_levels=8,
extrap_method="semilagrangian",
ar_order=2,
ar_window_radius=50,
r_vil_window_radius=3,
fft_method="numpy",
apply_rainrate_mask=True,
num_workers=1,
extrap_kwargs=None,
filter_kwargs=None,
measure_time=False,
):
"""
Generate a nowcast by using the autoregressive nowcasting using VIL
(ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast.
The key features are:
1) Growth and decay: implemented by using a cascade decomposition and
a multiscale autoregressive integrated ARI(p,1) model. Instead of the
original time series, the ARI model is applied to the differenced one
corresponding to time derivatives.
2) Originally designed for using vertically integrated liquid (VIL) as the input data.
In this case, the rain rate (R) is obtained from VIL via an empirical
relation. This implementation is more general so that the input can be
any two-dimensional precipitation field.
3) The parameters of the ARI model and the R(VIL) relation are allowed to
be spatially variable. The estimation is done using a moving window.
Parameters
----------
vil: array_like
Array of shape (ar_order+2,m,n) containing the input fields ordered by
timestamp from oldest to newest. The inputs are expected to contain VIL
or rain rate. The time steps between the inputs are assumed to be regular.
velocity: array_like
Array of shape (2,m,n) containing the x- and y-components of the
advection field. The velocities are assumed to represent one time step
between the inputs. All values are required to be finite.
timesteps: int or list of floats
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
rainrate: array_like
Array of shape (m,n) containing the most recently observed rain rate
field. If set to None, no R(VIL) conversion is done and the outputs
are in the same units as the inputs.
n_cascade_levels: int, optional
The number of cascade levels to use.
extrap_method: str, optional
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
ar_order: int, optional
The order of the autoregressive model to use. The recommended values
are 1 or 2. Using a higher-order model is strongly discouraged because
the stationarity of the AR process cannot be guaranteed.
ar_window_radius: int, optional
The radius of the window to use for determining the parameters of the
autoregressive model. Set to None to disable localization.
r_vil_window_radius: int, optional
The radius of the window to use for determining the R(VIL) relation.
Applicable if rainrate is not None.
fft_method: str, optional
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
apply_rainrate_mask: bool
Apply mask to prevent producing precipitation to areas where it was not
originally observed. Defaults to True. Disabling this may improve some
verification metrics but increases the number of false alarms. Applicable
if rainrate is None.
num_workers: int, optional
The number of workers to use for parallel computation. Applicable if
dask is installed or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting
the environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict, optional
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
measure_time: bool, optional
If True, measure, print and return the computation time.
Returns
-------
out: ndarray
A three-dimensional array of shape (num_timesteps,m,n) containing a time
series of forecast precipitation fields. The time series starts from
t0+timestep, where timestep is taken from the input VIL/rain rate
fields. If measure_time is True, the return value is a three-element
tuple containing the nowcast array, the initialization time of the
nowcast generator and the time used in the main loop (seconds).
References
----------
:cite:`PCLH2020`
"""
_check_inputs(vil, rainrate, velocity, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
else:
extrap_kwargs = extrap_kwargs.copy()
if filter_kwargs is None:
filter_kwargs = dict()
print("Computing ANVIL nowcast:")
print("------------------------")
print("")
print("Inputs:")
print("-------")
print("input dimensions: %dx%d" % (vil.shape[1], vil.shape[2]))
print("")
print("Methods:")
print("--------")
print("extrapolation: %s" % extrap_method)
print("FFT: %s" % fft_method)
print("")
print("Parameters:")
print("-----------")
if isinstance(timesteps, int):
print("number of time steps: %d" % timesteps)
else:
print("time steps: %s" % timesteps)
print("parallel threads: %d" % num_workers)
print("number of cascade levels: %d" % n_cascade_levels)
print("order of the ARI(p,1) model: %d" % ar_order)
if type(ar_window_radius) == int:
print("ARI(p,1) window radius: %d" % ar_window_radius)
else:
print("ARI(p,1) window radius: none")
print("R(VIL) window radius: %d" % r_vil_window_radius)
if measure_time:
starttime_init = time.time()
m, n = vil.shape[1:]
vil = vil.copy()
if rainrate is None and apply_rainrate_mask:
rainrate_mask = vil[-1, :] < 0.1
if rainrate is not None:
# determine the coefficients fields of the relation R=a*VIL+b by
# localized linear regression
r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius)
# transform the input fields to Lagrangian coordinates by extrapolation
extrapolator = extrapolation.get_method(extrap_method)
res = list()
def worker(vil, i):
return (
i,
extrapolator(
vil[i, :],
velocity,
vil.shape[0] - 1 - i,
allow_nonfinite_values=True,
**extrap_kwargs,
)[-1],
)
for i in range(vil.shape[0] - 1):
if not DASK_IMPORTED or num_workers == 1:
vil[i, :, :] = worker(vil, i)[1]
else:
res.append(dask.delayed(worker)(vil, i))
if DASK_IMPORTED and num_workers > 1:
num_workers_ = len(res) if num_workers > len(res) else num_workers
vil_e = dask.compute(*res, num_workers=num_workers_)
for i in range(len(vil_e)):
vil[vil_e[i][0], :] = vil_e[i][1]
# compute the final mask as the intersection of the masks of the advected
# fields
mask = np.isfinite(vil[0, :])
for i in range(1, vil.shape[0]):
mask = np.logical_and(mask, np.isfinite(vil[i, :]))
if rainrate is None and apply_rainrate_mask:
rainrate_mask = np.logical_and(rainrate_mask, mask)
# apply cascade decomposition to the advected input fields
bp_filter_method = cascade.get_method("gaussian")
bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs)
fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers)
decomp_method, recomp_method = cascade.get_method("fft")
vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n))
for i in range(vil.shape[0]):
vil_ = vil[i, :].copy()
vil_[~np.isfinite(vil_)] = 0.0
vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft)
for j in range(n_cascade_levels):
vil_dec[j, i, :] = vil_dec_i["cascade_levels"][j, :]
# compute time-lagged correlation coefficients for the cascade levels of
# the advected and differenced input fields
gamma = np.empty((n_cascade_levels, ar_order, m, n))
for i in range(n_cascade_levels):
vil_diff = np.diff(vil_dec[i, :], axis=0)
vil_diff[~np.isfinite(vil_diff)] = 0.0
for j in range(ar_order):
gamma[i, j, :] = _moving_window_corrcoef(
vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius
)
if ar_order == 2:
# if the order of the ARI model is 2, adjust the correlation coefficients
# so that the resulting process is stationary
for i in range(n_cascade_levels):
gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2(
gamma[i, 0, :], gamma[i, 1, :]
)
# estimate the parameters of the ARI models
phi = []
for i in range(n_cascade_levels):
if ar_order > 2:
phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1)
elif ar_order == 2:
phi_ = _estimate_ar2_params(gamma[i, :])
else:
phi_ = _estimate_ar1_params(gamma[i, :])
phi.append(phi_)
vil_dec = vil_dec[:, -(ar_order + 1) :, :]
if measure_time:
init_time = time.time() - starttime_init
print("Starting nowcast computation.")
if measure_time:
starttime_mainloop = time.time()
r_f = []
if isinstance(timesteps, int):
timesteps = range(timesteps + 1)
timestep_type = "int"
else:
original_timesteps = [0] + list(timesteps)
timesteps = nowcast_utils.binned_timesteps(original_timesteps)
timestep_type = "list"
if rainrate is not None:
r_f_prev = r_vil_a * vil[-1, :] + r_vil_b
else:
r_f_prev = vil[-1, :]
extrap_kwargs["return_displacement"] = True
dp = None
t_prev = 0.0
for t, subtimestep_idx in enumerate(timesteps):
if timestep_type == "list":
subtimesteps = [original_timesteps[t_] for t_ in subtimestep_idx]
else:
subtimesteps = [t]
if (timestep_type == "list" and subtimesteps) or (
timestep_type == "int" and t > 0
):
is_nowcast_time_step = True
else:
is_nowcast_time_step = False
if is_nowcast_time_step:
print(
"Computing nowcast for time step %d... " % t,
end="",
flush=True,
)
if measure_time:
starttime = time.time()
# iterate the ARI models for each cascade level
for i in range(n_cascade_levels):
vil_dec[i, :] = autoregression.iterate_ar_model(vil_dec[i, :], phi[i])
# recompose the cascade to obtain the forecast field
vil_dec_dict = {}
vil_dec_dict["cascade_levels"] = vil_dec[:, -1, :]
vil_dec_dict["domain"] = "spatial"
vil_dec_dict["normalized"] = False
vil_f = recomp_method(vil_dec_dict)
vil_f[~mask] = np.nan
if rainrate is not None:
# convert VIL to rain rate
r_f_new = r_vil_a * vil_f + r_vil_b
else:
r_f_new = vil_f
if apply_rainrate_mask:
r_f_new[rainrate_mask] = 0.0
r_f_new[r_f_new < 0.0] = 0.0
# advect the recomposed field to obtain the forecast for the current
# time step (or subtimesteps if non-integer time steps are given)
for t_sub in subtimesteps:
if t_sub > 0:
t_diff_prev_int = t_sub - int(t_sub)
if t_diff_prev_int > 0.0:
r_f_ip = (
1.0 - t_diff_prev_int
) * r_f_prev + t_diff_prev_int * r_f_new
else:
r_f_ip = r_f_prev
t_diff_prev = t_sub - t_prev
extrap_kwargs["displacement_prev"] = dp
r_f_ep, dp = extrapolator(
r_f_ip,
velocity,
[t_diff_prev],
allow_nonfinite_values=True,
**extrap_kwargs,
)
r_f.append(r_f_ep[0])
t_prev = t_sub
# advect the forecast field by one time step if no subtimesteps in the
# current interval were found
if not subtimesteps:
t_diff_prev = t + 1 - t_prev
extrap_kwargs["displacement_prev"] = dp
_, dp = extrapolator(
None,
velocity,
[t_diff_prev],
allow_nonfinite_values=True,
**extrap_kwargs,
)
t_prev = t + 1
r_f_prev = r_f_new
if is_nowcast_time_step:
if measure_time:
print("%.2f seconds." % (time.time() - starttime))
else:
print("done.")
if measure_time:
mainloop_time = time.time() - starttime_mainloop
if measure_time:
return np.stack(r_f), init_time, mainloop_time
else:
return np.stack(r_f)
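# A minimal usage sketch with synthetic inputs (toy data assumed; the progress
# printout is omitted). With the default ar_order=2, four input fields are
# required, and a three-step nowcast has shape (3, m, n):
#
#     >>> import numpy as np
#     >>> vil = np.random.exponential(size=(4, 100, 100))
#     >>> velocity = np.zeros((2, 100, 100))
#     >>> forecast(vil, velocity, 3, n_cascade_levels=6).shape
#     (3, 100, 100)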
def _check_inputs(vil, rainrate, velocity, timesteps, ar_order):
if vil.ndim != 3:
raise ValueError(
"vil.shape = %s, but a three-dimensional array expected" % str(vil.shape)
)
if rainrate is not None:
if rainrate.ndim != 2:
raise ValueError(
"rainrate.shape = %s, but a two-dimensional array expected"
% str(rainrate.shape)
)
if vil.shape[0] != ar_order + 2:
raise ValueError(
"vil.shape[0] = %d, but vil.shape[0] = ar_order + 2 = %d required"
% (vil.shape[0], ar_order + 2)
)
if velocity.ndim != 3:
raise ValueError(
"velocity.shape = %s, but a three-dimensional array expected"
% str(velocity.shape)
)
if isinstance(timesteps, list) and not sorted(timesteps) == timesteps:
raise ValueError("timesteps is not in ascending order")
# optimized version of timeseries.autoregression.estimate_ar_params_yw_localized
# for an ARI(1,1) model
def _estimate_ar1_params(gamma):
phi = []
phi.append(1 + gamma[0, :])
phi.append(-gamma[0, :])
phi.append(np.zeros(gamma[0, :].shape))
return phi
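# Worked sketch (a constant lag-1 correlation field of 0.5 is assumed): the
# ARI(1,1) recursion x_t - x_{t-1} = gamma * (x_{t-1} - x_{t-2}) expands to
# x_t = (1 + gamma) * x_{t-1} - gamma * x_{t-2}, matching the coefficients:
#
#     >>> import numpy as np
#     >>> gamma = 0.5 * np.ones((1, 2, 2))
#     >>> [float(p[0, 0]) for p in _estimate_ar1_params(gamma)]
#     [1.5, -0.5, 0.0]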
# optimized version of timeseries.autoregression.estimate_ar_params_yw_localized
# for an ARI(2,1) model
def _estimate_ar2_params(gamma):
phi_diff = []
phi_diff.append(gamma[0, :] * (1 - gamma[1, :]) / (1 - gamma[0, :] * gamma[0, :]))
phi_diff.append(
(gamma[1, :] - gamma[0, :] * gamma[0, :]) / (1 - gamma[0, :] * gamma[0, :])
)
phi = []
phi.append(1 + phi_diff[0])
phi.append(-phi_diff[0] + phi_diff[1])
phi.append(-phi_diff[1])
phi.append(np.zeros(phi_diff[0].shape))
return phi
# Compute correlation coefficients of two 2d fields in a moving window with
# a Gaussian weight function. See Section II.G of PCLH2020. Differently to the
# standard formula for the Pearson correlation coefficient, the mean value of
# the inputs is assumed to be zero.
def _moving_window_corrcoef(x, y, window_radius):
mask = np.logical_and(np.isfinite(x), np.isfinite(y))
x = x.copy()
x[~mask] = 0.0
y = y.copy()
y[~mask] = 0.0
mask = mask.astype(float)
if window_radius is not None:
n = gaussian_filter(mask, window_radius, mode="constant")
ssx = gaussian_filter(x**2, window_radius, mode="constant")
ssy = gaussian_filter(y**2, window_radius, mode="constant")
sxy = gaussian_filter(x * y, window_radius, mode="constant")
else:
n = np.mean(mask)
ssx = np.mean(x**2)
ssy = np.mean(y**2)
sxy = np.mean(x * y)
stdx = np.sqrt(ssx / n)
stdy = np.sqrt(ssy / n)
cov = sxy / n
mask = np.logical_and(stdx > 1e-8, stdy > 1e-8)
mask = np.logical_and(mask, stdx * stdy > 1e-8)
mask = np.logical_and(mask, n > 1e-3)
corr = np.empty(x.shape)
corr[mask] = cov[mask] / (stdx[mask] * stdy[mask])
corr[~mask] = 0.0
return corr
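# A quick sanity check (toy input assumed): correlating a field with itself
# gives 1.0 wherever the local standard deviations are nonzero:
#
#     >>> import numpy as np
#     >>> x = np.arange(16, dtype=float).reshape(4, 4)
#     >>> np.allclose(_moving_window_corrcoef(x, x, 2), 1.0)
#     True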
# Determine the coefficients of the regression R=a*VIL+b.
# See Section II.G of PCLH2020.
# The parameters a and b are estimated in a localized fashion for each pixel
# in the input grid. This is done using a window specified by window_radius.
# Zero and non-finite values are not included. In addition, the regression is
# done by using a Gaussian weight function depending on the distance to the
# current grid point.
def _r_vil_regression(vil, r, window_radius):
vil = vil.copy()
vil[~np.isfinite(vil)] = 0.0
r = r.copy()
r[~np.isfinite(r)] = 0.0
mask_vil = vil > 10.0
mask_r = r > 0.1
mask_obs = np.logical_and(mask_vil, mask_r)
vil[~mask_obs] = 0.0
r[~mask_obs] = 0.0
n = gaussian_filter(mask_obs.astype(float), window_radius, mode="constant")
sx = gaussian_filter(vil, window_radius, mode="constant")
sx2 = gaussian_filter(vil * vil, window_radius, mode="constant")
sxy = gaussian_filter(vil * r, window_radius, mode="constant")
sy = gaussian_filter(r, window_radius, mode="constant")
rhs1 = sxy
rhs2 = sy
m1 = sx2
m2 = sx
m3 = sx
m4 = n
c = 1.0 / (m1 * m4 - m2 * m3)
m_inv_11 = c * m4
m_inv_12 = -c * m2
m_inv_21 = -c * m3
m_inv_22 = c * m1
mask = np.abs(m1 * m4 - m2 * m3) > 1e-8
mask = np.logical_and(mask, n > 0.01)
a = np.empty(vil.shape)
a[mask] = m_inv_11[mask] * rhs1[mask] + m_inv_12[mask] * rhs2[mask]
a[~mask] = 0.0
a[~mask_vil] = 0.0
b = np.empty(vil.shape)
b[mask] = m_inv_21[mask] * rhs1[mask] + m_inv_22[mask] * rhs2[mask]
b[~mask] = 0.0
b[~mask_vil] = 0.0
return a, b
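# --- Illustrative sketch (not part of the original module): the localized
# regression above should approximately recover a and b on synthetic data
# generated from R = 0.02 * VIL + 1 plus noise; the field size, noise level
# and window radius are arbitrary choices.
def _demo_r_vil_regression():
    rng = np.random.RandomState(0)
    vil = 50.0 * rng.rand(64, 64)
    r = 0.02 * vil + 1.0 + 0.1 * rng.randn(64, 64)
    a, b = _r_vil_regression(vil, r, window_radius=10)
    # where VIL > 10 the estimates should be close to 0.02 and 1.0
    return np.median(a[vil > 10]), np.median(b[vil > 10])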
|
pySTEPS/pysteps
|
pysteps/nowcasts/anvil.py
|
Python
|
bsd-3-clause
| 19,462
|
[
"Gaussian"
] |
7dffccaace853f549e7b650be51e60583a4d55203d6b1de3ecbb501915632e03
|
from edc_navbar import Navbar, NavbarItem, site_navbars
visit_schedule = Navbar(name='edc_visit_schedule')
visit_schedule.append_item(
NavbarItem(name='visit_schedule',
title='Visit Schedule',
label='Visit Schedule',
fa_icon='fa-calendar',
url_name='edc_visit_schedule:home_url'))
visit_schedule.append_item(
NavbarItem(name='admin',
title='Subject History',
label='Subject History',
fa_icon='fa-history',
url_name=('edc_visit_schedule:edc_visit_schedule_admin:'
'edc_visit_schedule_subjectschedulehistory_changelist')))
site_navbars.register(visit_schedule)
|
botswana-harvard/edc-visit-schedule
|
edc_visit_schedule/navbars.py
|
Python
|
gpl-2.0
| 717
|
[
"VisIt"
] |
81e54165875523aaf3f694f5f3d7d7dd9f42bf5fae95856987eeca953b0fa7a4
|
"""Module defining ``Eigensolver`` classes."""
import numpy as np
from gpaw.utilities.blas import axpy
from gpaw.eigensolvers.eigensolver import Eigensolver
from gpaw.utilities import unpack
from gpaw.mpi import run
class RMM_DIIS(Eigensolver):
"""RMM-DIIS eigensolver
It is expected that the trial wave functions are orthonormal
and the integrals of projector functions and wave functions
    ``nucleus.P_uni`` are already calculated.
    Solution steps are:
* Subspace diagonalization
* Calculation of residuals
* Improvement of wave functions: psi' = psi + lambda PR + lambda PR'
* Orthonormalization"""
def __init__(self, keep_htpsit=True, blocksize=10):
Eigensolver.__init__(self, keep_htpsit, blocksize)
def iterate_one_k_point(self, hamiltonian, wfs, kpt):
"""Do a single RMM-DIIS iteration for the kpoint"""
self.subspace_diagonalize(hamiltonian, wfs, kpt)
self.timer.start('RMM-DIIS')
if self.keep_htpsit:
R_nG = self.Htpsit_nG
self.calculate_residuals(kpt, wfs, hamiltonian, kpt.psit_nG,
kpt.P_ani, kpt.eps_n, R_nG)
def integrate(a_G, b_G):
return np.real(wfs.integrate(a_G, b_G, global_integral=False))
comm = wfs.gd.comm
B = self.blocksize
dR_xG = wfs.empty(B, wfs.dtype)
P_axi = wfs.pt.dict(B)
error = 0.0
for n1 in range(0, wfs.bd.mynbands, B):
n2 = n1 + B
if n2 > wfs.bd.mynbands:
n2 = wfs.bd.mynbands
B = n2 - n1
P_axi = dict([(a, P_xi[:B]) for a, P_xi in P_axi.items()])
dR_xG = dR_xG[:B]
n_x = range(n1, n2)
if self.keep_htpsit:
R_xG = R_nG[n_x]
else:
R_xG = wfs.empty(B, wfs.dtype)
psit_xG = kpt.psit_nG[n_x]
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, psit_xG, R_xG)
wfs.pt.integrate(psit_xG, P_axi, kpt.q)
self.calculate_residuals(kpt, wfs, hamiltonian, psit_xG,
P_axi, kpt.eps_n[n_x], R_xG, n_x)
for n in n_x:
if kpt.f_n is None:
weight = kpt.weight
else:
weight = kpt.f_n[n]
if self.nbands_converge != 'occupied':
if wfs.bd.global_index(n) < self.nbands_converge:
weight = kpt.weight
else:
weight = 0.0
error += weight * integrate(R_xG[n - n1], R_xG[n - n1])
# Precondition the residual:
self.timer.start('precondition')
ekin_x = self.preconditioner.calculate_kinetic_energy(
kpt.psit_nG[n_x], kpt)
dpsit_xG = self.preconditioner(R_xG, kpt, ekin_x)
self.timer.stop('precondition')
# Calculate the residual of dpsit_G, dR_G = (H - e S) dpsit_G:
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, dpsit_xG, dR_xG)
self.timer.start('projections')
wfs.pt.integrate(dpsit_xG, P_axi, kpt.q)
self.timer.stop('projections')
self.calculate_residuals(kpt, wfs, hamiltonian, dpsit_xG,
P_axi, kpt.eps_n[n_x], dR_xG, n_x,
calculate_change=True)
# Find lam that minimizes the norm of R'_G = R_G + lam dR_G
RdR_x = np.array([integrate(dR_G, R_G)
for R_G, dR_G in zip(R_xG, dR_xG)])
dRdR_x = np.array([integrate(dR_G, dR_G) for dR_G in dR_xG])
comm.sum(RdR_x)
comm.sum(dRdR_x)
lam_x = -RdR_x / dRdR_x
# Calculate new psi'_G = psi_G + lam pR_G + lam pR'_G
# = psi_G + p(2 lam R_G + lam**2 dR_G)
for lam, R_G, dR_G in zip(lam_x, R_xG, dR_xG):
R_G *= 2.0 * lam
axpy(lam**2, dR_G, R_G) # R_G += lam**2 * dR_G
self.timer.start('precondition')
kpt.psit_nG[n1:n2] += self.preconditioner(R_xG, kpt, ekin_x)
self.timer.stop('precondition')
self.timer.stop('RMM-DIIS')
error = comm.sum(error)
return error
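# --- Illustrative sketch (not part of GPAW): the step length used in the loop
# above, lam = -<dR|R> / <dR|dR>, minimizes |R + lam*dR|^2 for a single
# residual vector. Plain NumPy stand-in without preconditioning or PAW
# projector corrections.
def _demo_rmm_diis_step(R_G, dR_G):
    lam = -np.vdot(dR_G, R_G).real / np.vdot(dR_G, dR_G).real
    return R_G + lam * dR_G  # corrected residual with minimal norm along dR_G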
|
ajylee/gpaw-rtxs
|
gpaw/eigensolvers/rmm_diis.py
|
Python
|
gpl-3.0
| 4,437
|
[
"GPAW"
] |
2048340e691b44fa12a4c4b1b02a552f8a20f9458801633e8fa4bf6e36cc840a
|
#!/usr/bin/env python
'''CREATED:2013-12-08 14:28:34 by Brian McFee <brm2132@columbia.edu>
Demonstration of phase vocoder time stretching.
'''
from __future__ import print_function
import argparse
import sys
import librosa
def stretch_demo(input_file, output_file, speed):
'''Phase-vocoder time stretch demo function.
:parameters:
- input_file : str
path to input audio
- output_file : str
path to save output (wav)
- speed : float > 0
speed up by this factor
'''
# 1. Load the wav file, resample
print('Loading ', input_file)
y, sr = librosa.load(input_file)
# 2. Time-stretch through effects module
print('Playing back at {:3.0f}% speed'.format(speed * 100))
y_stretch = librosa.effects.time_stretch(y, speed)
print('Saving stretched audio to: ', output_file)
librosa.output.write_wav(output_file, y_stretch, sr)
def process_arguments(args):
'''Argparse function to get the program parameters'''
parser = argparse.ArgumentParser(description='Time stretching example')
parser.add_argument('input_file',
action='store',
help='path to the input file (wav, mp3, etc)')
parser.add_argument('output_file',
action='store',
help='path to the stretched output (wav)')
parser.add_argument('-s', '--speed',
action='store',
type=float,
default=2.0,
required=False,
help='speed')
return vars(parser.parse_args(args))
if __name__ == '__main__':
# get the parameters
parameters = process_arguments(sys.argv[1:])
    # Run the time-stretch demo
stretch_demo(parameters['input_file'],
parameters['output_file'],
parameters['speed'])
|
ebattenberg/librosa
|
examples/time_stretch.py
|
Python
|
isc
| 1,908
|
[
"Brian"
] |
e77b07cb1efd64318e8ace9215f590b670d2b2101cd4b77b761a1c9b74533e46
|
""" Algorithms for locating an embryo in the image
"""
# Author: Ilya Patrushev ilya.patrushev@gmail.com
# License: GPL v2.0
import sys
import numpy as np
import scipy as sp
import scipy.linalg as la
import scipy.sparse as spr
import scipy.stats as st
from scipy.signal import fftconvolve
from sklearn import linear_model
from sklearn import mixture
from sklearn.feature_extraction.image import grid_to_graph
import matplotlib.pyplot as plt
import cv2
import matplotlib.patches as pt
def bounding_box(labels, th = 0, verbose=False):
"""
Find bounding box from indicator function.
Parameters
----------
    labels: array[height, width]
Object outline in the form of indicator function.
th: float, optional, default: 0
Relative threshold
    verbose: boolean, optional, default: False
Plots the result if True.
Returns
-------
tuple(4)
Bounding box: Left, Right, Top, Bottom
"""
hs = np.float32(np.sum(labels, axis=0))
horz = [i for i, x in enumerate(hs/np.max(hs)) if x > th]
l, r = horz[0], horz[-1]+1
vs = np.float32(np.sum(labels, axis=1))
vert = [i for i, x in enumerate(vs/np.max(vs)) if x > th]
t, b = vert[0], vert[-1]+1
if verbose == True:
plt.figure()
ax = plt.subplot(121)
ax.plot(hs/np.max(hs))
ax = plt.subplot(122)
ax.plot(vs/np.max(vs))
return l, r, t, b
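# --- Illustrative sketch (not part of the original module): bounding_box on a
# toy indicator image containing a single filled rectangle; sizes are
# arbitrary.
def _demo_bounding_box():
    labels = np.zeros((20, 30))
    labels[5:10, 8:20] = 1
    # expected result: l = 8, r = 20, t = 5, b = 10
    return bounding_box(labels, th=0, verbose=False)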
def get_subgraphs(L, th = 0.05, epsilon = 1.e-7):
"""
Recursively cut connectivity graph
Parameters
----------
L: sparse matrix
Laplacian of a connectivity graph.
th: float, optional, default: 0.05
Graph cut threshold
epsilon: float, optional, default: 1.e-7
Smallest non-zero float for eigen value decomposition.
Returns
-------
list
List of disconnected subgraphs
"""
size = L.shape[0]
if size == 1:
return [np.array([0])]
#finding the smallest eigenvalue and associated eigenvector of the Laplacian
try:
evals_small, evecs_small = spr.linalg.eigsh(L, 1, sigma=epsilon, which='LM', tol=epsilon) #
except:
print "get_subgraphs:", L.shape, th, epsilon
print str(sys.exc_info()[1])
return []
indx, ys = np.array(sorted(enumerate(evecs_small[:, 0]), key=lambda x: x[1])).T
indx = np.uint32(indx)
#max jump in sorted components of the eigenvector
ys = ys[1:] - ys[:-1]
cut = np.argmax(ys)
#stop recursion if cannot cut
if ys[cut] < th or abs(evals_small) > epsilon:
return [indx]
indl, indr = indx[:cut+1], indx[cut+1:]
if L[indl, :].tocsc()[:, indl].tocsr().shape == (0, 0) or L[indr, :].tocsc()[:, indr].tocsr().shape == (0, 0):
print ys.shape, cut
print ys
#split the Laplacian and recurse
return ( [indl[l] for l in get_subgraphs(L[indl, :].tocsc()[:, indl].tocsr(), th, epsilon)]
+ [indr[l] for l in get_subgraphs(L[indr, :].tocsc()[:, indr].tocsr(), th, epsilon)] )
def smooth_contour(energy, initial, theta=-np.inf, nu=0, mu=1, max_iter=500):
"""
Contour smoothing by curvature minimization
Parameters
----------
energy: array [height, width]
External potential.
initial: array [height, width]
        Contour to smooth in the form of a level set. 1 for the contour
inside; 0 for outside.
theta: float, optional, default: -np.inf
        External potential threshold for balloon force
nu: float, optional, default: 0
        Balloon force coefficient
mu: integer, optional, default: 1
Number of iterations of curvature force operators to apply
max_iter: integer, optional, default: 500
Maximum number of iterations to run.
Returns
-------
array [height, width]
        Smoothed contour in the form of a level set. 1 for the contour
inside; 0 for outside.
"""
edkernel = np.ones((3, 3), dtype=np.uint8)
balloon_force = (lambda ls: cv2.dilate(ls, edkernel))
if nu < 0 :
balloon_force = (lambda ls: cv2.erode(ls, edkernel))
    #Morphological kernel for curvature force
curv_kernels = np.zeros((4, 3, 3), dtype=np.uint8)
curv_kernels[0, 1] = 1
curv_kernels[2, :, 1] = 1
curv_kernels[1] = np.eye(3)
curv_kernels[3] = curv_kernels[1,::-1]
def curvature_force(ls, mu, backward=0):
si_ = lambda fn: np.max([cv2.erode (fn, kernel=k, anchor=(1, 1)) for k in curv_kernels], axis=0)
is_ = lambda fn: np.min([cv2.dilate(fn, kernel=k, anchor=(1, 1)) for k in curv_kernels], axis=0)
siis = lambda fn: si_(is_(fn))
issi = lambda fn: is_(si_(fn))
for i in range(mu):
if i % 2 == 0:
ls = siis(ls)
else:
ls = issi(ls)
return ls
#external potential gradient
dE = np.transpose(np.gradient(energy, edge_order=2), axes=(1,2,0))
u = initial.copy()
u[u > 0] = 1
nu_ = np.ones_like(u)*nu
nu_[(energy < theta)] = 0
ma = []
for i in range(max_iter):
u_ = u.copy()
#balloon force
u_[nu_ > 0] = cv2.dilate(u_, edkernel)[nu_ > 0]
u_[nu_ < 0] = cv2.erode (u_, edkernel)[nu_ < 0]
#external force
du_ = np.transpose(np.gradient(u_, edge_order=2), axes=(1,2,0))
f = np.sum(du_*dE, axis=-1)
u_[f > 0] = 1
u_[f < 0] = 0
u_ = curvature_force(u_, mu, backward=i%2)
        #convergence condition: relative change of the contour area over
#last 5 iterations <= 1.e-5
if len(ma) > 5:
msk_1 = np.mean(ma, axis=0)
ma = ma[1:] + [u_]
pix = np.sum(np.abs(np.mean(ma, axis=0) - msk_1))
if pix < 1 or round(np.log10(pix/np.sum(msk_1))) <= -5:
break
else:
ma += [u_]
u = u_
return np.uint8(np.mean(ma, axis=0) > 0.5)
def extract_data(small, num_layers=1):
"""
Extract intensity and texture data
Parameters
----------
small: array [height, width, 3]
The image the data to be extracted from.
num_layers: integer, optional, default: 1
The number of Gaussian pyramid layers to use.
Returns
-------
X: array(2)
The data extracted
shape: tuple(2)
        Dimensions of the next Gaussian pyramid layer, i.e. the first one that was
not used in data extraction.
"""
X = np.array([]).reshape(small.shape[0]*small.shape[1], -1)
small2 = np.copy(small)
for i in range(num_layers):
#extract 2D data
d = np.copy(small2[:, :, :])
dx = cv2.Scharr(d,cv2.CV_64F,1,0)
dy = cv2.Scharr(d,cv2.CV_64F,0,1)
#enlarge 2D data to match original size
for j in range(i):
d = cv2.pyrUp(d)
dx = cv2.pyrUp(dx)
dy = cv2.pyrUp(dy)
#Reshape and combine data
X = np.hstack([X,
d[:small.shape[0], :small.shape[1], :].reshape((-1, 3)),
dx[:small.shape[0], :small.shape[1], :].reshape((-1, 3)),
dy[:small.shape[0], :small.shape[1], :].reshape((-1, 3))])
small2 = cv2.pyrDown(small2)
return X, small2.shape
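# --- Illustrative sketch (not part of the original module): extract_data on a
# small random RGB-like image. With num_layers=2 each pixel receives
# 3 channels x 3 maps (intensity, Scharr dx, Scharr dy) x 2 layers = 18 features.
def _demo_extract_data():
    img = np.float64(np.random.rand(32, 48, 3) * 255)
    X, next_shape = extract_data(img, num_layers=2)
    return X.shape, next_shape  # ((32*48, 18), shape of the first unused layer)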
def find_embryo(img, max_comp=2, layer_size=200, num_layers=2, rel_threshold=.1, abs_threshold=25, smooth=True, verbose=False):
"""
Locate an embryo in the image
Parameters
----------
img: array [height, width, 3]
The RGB image to be searched. Range of values 0 - 255
max_comp: integer, optional, default: 2
The maximum number components to segment the image into.
layer_size: integer, optional, default: 200
        The upper limit on the largest dimension of the largest layer
of Gaussian pyramid to use in segmentation
num_layers: integer, optional, default: 2
        Number of Gaussian pyramid layers to use in segmentation
rel_threshold: float, optional, default: .1
        Threshold for filtering disconnected foreground islands. All
islands of size < rel_threshold*size_of_largest_island will
be reassigned to the background
abs_threshold: integer, optional, default: 25
        Threshold for filtering disconnected foreground islands. All
islands of size < abs_threshold will be reassigned to the
background
smooth: Bool, optional, default: True
Defines whether to smooth the embryo contour.
verbose: Bool/str, optional, default: False
Verbosity level:
- False: silent run
- True: report textual information
- Path prefix: report textual information and report graphical
information in verbose+<suffix>.jpg files
Returns
-------
None: if nothing is found
Otherwise:
    mask: array[height, width]
Mask defining the position of the embryo.
touches: Bool
True if the embryo touches the image edge
rect: tuple(4)
Extended by 15% bounding box around the embryo (left, right,
top, bottom)
scaling: float
Subsampling ratio used.
"""
lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
small = np.float64(lab) + .25*np.random.randn(*lab.shape)
    #Compensating for linear gradients in the picture
n_y, n_x = small.shape[:2]
pos = np.vstack([np.arange(n_x*n_y)%n_x, np.arange(n_x*n_y)/n_x]).T
lm = linear_model.LinearRegression().fit(pos, small.reshape(-1, 3))
small += np.mean(small.reshape(-1, 3), axis=0).reshape(1, 1, 3) - lm.predict(pos).reshape(small.shape)
small = np.maximum(0, np.minimum(255, small))
pyramid = [small]
while np.max(small.shape[:-1]) > layer_size:
small = cv2.pyrDown(small)
pyramid += [small.copy()]
scaling = int(np.round(np.max(np.asarray(lab.shape, dtype=float)/np.asarray(small.shape, dtype=float))))
em = 3
X, small2_shape = extract_data(small, num_layers)
Xnb = X.reshape(small.shape[:2]+(X.shape[-1],))[em:-em, em:-em].reshape(-1, X.shape[-1])
max_scaling = int(np.round(np.max(np.asarray(lab.shape, dtype=float)/np.asarray(small2_shape, dtype=float))))
# Whitening and PCA
X = X.T
Xnb = Xnb.T
n, p = Xnb.shape
X_mean = Xnb.mean(axis=-1)
X -= X_mean[:, np.newaxis]
Xnb -= X_mean[:, np.newaxis]
u, d, _ = la.svd(Xnb, full_matrices=False)
del _
n_components = len(d[d/np.sum(d) > 1.e-6])
K = (u / d).T[:n_components]
del u, d
Xnb = np.dot(K, Xnb)
Xnb *= np.sqrt(p)
X = np.dot(K, X)
X *= np.sqrt(p)
X = X.T
Xnb = Xnb.T
X_ = Xnb.copy()
np.random.shuffle(X_)
sample_size = max(X.shape[0], 10*(max_comp + max_comp*n_components + max_comp*n_components*(n_components + 1)/2))
best_model = (None, -1, -np.inf, None, None, -1)
for it in range(3):
#clustering pixels
np.random.shuffle(X_)
models = [mixture.GMM(i+1, covariance_type='full', n_iter=500, n_init=1).fit(X_[:sample_size]) for i in range(max_comp)]
if not any([m.converged_ for m in models]):
if verbose:
print "GMM did not converge"
return
        # use +inf for models that did not converge so the indices stay
        # aligned with `models` when taking the argmin below
        bics = [m.bic(X_[:sample_size]) if m.converged_ else np.inf for m in models]
        best = models[np.argmin(bics)]
HScomps = best.get_params()['n_components']
weights = best.weights_
meansHS = best.means_
HSlabels = best.predict(X)
if HScomps == 1:
if best_model[2] == -np.inf:
if best_model[-1] == -1:
best_model = (best, 0, -np.inf, None, None, it)
elif it - best_model[-1] > 5:
break
continue
#predicting embryo component
n_y, n_x = small.shape[:2]
pos = np.vstack([np.arange(n_x*n_y)%n_x, np.arange(n_x*n_y)/n_x]).T
lps2 = []
psi = np.diag([n_x/2., n_y/2.])**2
nu = 1.
for l in range(HScomps):
obj_x = pos[HSlabels != l] - np.array([n_x/2., n_y/2.])
n = len(obj_x)
A = obj_x.T.dot(obj_x)
lx = nu/2*np.log(la.det(psi)) + sp.special.multigammaln((nu + n)/2, 2) - (nu + n)/2*np.log(la.det(psi + A)) - n*np.log(np.pi)
lx += -len(pos[HSlabels == l])*np.log(n_x*n_y)
lps2 += [lx]
bg = np.argmax(lps2)
se, fi = sorted(lps2)[-2:]
if verbose:
print "fi", fi
if best_model[2] < fi:
best_model = (best, bg, fi, se, lps2, it)
best, bg, fi, se, lps2, _ = best_model
if verbose:
print "Iterated ", it
HScomps = best.get_params()['n_components']
weights = best.weights_
meansHS = best.means_
HSlabels = best.predict(X)
proba = best.predict_proba(X)
HSlabels_ = HSlabels.copy()
del X_
del X
if type(verbose) == str:
fig = plt.figure()
ax = fig.gca()
present = np.zeros(small.shape, dtype=float)
present[HSlabels_.reshape(small.shape[:-1]) == 0] = np.array([0, 0xdc, 0xdc])
present[HSlabels_.reshape(small.shape[:-1]) == 1] = np.array([0xdc, 0xdc, 0])
present[HSlabels_.reshape(small.shape[:-1]) == 2] = np.array([0xdc, 0, 0xdc])
present /= 255
ax.imshow(present)
ax.get_yaxis().set_ticks([])#(direction='out')
ax.get_xaxis().set_ticks([])#(direction='out')
ver_name = verbose+'.labels.jpg'
fig.savefig(ver_name)
print "saving", ver_name
if HScomps == 1:
if verbose:
print "No objects found"
return
conf = (fi - se)/fi
if abs(conf) < 1.e-3:
if verbose:
print "Mode used"
borders = np.ones(small.shape[:2])
borders[1:-1,1:-1] = 0
bg = int(st.mode(HSlabels.reshape(small.shape[:2])[borders == 1])[0][0])
if type(verbose) == str:
fig = plt.figure()
ax = fig.gca()
present = np.ones(small.shape[:-1] + (3,), dtype=float)*0xcd
present[HSlabels_.reshape(small.shape[:-1]) != bg] = np.array([0, 0xdc, 0])
present /= 255
ax.imshow(present)
ax.get_yaxis().set_ticks([])#(direction='out')
ax.get_xaxis().set_ticks([])#(direction='out')
ver_name = verbose+'.object_component.jpg'
fig.savefig(ver_name)
print "saving", ver_name
bg_colour = np.mean(small[HSlabels.reshape(small.shape[:-1]) == bg], axis=0)
save = HSlabels.copy()
proba = np.hstack([
proba[:, np.arange(proba.shape[-1]) != bg].sum(axis=-1)[:, np.newaxis]
, proba[:, bg][:, np.newaxis]
])
proba[proba[:, 0] == 0, 0] = proba[proba[:, 0] > 0, 0].min()
proba[proba[:, 1] == 0, 1] = proba[proba[:, 1] > 0, 1].min()
logps = np.log(proba)
for touch_check in range(2):
dst = np.float32((HSlabels != bg).reshape(small.shape[:-1]))
edge_margins = em*touch_check
if edge_margins > 0:
dst = dst[edge_margins : -edge_margins, edge_margins : -edge_margins]
#making connectivity graph of non-background pixels
conn = spr.csc_matrix(grid_to_graph(*dst.shape), dtype=float)
conn.data *= dst.ravel()[conn.indices]
conn = conn.multiply(conn.T)
#removing disconnected background vertices
colsum = conn.sum(axis=1).A1
colsum[colsum > 0] = 1
colsum[colsum > 0] = np.cumsum(colsum[colsum > 0])
back_map = np.array([[i/dst.shape[1], i%dst.shape[1]] for i, x in enumerate(colsum) if x > 0], dtype=int)
conn = conn[:, colsum > 0].tocsr()[colsum > 0, :]
#making Laplacian matrix and getting subgraphs
L = spr.dia_matrix((conn.sum(axis=1).reshape(-1), [0]), conn.shape).tocsr() - conn
subgraphs = get_subgraphs(L, th=0.0001)
del conn
del L
subgraphs = [s for s in subgraphs if len(s) > 0]
if len(subgraphs) == 0:
return
#filtering noisy subgraphs
largest = float(max([len(s) for s in subgraphs]))
for s in subgraphs:
if len(s)/largest <= rel_threshold or len(s) <= abs_threshold:
loc = back_map[s]
dst[loc[:, 0], loc[:, 1]] = 0
subgraphs = [s for s in subgraphs if len(s)/largest > rel_threshold and len(s) > abs_threshold]
if len(subgraphs) == 0:
if verbose :
print ("Objects are too small")
return
lake_prob = []
diff = []
for s in subgraphs:
loc = back_map[s]
diff += [la.norm(np.mean(small[loc[:, 0], loc[:, 1]].reshape(-1, 3), axis=0) - bg_colour, 2)/255]
lake_prob += [-logps[:, 1].reshape(small.shape[:2])[loc[:, 0], loc[:, 1]].mean()]
diff = np.array(diff)
slens = np.array([len(s) for s in subgraphs], dtype=float)
lake_prob /= np.sum(lake_prob)
obj_subgraph = np.argmax(np.sqrt(slens)*diff)
for i, s in enumerate(subgraphs):
if i != obj_subgraph:
loc = back_map[s]
dst[loc[:, 0], loc[:, 1]] = 0
l, r, t, b = bounding_box(dst*255, 0.01, verbose=False)
touches = (l == 0 or r == dst.shape[1] or t == 0 or b == dst.shape[0])
if not touches:
break
if edge_margins > 0:
if touches:
temp = np.float32((HSlabels != bg).reshape(small.shape[:-1]))
else:
temp = np.zeros(np.array(dst.shape) + 2*edge_margins)
temp[edge_margins:-edge_margins, edge_margins:-edge_margins] = dst
dst = temp
contour_base = np.uint8(dst*255).copy()
_, contour, _ = cv2.findContours(np.uint8(dst*255), mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
if len(contour) > 0:
dst = cv2.fillPoly(np.zeros_like(dst), contour, 1)
else:
return
if type(verbose) == str:
fig = plt.figure()
ax = fig.gca()
present = np.ones(small.shape[:-1] + (3,), dtype=float)*0xcd
present[HSlabels_.reshape(small.shape[:-1]) != bg] = np.array([0, 0xdc, 0])
present[(HSlabels_.reshape(small.shape[:-1]) != bg) & (dst == 0)] = np.array([0xdc, 0, 0])
present /= 255
ax.imshow(present)
ax.get_yaxis().set_ticks([])#(direction='out')
ax.get_xaxis().set_ticks([])#(direction='out')
ver_name = verbose+'.object.jpg'
fig.savefig(ver_name)
print "saving", ver_name
fig = plt.figure()
ax = fig.gca()
ax.imshow(cv2.cvtColor(np.uint8(np.round(small)), cv2.COLOR_LAB2RGB))
lims = ax.get_xlim(), ax.get_ylim()
_, contour, _ = cv2.findContours(np.uint8(dst*255), mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
contour = [contour[np.argmax([cv2.contourArea(c) for c in contour])]]
contour_simple = np.array(contour, dtype=int).reshape(-1, 2)
contour_simple = np.vstack([contour_simple, contour_simple[0, :]])
ax.plot(contour_simple[:, 0], contour_simple[:, 1], 'r-')
ax.set_xlim(lims[0]), ax.set_ylim(lims[1])
ax.get_yaxis().set_ticks([])#(direction='out')
ax.get_xaxis().set_ticks([])#(direction='out')
ver_name = verbose+'.object_contour.jpg'
fig.savefig(ver_name)
print "saving", ver_name
if not smooth:
ksize = max(3, int(round(max_scaling/2)))
borders = np.ones_like(dst)
borders[ksize:-ksize,ksize:-ksize] = 0
save_border = dst[borders == 1]
morph_kernel = np.ones((ksize, )*2, dtype=np.uint8)
dst = cv2.morphologyEx(dst, cv2.MORPH_CLOSE, morph_kernel)
dst[borders == 1] = save_border
temp = np.zeros_like(dst)
temp[:-1, :-1] = dst[1:, 1:]
dst = temp
else:
borders = np.ones_like(dst)
borders[2:-2,2:-2] = 0
mins = logps.min(axis=0)
logps /= mins[np.newaxis, :]
energy = logps.sum(axis=1).reshape(small.shape[:2])
if touches:
dst_ = dst.copy()
prev_size = 0
for round_ in range(100):
noise = np.zeros_like(dst[borders == 1])
noise[:int(noise.shape[0]*.25)] = 1
np.random.shuffle(noise)
noise_ = np.zeros_like(dst)
noise_[borders == 1] = noise
dst = np.maximum(dst, noise_)
ksize = 3
iters = 1
_, contour, _ = cv2.findContours(np.uint8(dst*255), mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
sizes = np.array([cv2.contourArea(c) for c in contour])
contour = list(np.array(contour)[sizes > 1]) #(round_ + 1)**1]
dst = cv2.fillPoly(np.zeros_like(dst), contour, 1)
if round_ > 0 and sizes.max()/float(prev_size) > 1.05:
break
prev_size = sizes.max()
dst = np.maximum(dst_, dst)
initial = dst
dst = smooth_contour(energy, initial, theta=np.percentile(energy.ravel(), 50), mu=8)
#scale the object back
dst *= 255
pyrpos = -1
while min(dst.shape) < min(img.shape[:-1]):
dst = cv2.pyrUp(dst)
exc = 0
dst = dst[: img.shape[0], : img.shape[1]]
dst = cv2.erode(np.uint8(dst >= 255) , np.ones((int(round(1*max_scaling + 0*scaling)),)*2, dtype=np.uint8))
_, contour_sm, _ = cv2.findContours(np.uint8(dst), mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
if len(contour_sm) > 0:
contour_sm = [contour_sm[np.argmax([cv2.contourArea(c) for c in contour_sm])]]
dst = cv2.fillPoly(np.zeros_like(dst), contour_sm, 255)
else:
return
if type(verbose) == str:
fig = plt.figure()
ax = fig.gca()
ax.imshow(img)
lims = ax.get_xlim(), ax.get_ylim()
_, contour2, _ = cv2.findContours(np.uint8(dst >= 255), mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
if len(contour2) > 0:
contour2 = [contour2[np.argmax([cv2.contourArea(c) for c in contour2])]]
contour_simple2 = np.array(contour2).reshape(-1, 2)
contour_simple2 = np.vstack([contour_simple2, contour_simple2[0, :]])
ax.plot(contour_simple2[:, 0], contour_simple2[:, 1], 'g-')
ax.set_xlim(lims[0]), ax.set_ylim(lims[1])
ax.get_yaxis().set_ticks([])#(direction='out')
ax.get_xaxis().set_ticks([])#(direction='out')
ver_name = verbose+'.smoothed.jpg'
fig.savefig(ver_name)
print "saving", ver_name
#getting bounding rect
l, r, t, b = bounding_box(dst, 0.01, verbose=False)
margins = np.max([.15*(b-t), .15*(r-l)])
rect = np.array([t, b, l, r], dtype=float) + np.vstack([[-margins]*2, [margins]*2]).T.reshape(-1)
rect[0::2] = np.maximum(rect[0::2], [0, 0])
rect[1::2] = np.minimum(rect[1::2], img.shape[:-1])
t, b, l, r = np.int32(np.round(rect))
if type(verbose) == str:
fig = plt.figure()
ax = fig.gca()
ax.imshow(img)
lims = ax.get_xlim(), ax.get_ylim()
plt.plot([l, l, r, r, l], [b, t, t, b, b], 'b')
ax.set_xlim(lims[0]), ax.set_ylim(lims[1])
ax.get_yaxis().set_ticks([])#(direction='out')
ax.get_xaxis().set_ticks([])#(direction='out')
ver_name = verbose+'.box.jpg'
fig.savefig(ver_name)
print "saving", ver_name
if verbose :
print 'lps', lps2, bg
print 'touches', touches
return np.uint8(dst >= 255), touches, (l, r, t, b), scaling
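# --- Illustrative usage sketch (not part of the original module); the image
# path below is hypothetical.
#
# img = cv2.cvtColor(cv2.imread('embryo.jpg'), cv2.COLOR_BGR2RGB)
# result = find_embryo(img, verbose=True)
# if result is not None:
#     mask, touches, (l, r, t, b), scaling = result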
|
ilyapatrushev/isimage
|
isimage/find_embryo.py
|
Python
|
gpl-2.0
| 25,100
|
[
"Gaussian"
] |
1129d723cc27680ac29263fbe54d5315b5c852c52ec8e4e1434dad545d23424b
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cow mask generation."""
import math
import jax
import jax.numpy as jnp
_ROOT_2 = math.sqrt(2.0)
_ROOT_2_PI = math.sqrt(2.0 * math.pi)
def gaussian_kernels(sigmas, max_sigma):
"""Make Gaussian kernels for Gaussian blur.
Args:
sigmas: kernel sigmas as a [N] jax.numpy array
max_sigma: sigma upper limit as a float (this is used to determine
the size of kernel required to fit all kernels)
Returns:
a (N, kernel_width) jax.numpy array
"""
sigmas = sigmas[:, None]
size = round(max_sigma * 3) * 2 + 1
x = jnp.arange(-size, size + 1)[None, :].astype(jnp.float32)
y = jnp.exp(-0.5 * x ** 2 / sigmas ** 2)
return y / (sigmas * _ROOT_2_PI)
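# --- Illustrative sketch (not part of the original module): each kernel row
# returned above integrates to approximately one, so the blur preserves the
# mean of the noise; the sigma values here are arbitrary.
def _demo_gaussian_kernels():
  sigmas = jnp.array([4.0, 8.0])
  kernels = gaussian_kernels(sigmas, max_sigma=8.0)
  return kernels.shape, kernels.sum(axis=1)  # each row sums to ~1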
def cow_masks(n_masks, mask_size, log_sigma_range, max_sigma,
prop_range, rng_key):
"""Generate Cow Mask.
Args:
n_masks: number of masks to generate as an int
mask_size: image size as a `(height, width)` tuple
log_sigma_range: the range of the sigma (smoothing kernel)
parameter in log-space`(log(sigma_min), log(sigma_max))`
max_sigma: smoothing sigma upper limit
prop_range: range from which to draw the proportion `p` that
controls the proportion of pixel in a mask that are 1 vs 0
rng_key: a `jax.random.PRNGKey`
Returns:
    Cow Masks as a [n_masks, height, width, 1] jax.numpy array
"""
rng_k1, rng_k2 = jax.random.split(rng_key)
rng_k2, rng_k3 = jax.random.split(rng_k2)
# Draw the per-mask proportion p
p = jax.random.uniform(
rng_k1, (n_masks,), minval=prop_range[0], maxval=prop_range[1],
dtype=jnp.float32)
# Compute threshold factors
threshold_factors = jax.scipy.special.erfinv(2 * p - 1) * _ROOT_2
sigmas = jnp.exp(jax.random.uniform(
rng_k2, (n_masks,), minval=log_sigma_range[0],
maxval=log_sigma_range[1]))
  # Create initial noise with the batch and channel axes swapped so that a
  # depthwise convolution (feature_group_count=n_masks below) can convolve it
  # with the per-mask Gaussian kernels
noise = jax.random.normal(rng_k3, (1,) + mask_size + (n_masks,))
# Generate a kernel for each sigma
kernels = gaussian_kernels(sigmas, max_sigma)
# kernels: [batch, width] -> [width, batch]
kernels = kernels.transpose((1, 0))
# kernels in y and x
krn_y = kernels[:, None, None, :]
krn_x = kernels[None, :, None, :]
# Apply kernels in y and x separately
smooth_noise = jax.lax.conv_general_dilated(
noise, krn_y, (1, 1), 'SAME',
dimension_numbers=('NHWC', 'HWIO', 'NHWC'), feature_group_count=n_masks)
smooth_noise = jax.lax.conv_general_dilated(
smooth_noise, krn_x, (1, 1), 'SAME',
dimension_numbers=('NHWC', 'HWIO', 'NHWC'), feature_group_count=n_masks)
# [1, height, width, batch] -> [batch, height, width, 1]
smooth_noise = smooth_noise.transpose((3, 1, 2, 0))
# Compute mean and std-dev
noise_mu = smooth_noise.mean(axis=(1, 2, 3), keepdims=True)
noise_sigma = smooth_noise.std(axis=(1, 2, 3), keepdims=True)
# Compute thresholds
thresholds = threshold_factors[:, None, None, None] * noise_sigma + noise_mu
# Apply threshold
masks = (smooth_noise <= thresholds).astype(jnp.float32)
return masks
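# --- Illustrative usage sketch (not part of the original module): the mask
# size, sigma range and proportion range below are arbitrary assumptions,
# not defaults taken from this module or the paper.
if __name__ == '__main__':
  key = jax.random.PRNGKey(0)
  masks = cow_masks(n_masks=4, mask_size=(64, 64),
                    log_sigma_range=(math.log(4.0), math.log(16.0)),
                    max_sigma=16.0, prop_range=(0.25, 0.75), rng_key=key)
  print('mask batch shape:', masks.shape)  # (4, 64, 64, 1)
  print('mean mask proportion:', float(masks.mean()))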
|
google-research/google-research
|
milking_cowmask/masking/cow_mask.py
|
Python
|
apache-2.0
| 3,750
|
[
"Gaussian"
] |
448bc9465e85f406adb8cab27f517654b9daf405bd68fc9c8c00e034daa83d71
|
# -*- coding: utf-8 -*-
#
# testiaf.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""IAF Neuron example
------------------
A DC current is injected into the neuron using a current generator
device. The membrane potential as well as the spiking activity are
recorded by corresponding devices.
It can be observed how the current charges the membrane, a spike
is emitted, the neuron becomes absolute refractory, and finally
starts to recover.
References
~~~~~~~~~~~~
See Also
~~~~~~~~~~
:Authors:
KEYWORDS:
"""
###############################################################################
# First, we import all necessary modules for simulation and plotting
import nest
import pylab
###############################################################################
# Second the Function build_network is defined to build the network and
# return the handles of the spike detector and the voltmeter
def build_network(dt):
nest.ResetKernel()
nest.SetKernelStatus({"local_num_threads": 1, "resolution": dt})
neuron = nest.Create('iaf_psc_alpha')
nest.SetStatus(neuron, "I_e", 376.0)
vm = nest.Create('voltmeter')
nest.SetStatus(vm, "withtime", True)
sd = nest.Create('spike_detector')
nest.Connect(vm, neuron)
nest.Connect(neuron, sd)
return vm, sd
###############################################################################
# The function build_network takes the resolution as argument.
# First the kernel is reset, the number of local threads is set to one, and
# the resolution is set to the specified value dt. The iaf_psc_alpha neuron is
# created and its handle is stored in the variable neuron. The status of the
# neuron is changed so that it receives an external current. Next the voltmeter
# is created and its handle stored in vm; the option 'withtime' is set, so
# recording times are given in the times vector of the events dictionary. Now
# the spike_detector is created and its handle is stored in sd.
#
# The voltmeter and the spike detector are then connected to the neuron. The
# Connect function takes the handles as input. The voltmeter is connected to
# the neuron and the neuron to the spike detector, because the neuron sends
# spikes to the detector and the voltmeter 'observes' the neuron.
###############################################################################
# The neuron is simulated for three different resolutions and then the
# voltage trace is plotted
for dt in [0.1, 0.5, 1.0]:
print("Running simulation with dt=%.2f" % dt)
vm, sd = build_network(dt)
###########################################################################
# First using build_network the network is build and the handles of the
# spike detector and the voltmeter are stored in vm and sd
nest.Simulate(1000.0)
###########################################################################
# The network is simulated using `Simulate`, which takes the desired
# simulation time in milliseconds and advances the network state by this
# amount of time. During simulation, the `spike_detector` counts the
# spikes of the target neuron and the total number is read out at the
# end of the simulation period.
potentials = nest.GetStatus(vm, "events")[0]["V_m"]
times = nest.GetStatus(vm, "events")[0]["times"]
###########################################################################
# The values of the voltage recorded by the voltmeter are read out and
# the values for the membrane potential are stored in potential and the
# corresponding times in the times array
pylab.plot(times, potentials, label="dt=%.2f" % dt)
print(" Number of spikes: {0}".format(nest.GetStatus(sd, "n_events")[0]))
###########################################################################
# Using the pylab library the voltage trace is plotted over time
pylab.legend(loc=3)
pylab.xlabel("time (ms)")
pylab.ylabel("V_m (mV)")
###########################################################################
# Finally the axes are labelled and a legend is generated
|
terhorstd/nest-simulator
|
pynest/examples/testiaf.py
|
Python
|
gpl-2.0
| 4,728
|
[
"NEURON"
] |
a5899ac5181be4205aff41f7ccb2e3933ccf6e503f4ca1ef3bf9965b4c9e4d81
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import tempfile
from string import ascii_letters, digits
from ansible.errors import AnsibleOptionsError
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_text
from ansible.parsing.quoting import unquote
from ansible.utils.path import makedirs_safe
BOOL_TRUE = frozenset(["true", "t", "y", "1", "yes", "on"])
def mk_boolean(value):
ret = value
if not isinstance(value, bool):
if value is None:
ret = False
ret = (str(value).lower() in BOOL_TRUE)
return ret
def shell_expand(path, expand_relative_paths=False):
'''
shell_expand is needed as os.path.expanduser does not work
when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE
'''
if path:
path = os.path.expanduser(os.path.expandvars(path))
if expand_relative_paths and not path.startswith('/'):
# paths are always 'relative' to the config?
if 'CONFIG_FILE' in globals():
CFGDIR = os.path.dirname(CONFIG_FILE)
path = os.path.join(CFGDIR, path)
path = os.path.abspath(path)
return path
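# --- Illustrative sketch (not part of Ansible): shell_expand() expands '~' and
# environment variables; the path below is made up for demonstration.
def _example_shell_expand():
    return shell_expand('~/.ansible/example.cfg')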
def get_config(p, section, key, env_var, default, value_type=None, expand_relative_paths=False):
''' return a configuration variable with casting
:arg p: A ConfigParser object to look for the configuration in
    :arg section: The section of the ini config that should be examined for this key.
:arg key: The config key to get this config from
:arg env_var: An Environment variable to check for the config var. If
this is set to None then no environment variable will be used.
:arg default: A default value to assign to the config var if nothing else sets it.
:kwarg value_type: The type of the value. This can be any of the following strings:
:boolean: sets the value to a True or False value
        :integer: Sets the value to an integer or raises a ValueError
        :float: Sets the value to a float or raises a ValueError
:list: Treats the value as a comma separated list. Split the value
and return it as a python list.
:none: Sets the value to None
        :path: Expands any environment variables and tildes in the value.
:tmp_path: Create a unique temporary directory inside of the directory
specified by value and return its path.
:pathlist: Treat the value as a typical PATH string. (On POSIX, this
means colon separated strings.) Split the value and then expand
each part for environment variables and tildes.
:kwarg expand_relative_paths: for pathlist and path types, if this is set
to True then also change any relative paths into absolute paths. The
default is False.
'''
value = _get_config(p, section, key, env_var, default)
if value_type == 'boolean':
value = mk_boolean(value)
elif value:
if value_type == 'integer':
value = int(value)
elif value_type == 'float':
value = float(value)
elif value_type == 'list':
if isinstance(value, string_types):
value = [x.strip() for x in value.split(',')]
elif value_type == 'none':
if value == "None":
value = None
elif value_type == 'path':
value = shell_expand(value, expand_relative_paths=expand_relative_paths)
elif value_type == 'tmppath':
value = shell_expand(value)
if not os.path.exists(value):
makedirs_safe(value, 0o700)
prefix = 'ansible-local-%s' % os.getpid()
value = tempfile.mkdtemp(prefix=prefix, dir=value)
elif value_type == 'pathlist':
if isinstance(value, string_types):
value = [shell_expand(x, expand_relative_paths=expand_relative_paths) for x in value.split(os.pathsep)]
elif isinstance(value, string_types):
value = unquote(value)
return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
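# --- Illustrative sketch (not part of Ansible): how a hypothetical option
# would be resolved through get_config(); the section, key, env var and
# default below are made up for demonstration.
def _example_get_config(parser):
    return get_config(parser, 'defaults', 'example_timeout',
                      'ANSIBLE_EXAMPLE_TIMEOUT', 30, value_type='integer')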
def _get_config(p, section, key, env_var, default):
''' helper function for get_config '''
value = default
if p is not None:
try:
value = p.get(section, key, raw=True)
except:
pass
if env_var is not None:
env_value = os.environ.get(env_var, None)
if env_value is not None:
value = env_value
return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
def load_config_file():
''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
p = configparser.ConfigParser()
path0 = os.getenv("ANSIBLE_CONFIG", None)
if path0 is not None:
path0 = os.path.expanduser(path0)
if os.path.isdir(path0):
path0 += "/ansible.cfg"
try:
path1 = os.getcwd() + "/ansible.cfg"
except OSError:
path1 = None
path2 = os.path.expanduser("~/.ansible.cfg")
path3 = "/etc/ansible/ansible.cfg"
for path in [path0, path1, path2, path3]:
if path is not None and os.path.exists(path):
try:
p.read(path)
except configparser.Error as e:
raise AnsibleOptionsError("Error reading config file: \n{0}".format(e))
return p, path
return None, ''
p, CONFIG_FILE = load_config_file()
# non configurable but used as defaults
BLACKLIST_EXTS = ('.pyc', '.pyo', '.swp', '.bak', '~', '.rpm', '.md', '.txt')
# the default whitelist for cow stencils
DEFAULT_COW_WHITELIST = ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant',
'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep',
'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder',
'vader-koala', 'vader', 'www']
# sections in config file
DEFAULTS = 'defaults'
# DEPRECATED VARS # FIXME: add deprecation warning when these get set
# none left now
# DEPRECATED FEATURE TOGGLES: these will eventually be removed as it becomes the standard
# If --tags or --skip-tags is given multiple times on the CLI and this is True, merge the lists of tags together.
# If False, let the last argument overwrite any previous ones.
# Behaviour is overwrite through 2.2; 2.3 overwrites but prints a deprecation warning; in 2.4 the default is to merge.
MERGE_MULTIPLE_CLI_TAGS = get_config(p, DEFAULTS, 'merge_multiple_cli_tags', 'ANSIBLE_MERGE_MULTIPLE_CLI_TAGS', True, value_type='boolean')
# Controls which 'precedence path' to take, remove when decide on which!
SOURCE_OVER_GROUPS = get_config(p, 'vars', 'source_over_groups', 'ANSIBLE_SOURCE_OVER_GROUPS', True, value_type='boolean')
# GENERALLY CONFIGURABLE THINGS ####
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, value_type='boolean')
DEFAULT_VERBOSITY = get_config(p, DEFAULTS, 'verbosity', 'ANSIBLE_VERBOSITY', 0, value_type='integer')
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH',
'~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles', value_type='pathlist', expand_relative_paths=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '~/.ansible/tmp')
DEFAULT_LOCAL_TMP = get_config(p, DEFAULTS, 'local_tmp', 'ANSIBLE_LOCAL_TEMP', '~/.ansible/tmp', value_type='tmppath')
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_FACT_PATH = get_config(p, DEFAULTS, 'fact_path', 'ANSIBLE_FACT_PATH', None, value_type='path')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, value_type='integer')
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8'))
DEFAULT_MODULE_SET_LOCALE = get_config(p, DEFAULTS, 'module_set_locale', 'ANSIBLE_MODULE_SET_LOCALE', False, value_type='boolean')
DEFAULT_MODULE_COMPRESSION = get_config(p, DEFAULTS, 'module_compression', None, 'ZIP_DEFLATED')
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, value_type='integer')
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, value_type='integer')
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', None)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, value_type='boolean')
DEFAULT_PRIVATE_KEY_FILE = get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None, value_type='path')
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, value_type='integer')
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, value_type='boolean')
DEFAULT_VAULT_PASSWORD_FILE = get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None, value_type='path')
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', 'smart')
DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, value_type='boolean')
DEFAULT_SSH_TRANSFER_METHOD = get_config(p, 'ssh_connection', 'transfer_method', 'ANSIBLE_SSH_TRANSFER_METHOD', None)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, value_type='boolean')
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, value_type='boolean')
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_GATHER_SUBSET = get_config(p, DEFAULTS, 'gather_subset', 'ANSIBLE_GATHER_SUBSET', 'all').lower()
DEFAULT_GATHER_TIMEOUT = get_config(p, DEFAULTS, 'gather_timeout', 'ANSIBLE_GATHER_TIMEOUT', 10, value_type='integer')
DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', value_type='path')
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, value_type='boolean')
DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, value_type='integer')
DEFAULT_INTERNAL_POLL_INTERVAL = get_config(p, DEFAULTS, 'internal_poll_interval', None, 0.001, value_type='float')
DEFAULT_ALLOW_UNSAFE_LOOKUPS = get_config(p, DEFAULTS, 'allow_unsafe_lookups', None, False, value_type='boolean')
ERROR_ON_MISSING_HANDLER = get_config(p, DEFAULTS, 'error_on_missing_handler', 'ANSIBLE_ERROR_ON_MISSING_HANDLER', True, value_type='boolean')
SHOW_CUSTOM_STATS = get_config(p, DEFAULTS, 'show_custom_stats', 'ANSIBLE_SHOW_CUSTOM_STATS', False, value_type='boolean')
NAMESPACE_FACTS = get_config(p, DEFAULTS, 'restrict_facts_namespace', 'ANSIBLE_RESTRICT_FACTS', False, value_type='boolean')
# Inventory
DEFAULT_HOST_LIST = get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', '/etc/ansible/hosts', value_type='path', expand_relative_paths=True)
INVENTORY_ENABLED = get_config(p, DEFAULTS, 'inventory_enabled', 'ANSIBLE_INVENTORY_ENABLED',
['host_list', 'script', 'yaml', 'ini'], value_type='list')
INVENTORY_IGNORE_EXTS = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE',
BLACKLIST_EXTS + (".orig", ".ini", ".cfg", ".retry"), value_type='list')
INVENTORY_IGNORE_PATTERNS = get_config(p, DEFAULTS, 'inventory_ignore_patterns', 'ANSIBLE_INVENTORY_IGNORE_REGEX', [], value_type='list')
VARIABLE_PRECEDENCE = get_config(p, DEFAULTS, 'precedence', 'ANSIBLE_PRECEDENCE',
['all_inventory', 'groups_inventory', 'all_plugins_inventory', 'all_plugins_play',
'groups_plugins_inventory', 'groups_plugins_play'],
value_type='list')
# Static includes
DEFAULT_TASK_INCLUDES_STATIC = get_config(p, DEFAULTS, 'task_includes_static', 'ANSIBLE_TASK_INCLUDES_STATIC', False, value_type='boolean')
DEFAULT_HANDLER_INCLUDES_STATIC = get_config(p, DEFAULTS, 'handler_includes_static', 'ANSIBLE_HANDLER_INCLUDES_STATIC', False, value_type='boolean')
# Disclosure
DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, value_type='boolean')
DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, value_type='boolean')
ALLOW_WORLD_READABLE_TMPFILES = get_config(p, DEFAULTS, 'allow_world_readable_tmpfiles', None, False, value_type='boolean')
# Selinux
DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs, 9p', value_type='list')
DEFAULT_LIBVIRT_LXC_NOSECLABEL = get_config(p, 'selinux', 'libvirt_lxc_noseclabel', 'LIBVIRT_LXC_NOSECLABEL', False, value_type='boolean')
# PRIVILEGE ESCALATION
# Backwards Compat
DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, value_type='boolean')
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', None)
DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', None)
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, value_type='boolean')
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, value_type='boolean')
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', None)
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H -S -n')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, value_type='boolean')
# Become
BECOME_ERROR_STRINGS = {
'sudo': 'Sorry, try again.',
'su': 'Authentication failure',
'pbrun': '',
'pfexec': '',
'doas': 'Permission denied',
'dzdo': '',
'ksu': 'Password incorrect',
'pmrun': 'You are not permitted to run this command'
} # FIXME: deal with i18n
BECOME_MISSING_STRINGS = {
'sudo': 'sorry, a password is required to run sudo',
'su': '',
'pbrun': '',
'pfexec': '',
'doas': 'Authorization required',
'dzdo': '',
'ksu': 'No password given',
'pmrun': ''
} # FIXME: deal with i18n
BECOME_METHODS = ['sudo', 'su', 'pbrun', 'pfexec', 'doas', 'dzdo', 'ksu', 'runas', 'pmrun']
BECOME_ALLOW_SAME_USER = get_config(p, 'privilege_escalation', 'become_allow_same_user', 'ANSIBLE_BECOME_ALLOW_SAME_USER', False, value_type='boolean')
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD',
'sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo').lower()
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME', False, value_type='boolean')
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, value_type='boolean')
# PLUGINS
# Modules that can optimize with_items loops into a single call. Currently
# these modules must (1) take a "name" or "pkg" parameter that is a list. If
# the module takes both, bad things could happen.
# In the future we should probably generalize this even further
# (mapping of param: squash field)
DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS',
"apk, apt, dnf, homebrew, openbsd_pkg, pacman, pkgng, yum, zypper", value_type='list')
# paths
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS',
'~/.ansible/plugins/action:/usr/share/ansible/plugins/action', value_type='pathlist')
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS',
'~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', value_type='pathlist')
DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS',
'~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', value_type='pathlist')
DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS',
'~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', value_type='pathlist')
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS',
'~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', value_type='pathlist')
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY',
'~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules', value_type='pathlist')
DEFAULT_MODULE_UTILS_PATH = get_config(p, DEFAULTS, 'module_utils', 'ANSIBLE_MODULE_UTILS',
'~/.ansible/plugins/module_utils:/usr/share/ansible/plugins/module_utils', value_type='pathlist')
DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS',
'~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', value_type='pathlist')
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS',
'~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', value_type='pathlist')
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS',
'~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', value_type='pathlist')
DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS',
'~/.ansible/plugins/test:/usr/share/ansible/plugins/test', value_type='pathlist')
DEFAULT_STRATEGY_PLUGIN_PATH = get_config(p, DEFAULTS, 'strategy_plugins', 'ANSIBLE_STRATEGY_PLUGINS',
'~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy', value_type='pathlist')
NETWORK_GROUP_MODULES = get_config(p, DEFAULTS, 'network_group_modules', 'NETWORK_GROUP_MODULES', ['eos', 'nxos', 'ios', 'iosxr', 'junos', 'ce',
'vyos', 'sros', 'dellos9', 'dellos10', 'dellos6'],
value_type='list')
DEFAULT_STRATEGY = get_config(p, DEFAULTS, 'strategy', 'ANSIBLE_STRATEGY', 'linear')
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
# cache
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, value_type='integer')
# Display
ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, value_type='boolean')
ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, value_type='boolean')
ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, value_type='boolean')
ANSIBLE_COW_SELECTION = get_config(p, DEFAULTS, 'cow_selection', 'ANSIBLE_COW_SELECTION', 'default')
ANSIBLE_COW_WHITELIST = get_config(p, DEFAULTS, 'cow_whitelist', 'ANSIBLE_COW_WHITELIST', DEFAULT_COW_WHITELIST, value_type='list')
DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, value_type='boolean')
DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, value_type='boolean')
HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, value_type='boolean')
SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, value_type='boolean')
DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, value_type='boolean')
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], value_type='list')
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', True, value_type='boolean')
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, value_type='boolean')
DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], value_type='list')
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, value_type='boolean')
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', None, value_type='path')
DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, value_type='none')
DISPLAY_ARGS_TO_STDOUT = get_config(p, DEFAULTS, 'display_args_to_stdout', 'ANSIBLE_DISPLAY_ARGS_TO_STDOUT', False, value_type='boolean')
MAX_FILE_SIZE_FOR_DIFF = get_config(p, DEFAULTS, 'max_diff_size', 'ANSIBLE_MAX_DIFF_SIZE', 1024 * 1024, value_type='integer')
# CONNECTION RELATED
USE_PERSISTENT_CONNECTIONS = get_config(p, DEFAULTS, 'use_persistent_connections', 'ANSIBLE_USE_PERSISTENT_CONNECTIONS', False, value_type='boolean')
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', '-C -o ControlMaster=auto -o ControlPersist=60s')
# WARNING: Someone might be tempted to switch this from percent-formatting
# to .format() in the future. be sure to read this:
# http://lucumr.pocoo.org/2016/12/29/careful-with-str-format/ and understand
# that it may be a security risk to do so.
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', None)
ANSIBLE_SSH_CONTROL_PATH_DIR = get_config(p, 'ssh_connection', 'control_path_dir', 'ANSIBLE_SSH_CONTROL_PATH_DIR', u'~/.ansible/cp')
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, value_type='boolean')
ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, value_type='integer')
ANSIBLE_SSH_EXECUTABLE = get_config(p, 'ssh_connection', 'ssh_executable', 'ANSIBLE_SSH_EXECUTABLE', 'ssh')
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, value_type='boolean')
PARAMIKO_HOST_KEY_AUTO_ADD = get_config(p, 'paramiko_connection', 'host_key_auto_add', 'ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD', False, value_type='boolean')
PARAMIKO_PROXY_COMMAND = get_config(p, 'paramiko_connection', 'proxy_command', 'ANSIBLE_PARAMIKO_PROXY_COMMAND', None)
PARAMIKO_LOOK_FOR_KEYS = get_config(p, 'paramiko_connection', 'look_for_keys', 'ANSIBLE_PARAMIKO_LOOK_FOR_KEYS', True, value_type='boolean')
PERSISTENT_CONNECT_TIMEOUT = get_config(p, 'persistent_connection', 'connect_timeout', 'ANSIBLE_PERSISTENT_CONNECT_TIMEOUT', 30, value_type='integer')
PERSISTENT_CONNECT_RETRIES = get_config(p, 'persistent_connection', 'connect_retries', 'ANSIBLE_PERSISTENT_CONNECT_RETRIES', 30, value_type='integer')
PERSISTENT_CONNECT_INTERVAL = get_config(p, 'persistent_connection', 'connect_interval', 'ANSIBLE_PERSISTENT_CONNECT_INTERVAL', 1, value_type='integer')
# obsolete -- will be formally removed
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, value_type='integer')
ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, value_type='integer')
ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, value_type='float')
ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, value_type='integer')
ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, value_type='boolean')
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, value_type='boolean')
# galaxy related
GALAXY_SERVER = get_config(p, 'galaxy', 'server', 'ANSIBLE_GALAXY_SERVER', 'https://galaxy.ansible.com')
GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBLE_GALAXY_IGNORE', False, value_type='boolean')
# this can be configured to blacklist SCMs, but new ones cannot be added unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', value_type='list')
GALAXY_ROLE_SKELETON = get_config(p, 'galaxy', 'role_skeleton', 'ANSIBLE_GALAXY_ROLE_SKELETON', None, value_type='path')
GALAXY_ROLE_SKELETON_IGNORE = get_config(p, 'galaxy', 'role_skeleton_ignore', 'ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE', ['^.git$', '^.*/.git_keep$'],
value_type='list')
STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS',
['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], value_type='list')
# colors
COLOR_HIGHLIGHT = get_config(p, 'colors', 'highlight', 'ANSIBLE_COLOR_HIGHLIGHT', 'white')
COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue')
COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple')
COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red')
COLOR_DEBUG = get_config(p, 'colors', 'debug', 'ANSIBLE_COLOR_DEBUG', 'dark gray')
COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECATE', 'purple')
COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan')
COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red')
COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green')
COLOR_CHANGED = get_config(p, 'colors', 'changed', 'ANSIBLE_COLOR_CHANGED', 'yellow')
COLOR_DIFF_ADD = get_config(p, 'colors', 'diff_add', 'ANSIBLE_COLOR_DIFF_ADD', 'green')
COLOR_DIFF_REMOVE = get_config(p, 'colors', 'diff_remove', 'ANSIBLE_COLOR_DIFF_REMOVE', 'red')
COLOR_DIFF_LINES = get_config(p, 'colors', 'diff_lines', 'ANSIBLE_COLOR_DIFF_LINES', 'cyan')
# diff
DIFF_CONTEXT = get_config(p, 'diff', 'context', 'ANSIBLE_DIFF_CONTEXT', 3, value_type='integer')
DIFF_ALWAYS = get_config(p, 'diff', 'always', 'ANSIBLE_DIFF_ALWAYS', False, value_type='bool')
# non-configurable things
MODULE_REQUIRE_ARGS = ['command', 'win_command', 'shell', 'win_shell', 'raw', 'script']
MODULE_NO_JSON = ['command', 'win_command', 'shell', 'win_shell', 'raw']
DEFAULT_BECOME_PASS = None
DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict') # characters included in auto-generated passwords
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
TREE_DIR = None
LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1'])
# module search
IGNORE_FILES = ["COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES"]
INTERNAL_RESULT_KEYS = ['add_host', 'add_group']
RESTRICTED_RESULT_KEYS = ['ansible_rsync_path', 'ansible_playbook_python']
# check all of these extensions when looking for 'variable' files which should be YAML or JSON.
YAML_FILENAME_EXTENSIONS = [".yml", ".yaml", ".json"]
|
jmehnle/ansible
|
lib/ansible/constants.py
|
Python
|
gpl-3.0
| 30,286
|
[
"Galaxy",
"MOOSE"
] |
bb836d84177ed0f25e694bc7849fd730b4918fc281937faab904fe97134fe594
|
# -*- coding: utf-8 -*-
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2018 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU General Public License version 3 or later.
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import numpy as np
import matplotlib.pyplot as plt  # used by doPlot() and the standalone plotting in main()
import moose
import moose.fixXreacs as fixXreacs
def makeModel():
# create container for model
num = 1 # number of compartments
model = moose.Neutral( '/model' )
compartment = moose.CylMesh( '/model/compartment' )
compartment.x1 = 1.0e-6 # Set it to a 1 micron single-voxel cylinder
# create molecules and reactions
s = moose.Pool( '/model/compartment/s' )
t = moose.Pool( '/model/compartment/t' )
rXfer = moose.Reac( '/model/compartment/rXfer' )
#####################################################################
# Put in endo compartment. Add molecule s
endo = moose.EndoMesh( '/model/endo' )
endo.isMembraneBound = True
endo.surround = compartment
es = moose.Pool( '/model/endo/s' )
et = moose.Pool( '/model/endo/t' )
#####################################################################
moose.connect( rXfer, 'sub', s, 'reac' )
moose.connect( rXfer, 'sub', t, 'reac' )
moose.connect( rXfer, 'prd', es, 'reac' )
moose.connect( rXfer, 'prd', et, 'reac' )
rXfer.Kf = 0.02 # 0.02/mM/sec
rXfer.Kb = 0.02 # 0.02/mM/sec
#####################################################################
fixXreacs.fixXreacs( '/model' )
#fixXreacs.restoreXreacs( '/model' )
#fixXreacs.fixXreacs( '/model' )
#####################################################################
# Make solvers
ksolve = moose.Ksolve( '/model/compartment/ksolve' )
dsolve = moose.Dsolve( '/model/dsolve' )
eksolve = moose.Ksolve( '/model/endo/ksolve' )
edsolve = moose.Dsolve( '/model/endo/dsolve' )
stoich = moose.Stoich( '/model/compartment/stoich' )
stoich.compartment = compartment
stoich.ksolve = ksolve
stoich.dsolve = dsolve
stoich.path = "/model/compartment/##"
assert( dsolve.numPools == 4 )
s.vec.concInit = [1]*num
t.vec.concInit = [1]*num
estoich = moose.Stoich( '/model/endo/stoich' )
estoich.compartment = endo
estoich.ksolve = eksolve
estoich.dsolve = edsolve
estoich.path = "/model/endo/##"
assert( edsolve.numPools == 2 )
edsolve.buildMeshJunctions( dsolve )
plot1 = moose.Table2( '/model/plot1' )
plot2 = moose.Table2( '/model/plot2' )
moose.connect( '/model/plot1', 'requestOut', s, 'getN' )
moose.connect( '/model/plot2', 'requestOut', es, 'getN' )
plot3 = moose.Table2( '/model/plot3' )
plot4 = moose.Table2( '/model/plot4' )
moose.connect( '/model/plot3', 'requestOut', s, 'getConc' )
moose.connect( '/model/plot4', 'requestOut', es, 'getConc' )
def doPlot( ax, plot1, plot2, label ):
plt.ylabel( label )
plt.xlabel( 'time(s)' )
v1 = moose.element(plot1).vector
v2 = moose.element(plot2).vector
ax.plot( v1, label='s' )
ax.plot( v2, label='es' )
ax.plot( np.array( v1 ) + np.array( v2 ), label='sum' )
plt.legend()
def almostEq( a, b ):
#print a, b, (a-b)/(a+b)
return abs(a-b)/(a+b) < 5e-5
def main( standalone = False ):
for i in range( 10, 18):
moose.setClock( i, 0.01 )
runtime = 100
displayInterval = 2
makeModel()
moose.reinit()
moose.start( runtime )
assert( almostEq( moose.element( 'model/compartment/s' ).conc,
moose.element( '/model/endo/s' ).conc ) )
if standalone:
fig = plt.figure( figsize=(12,10) )
ax1 = fig.add_subplot(211)
doPlot( ax1, '/model/plot1', '/model/plot2', '# of molecules' )
ax2 = fig.add_subplot(212)
doPlot( ax2, '/model/plot3', '/model/plot4', 'conc (mM)' )
plt.show()
moose.delete( '/model' )
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main( standalone = True )
|
upibhalla/moose-core
|
tests/python/testXreacs6.py
|
Python
|
gpl-3.0
| 4,205
|
[
"MOOSE"
] |
4dd61812004a5b846a456da88d4fe1f198fc0a1ee8e243ec6afa72bdfb88f007
|
import unittest
from iSDM.environment import ClimateLayer
from iSDM.environment import RasterEnvironmentalLayer
from iSDM.environment import Source
import geopandas as gp
from shapely.geometry import Polygon
from rasterio.transform import Affine
import numpy as np
class TestEnvironment(unittest.TestCase):
def setUp(self):
self.climate_layer = ClimateLayer(file_path="./data/watertemp/max_wt_2000.tif")
self.climate_layer_bad = ClimateLayer()
self.biomes_layer = ClimateLayer(file_path="./data/rebioms/w001001.adf")
self.realms = RasterEnvironmentalLayer(file_path="./data/terrestrial_ecoregions/terrestrial_ecoregions_30arcmin_final.tif", source=Source.WWL)
self.realms_reader = self.realms.load_data()
self.realms_rasters = self.realms_reader.read(1)
# realms_rasters = realms.rasterize(raster_file="./data/terrestrial_ecoregions/realms_raster.tif", pixel_size=0.5, classifier_column="realm")
# realms_rasters[0] = realms_rasters[0] + realms_rasters[2] # combine Europe and Asia
# realms_rasters[0][realms_rasters[0] > 1] = 1
# self.realms_rasters = np.delete(realms_rasters, 2, 0)
def test_RasterEnvironmentalLayer_load_data(self):
with self.assertRaises(AttributeError):
self.climate_layer_bad.load_data()
self.climate_layer.load_data()
self.assertIsNotNone(self.climate_layer.raster_reader)
self.assertEqual(self.climate_layer.resolution, (0.5, 0.5))
self.assertIsInstance(self.climate_layer.raster_affine, Affine)
self.assertIsInstance(self.climate_layer.metadata, dict)
def test_RasterEnvironmentalLayer_pixel_to_world_coordinates(self):
with self.assertRaises(AttributeError):
self.climate_layer_bad.pixel_to_world_coordinates()
self.climate_layer.read(1)
self.climate_layer.load_data()
band = self.climate_layer.read(1)
self.assertEqual(band.shape, (360, 720))
coordinates = self.climate_layer.pixel_to_world_coordinates()
self.assertIsInstance(coordinates, tuple)
self.assertIsInstance(coordinates[0], np.ndarray)
self.assertEqual(len(coordinates[0]), 259200)
self.assertEqual(coordinates[0][0], 89.75)
def test_RasterEnvironmentalLayer_polygonize(self):
with self.assertRaises(AttributeError):
self.climate_layer.polygonize()
self.climate_layer.load_data()
df_polygons = self.climate_layer.polygonize()
self.assertIsInstance(df_polygons, gp.GeoDataFrame)
self.assertIsNotNone(df_polygons.geometry)
self.assertIsInstance(df_polygons.geometry.iat[0], Polygon)
def test_RasterEnvironmentalLayer_close_dataset(self):
with self.assertRaises(AttributeError):
self.climate_layer.close_dataset()
self.climate_layer.load_data()
self.assertFalse(self.climate_layer.raster_reader.closed)
self.climate_layer.close_dataset()
self.assertTrue(self.climate_layer.raster_reader.closed)
self.assertIsNotNone(self.climate_layer.raster_reader)
def test_RasterEnvironmentalLayer_read(self):
with self.assertRaises(AttributeError):
self.climate_layer.read(1)
self.climate_layer.load_data()
band = self.climate_layer.read(1)
self.assertEqual(band.shape, (360, 720))
self.assertIsInstance(band, np.ndarray)
with self.assertRaises(IndexError):
self.climate_layer.read(2)
def test_RasterEnvironmentalLayer_reproject(self):
self.climate_layer.load_data()
original_resolution = self.climate_layer.resolution
self.climate_layer.reproject(destination_file="./data/tmp.tif", resolution=(original_resolution[0] * 2, original_resolution[1] * 2))
self.climate_layer.load_data("./data/tmp.tif")
self.assertEqual(original_resolution, (self.climate_layer.resolution[0] / 2, self.climate_layer.resolution[1] / 2))
def test_RasterEnvironmentalLayer_sample_pseudo_absences(self):
self.biomes_layer.load_data()
some_species = np.ones_like(self.biomes_layer.read(1))
some_species[0][0] = 0 # only one pixel set to zero, species covers entire range
pixels_to_sample_from, sampled_pixels = self.biomes_layer.sample_pseudo_absences(species_raster_data=some_species)
self.assertFalse(sampled_pixels.any())
# set half of the pixels to 0, now the species covers about half of the map
for index in range(int(some_species.shape[0] / 2)):
some_species[index] = 0
pixels_to_sample_from, sampled_pixels = self.biomes_layer.sample_pseudo_absences(species_raster_data=some_species)
self.assertIsNotNone(sampled_pixels)
self.assertIsInstance(pixels_to_sample_from, np.ndarray)
self.assertIsInstance(sampled_pixels, np.ndarray)
self.assertEqual(pixels_to_sample_from.shape, self.biomes_layer.read(1).shape)
self.assertEqual(sampled_pixels.shape, self.biomes_layer.read(1).shape)
self.assertEqual(sampled_pixels.nonzero()[0].shape[0], 1000)
# adding realms should further reduce the sampling area
pixels_to_sample_from_1, sampled_pixels_1 = self.realms.sample_pseudo_absences(species_raster_data=some_species)
self.assertIsNotNone(sampled_pixels_1)
self.assertIsInstance(pixels_to_sample_from_1, np.ndarray)
self.assertIsInstance(sampled_pixels_1, np.ndarray)
self.assertEqual(sampled_pixels_1.nonzero()[0].shape[0], 1000)
self.assertGreater(np.sum(pixels_to_sample_from), np.sum(pixels_to_sample_from_1))
def tearDown(self):
del self.climate_layer
if __name__ == '__main__':
# logging.getLogger( "iSDM.environment" ).setLevel( logging.DEBUG )
unittest.main()
|
remenska/iSDM
|
tests/test_environment.py
|
Python
|
apache-2.0
| 5,831
|
[
"ADF"
] |
7ee6a2d299ef9a9b52b54f1110b28592ae23abd9bae3c665802cf62611786504
|
from IPython.display import HTML, display
devs = [
('Fernando Perez', 'fperez.jpg'),
('Brian Granger', 'ellisonbg.jpg'),
('Min Ragan-Kelley', 'minrk.jpg'),
('Thomas Kluyver', 'takluyver.jpg'),
('Jonathan Frederic', 'jdfreder.jpg'),
('Paul Ivanov', 'ivanov.jpg'),
('Matthias Bussonnier', 'matthias.jpg'),
# ('Evan Patterson', 'epatters.jpg'),
('Kyle Kelley', 'rgbkrk.jpg'),
('Damian Avila', 'damianavila.jpg'),
('Jessica Hamrick', 'jhamrick.jpg')
# ('Brad Froehle', 'brad.jpg'),
# ('Zach Sailer', 'zsailer.jpg'),
# ('Robert Kern', 'rkern.jpg'),
# ('Jorgen Stenarson', 'jorgen.jpg'),
# ('Jonathan March', 'jdmarch.jpg'),
]
def chunks(l, n):
return [l[i:i+n] for i in range(0, len(l), n)]
s = "<table>"
for row in chunks(devs, 4):
s += "<tr>"
for person in row:
s += "<td>"
s += '<img src="ipythonteam/{image}" style="height: 150px; text-align: center; margin-left: auto; margin-right: auto;"/>'.format(image=person[1])
s += '<h3 style="text-align: center;">{name}</h3>'.format(name=person[0])
s += "</td>"
s += "</tr>"
s += "</table>"
def core_devs():
display(HTML(s))
|
sccolbert/talk-2015
|
ipythonproject.py
|
Python
|
mit
| 1,192
|
[
"Brian"
] |
a468c7cd455de2cb454157dbc0aea412c1057fe4f2047761e599ec0537a705fc
|
from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
import logging
log = logging.getLogger( __name__ )
repository_name = 'filtering_0310'
repository_description = "Galaxy's filtering tool for test 0310"
repository_long_description = "Long description of Galaxy's filtering tool for test 0310"
category_name = 'Test 0310 - HTTP Repo features'
category_description = 'Test 0310 for verifying the tool shed http interface to mercurial.'
# Declare clone_path here so multiple tests can access it.
clone_path = None
'''
1. Create a repository.
2. Clone the repository to a local path.
3. Change a file and try to push as non-owner.
4. Change another file and push as owner.
5. Verify that the changesets have been applied.
'''
class TestHgWebFeatures( ShedTwillTestCase ):
'''Test http mercurial interface.'''
def test_0000_initiate_users( self ):
"""Create necessary user accounts and login as an admin user."""
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
test_user_1 = self.test_db_util.get_user( common.test_user_1_email )
        assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
test_user_1_private_role = self.test_db_util.get_private_role( test_user_1 )
self.logout()
self.login( email=common.test_user_2_email, username=common.test_user_2_name )
test_user_2 = self.test_db_util.get_user( common.test_user_2_email )
assert test_user_2 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_2_email
test_user_2_private_role = self.test_db_util.get_private_role( test_user_2 )
self.logout()
self.login( email=common.admin_email, username=common.admin_username )
admin_user = self.test_db_util.get_user( common.admin_email )
        assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
admin_user_private_role = self.test_db_util.get_private_role( admin_user )
def test_0005_create_filtering_repository( self ):
'''Create and populate the filtering_0310 repository.'''
'''
We are at step 1 - Create a repository.
Create and populate the filtering_0310 repository.
'''
category = self.create_category( name=category_name, description=category_description )
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
repository = self.get_or_create_repository( name=repository_name,
description=repository_description,
long_description=repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( repository,
filename='filtering/filtering_1.1.0.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=True,
commit_message="Uploaded filtering 1.1.0.",
strings_displayed=[],
strings_not_displayed=[] )
self.upload_file( repository,
filename='filtering/filtering_test_data.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message="Uploaded filtering test data.",
strings_displayed=[],
strings_not_displayed=[] )
def test_0010_edit_and_commit( self ):
'''Edit a file and attempt a push as a user that does not have write access.'''
'''
We are at step 3 - Change a file and try to push as non-owner.
The repository should have the following files:
filtering.py
filtering.xml
test-data/
test-data/1.bed
test-data/7.bed
test-data/filter1_in3.sam
test-data/filter1_inbad.bed
test-data/filter1_test1.bed
test-data/filter1_test2.bed
test-data/filter1_test3.sam
test-data/filter1_test4.bed
We will be prepending a comment to filtering.py.
'''
repository = self.test_db_util.get_repository_by_name_and_owner( repository_name, common.test_user_1_name )
clone_path = self.generate_temp_path( 'test_0310', additional_paths=[ 'filtering_0310', 'user2' ] )
self.clone_repository( repository, clone_path )
hgrepo = self.get_hg_repo( clone_path )
files_in_repository = os.listdir( clone_path )
assert 'filtering.py' in files_in_repository, 'File not found in repository: filtering.py'
filepath = os.path.join( clone_path, 'filtering.py' )
file_contents = [ '# This is a dummy comment to generate a new changeset.' ]
file_contents.extend( file( filepath, 'r' ).readlines() )
file( filepath, 'w' ).write( '\n'.join( file_contents ) )
commit_options = dict( user=common.test_user_2_name, message='Added a line to filtering.py' )
# The repository is owned by test_user_1, so this operation should fail.
authorized = self.commit_and_push( repository, hgrepo, commit_options, username=common.test_user_2_name, password='testuser' )
assert authorized is False, 'Test user 2 was able to commit and push to the remote repository.'
def test_0015_edit_and_commit( self ):
'''Edit a file again and attempt a push as a user that does have write access.'''
'''
        We are at step 4 - Change another file and push as owner.
The repository should have the following files:
filtering.py
filtering.xml
test-data/
test-data/1.bed
test-data/7.bed
test-data/filter1_in3.sam
test-data/filter1_inbad.bed
test-data/filter1_test1.bed
test-data/filter1_test2.bed
test-data/filter1_test3.sam
test-data/filter1_test4.bed
We will be prepending a second comment to filtering.py.
'''
repository = self.test_db_util.get_repository_by_name_and_owner( repository_name, common.test_user_1_name )
clone_path = self.generate_temp_path( 'test_0310', additional_paths=[ 'filtering_0310', 'user1' ] )
self.clone_repository( repository, clone_path )
hgrepo = self.get_hg_repo( clone_path )
files_in_repository = os.listdir( clone_path )
assert 'filtering.py' in files_in_repository, 'File not found in repository: filtering.py'
filepath = os.path.join( clone_path, 'filtering.py' )
file_contents = [ '# This is another dummy comment to generate a new changeset.' ]
file_contents.extend( file( filepath, 'r' ).readlines() )
file( filepath, 'w' ).write( '\n'.join( file_contents ) )
commit_options = dict( user=common.test_user_1_name, message='Added another line to filtering.py.' )
# The repository is owned by test_user_1, so this operation should succeed.
authorized = self.commit_and_push( repository, hgrepo, commit_options, username=common.test_user_1_name, password='testuser' )
assert authorized is True, 'Test user 1 was not able to commit and push to the remote repository.'
def test_0020_verify_new_changelog( self ):
        '''Verify that the authorized commit was applied, and the unauthorized commit was not.'''
'''
We are at step 5 - Verify that the changeset has been applied.
The repository changelog should now look like:
0:nnnnnnnnnnnn: Uploaded filtering 1.1.0.
1:nnnnnnnnnnnn: Uploaded filtering test data.
2:nnnnnnnnnnnn: Added another line to filtering.py.
The commit from test_user_2 should not be present in the changelog, since the repositories were cloned to separate locations.
'''
repository = self.test_db_util.get_repository_by_name_and_owner( repository_name, common.test_user_1_name )
strings_displayed = [ 'Uploaded filtering 1.1.0.', 'Uploaded filtering test data.',
'Added another line to filtering.py.' ]
strings_not_displayed = [ 'Added a line to filtering.py' ]
        self.check_repository_changelog( repository, strings_displayed=strings_displayed, strings_not_displayed=strings_not_displayed )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/test/tool_shed/functional/test_0310_hg_push_from_api.py
|
Python
|
gpl-3.0
| 8,930
|
[
"Galaxy"
] |
6d8f3ee716e5830a75f95b0967503f5665c8ba7916da84302dcbadf731f7827d
|
## \example atom/charmm_forcefield.py
# In this example, a PDB file is read in and scored using the CHARMM
# forcefield. For more control over the setup of the forcefield, see
# the 'charmm_forcefield_verbose.py' example.
from __future__ import print_function
import IMP.atom
import IMP.container
import sys
IMP.setup_from_argv(sys.argv, "CHARMM forcefield")
# Create an IMP model and add a heavy atom-only protein from a PDB file
m = IMP.Model()
prot = IMP.atom.read_pdb(IMP.atom.get_example_path("example_protein.pdb"), m,
IMP.atom.NonWaterNonHydrogenPDBSelector())
# Read in the CHARMM heavy atom topology and parameter files
ff = IMP.atom.get_heavy_atom_CHARMM_parameters()
# Using the CHARMM libraries, determine the ideal topology (atoms and their
# connectivity) for the PDB file's primary sequence
topology = ff.create_topology(prot)
# Typically this modifies the C and N termini of each chain in the protein by
# applying the CHARMM CTER and NTER patches. Patches can also be manually
# applied at this point, e.g. to add disulfide bridges.
topology.apply_default_patches()
# Make the PDB file conform with the topology; i.e. if it contains extra
# atoms that are not in the CHARMM topology file, remove them; if it is
# missing atoms (e.g. sidechains, hydrogens) that are in the CHARMM topology,
# add them and construct their Cartesian coordinates from internal coordinate
# information.
topology.setup_hierarchy(prot)
# Set up and evaluate the stereochemical part (bonds, angles, dihedrals,
# impropers) of the CHARMM forcefield
r = IMP.atom.CHARMMStereochemistryRestraint(prot, topology)
# Add non-bonded interaction (in this case, Lennard-Jones). This needs to
# know the radii and well depths for each atom, so add them from the forcefield
# (they can also be assigned manually using the XYZR or LennardJones
# decorators):
ff.add_radii(prot)
ff.add_well_depths(prot)
# Get a list of all atoms in the protein, and put it in a container
atoms = IMP.atom.get_by_type(prot, IMP.atom.ATOM_TYPE)
cont = IMP.container.ListSingletonContainer(m, atoms)
# Add a restraint for the Lennard-Jones interaction. This is built from
# a collection of building blocks. First, a ClosePairContainer maintains a list
# of all pairs of Particles that are close. Next, all 1-2, 1-3 and 1-4 pairs
# from the stereochemistry created above are filtered out.
# Then, a LennardJonesPairScore scores a pair of atoms with the Lennard-Jones
# potential. Finally, a PairsRestraint is used which simply applies the
# LennardJonesPairScore to each pair in the ClosePairContainer.
nbl = IMP.container.ClosePairContainer(cont, 4.0)
nbl.add_pair_filter(r.get_pair_filter())
sf = IMP.atom.ForceSwitch(6.0, 7.0)
ps = IMP.atom.LennardJonesPairScore(sf)
restraints = [r, IMP.container.PairsRestraint(ps, nbl)]
scoring_function = IMP.core.RestraintsScoringFunction(restraints)
# it gets awfully slow with internal checks
IMP.set_check_level(IMP.USAGE)
# Finally, evaluate the score of the whole system (without derivatives)
print(scoring_function.evaluate(False))
|
shanot/imp
|
modules/atom/examples/charmm_forcefield.py
|
Python
|
gpl-3.0
| 3,076
|
[
"CHARMM"
] |
4eab1b2263c4d19344ae7c9ba468d0cc73b0d334b75b5fb1cb149a91b0795591
|
"""
================================
Time-related feature engineering
================================
This notebook introduces different strategies to leverage time-related features
for a bike sharing demand regression task that is highly dependent on business
cycles (days, weeks, months) and yearly season cycles.
In the process, we introduce how to perform periodic feature engineering using
the :class:`sklearn.preprocessing.SplineTransformer` class and its
`extrapolation="periodic"` option.
"""
# %%
# Data exploration on the Bike Sharing Demand dataset
# ---------------------------------------------------
#
# We start by loading the data from the OpenML repository.
from sklearn.datasets import fetch_openml
bike_sharing = fetch_openml("Bike_Sharing_Demand", version=2, as_frame=True)
df = bike_sharing.frame
# %%
# To get a quick understanding of the periodic patterns of the data, let us
# have a look at the average demand per hour during a week.
#
# Note that the week starts on a Sunday, during the weekend. We can clearly
# distinguish the commute patterns in the morning and evenings of the work days
# and the leisure use of the bikes on the weekends with a more spread peak
# demand around the middle of the days:
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(12, 4))
average_week_demand = df.groupby(["weekday", "hour"]).mean()["count"]
average_week_demand.plot(ax=ax)
_ = ax.set(
title="Average hourly bike demand during the week",
xticks=[i * 24 for i in range(7)],
xticklabels=["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"],
xlabel="Time of the week",
ylabel="Number of bike rentals",
)
# %%
#
# The target of the prediction problem is the absolute count of bike rentals on
# an hourly basis:
df["count"].max()
# %% [markdown]
#
# Let us rescale the target variable (number of hourly bike rentals) to predict
# a relative demand so that the mean absolute error is more easily interpreted
# as a fraction of the maximum demand.
#
# .. note::
#
# The fit method of the models used in this notebook all minimize the
# mean squared error to estimate the conditional mean instead of the mean
# absolute error that would fit an estimator of the conditional median.
#
#    When reporting performance measures on the test set in the discussion, we
# instead choose to focus on the mean absolute error that is more
# intuitive than the (root) mean squared error. Note, however, that the
# best models for one metric are also the best for the other in this
# study.
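#
#    If the conditional median were the quantity of interest instead, a
#    possible variant (not evaluated in this notebook) would be to fit the
#    gradient boosting model with an absolute-error loss, e.g.
#    ``HistGradientBoostingRegressor(loss="absolute_error")``.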
y = df["count"] / 1000
# %%
fig, ax = plt.subplots(figsize=(12, 4))
y.hist(bins=30, ax=ax)
_ = ax.set(
xlabel="Fraction of rented fleet demand",
ylabel="Number of hours",
)
# %%
# The input feature data frame is a time annotated hourly log of variables
# describing the weather conditions. It includes both numerical and categorical
# variables. Note that the time information has already been expanded into
# several complementary columns.
#
X = df.drop("count", axis="columns")
X
# %%
# .. note::
#
# If the time information was only present as a date or datetime column, we
# could have expanded it into hour-in-the-day, day-in-the-week,
# day-in-the-month, month-in-the-year using pandas:
# https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#time-date-components
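#
#    A minimal sketch of such an expansion (assuming a hypothetical
#    ``datetime`` column of dtype ``datetime64``) could look like::
#
#        X["hour"] = X["datetime"].dt.hour
#        X["weekday"] = X["datetime"].dt.dayofweek
#        X["month"] = X["datetime"].dt.month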
#
# We now introspect the distribution of the categorical variables, starting
# with `"weather"`:
#
X["weather"].value_counts()
# %%
# Since there are only 3 `"heavy_rain"` events, we cannot use this category to
# train machine learning models with cross validation. Instead, we simplify the
# representation by collapsing those into the `"rain"` category.
#
X["weather"].replace(to_replace="heavy_rain", value="rain", inplace=True)
# %%
X["weather"].value_counts()
# %%
# As expected, the `"season"` variable is well balanced:
#
X["season"].value_counts()
# %%
# Time-based cross-validation
# ---------------------------
#
# Since the dataset is a time-ordered event log (hourly demand), we will use a
# time-sensitive cross-validation splitter to evaluate our demand forecasting
# model as realistically as possible. We use a gap of 2 days between the train
# and test side of the splits. We also limit the training set size to make the
# performance of the CV folds more stable.
#
# 1000 test datapoints should be enough to quantify the performance of the
# model. This represents a bit less than a month and a half of contiguous test
# data:
from sklearn.model_selection import TimeSeriesSplit
ts_cv = TimeSeriesSplit(
n_splits=5,
gap=48,
max_train_size=10000,
test_size=1000,
)
# %%
# Let us manually inspect the various splits to check that the
# `TimeSeriesSplit` works as we expect, starting with the first split:
all_splits = list(ts_cv.split(X, y))
train_0, test_0 = all_splits[0]
# %%
X.iloc[test_0]
# %%
X.iloc[train_0]
# %%
# We now inspect the last split:
train_4, test_4 = all_splits[4]
# %%
X.iloc[test_4]
# %%
X.iloc[train_4]
# %%
# All is well. We are now ready to do some predictive modeling!
#
# Gradient Boosting
# -----------------
#
# Gradient Boosting Regression with decision trees is often flexible enough to
# efficiently handle heterogeneous tabular data with a mix of categorical and
# numerical features as long as the number of samples is large enough.
#
# Here, we do minimal ordinal encoding for the categorical variables and then
# let the model know that it should treat those as categorical variables by
# using a dedicated tree splitting rule. Since we use an ordinal encoder, we
# pass the list of categorical values explicitly to use a logical order when
# encoding the categories as integers instead of the lexicographical order.
# This also has the added benefit of preventing any issue with unknown
# categories when using cross-validation.
#
# The numerical variables need no preprocessing and, for the sake of simplicity,
# we only try the default hyper-parameters for this model:
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.model_selection import cross_validate
categorical_columns = [
"weather",
"season",
"holiday",
"workingday",
]
categories = [
["clear", "misty", "rain"],
["spring", "summer", "fall", "winter"],
["False", "True"],
["False", "True"],
]
ordinal_encoder = OrdinalEncoder(categories=categories)
gbrt_pipeline = make_pipeline(
ColumnTransformer(
transformers=[
("categorical", ordinal_encoder, categorical_columns),
],
remainder="passthrough",
),
HistGradientBoostingRegressor(
categorical_features=range(4),
),
)
# %%
#
# Let's evaluate our gradient boosting model with the mean absolute error of the
# relative demand averaged across our 5 time-based cross-validation splits:
def evaluate(model, X, y, cv):
cv_results = cross_validate(
model,
X,
y,
cv=cv,
scoring=["neg_mean_absolute_error", "neg_root_mean_squared_error"],
)
mae = -cv_results["test_neg_mean_absolute_error"]
rmse = -cv_results["test_neg_root_mean_squared_error"]
print(
f"Mean Absolute Error: {mae.mean():.3f} +/- {mae.std():.3f}\n"
f"Root Mean Squared Error: {rmse.mean():.3f} +/- {rmse.std():.3f}"
)
evaluate(gbrt_pipeline, X, y, cv=ts_cv)
# %%
# This model has an average error around 4 to 5% of the maximum demand. This is
# quite good for a first trial without any hyper-parameter tuning! We just had
# to make the categorical variables explicit. Note that the time related
# features are passed as is, i.e. without processing them. But this is not much
# of a problem for tree-based models as they can learn a non-monotonic
# relationship between ordinal input features and the target.
#
# This is not the case for linear regression models as we will see in the
# following.
#
# Naive linear regression
# -----------------------
#
# As usual for linear models, categorical variables need to be one-hot encoded.
# For consistency, we scale the numerical features to the same 0-1 range using
# :class:`sklearn.preprocessing.MinMaxScaler`, although in this case it does not
# impact the results much because they are already on comparable scales:
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import RidgeCV
import numpy as np
one_hot_encoder = OneHotEncoder(handle_unknown="ignore", sparse=False)
alphas = np.logspace(-6, 6, 25)
naive_linear_pipeline = make_pipeline(
ColumnTransformer(
transformers=[
("categorical", one_hot_encoder, categorical_columns),
],
remainder=MinMaxScaler(),
),
RidgeCV(alphas=alphas),
)
evaluate(naive_linear_pipeline, X, y, cv=ts_cv)
# %%
#
# The performance is not good: the average error is around 14% of the maximum
# demand. This is more than three times higher than the average error of the
# gradient boosting model. We can suspect that the naive original encoding
# (merely min-max scaled) of the periodic time-related features might prevent
# the linear regression model to properly leverage the time information: linear
# regression does not automatically model non-monotonic relationships between
# the input features and the target. Non-linear terms have to be engineered in
# the input.
#
# For example, the raw numerical encoding of the `"hour"` feature prevents the
# linear model from recognizing that an increase of hour in the morning from 6
# to 8 should have a strong positive impact on the number of bike rentals while
# an increase of similar magnitude in the evening from 18 to 20 should have a
# strong negative impact on the predicted number of bike rentals.
#
# Time-steps as categories
# ------------------------
#
# Since the time features are encoded in a discrete manner using integers (24
# unique values in the "hour" feature), we could decide to treat those as
# categorical variables using a one-hot encoding and thereby ignore any
# assumption implied by the ordering of the hour values.
#
# Using one-hot encoding for the time features gives the linear model a lot
# more flexibility as we introduce one additional feature per discrete time
# level.
one_hot_linear_pipeline = make_pipeline(
ColumnTransformer(
transformers=[
("categorical", one_hot_encoder, categorical_columns),
("one_hot_time", one_hot_encoder, ["hour", "weekday", "month"]),
],
remainder=MinMaxScaler(),
),
RidgeCV(alphas=alphas),
)
evaluate(one_hot_linear_pipeline, X, y, cv=ts_cv)
# %%
# The average error rate of this model is 10% which is much better than using
# the original (ordinal) encoding of the time feature, confirming our intuition
# that the linear regression model benefits from the added flexibility to not
# treat time progression in a monotonic manner.
#
# However, this introduces a very large number of new features. If the time of
# the day was represented in minutes since the start of the day instead of
# hours, one-hot encoding would have introduced 1440 features instead of 24.
# This could cause some significant overfitting. To avoid this we could use
# :class:`sklearn.preprocessing.KBinsDiscretizer` instead to re-bin the number
# of levels of fine-grained ordinal or numerical variables while still
# benefitting from the non-monotonic expressivity advantages of one-hot
# encoding.
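# %%
# A rough sketch of that re-binning idea (not used in the rest of this
# notebook; the `binned_hour_encoder` name is arbitrary): the fine-grained
# "hour" values could be grouped into 6 bins before one-hot encoding them:
from sklearn.preprocessing import KBinsDiscretizer

binned_hour_encoder = KBinsDiscretizer(n_bins=6, encode="onehot-dense", strategy="uniform")
binned_hour_encoder.fit_transform(X[["hour"]]).shape
# %%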
#
# Finally, we also observe that one-hot encoding completely ignores the
# ordering of the hour levels while this could be an interesting inductive bias
# to preserve to some level. In the following we try to explore smooth,
# non-monotonic encoding that locally preserves the relative ordering of time
# features.
#
# Trigonometric features
# ----------------------
#
# As a first attempt, we can try to encode each of those periodic features
# using a sine and cosine transformation with the matching period.
#
# Each ordinal time feature is transformed into 2 features that together encode
# equivalent information in a non-monotonic way, and more importantly without
# any jump between the first and the last value of the periodic range.
from sklearn.preprocessing import FunctionTransformer
def sin_transformer(period):
return FunctionTransformer(lambda x: np.sin(x / period * 2 * np.pi))
def cos_transformer(period):
return FunctionTransformer(lambda x: np.cos(x / period * 2 * np.pi))
# %%
#
# Let us visualize the effect of this feature expansion on some synthetic hour
# data with a bit of extrapolation beyond hour=23:
import pandas as pd
hour_df = pd.DataFrame(
np.arange(26).reshape(-1, 1),
columns=["hour"],
)
hour_df["hour_sin"] = sin_transformer(24).fit_transform(hour_df)["hour"]
hour_df["hour_cos"] = cos_transformer(24).fit_transform(hour_df)["hour"]
hour_df.plot(x="hour")
_ = plt.title("Trigonometric encoding for the 'hour' feature")
# %%
#
# Let's use a 2D scatter plot with the hours encoded as colors to better see
# how this representation maps the 24 hours of the day to a 2D space, akin to
# some sort of a 24 hour version of an analog clock. Note that the "25th" hour
# is mapped back to the 1st hour because of the periodic nature of the
# sine/cosine representation.
fig, ax = plt.subplots(figsize=(7, 5))
sp = ax.scatter(hour_df["hour_sin"], hour_df["hour_cos"], c=hour_df["hour"])
ax.set(
xlabel="sin(hour)",
ylabel="cos(hour)",
)
_ = fig.colorbar(sp)
# %%
#
# We can now build a feature extraction pipeline using this strategy:
cyclic_cossin_transformer = ColumnTransformer(
transformers=[
("categorical", one_hot_encoder, categorical_columns),
("month_sin", sin_transformer(12), ["month"]),
("month_cos", cos_transformer(12), ["month"]),
("weekday_sin", sin_transformer(7), ["weekday"]),
("weekday_cos", cos_transformer(7), ["weekday"]),
("hour_sin", sin_transformer(24), ["hour"]),
("hour_cos", cos_transformer(24), ["hour"]),
],
remainder=MinMaxScaler(),
)
cyclic_cossin_linear_pipeline = make_pipeline(
cyclic_cossin_transformer,
RidgeCV(alphas=alphas),
)
evaluate(cyclic_cossin_linear_pipeline, X, y, cv=ts_cv)
# %%
#
# The performance of our linear regression model with this simple feature
# engineering is a bit better than using the original ordinal time features but
# worse than using the one-hot encoded time features. We will further analyze
# possible reasons for this disappointing outcome at the end of this notebook.
#
# Periodic spline features
# ------------------------
#
# We can try an alternative encoding of the periodic time-related features
# using spline transformations with a large enough number of splines, and as a
# result a larger number of expanded features compared to the sine/cosine
# transformation:
from sklearn.preprocessing import SplineTransformer
def periodic_spline_transformer(period, n_splines=None, degree=3):
if n_splines is None:
n_splines = period
n_knots = n_splines + 1 # periodic and include_bias is True
return SplineTransformer(
degree=degree,
n_knots=n_knots,
knots=np.linspace(0, period, n_knots).reshape(n_knots, 1),
extrapolation="periodic",
include_bias=True,
)
# %%
#
# Again, let us visualize the effect of this feature expansion on some
# synthetic hour data with a bit of extrapolation beyond hour=23:
hour_df = pd.DataFrame(
np.linspace(0, 26, 1000).reshape(-1, 1),
columns=["hour"],
)
splines = periodic_spline_transformer(24, n_splines=12).fit_transform(hour_df)
splines_df = pd.DataFrame(
splines,
columns=[f"spline_{i}" for i in range(splines.shape[1])],
)
pd.concat([hour_df, splines_df], axis="columns").plot(x="hour", cmap=plt.cm.tab20b)
_ = plt.title("Periodic spline-based encoding for the 'hour' feature")
# %%
# Thanks to the use of the `extrapolation="periodic"` parameter, we observe
# that the feature encoding stays smooth when extrapolating beyond midnight.
#
# We can now build a predictive pipeline using this alternative periodic
# feature engineering strategy.
#
# It is possible to use fewer splines than discrete levels for those ordinal
# values. This makes spline-based encoding more efficient than one-hot encoding
# while preserving most of the expressivity:
cyclic_spline_transformer = ColumnTransformer(
transformers=[
("categorical", one_hot_encoder, categorical_columns),
("cyclic_month", periodic_spline_transformer(12, n_splines=6), ["month"]),
("cyclic_weekday", periodic_spline_transformer(7, n_splines=3), ["weekday"]),
("cyclic_hour", periodic_spline_transformer(24, n_splines=12), ["hour"]),
],
remainder=MinMaxScaler(),
)
cyclic_spline_linear_pipeline = make_pipeline(
cyclic_spline_transformer,
RidgeCV(alphas=alphas),
)
evaluate(cyclic_spline_linear_pipeline, X, y, cv=ts_cv)
# %%
# Spline features make it possible for the linear model to successfully
# leverage the periodic time-related features and reduce the error from ~14% to
# ~10% of the maximum demand, which is similar to what we observed with the
# one-hot encoded features.
#
# Qualitative analysis of the impact of features on linear model predictions
# --------------------------------------------------------------------------
#
# Here, we want to visualize the impact of the feature engineering choices on
# the time related shape of the predictions.
#
# To do so we consider an arbitrary time-based split to compare the predictions
# on a range of held out data points.
naive_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
naive_linear_predictions = naive_linear_pipeline.predict(X.iloc[test_0])
one_hot_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
one_hot_linear_predictions = one_hot_linear_pipeline.predict(X.iloc[test_0])
cyclic_cossin_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
cyclic_cossin_linear_predictions = cyclic_cossin_linear_pipeline.predict(X.iloc[test_0])
cyclic_spline_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
cyclic_spline_linear_predictions = cyclic_spline_linear_pipeline.predict(X.iloc[test_0])
# %%
# We visualize those predictions by zooming on the last 96 hours (4 days) of
# the test set to get some qualitative insights:
last_hours = slice(-96, None)
fig, ax = plt.subplots(figsize=(12, 4))
fig.suptitle("Predictions by linear models")
ax.plot(
y.iloc[test_0].values[last_hours],
"x-",
alpha=0.2,
label="Actual demand",
color="black",
)
ax.plot(naive_linear_predictions[last_hours], "x-", label="Ordinal time features")
ax.plot(
cyclic_cossin_linear_predictions[last_hours],
"x-",
label="Trigonometric time features",
)
ax.plot(
cyclic_spline_linear_predictions[last_hours],
"x-",
label="Spline-based time features",
)
ax.plot(
one_hot_linear_predictions[last_hours],
"x-",
label="One-hot time features",
)
_ = ax.legend()
# %%
# We can draw the following conclusions from the above plot:
#
# - The **raw ordinal time-related features** are problematic because they do
# not capture the natural periodicity: we observe a big jump in the
# predictions at the end of each day when the hour features goes from 23 back
# to 0. We can expect similar artifacts at the end of each week or each year.
#
# - As expected, the **trigonometric features** (sine and cosine) do not have
# these discontinuities at midnight, but the linear regression model fails to
# leverage those features to properly model intra-day variations.
#   Using trigonometric features for higher harmonics or additional
#   trigonometric features for the natural period with different phases could
#   potentially fix this problem (a minimal sketch of such higher-harmonic
#   features is shown after this list).
#
# - the **periodic spline-based features** fix those two problems at once: they
# give more expressivity to the linear model by making it possible to focus
# on specific hours thanks to the use of 12 splines. Furthermore the
# `extrapolation="periodic"` option enforces a smooth representation between
# `hour=23` and `hour=0`.
#
# - The **one-hot encoded features** behave similarly to the periodic
# spline-based features but are more spiky: for instance they can better
# model the morning peak during the week days since this peak lasts shorter
# than an hour. However, we will see in the following that what can be an
# advantage for linear models is not necessarily one for more expressive
# models.
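# %%
# As a minimal illustration of the higher-harmonic idea mentioned in the list
# above (a sketch only; `hour_harmonics_transformer` is not used elsewhere in
# this notebook), extra sine/cosine features with periods of 24, 12 and 8
# hours can be built by reusing the helpers defined earlier:
hour_harmonics_transformer = ColumnTransformer(
    transformers=[
        (f"hour_sin_h{h}", sin_transformer(24 / h), ["hour"]) for h in (1, 2, 3)
    ]
    + [(f"hour_cos_h{h}", cos_transformer(24 / h), ["hour"]) for h in (1, 2, 3)],
    remainder="drop",
)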
# %%
# We can also compare the number of features extracted by each feature
# engineering pipeline:
naive_linear_pipeline[:-1].transform(X).shape
# %%
one_hot_linear_pipeline[:-1].transform(X).shape
# %%
cyclic_cossin_linear_pipeline[:-1].transform(X).shape
# %%
cyclic_spline_linear_pipeline[:-1].transform(X).shape
# %%
# This confirms that the one-hot encoding and the spline encoding strategies
# create a lot more features for the time representation than the alternatives,
# which in turn gives the downstream linear model more flexibility (degrees of
# freedom) to avoid underfitting.
#
# Finally, we observe that none of the linear models can approximate the true
# bike rentals demand, especially for the peaks that can be very sharp at rush
# hours during the working days but much flatter during the week-ends: the most
# accurate linear models based on splines or one-hot encoding tend to forecast
# peaks of commuting-related bike rentals even on the week-ends and
# under-estimate the commuting-related events during the working days.
#
# These systematic prediction errors reveal a form of under-fitting and can be
# explained by the lack of interaction terms between features, e.g.
# "workingday" and features derived from "hours". This issue will be addressed
# in the following section.
# %%
# Modeling pairwise interactions with splines and polynomial features
# -------------------------------------------------------------------
#
# Linear models do not automatically capture interaction effects between input
# features. It does not help that some features are marginally non-linear as is
# the case with features constructed by `SplineTransformer` (or one-hot
# encoding or binning).
#
# However, it is possible to use the `PolynomialFeatures` class on coarse
# grained spline encoded hours to model the "workingday"/"hours" interaction
# explicitly without introducing too many new variables:
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import FeatureUnion
hour_workday_interaction = make_pipeline(
ColumnTransformer(
[
("cyclic_hour", periodic_spline_transformer(24, n_splines=8), ["hour"]),
("workingday", FunctionTransformer(lambda x: x == "True"), ["workingday"]),
]
),
PolynomialFeatures(degree=2, interaction_only=True, include_bias=False),
)
# %%
# Those features are then combined with the ones already computed in the
# previous spline-based pipeline. We can observe a nice performance improvement
# by modeling this pairwise interaction explicitly:
cyclic_spline_interactions_pipeline = make_pipeline(
FeatureUnion(
[
("marginal", cyclic_spline_transformer),
("interactions", hour_workday_interaction),
]
),
RidgeCV(alphas=alphas),
)
evaluate(cyclic_spline_interactions_pipeline, X, y, cv=ts_cv)
# %%
# Modeling non-linear feature interactions with kernels
# -----------------------------------------------------
#
# The previous analysis highlighted the need to model the interactions between
# `"workingday"` and `"hours"`. Another example of a such a non-linear
# interaction that we would like to model could be the impact of the rain that
# might not be the same during the working days and the week-ends and holidays
# for instance.
#
# To model all such interactions, we could either use a polynomial expansion on
# all marginal features at once, after their spline-based expansion. However,
# this would create a quadratic number of features which can cause overfitting
# and computational tractability issues.
#
# Alternatively, we can use the Nyström method to compute an approximate
# polynomial kernel expansion. Let us try the latter:
from sklearn.kernel_approximation import Nystroem
cyclic_spline_poly_pipeline = make_pipeline(
cyclic_spline_transformer,
Nystroem(kernel="poly", degree=2, n_components=300, random_state=0),
RidgeCV(alphas=alphas),
)
evaluate(cyclic_spline_poly_pipeline, X, y, cv=ts_cv)
# %%
#
# We observe that this model can almost rival the performance of the gradient
# boosted trees with an average error around 6% of the maximum demand.
#
# Note that while the final step of this pipeline is a linear regression model,
# the intermediate steps such as the spline feature extraction and the Nyström
# kernel approximation are highly non-linear. As a result the compound pipeline
# is much more expressive than a simple linear regression model with raw features.
#
# For the sake of completeness, we also evaluate the combination of one-hot
# encoding and kernel approximation:
one_hot_poly_pipeline = make_pipeline(
ColumnTransformer(
transformers=[
("categorical", one_hot_encoder, categorical_columns),
("one_hot_time", one_hot_encoder, ["hour", "weekday", "month"]),
],
remainder="passthrough",
),
Nystroem(kernel="poly", degree=2, n_components=300, random_state=0),
RidgeCV(alphas=alphas),
)
evaluate(one_hot_poly_pipeline, X, y, cv=ts_cv)
# %%
# While one-hot encoded features were competitive with spline-based features
# when using linear models, this is no longer the case when using a low-rank
# approximation of a non-linear kernel: this can be explained by the fact that
# spline features are smoother and allow the kernel approximation to find a
# more expressive decision function.
#
# Let us now have a qualitative look at the predictions of the kernel models
# and of the gradient boosted trees that should be able to better model
# non-linear interactions between features:
gbrt_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
gbrt_predictions = gbrt_pipeline.predict(X.iloc[test_0])
one_hot_poly_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
one_hot_poly_predictions = one_hot_poly_pipeline.predict(X.iloc[test_0])
cyclic_spline_poly_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
cyclic_spline_poly_predictions = cyclic_spline_poly_pipeline.predict(X.iloc[test_0])
# %%
# Again we zoom on the last 4 days of the test set:
last_hours = slice(-96, None)
fig, ax = plt.subplots(figsize=(12, 4))
fig.suptitle("Predictions by non-linear regression models")
ax.plot(
y.iloc[test_0].values[last_hours],
"x-",
alpha=0.2,
label="Actual demand",
color="black",
)
ax.plot(
gbrt_predictions[last_hours],
"x-",
label="Gradient Boosted Trees",
)
ax.plot(
one_hot_poly_predictions[last_hours],
"x-",
label="One-hot + polynomial kernel",
)
ax.plot(
cyclic_spline_poly_predictions[last_hours],
"x-",
label="Splines + polynomial kernel",
)
_ = ax.legend()
# %%
# First, note that trees can naturally model non-linear feature interactions
# since, by default, decision trees are allowed to grow beyond a depth of 2
# levels.
#
# Here, we can observe that the combinations of spline features and non-linear
# kernels works quite well and can almost rival the accuracy of the gradient
# boosting regression trees.
#
# On the contrary, one-hot encoded time features do not perform that well with
# the low rank kernel model. In particular, they significantly over-estimate
# the low demand hours more than the competing models.
#
# We also observe that none of the models can successfully predict some of the
# peak rentals at the rush hours during the working days. It is possible that
# access to additional features would be required to further improve the
# accuracy of the predictions. For instance, it could be useful to have access
# to the geographical repartition of the fleet at any point in time or the
# fraction of bikes that are immobilized because they need servicing.
#
# Let us finally get a more quantitative look at the prediction errors of those
# three models using the true vs predicted demand scatter plots:
fig, axes = plt.subplots(ncols=3, figsize=(12, 4), sharey=True)
fig.suptitle("Non-linear regression models")
predictions = [
one_hot_poly_predictions,
cyclic_spline_poly_predictions,
gbrt_predictions,
]
labels = [
"One hot + polynomial kernel",
"Splines + polynomial kernel",
"Gradient Boosted Trees",
]
for ax, pred, label in zip(axes, predictions, labels):
ax.scatter(y.iloc[test_0].values, pred, alpha=0.3, label=label)
ax.plot([0, 1], [0, 1], "--", label="Perfect model")
ax.set(
xlim=(0, 1),
ylim=(0, 1),
xlabel="True demand",
ylabel="Predicted demand",
)
ax.legend()
# %%
# This visualization confirms the conclusions we drew from the previous plot.
#
# All models under-estimate the high demand events (working day rush hours),
# but gradient boosting a bit less so. The low demand events are well predicted
# on average by gradient boosting while the one-hot polynomial regression
# pipeline seems to systematically over-estimate demand in that regime. Overall
# the predictions of the gradient boosted trees are closer to the diagonal than
# for the kernel models.
#
# Concluding remarks
# ------------------
#
# We note that we could have obtained slightly better results for kernel models
# by using more components (higher rank kernel approximation) at the cost of
# longer fit and prediction durations. For large values of `n_components`, the
# performance of the one-hot encoded features would even match the spline
# features.
#
# The `Nystroem` + `RidgeCV` regressor could also have been replaced by
# :class:`~sklearn.neural_network.MLPRegressor` with one or two hidden layers
# and we would have obtained quite similar results.
#
# The dataset we used in this case study is sampled on an hourly basis. However
# cyclic spline-based features could model time-within-day or time-within-week
# very efficiently with finer-grained time resolutions (for instance with
# measurements taken every minute instead of every hour) without introducing
# more features. One-hot encoding time representations would not offer this
# flexibility.
#
# Finally, in this notebook we used `RidgeCV` because it is very efficient from
# a computational point of view. However, it models the target variable as a
# Gaussian random variable with constant variance. For positive regression
# problems, it is likely that using a Poisson or Gamma distribution would make
# more sense. This could be achieved by using
# `GridSearchCV(TweedieRegressor(power=2), param_grid={"alpha": alphas})`
# instead of `RidgeCV`.
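# %%
# As a minimal sketch of that last suggestion (not part of the original
# example), the snippet below builds such a grid-searched
# :class:`~sklearn.linear_model.TweedieRegressor`. The `tweedie_alphas` grid is
# an arbitrary choice made for illustration only, and the resulting
# `gamma_regressor` would be plugged into the feature pipelines above in place
# of `RidgeCV`.
import numpy as np

from sklearn.linear_model import TweedieRegressor
from sklearn.model_selection import GridSearchCV

# power=2 selects a Gamma distribution, suited to strictly positive targets
tweedie_alphas = np.logspace(-6, 6, 25)  # assumed search grid for illustration
gamma_regressor = GridSearchCV(
    TweedieRegressor(power=2),
    param_grid={"alpha": tweedie_alphas},
)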
|
shyamalschandra/scikit-learn
|
examples/applications/plot_cyclical_feature_engineering.py
|
Python
|
bsd-3-clause
| 31,010
|
[
"Gaussian"
] |
e2f06d5ca56ed3f7fffd316e65b8947786a599d466f110c44be333eb985e8ca0
|
# Copyright 2009-2012 Ram Rachum.
# This program is distributed under the MIT license.
'''
This module defines number-selecting scripts.
See their documentation for more information.
'''
from __future__ import with_statement
import re
import _ast
import os.path, sys
sys.path += [
os.path.dirname(__file__),
os.path.join(os.path.dirname(__file__), 'third_party.zip'),
]
import wingapi
import edit
import shared
number_pattern = re.compile(r'''-?(([0-9]+(\.[0-9]+)?)|(\.[0-9]+))''')
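# A small standalone sketch (not part of the original script) of what the
# pattern above treats as a "number"; it only exercises `number_pattern` on an
# invented sample string and never touches the Wing API.
if __name__ == '__main__':
    _sample = 'offset = -12, scale = 0.5, ratio = .75'
    _spans = [(match.start(), match.end(), match.group())
              for match in number_pattern.finditer(_sample)]
    # _spans == [(9, 12, '-12'), (22, 25, '0.5'), (35, 38, '.75')]
    print(_spans)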
def _get_all_number_positions(editor):
text = shared.get_text(editor.GetDocument())
matches = tuple(number_pattern.finditer(text))
return tuple((match.start(), match.end()) for match in matches)
def _get_relevant_number_positions(editor, caret_position):
assert isinstance(editor, wingapi.CAPIEditor)
all_number_positions = _get_all_number_positions(editor)
last_start_position = last_end_position = None
for i, (start_position, end_position) in enumerate(all_number_positions):
if end_position >= caret_position:
next_start_position, next_end_position = \
start_position, end_position
break
else:
last_start_position, last_end_position = \
start_position, end_position
else:
next_start_position = next_end_position = None
return ((last_start_position, last_end_position),
(next_start_position, next_end_position))
def select_next_number(editor=wingapi.kArgEditor,
app=wingapi.kArgApplication):
'''
Select the next (or current) number in the document.
Suggested key combination: `Ctrl-0`
'''
assert isinstance(editor, wingapi.CAPIEditor)
document = editor.GetDocument()
caret_position = editor.GetSelection()[1] + 1
_, next_number_position = _get_relevant_number_positions(editor,
caret_position)
if next_number_position != (None, None):
app.ExecuteCommand('set-visit-history-anchor')
editor.SetSelection(*next_number_position)
def select_prev_number(editor=wingapi.kArgEditor,
app=wingapi.kArgApplication):
'''
Select the previous number in the document.
Suggested key combination: `Ctrl-9`
'''
assert isinstance(editor, wingapi.CAPIEditor)
document = editor.GetDocument()
#document_start = 0
#document_end = document.GetLength()
#selection_start, selection_end = editor.GetSelection()
#number_positions_in_document = get_number_positions_in_document()
caret_position = editor.GetSelection()[0]
prev_number_position, _ = _get_relevant_number_positions(editor,
caret_position)
if prev_number_position != (None, None):
app.ExecuteCommand('set-visit-history-anchor')
editor.SetSelection(*prev_number_position)
|
cool-RR/cute-wing-stuff
|
scripts/selecting_numbers.py
|
Python
|
mit
| 3,069
|
[
"VisIt"
] |
5caf89b15c88a3bb589470229e2489a652f8d29484aa84c9993ae2e8c2b791c9
|
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import codecs
import ntpath
import os
import posixpath
from unittest.case import expectedFailure
from unittest.case import skipIf
import commoncode.date
from commoncode.testcase import FileBasedTesting
from commoncode import filetype
from commoncode import fileutils
from commoncode.system import on_linux
from commoncode.system import on_mac
from commoncode.system import on_windows
import typecode.contenttype
from extractcode_assert_utils import check_files
from extractcode_assert_utils import check_size
from extractcode import all_kinds
from extractcode import archive
from extractcode import default_kinds
from extractcode.archive import get_best_handler
from extractcode import ExtractErrorFailedToExtract
from extractcode import libarchive2
from extractcode import sevenzip
from extractcode import tar
"""
For each archive type --when possible-- we are testing extraction of:
- basic, plain archive, no tricks
- with trailing data appended to archive
- broken, either truncated or with extra junk inserted
- with hardlinks and symlinks, either valid or broken when supported
    - with hardlink and symlink loops (aka tarbomb) when supported
- with FIFO, character, sparse and other special files when supported
- with relative paths pointing outside of the archive when supported
- with absolute paths when supported
- with invalid paths or mixed slash paths when supported
- with unicode or binary path names
- with duplicate names or paths when case is ignored
- password-protected when supported
"""
class TestSmokeTest(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_get_extractors(self):
test_data = [
('archive/zip/basic.zip', [archive.extract_zip]),
('archive/rar/basic.rar', [archive.extract_rar]),
('archive/deb/adduser_3.112ubuntu1_all.deb', [archive.extract_ar]),
('archive/cpio/elfinfo-1.0-1.fc9.src.cpio', [archive.extract_cpio]),
('archive/rpm/elfinfo-1.0-1.fc9.src.rpm', [archive.extract_rpm, archive.extract_cpio]),
('archive/gzip/file_4.26-1.diff.gz', [archive.uncompress_gzip]),
('archive/ar/liby.a', [archive.extract_ar]),
('archive/bz2/single_file_not_tarred.bz2', [archive.uncompress_bzip2]),
('archive/tar/tarred.tar', [archive.extract_tar]),
('archive/tbz/tarred_bzipped.bz', [archive.uncompress_bzip2]),
('archive/tbz/tarred_bzipped.tar.bz2', [archive.extract_tar]),
('archive/tbz/tarred_bzipped.tbz', [archive.extract_tar]),
('archive/tgz/tarred_gzipped.gz', [archive.uncompress_gzip]),
('archive/tgz/tarred_gzipped.tar.gz', [archive.extract_tar]),
('archive/tgz/tarred_gzipped.tgz', [archive.extract_tar]),
('archive/7z/z.7z', [archive.extract_7z]),
('archive/Z/tr2tex.Z', [archive.extract_Z, ]),
('archive/Z/tkWWW-0.11.tar.Z', [archive.extract_Z, archive.extract_tar]),
('archive/xar/xar-1.4.xar', [archive.extract_xarpkg]),
]
for test_file, expected in test_data:
test_loc = self.get_test_loc(test_file)
extractors = archive.get_extractors(test_loc)
assert expected == extractors
def test_get_extractors_with_kinds(self):
test_data = [
('archive/deb/adduser_3.112ubuntu1_all.deb', []),
('archive/rpm/elfinfo-1.0-1.fc9.src.rpm', []),
('archive/ar/liby.a', []),
('archive/tar/tarred.tar', [archive.extract_tar]),
('archive/tbz/tarred_bzipped.tar.bz2', []),
]
kinds = (archive.regular, archive.file_system, archive.docs)
for test_file, expected in test_data:
test_loc = self.get_test_loc(test_file)
extractors = archive.get_extractors(test_loc, kinds)
ft = typecode.contenttype.get_type(test_loc).filetype_file
mt = typecode.contenttype.get_type(test_loc).mimetype_file
fe = fileutils.file_extension(test_loc).lower()
msg = ('%(expected)r == %(extractors)r for %(test_file)s\n'
'with ft:%(ft)r, mt:%(mt)r, fe:%(fe)r' % locals())
assert expected == extractors, msg
def test_get_handlers(self):
test_data = [
('archive/deb/adduser_3.112ubuntu1_all.deb', ['Debian package']),
('archive/rpm/elfinfo-1.0-1.fc9.src.rpm', ['RPM package']),
('archive/ar/liby.a', ['ar archive', 'Static Library']),
('archive/tar/tarred.tar', ['Tar', 'Ruby Gem package']),
('archive/tbz/tarred_bzipped.tar.bz2', ['bzip2', 'Tar bzip2']),
('archive/tbz/tarred_bzipped.bz', ['bzip2', 'Tar bzip2']),
('archive/tgz/tarred_gzipped.gz', ['Tar gzip', 'Gzip']),
]
for test_file, expected in test_data:
test_loc = self.get_test_loc(test_file)
handlers = archive.get_handlers(test_loc)
assert expected == [h[0].name for h in handlers]
def test_score_handlers(self):
test_data = [
('archive/deb/adduser_3.112ubuntu1_all.deb', [(31, 'Debian package')]),
('archive/rpm/elfinfo-1.0-1.fc9.src.rpm', [(32, 'RPM package')]),
('archive/ar/liby.a', [(31, 'Static Library'), (17, 'ar archive')]),
('archive/tar/tarred.tar', [(29, 'Tar'), (19, 'Ruby Gem package')]),
('archive/tbz/tarred_bzipped.tar.bz2', [(30, 'Tar bzip2'), (29, 'bzip2')]),
('archive/tbz/tarred_bzipped.bz', [(29, 'bzip2'), (18, 'Tar bzip2')]),
('archive/tgz/tarred_gzipped.gz', [(29, 'Gzip'), (18, 'Tar gzip')]),
]
for test_file, expected in test_data:
test_loc = self.get_test_loc(test_file)
handlers = archive.get_handlers(test_loc)
scored = archive.score_handlers(handlers)
assert expected == sorted([(h[0], h[1].name) for h in scored], reverse=True)
def test_no_handler_is_selected_for_a_non_archive(self):
# failed because of libmagic bug: http://bugs.gw.com/view.php?id=467
        # now passing after introducing a strict flag for handlers
test_loc = self.get_test_loc('archive/not_archive/hashfile')
assert [] == list(archive.get_handlers(test_loc))
assert None == archive.get_extractor(test_loc)
assert None == archive.get_extractor(test_loc, kinds=all_kinds)
assert not archive.should_extract(test_loc, kinds=default_kinds)
def test_no_handler_is_selected_for_a_non_archive2(self):
# FWIW there is a related libmagic bug: http://bugs.gw.com/view.php?id=473
test_loc = self.get_test_loc('archive/not_archive/wildtest.txt')
assert [] == list(archive.get_handlers(test_loc))
assert None == archive.get_extractor(test_loc)
assert None == archive.get_extractor(test_loc, kinds=all_kinds)
assert not archive.should_extract(test_loc, kinds=default_kinds)
def test_no_handler_is_selected_for_a_non_archive3(self):
test_loc = self.get_test_loc('archive/not_archive/savetransfer.c')
assert [] == list(archive.get_handlers(test_loc))
assert None == archive.get_extractor(test_loc)
assert None == archive.get_extractor(test_loc, kinds=all_kinds)
assert not archive.should_extract(test_loc, kinds=default_kinds)
def test_7zip_extract_can_extract_to_relative_paths(self):
# The setup is a tad complex because we want to have a relative dir
# to the base dir where we run tests from, ie the scancode-toolkit/ dir
# To use relative paths, we use our tmp dir at the root of the code
from os.path import dirname, join, abspath
import tempfile
import shutil
from extractcode.sevenzip import extract
test_file = self.get_test_loc('archive/relative_path/basic.zip')
scancode_root = dirname(dirname(dirname(__file__)))
scancode_tmp = join(scancode_root, 'tmp')
fileutils.create_dir(scancode_tmp)
scancode_root_abs = abspath(scancode_root)
test_src_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
test_tgt_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
shutil.copy(test_file, test_src_dir)
test_src_file = join(test_src_dir, 'basic.zip')
result = list(extract(test_src_file, test_tgt_dir))
assert [] == result
expected = ['c/a/a.txt', 'c/b/a.txt', 'c/c/a.txt']
check_files(test_tgt_dir, expected)
def test_libarchive_extract_can_extract_to_relative_paths(self):
# The setup is a tad complex because we want to have a relative dir
# to the base dir where we run tests from, ie the scancode-toolkit/ dir
# To use relative paths, we use our tmp dir at the root of the code
from os.path import dirname, join, abspath
import tempfile
import shutil
from extractcode.libarchive2 import extract
test_file = self.get_test_loc('archive/relative_path/basic.zip')
scancode_root = dirname(dirname(dirname(__file__)))
scancode_tmp = join(scancode_root, 'tmp')
fileutils.create_dir(scancode_tmp)
scancode_root_abs = abspath(scancode_root)
test_src_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
test_tgt_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
shutil.copy(test_file, test_src_dir)
test_src_file = join(test_src_dir, 'basic.zip')
result = list(extract(test_src_file, test_tgt_dir))
assert [] == result
expected = ['c/a/a.txt', 'c/b/a.txt', 'c/c/a.txt']
check_files(test_tgt_dir, expected)
def test_windows_media_player_skins_are_zip(self):
test_file = self.get_test_loc('archive/wmz/Go.wmz')
extractors = archive.get_extractors(test_file)
assert [archive.extract_zip] == extractors
def test_windows_ntfs_wmz_are_sometimes_gzip(self):
test_file = self.get_test_loc('archive/wmz/image003.wmz')
extractors = archive.get_extractors(test_file)
assert [archive.uncompress_gzip] == extractors
class BaseArchiveTestCase(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def check_extract(self, test_function, test_file, expected, expected_warnings=None, check_all=False):
"""
Run the extraction `test_function` on `test_file` checking that a map of
        expected paths --> size exists in the extracted target directory.
Does not test the presence of all files unless `check_all` is True.
"""
test_file = self.get_test_loc(test_file)
test_dir = self.get_temp_dir()
warnings = test_function(test_file, test_dir)
if expected_warnings is not None:
assert expected_warnings == warnings
if check_all:
len_test_dir = len(test_dir)
extracted = {path[len_test_dir:]: filetype.get_size(path) for path in fileutils.file_iter(test_dir)}
expected = {os.path.join(test_dir, exp_path): exp_size for exp_path, exp_size in expected.items()}
assert sorted(expected.items()) == sorted(extracted.items())
else:
for exp_path, exp_size in expected.items():
exp_loc = os.path.join(test_dir, exp_path)
msg = '''When extracting: %(test_file)s
With function: %(test_function)r
Failed to find expected path: %(exp_loc)s'''
assert os.path.exists(exp_loc), msg % locals()
if exp_size is not None:
res_size = os.stat(exp_loc).st_size
msg = '''When extracting: %(test_file)s
With function: %(test_function)r
Failed to assert the correct size %(exp_size)d
Got instead: %(res_size)d
for expected path: %(exp_loc)s'''
assert exp_size == res_size, msg % locals()
def collect_extracted_path(self, test_dir):
result = []
td = fileutils.as_posixpath(test_dir)
for t, dirs, files in os.walk(test_dir):
t = fileutils.as_posixpath(t)
for d in dirs:
nd = posixpath.join(t, d).replace(td, '') + '/'
result.append(nd)
for f in files:
nf = posixpath.join(t, f).replace(td, '')
result.append(nf)
result = sorted(result)
return result
def assertExceptionContains(self, text, callableObj, *args, **kwargs):
try:
callableObj(*args, **kwargs)
except Exception, e:
if text not in str(e):
raise self.failureException(
'Exception %(e)r raised, '
'it should contain the text %(text)r '
'and does not' % locals())
else:
raise self.failureException(
'Exception containing %(text)r not raised' % locals())
class TestTarGzip(BaseArchiveTestCase):
def test_extract_targz_basic(self):
test_file = self.get_test_loc('archive/tgz/tarred_gzipped.tar.gz')
test_dir = self.get_temp_dir()
archive.extract_tar(test_file, test_dir)
result = os.path.join(test_dir, 'e/a/b.txt')
assert os.path.exists(result)
def test_extract_targz_with_trailing_data(self):
test_file = self.get_test_loc('archive/tgz/trailing.tar.gz')
test_dir = self.get_temp_dir()
archive.extract_tar(test_file, test_dir)
result = os.path.join(test_dir, 'a.txt')
assert os.path.exists(result)
def test_extract_targz_broken(self):
test_file = self.get_test_loc('archive/tgz/broken.tar.gz')
test_dir = self.get_temp_dir()
expected = Exception("'Unrecognized archive format'")
self.assertRaisesInstance(expected, archive.extract_tar, test_file, test_dir)
def test_extract_targz_with_absolute_path(self):
non_result = '/tmp/subdir'
assert not os.path.exists(non_result)
test_dir = self.get_temp_dir()
test_file = self.get_test_loc('archive/tgz/absolute_path.tar.gz')
archive.extract_tar(test_file, test_dir)
assert not os.path.exists(non_result)
result = os.path.join(test_dir, 'tmp/subdir/a.txt')
assert os.path.exists(result)
def test_extract_targz_with_relative_path(self):
test_file = self.get_test_loc('archive/tgz/relative.tar.gz')
"""
This test file was created with:
import tarfile
tar = tarfile.open("TarTest.tar.gz", "w:gz")
tar.add('a.txt', '../a_parent_folder.txt')
tar.add('b.txt', '../../another_folder/b_two_root.txt')
tar.add('b.txt', '../folder/subfolder/b_subfolder.txt')
tar.close()
"""
test_dir = self.get_temp_dir()
archive.extract_tar(test_file, test_dir)
non_result = os.path.join(test_dir, '../a_parent_folder.txt')
assert not os.path.exists(non_result)
expected = [
'dotdot/dotdot/another_folder/b_two_root.txt',
'dotdot/a_parent_folder.txt',
'dotdot/folder/subfolder/b_subfolder.txt'
]
check_files(test_dir, expected)
def test_extract_targz_with_trailing_data2(self):
test_dir1 = self.get_temp_dir()
test_file = self.get_test_loc('archive/tgz/trailing2.tar.gz')
archive.extract_tar(test_file, test_dir1)
test_dir2 = self.get_temp_dir()
test_file2 = self.get_test_loc('archive/tgz/no_trailing.tar.gz')
archive.extract_tar(test_file2, test_dir2)
assert commoncode.testcase.is_same(test_dir1, test_dir2)
def test_extract_targz_with_mixed_case_and_symlink(self):
test_file = self.get_test_loc('archive/tgz/mixed_case_and_symlink.tgz')
test_dir = self.get_temp_dir()
result = archive.extract_tar(test_file, test_dir)
assert [] == result
import json
exp_file = self.get_test_loc('archive/tgz/mixed_case_and_symlink.tgz.expected')
with codecs.open(exp_file, encoding='utf-8') as ef:
expected_files = json.load(ef)
check_files(test_dir, map(str, expected_files))
def test_extract_targz_symlinks(self):
test_file = self.get_test_loc('archive/tgz/symlink.tar.gz')
test_dir = self.get_temp_dir()
archive.extract_tar(test_file, test_dir)
expected = [
'z/x/a',
# 'z/y/a': this is a symlink which is skipped by design
]
check_files(test_dir, expected)
def test_extract_targz_from_apache_should_not_return_errors(self):
# from http://archive.apache.org/dist/commons/logging/source/commons-logging-1.1.2-src.tar.gz
# failed with ReadError('not a bzip2 file',)
test_file = self.get_test_loc('archive/tgz/commons-logging-1.1.2-src.tar.gz')
test_dir = self.get_temp_dir()
extractor = archive.get_extractor(test_file)
assert archive.extract_tar == extractor
result = archive.extract_tar(test_file, test_dir)
assert [] == result
assert os.listdir(test_dir)
def test_extract_targz_with_unicode_path_should_extract_without_error(self):
test_file = self.get_test_loc('archive/tgz/tgz_unicode.tgz')
test_dir = self.get_temp_dir()
extractor = archive.get_extractor(test_file)
assert archive.extract_tar == extractor
result = archive.extract_tar(test_file, test_dir)
assert [] == result
assert os.listdir(test_dir)
class TestGzip(BaseArchiveTestCase):
def test_uncompress_gzip_basic(self):
test_file = self.get_test_loc('archive/gzip/file_4.26-1.diff.gz')
test_dir = self.get_temp_dir()
archive.uncompress_gzip(test_file, test_dir)
result = os.path.join(test_dir, 'file_4.26-1.diff.gz-extract')
assert os.path.exists(result)
def test_uncompress_concatenated_gzip(self):
# Archive created with:
# echo "f1content" > f1
# echo "f2content" > f2
# gzip -k f1
# gzip -k -c f2 >> twofiles.gz
test_file = self.get_test_loc('archive/gzip/twofiles.gz')
test_dir = self.get_temp_dir()
warnings = archive.uncompress_gzip(test_file, test_dir)
result = os.path.join(test_dir, 'twofiles.gz-extract')
assert os.path.exists(result)
assert 'f1content\nf2content\n' == open(result, 'rb').read()
assert [] == warnings
def test_uncompress_gzip_with_trailing_data(self):
test_file = self.get_test_loc('archive/gzip/trailing_data.gz')
test_dir = self.get_temp_dir()
warnings = archive.uncompress_gzip(test_file, test_dir)
result = os.path.join(test_dir, 'trailing_data.gz-extract')
assert os.path.exists(result)
assert [] == warnings
def test_uncompress_gzip_with_leading_data(self):
# even though we do not fail when there is invalid trailing data we
# should still fail on invalid leading data
test_file = self.get_test_loc('archive/gzip/leading_data.gz')
test_dir = self.get_temp_dir()
expected = Exception('Not a gzipped file')
self.assertRaisesInstance(expected, archive.uncompress_gzip, test_file, test_dir)
def test_uncompress_gzip_with_random_data(self):
test_file = self.get_test_loc('archive/gzip/random_binary.data')
test_dir = self.get_temp_dir()
expected = Exception('Not a gzipped file')
self.assertRaisesInstance(expected, archive.uncompress_gzip, test_file, test_dir)
def test_uncompress_gzip_with_backslash_in_path(self):
# weirdly enough, gzip keeps the original path/name
test_file = self.get_test_loc('archive/gzip/backslash_path.gz')
test_dir = self.get_temp_dir()
archive.uncompress_gzip(test_file, test_dir)
result = os.path.join(test_dir, 'backslash_path.gz-extract')
assert os.path.exists(result)
def test_uncompress_gzip_can_uncompress_windows_ntfs_wmz(self):
test_file = self.get_test_loc('archive/wmz/image003.wmz')
test_dir = self.get_temp_dir()
archive.uncompress_gzip(test_file, test_dir)
print(os.listdir(test_dir))
result = os.path.join(test_dir, 'image003.wmz-extract')
assert os.path.exists(result)
class TestTarBz2(BaseArchiveTestCase):
def test_extract_tar_bz2_basic(self):
test_file = self.get_test_loc('archive/tbz/tarred_bzipped.tar.bz2')
test_dir = self.get_temp_dir()
archive.extract_tar(test_file, test_dir)
result = os.path.join(test_dir, 'e/a/b.txt')
assert os.path.exists(result)
def test_extract_tar_bz2_basic_bz(self):
test_file = self.get_test_loc('archive/tbz/tarred_bzipped.bz')
test_dir = self.get_temp_dir()
archive.extract_tar(test_file, test_dir)
result = os.path.join(test_dir, 'e/a/b.txt')
assert os.path.exists(result)
def test_extract_tar_bz2_with_trailing_data__and_wrong_extension(self):
test_file = self.get_test_loc('archive/tbz/single_file_trailing_data.tar.gz')
test_dir = self.get_temp_dir()
archive.extract_tar(test_file, test_dir)
result = os.path.join(test_dir, 'a.txt')
assert os.path.exists(result)
def test_extract_tar_bz2_broken(self):
test_file = self.get_test_loc('archive/tbz/tarred_bzipped_broken.tar.bz2')
test_dir = self.get_temp_dir()
expected = Exception("'bzip decompression failed'")
self.assertRaisesInstance(expected, archive.extract_tar, test_file, test_dir)
def test_extract_tar_bz2_absolute_path(self):
assert not os.path.exists('/tmp/subdir')
test_dir = self.get_temp_dir()
test_file = self.get_test_loc('archive/tbz/absolute_path.tar.bz2')
archive.extract_tar(test_file, test_dir)
assert not os.path.exists('/tmp/subdir')
result = os.path.join(test_dir, 'tmp/subdir/a.txt')
assert os.path.exists(result)
def test_extract_tar_bz2_relative_path(self):
test_file = self.get_test_loc('archive/tbz/bz2withtar_relative.tar.bz2')
"""
This test file was created with:
import tarfile
tar = tarfile.open("TarTest.tar.gz", "w:bz")
tar.add('a.txt', '../a_parent_folder.txt')
tar.add('b.txt', '../../another_folder/b_two_root.txt')
tar.add('b.txt', '../folder/subfolder/b_subfolder.txt')
tar.close()
"""
test_dir = self.get_temp_dir()
archive.extract_tar(test_file, test_dir)
non_result = os.path.join(test_dir, '../a_parent_folder.txt')
assert not os.path.exists(non_result)
result = os.path.join(test_dir, 'dotdot/folder/subfolder/b_subfolder.txt')
assert os.path.exists(result)
result = os.path.join(test_dir, 'dotdot', 'a_parent_folder.txt')
assert os.path.exists(result)
def test_extract_tar_bz2_iproute(self):
test_file = self.get_test_loc('archive/tbz/iproute2.tar.bz2')
test_dir = self.get_temp_dir()
archive.extract_tar(test_file, test_dir)
result = os.path.join(test_dir, 'iproute2/README')
assert os.path.exists(result)
def test_extract_tar_bz2_multistream(self):
test_file = self.get_test_loc('archive/tbz/bzip2_multistream/example-file.csv.tar.bz2')
test_dir = self.get_temp_dir()
archive.extract_tar(test_file, test_dir)
expected = self.get_test_loc('archive/tbz/bzip2_multistream/example-file.csv')
result = os.path.join(test_dir, 'example-file.csv')
assert open(expected, 'rb').read() == open(result, 'rb').read()
class TestBz2(BaseArchiveTestCase):
def test_uncompress_bzip2_basic(self):
test_file = self.get_test_loc('archive/bz2/single_file_not_tarred.bz2')
test_dir = self.get_temp_dir()
archive.uncompress_bzip2(test_file, test_dir)
result = os.path.join(test_dir, 'single_file_not_tarred.bz2-extract')
assert os.path.exists(result)
def test_uncompress_bzip2_with_trailing_data(self):
test_file = self.get_test_loc('archive/bz2/single_file_trailing_data.bz2')
test_dir = self.get_temp_dir()
archive.uncompress_bzip2(test_file, test_dir)
result = os.path.join(test_dir, 'single_file_trailing_data.bz2-extract')
assert os.path.exists(result)
def test_uncompress_bzip2_broken(self):
test_file = self.get_test_loc('archive/bz2/bz2_not_tarred_broken.bz2')
test_dir = self.get_temp_dir()
expected = Exception('invalid data stream')
self.assertRaisesInstance(expected, archive.uncompress_bzip2,
test_file, test_dir)
def test_uncompress_bzip2_with_invalid_path(self):
test_file = self.get_test_loc('archive/bz2/bz_invalidpath.bz2')
test_dir = self.get_temp_dir()
archive.uncompress_bzip2(test_file, test_dir)
result = os.path.join(test_dir, 'bz_invalidpath.bz2-extract')
assert os.path.exists(result)
def test_uncompress_bzip2_multistream(self):
test_file = self.get_test_loc('archive/bz2/bzip2_multistream/example-file.csv.bz2')
test_dir = self.get_temp_dir()
archive.uncompress_bzip2(test_file, test_dir)
expected = self.get_test_loc('archive/bz2/bzip2_multistream/expected.csv')
result = os.path.join(test_dir, 'example-file.csv.bz2-extract')
assert open(expected, 'rb').read() == open(result, 'rb').read()
def test_sevenzip_extract_can_handle_bz2_multistream_differently(self):
test_file = self.get_test_loc('archive/bz2/bzip2_multistream/example-file.csv.bz2')
test_dir = self.get_temp_dir()
sevenzip.extract(test_file, test_dir)
expected = self.get_test_loc('archive/bz2/bzip2_multistream/expected.csv')
        # the extraction dir is not created with a suffix by 7z
result = os.path.join(test_dir, 'example-file.csv')
        expected_text = open(expected, 'rb').read()
        extracted_text = open(result, 'rb').read()
        assert expected_text == extracted_text
class TestZip(BaseArchiveTestCase):
def test_extract_zip_basic(self):
test_file = self.get_test_loc('archive/zip/basic.zip')
test_dir = self.get_temp_dir()
result = archive.extract_zip(test_file, test_dir)
assert [] == result
expected = ['c/a/a.txt', 'c/b/a.txt', 'c/c/a.txt']
check_files(test_dir, expected)
def test_extract_zip_broken(self):
test_file = self.get_test_loc('archive/zip/zip_broken.zip')
test_dir = self.get_temp_dir()
self.assertRaises(Exception, archive.extract_zip, test_file, test_dir)
# note: broken zip opens and extracts with 7z with exceptions sometimes
# something is extracted in latest 7z
# result = os.path.join(test_dir, 'a.txt')
# print(test_dir)
# assert os.path.exists(result)
def test_extract_zip_with_invalid_path(self):
test_file = self.get_test_loc('archive/zip/zip_invalidpath.zip')
test_dir = self.get_temp_dir()
archive.extract_zip(test_file, test_dir)
result = os.path.join(test_dir, 'this/that')
assert os.path.exists(result)
@expectedFailure
def test_extract_zip_with_trailing_data(self):
test_file = self.get_test_loc('archive/zip/zip_trailing_data.zip')
test_dir = self.get_temp_dir()
try:
archive.extract_zip(test_file, test_dir)
except libarchive2.ArchiveError, ae:
assert 'Invalid central directory signature' in str(ae)
# fails because of https://github.com/libarchive/libarchive/issues/545
result = os.path.join(test_dir, 'a.txt')
assert os.path.exists(result)
@expectedFailure
def test_extract_zip_with_trailing_data2(self):
# test archive created on cygwin with:
# $ echo "test content" > f1
# $ zip test f1
# $ echo "some junk" >> test.zip
test_file = self.get_test_loc('archive/zip/zip_trailing_data2.zip')
test_dir = self.get_temp_dir()
try:
archive.extract_zip(test_file, test_dir)
except libarchive2.ArchiveError, ae:
assert 'Invalid central directory signature' in str(ae)
# fails because of https://github.com/libarchive/libarchive/issues/545
result = os.path.join(test_dir, 'f1')
assert os.path.exists(result)
def test_extract_zip_with_relative_path_simple(self):
# The test files for this test and the next one were created with:
# from zipfile import ZipFile
# f = open('/tmp/a.txt', 'w')
# f.write('some data')
# f.close()
# f = open('/tmp/b.txt', 'w')
# f.write('some data')
# f.close()
# f = ZipFile(os.path.join(self.get_test_loc('archive'), 'relative_parent_folders.zip'), 'w')
# f.write('/tmp/a.txt', '../a_parent_folder.txt')
# f.write('/tmp/b.txt', '../../another_folder/b_two_root.txt')
# f.write('/tmp/b.txt', '../folder/subfolder/b_subfolder.txt')
# f.close()
# f = ZipFile(os.path.join(self.get_test_loc('archive'), 'high_ancest.zip'), 'w')
# f.write('/tmp/a.txt', ('../' * 12) + 'a_parent_folder.txt')
# f.write('/tmp/a.txt', ('../' * 12) + ('sub/' * 6) + 'a_parent_folder_in_sub_1.txt')
# f.write('/tmp/a.txt', ('../' * 6) + ('sub/' * 12) + 'a_parent_folder_in_sub_2.txt')
# f.write('/tmp/a.txt', ('../' * 12) + ('sub/' * 12) + 'a_parent_folder_in_sub_3.txt')
# f.close()
test_file = self.get_test_loc('archive/zip/relative_parent_folders.zip')
test_dir = self.get_temp_dir()
archive.extract_zip(test_file, test_dir)
abs_path = os.path.join(test_dir , '../a_parent_folder.txt')
assert not os.path.exists(abs_path)
result = self.collect_extracted_path(test_dir)
expected = [
'/dotdot/',
'/dotdot/a_parent_folder.txt',
'/dotdot/dotdot/',
'/dotdot/dotdot/another_folder/',
'/dotdot/dotdot/another_folder/b_two_root.txt',
'/dotdot/folder/',
'/dotdot/folder/subfolder/',
'/dotdot/folder/subfolder/b_subfolder.txt'
]
assert expected == result
def test_extract_zip_with_relative_path_deeply_nested(self):
test_file = self.get_test_loc('archive/zip/relative_nested.zip')
test_dir = self.get_temp_dir()
archive.extract_zip(test_file, test_dir)
result = self.collect_extracted_path(test_dir)
expected = [
'/dotdot/',
'/dotdot/dotdot/',
'/dotdot/dotdot/dotdot/',
'/dotdot/dotdot/dotdot/dotdot/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/a_parent_folder.txt',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_1.txt',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_3.txt',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
'/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_2.txt'
]
assert expected == result
def test_extract_zip_with_password(self):
test_file = self.get_test_loc('archive/zip/zip_password_nexb.zip')
test_dir = self.get_temp_dir()
try:
archive.extract_zip(test_file, test_dir)
except Exception, e:
assert isinstance(e, libarchive2.ArchiveError)
assert 'Encrypted file is unsupported' in str(e)
# self.assertRaisesI(libarchive2.ArchiveError, archive.extract_zip, test_file, test_dir)
def test_extract_zip_java_jar(self):
test_file = self.get_test_loc('archive/zip/jar/simple.jar')
test_dir = self.get_temp_dir()
archive.extract_zip(test_file, test_dir)
extracted = self.collect_extracted_path(test_dir)
expected = [
'/META-INF/',
'/META-INF/MANIFEST.MF',
'/org/',
'/org/jvnet/',
'/org/jvnet/glassfish/',
'/org/jvnet/glassfish/comms/',
'/org/jvnet/glassfish/comms/sipagent/',
'/org/jvnet/glassfish/comms/sipagent/actions/',
'/org/jvnet/glassfish/comms/sipagent/actions/Bundle.properties',
'/org/jvnet/glassfish/comms/sipagent/actions/SipAgentCookieAction.class',
'/org/jvnet/glassfish/comms/sipagent/actions/bd.png',
'/org/jvnet/glassfish/comms/sipagent/actions/bd24.png',
'/org/jvnet/glassfish/comms/sipagent/org-jvnet-glassfish-comms-sipagent-actions-SipAgentCookieAction.instance',
'/org/jvnet/glassfish/comms/sipagent/org-jvnet-glassfish-comms-sipagent-actions-SipAgentCookieAction_1.instance'
]
assert sorted(expected) == sorted(extracted)
def test_extract_zip_with_duplicated_lowercase_paths(self):
test_file = self.get_test_loc('archive/zip/dup_names.zip')
expected = {'META-INF/license/': None, # a directory
'META-INF/license/LICENSE.base64.txt': 1618,
'META-INF/LICENSE_1': 11366}
self.check_extract(archive.extract_zip, test_file, expected)
def test_extract_zip_with_timezone(self):
test_file = self.get_test_loc('archive/zip/timezone/c.zip')
test_dir = self.get_temp_dir()
archive.extract_zip(test_file, test_dir)
expected = [
(os.path.join(test_dir, 'c/a/a.txt'), '2008-07-29'),
(os.path.join(test_dir, 'c/b/a.txt'), '2008-07-29'),
(os.path.join(test_dir, 'c/c/a.txt'), '2008-07-29'),
]
# DST sends a monkey wrench.... so we only test the date, not the time
for loc, expected_date in expected:
result = commoncode.date.get_file_mtime(loc)
assert result.startswith(expected_date)
def test_extract_zip_with_timezone_2(self):
test_file = self.get_test_loc('archive/zip/timezone/projecttest.zip')
test_dir = self.get_temp_dir()
archive.extract_zip(test_file, test_dir)
# DST sends a monkey wrench.... so we only test the date, not the time
        # and we accept some variation in the date ...
expected = [
(os.path.join(test_dir, 'primes.txt'), ('2009-12-05', '2009-12-06',)),
(os.path.join(test_dir, 'primes2.txt'), ('2009-12-05', '2009-12-06',))
]
for loc, expected_date in expected:
result = commoncode.date.get_file_mtime(loc)
assert result.startswith(expected_date)
def test_extract_zip_with_backslash_in_path_1(self):
test_file = self.get_test_loc('archive/zip/backslash/backslash1.zip')
test_dir = self.get_temp_dir()
archive.extract_zip(test_file, test_dir)
# Info-ZIP 'zip' displays:
# warning: booxw-1202-bin.distribution.zip appears to use
# backslashes as path separators (which is the right thing to do)
expected = ['scripts/AutomaticClose.int']
check_files(test_dir, expected)
result = os.path.join(test_dir, 'scripts/AutomaticClose.int')
assert os.path.exists(result)
def test_extract_zip_with_backslash_in_path_2(self):
test_file = self.get_test_loc('archive/zip/backslash/AspectJTest.zip')
test_dir = self.get_temp_dir()
archive.extract_zip(test_file, test_dir)
expected = '''
AspectJTest/.classpath
AspectJTest/.project
AspectJTest/src/META-INF/aop.xml
AspectJTest/src/p3/ExpertFlyable.java
AspectJTest/src/p3/MakeFlyableAspect.java
AspectJTest/src/p3/Flyable.java
AspectJTest/src/p3/MakeFlyable.java
AspectJTest/src/p3/Main2.java
AspectJTest/src/p3/p4/Person.java
AspectJTest/src/p2/MyLoggingAspect.java
AspectJTest/src/p1/MyService.java
AspectJTest/src/p1/Main1.java
AspectJTest/bin/META-INF/aop.xml
AspectJTest/bin/p3/MakeFlyableAspect.class
AspectJTest/bin/p3/ExpertFlyable.class
AspectJTest/bin/p3/Flyable.class
AspectJTest/bin/p3/Main2.class
AspectJTest/bin/p3/MakeFlyable.class
AspectJTest/bin/p3/p4/Person.class
AspectJTest/bin/p2/MyLoggingAspect.class
AspectJTest/bin/p1/Main1.class
AspectJTest/bin/p1/MyService.class
'''.split()
check_files(test_dir, expected)
def test_extract_zip_with_backslash_in_path_3(self):
test_file = self.get_test_loc('archive/zip/backslash/boo-0.3-src.zip')
test_dir = self.get_temp_dir()
archive.extract_zip(test_file, test_dir)
print()
map(print, fileutils.file_iter(test_dir))
result = os.path.join(test_dir, 'src/Boo.Lang.Compiler/TypeSystem/InternalCallableType.cs')
assert os.path.exists(result)
def test_get_best_handler_nuget_is_selected_over_zip(self):
test_file = self.get_test_loc('archive/zip/moq.4.2.1507.118.nupkg')
handler = get_best_handler(test_file)
assert archive.NugetHandler == handler
def test_get_best_handler_nuget_is_selected_over_zip2(self):
test_file = self.get_test_loc('archive/zip/exceptionhero.javascript.1.0.5.nupkg')
handler = get_best_handler(test_file)
assert archive.NugetHandler == handler
def test_get_best_handler_nuget_is_selected_over_zip3(self):
test_file = self.get_test_loc('archive/zip/javascript-fastclass.1.1.729.121805.nupkg')
handler = get_best_handler(test_file)
assert archive.NugetHandler == handler
def test_extract_zip_can_extract_windows_media_player_skins(self):
test_file = self.get_test_loc('archive/wmz/Go.wmz')
test_dir = self.get_temp_dir()
result = archive.extract_zip(test_file, test_dir)
assert [] == result
expected = ['32px.png', 'go.js', 'go.wms']
check_files(test_dir, expected)
def test_extract_zip_with_unicode_path_should_extract_without_error(self):
test_file = self.get_test_loc('archive/zip/zip_unicode.zip')
test_dir = self.get_temp_dir()
result = archive.extract_zip(test_file, test_dir)
assert [] == result
assert os.listdir(test_dir)
class TestLibarch(BaseArchiveTestCase):
def test_extract_zip_with_relative_path_libarchive(self):
test_file = self.get_test_loc('archive/zip/relative_parent_folders.zip')
test_dir = self.get_temp_dir()
result = libarchive2.extract(test_file, test_dir)
assert [] == result
abs_path = os.path.join(test_dir , '../a_parent_folder.txt')
assert not os.path.exists(abs_path)
result = os.path.join(test_dir, 'dotdot/folder/subfolder/b_subfolder.txt')
assert os.path.exists(result)
result = os.path.join(test_dir, 'dotdot/a_parent_folder.txt')
assert os.path.exists(result)
result = os.path.join(test_dir, 'dotdot/dotdot/another_folder/b_two_root.txt')
assert os.path.exists(result)
class TestTar(BaseArchiveTestCase):
def test_extract_tar_basic(self):
test_file = self.get_test_loc('archive/tar/tarred.tar')
test_dir = self.get_temp_dir()
archive.extract_tar(test_file, test_dir)
result = os.path.join(test_dir, 'e/a/b.txt')
assert os.path.exists(result)
def test_extract_tar_broken(self):
test_file = self.get_test_loc('archive/tar/tarred_broken.tar')
test_dir = self.get_temp_dir()
expected = Exception("'Unrecognized archive format'")
self.assertRaisesInstance(expected, archive.extract_tar,
test_file, test_dir)
def test_extract_tar_absolute_path(self):
non_result = '/home/li/Desktop/absolute_folder'
assert not os.path.exists(non_result)
test_dir = self.get_temp_dir()
test_file = self.get_test_loc('archive/tar/tar_absolute.tar')
archive.extract_tar(test_file, test_dir)
assert not os.path.exists(non_result)
result = os.path.join(test_dir, 'home/li/Desktop/absolute_folder/absolute_file')
assert os.path.exists(result)
def test_extract_tar_with_absolute_path2(self):
assert not os.path.exists('/tmp/subdir')
test_file = self.get_test_loc('archive/tar/absolute_path.tar')
test_dir = self.get_temp_dir()
archive.extract_tar(test_file, test_dir)
assert not os.path.exists('/tmp/subdir')
result = os.path.join(test_dir, 'tmp/subdir/a.txt')
assert os.path.exists(result)
def test_extract_tar_with_relative_path(self):
test_file = self.get_test_loc('archive/tar/tar_relative.tar')
"""
This test file was created with:
import tarfile
tar = tarfile.open("TarTest.tar.gz", "w")
tar.add('a.txt', '../a_parent_folder.txt')
tar.add('b.txt', '../../another_folder/b_two_root.txt')
tar.add('b.txt', '../folder/subfolder/b_subfolder.txt')
tar.close()
"""
test_dir = self.get_temp_dir()
archive.extract_tar(test_file, test_dir)
non_result = os.path.abspath(test_file + '/../a_parent_folder.txt')
assert not os.path.exists(non_result)
extracted = self.collect_extracted_path(test_dir)
expected = [
'/dotdot/',
'/dotdot/dotdot/',
'/dotdot/a_parent_folder.txt',
'/dotdot/dotdot/another_folder/',
'/dotdot/dotdot/another_folder/b_two_root.txt',
'/dotdot/folder/',
'/dotdot/folder/subfolder/',
'/dotdot/folder/subfolder/b_subfolder.txt'
]
assert sorted(expected) == sorted(extracted)
def test_extract_tar_archive_with_special_files(self):
test_file = self.get_test_loc('archive/tar/special.tar')
test_dir = self.get_temp_dir()
result = archive.extract_tar(test_file, test_dir)
expected = [
'0-REGTYPE',
'0-REGTYPE-TEXT',
'0-REGTYPE-VEEEERY_LONG_NAME_____________________________________________________________________________________________________________________155',
# '1-LNKTYPE', links are skipped
'S-SPARSE',
'S-SPARSE-WITH-NULLS',
]
check_files(test_dir, expected)
assert [] == result
@skipIf(on_windows, 'Long paths are not handled well yet on windows')
def test_extract_python_testtar_tar_archive_with_special_files(self):
test_file = self.get_test_loc('archive/tar/testtar.tar')
# this is from:
# https://hg.python.org/cpython/raw-file/bff88c866886/Lib/test/testtar.tar
test_dir = self.get_temp_dir()
result = archive.extract_tar(test_file, test_dir)
expected_warnings = ["pax/regtype4: Pathname can't be converted from UTF-8 to current locale."]
assert sorted(expected_warnings) == sorted(result)
expected = [
'gnu/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/longname',
'gnu/regtype-gnu-uid',
'gnu/sparse',
'gnu/sparse-0.0',
'gnu/sparse-0.1',
'gnu/sparse-1.0',
'misc/eof',
'misc/regtype-hpux-signed-chksum-AOUaouss',
'misc/regtype-old-v7',
'misc/regtype-old-v7-signed-chksum-AOUaouss',
'misc/regtype-suntar',
'misc/regtype-xstar',
'pax/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/longname',
'pax/hdrcharset-aou',
'pax/regtype1',
'pax/regtype2',
'pax/regtype3',
'pax/regtype4',
'pax/regtype4_1',
'pax/umlauts-AOUaouss',
'ustar/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/1234567/longname',
'ustar/conttype',
'ustar/linktest1/regtype',
'ustar/regtype',
'ustar/sparse',
'ustar/umlauts-AOUaouss'
]
check_files(test_dir, expected)
class TestDebian(BaseArchiveTestCase):
def test_extract_deb_package_1(self):
test_file = self.get_test_loc('archive/deb/adduser_3.112ubuntu1_all.deb')
test_dir = self.get_temp_dir()
archive.extract_ar(test_file, test_dir)
check_size(110198, os.path.join(test_dir, 'data.tar.gz'))
def test_extract_deb_package_2(self):
test_file = self.get_test_loc('archive/deb/adduser_3.113+nmu3ubuntu3_all.deb')
test_dir = self.get_temp_dir()
archive.extract_ar(test_file, test_dir)
check_size(158441, os.path.join(test_dir, 'data.tar.gz'))
def test_get_best_handler_deb_package_is_an_archive(self):
test_file = self.get_test_loc('archive/deb/libjama-dev_1.2.4-2_all.deb')
handler = get_best_handler(test_file)
assert archive.DebHandler == handler
def test_extract_deb_package_3(self):
test_file = self.get_test_loc('archive/deb/wget-el_0.5.0-8_all.deb')
test_dir = self.get_temp_dir()
archive.extract_ar(test_file, test_dir)
check_size(36376, os.path.join(test_dir, 'data.tar.gz'))
class TestAr(BaseArchiveTestCase):
def test_extract_ar_basic_7z(self):
test_file = self.get_test_loc('archive/ar/liby.a')
test_dir = self.get_temp_dir()
result = sevenzip.extract(test_file, test_dir)
expected = ['1.txt', 'main.o', 'yyerror.o']
check_files(test_dir, expected)
assert [] == result
def test_extract_ar_basic(self):
test_file = self.get_test_loc('archive/ar/liby.a')
test_dir = self.get_temp_dir()
result = archive.extract_ar(test_file, test_dir)
expected = ['__.SYMDEF', 'main.o', 'yyerror.o']
check_files(test_dir, expected)
assert [] == result
def test_extract_ar_verify_dates(self):
test_file = self.get_test_loc('archive/ar/liby.a')
test_dir = self.get_temp_dir()
archive.extract_ar(test_file, test_dir)
expected = [
(os.path.join(test_dir, 'main.o'), '2007-06-12'),
(os.path.join(test_dir, 'yyerror.o'), '2007-06-12'),
]
# DST sends a monkey wrench.... so we only test the date, not the time
for loc, expected_date in expected:
result = commoncode.date.get_file_mtime(loc)
assert result.startswith(expected_date)
def test_extract_ar_broken_7z(self):
test_file = self.get_test_loc('archive/ar/liby-corrupted.a')
test_dir = self.get_temp_dir()
result = sevenzip.extract(test_file, test_dir)
expected = ['__.SYMDEF', 'main.o']
check_files(test_dir, expected)
assert [] == result
def test_extract_ar_broken(self):
test_file = self.get_test_loc('archive/ar/liby-corrupted.a')
test_dir = self.get_temp_dir()
result = archive.extract_ar(test_file, test_dir)
expected = [
'__.SYMDEF',
'main.o',
'main_1.o',
'main_10.o',
'main_11.o',
'main_2.o',
'main_3.o',
'main_4.o',
'main_5.o',
'main_6.o',
'main_7.o',
'main_8.o',
'main_9.o'
]
check_files(test_dir, expected)
assert ['main.o: Incorrect file header signature'] == result
def test_extract_ar_with_invalid_path(self):
test_file = self.get_test_loc('archive/ar/ar_invalidpath.ar')
test_dir = self.get_temp_dir()
result = archive.extract_ar(test_file, test_dir)
expected = ['this/that']
check_files(test_dir, expected)
assert [] == result
def test_extract_ar_with_relative_path_7z(self):
test_file = self.get_test_loc('archive/ar/winlib/htmlhelp.lib')
test_dir = self.get_temp_dir()
result = sevenzip.extract(test_file, test_dir)
expected = [
'1.txt',
'2.txt',
'release/init.obj'
]
check_files(test_dir, expected)
assert [] == result
def test_extract_ar_with_relative_path_libarch(self):
test_file = self.get_test_loc('archive/ar/winlib/htmlhelp.lib')
test_dir = self.get_temp_dir()
result = archive.libarchive2.extract(test_file, test_dir)
expected_warns = [
'/: Invalid string table',
"/: Invalid string table\nCan't find long filename for entry"
]
assert expected_warns == result
        # incorrect for now: need this: ['__.SYMDEF', 'release/init.obj']
expected = ['dot', 'dot_1', 'dot_2', 'dot_3']
check_files(test_dir, expected)
def test_extract_ar_with_relative_path_and_backslashes_in_names_libarch(self):
test_file = self.get_test_loc('archive/ar/winlib/freetype.lib')
test_dir = self.get_temp_dir()
result = archive.libarchive2.extract(test_file, test_dir)
expected_warns = [
'/: Invalid string table',
"/: Invalid string table\nCan't find long filename for entry"
]
assert expected_warns == result
# 7zip is better, but has a security bug for now
expected = [
'dot',
'dot_1',
'dot_10',
'dot_11',
'dot_12',
'dot_13',
'dot_14',
'dot_15',
'dot_16',
'dot_17',
'dot_18',
'dot_19',
'dot_2',
'dot_20',
'dot_21',
'dot_22',
'dot_23',
'dot_24',
'dot_25',
'dot_26',
'dot_27',
'dot_28',
'dot_29',
'dot_3',
'dot_30',
'dot_31',
'dot_32',
'dot_33',
'dot_34',
'dot_35',
'dot_4',
'dot_5',
'dot_6',
'dot_7',
'dot_8',
'dot_9'
]
check_files(test_dir, expected)
def test_extract_ar_with_relative_path_and_backslashes_in_names_7z(self):
test_file = self.get_test_loc('archive/ar/winlib/freetype.lib')
test_dir = self.get_temp_dir()
result = sevenzip.extract(test_file, test_dir)
assert [] == result
expected = [
'1.txt',
'2.txt',
'objs/debug_mt/autofit.obj',
'objs/debug_mt/bdf.obj',
'objs/debug_mt/cff.obj',
'objs/debug_mt/ftbase.obj',
'objs/debug_mt/ftbbox.obj',
'objs/debug_mt/ftbitmap.obj',
'objs/debug_mt/ftcache.obj',
'objs/debug_mt/ftdebug.obj',
'objs/debug_mt/ftgasp.obj',
'objs/debug_mt/ftglyph.obj',
'objs/debug_mt/ftgzip.obj',
'objs/debug_mt/ftinit.obj',
'objs/debug_mt/ftlzw.obj',
'objs/debug_mt/ftmm.obj',
'objs/debug_mt/ftpfr.obj',
'objs/debug_mt/ftstroke.obj',
'objs/debug_mt/ftsynth.obj',
'objs/debug_mt/ftsystem.obj',
'objs/debug_mt/fttype1.obj',
'objs/debug_mt/ftwinfnt.obj',
'objs/debug_mt/pcf.obj',
'objs/debug_mt/pfr.obj',
'objs/debug_mt/psaux.obj',
'objs/debug_mt/pshinter.obj',
'objs/debug_mt/psmodule.obj',
'objs/debug_mt/raster.obj',
'objs/debug_mt/sfnt.obj',
'objs/debug_mt/smooth.obj',
'objs/debug_mt/truetype.obj',
'objs/debug_mt/type1.obj',
'objs/debug_mt/type1cid.obj',
'objs/debug_mt/type42.obj',
'objs/debug_mt/winfnt.obj'
]
check_files(test_dir, expected)
def test_extract_ar_static_library_does_not_delete_symdefs_7z(self):
test_file = self.get_test_loc('archive/ar/liby.a')
test_dir = self.get_temp_dir()
result = sevenzip.extract(test_file, test_dir)
# the symdef file is 1.txt with 7z
expected = ['1.txt', 'main.o', 'yyerror.o']
check_files(test_dir, expected)
assert [] == result
def test_extract_ar_static_library_does_not_delete_symdefs(self):
test_file = self.get_test_loc('archive/ar/liby.a')
test_dir = self.get_temp_dir()
result = archive.extract_ar(test_file, test_dir)
expected = ['__.SYMDEF', 'main.o', 'yyerror.o']
check_files(test_dir, expected)
assert [] == result
def test_extract_ar_with_trailing_data(self):
test_file = self.get_test_loc('archive/ar/ar_trailing.a')
test_dir = self.get_temp_dir()
archive.extract_ar(test_file, test_dir)
result = os.path.join(test_dir, 'main.o')
assert os.path.exists(result)
result = os.path.join(test_dir, 'yyerror.o')
assert os.path.exists(result)
def test_extract_ar_with_permissions_7z(self):
test_file = self.get_test_loc('archive/ar/winlib/zlib.lib')
test_dir = self.get_temp_dir()
result = sevenzip.extract(test_file, test_dir)
expected = ['1.txt', '1.zlib.pyd', '2.txt', '2.zlib.pyd', '3.zlib.pyd', '4.zlib.pyd']
check_files(test_dir, expected)
assert [] == result
def test_extract_ar_with_permissions(self):
# this behavior is not correct: 7z is better, but has security flaws for now
test_file = self.get_test_loc('archive/ar/winlib/zlib.lib')
test_dir = self.get_temp_dir()
result = archive.extract_ar(test_file, test_dir)
assert [] == result
expected = ['dot', 'dot_1']
check_files(test_dir, expected)
class TestCpio(BaseArchiveTestCase):
def test_extract_cpio_basic(self):
test_file = self.get_test_loc('archive/cpio/elfinfo-1.0-1.fc9.src.cpio')
test_dir = self.get_temp_dir()
archive.extract_cpio(test_file, test_dir)
result = os.path.join(test_dir, 'elfinfo-1.0.tar.gz')
assert os.path.exists(result)
def test_extract_cpio_with_trailing_data(self):
test_file = self.get_test_loc('archive/cpio/cpio_trailing.cpio')
test_dir = self.get_temp_dir()
archive.extract_cpio(test_file, test_dir)
result = os.path.join(test_dir, 'elfinfo-1.0.tar.gz')
assert os.path.exists(result)
def test_extract_cpio_broken_7z(self):
test_file = self.get_test_loc('archive/cpio/cpio_broken.cpio')
test_dir = self.get_temp_dir()
self.assertRaisesInstance(Exception('No error returned'), sevenzip.extract, test_file, test_dir)
def test_extract_cpio_broken2(self):
test_file = self.get_test_loc('archive/cpio/cpio_broken.cpio')
test_dir = self.get_temp_dir()
result = archive.extract_cpio(test_file, test_dir)
assert ['elfinfo-1.0.tar.gz', 'elfinfo-1_1.0.tar.gz'] == sorted(os.listdir(test_dir))
assert ['elfinfo-1.0.tar.gz: Skipped 72 bytes before finding valid header'] == result
def test_extract_cpio_with_absolute_path(self):
assert not os.path.exists('/tmp/subdir')
test_dir = self.get_temp_dir()
test_file = self.get_test_loc('archive/cpio/cpio_absolute.cpio')
archive.extract_cpio(test_file, test_dir)
assert not os.path.exists('/tmp/subdir')
result = os.path.join(test_dir, 'home/li/Desktop/absolute_folder/absolute_file')
assert os.path.exists(result)
def test_extract_cpio_with_relative_path(self):
# test file is created by cmd: find ../.. - |cpio -ov >relative.cpio
# We should somehow add a "parent" folder to extract relative paths
test_file = self.get_test_loc('archive/cpio/cpio_relative.cpio')
test_dir = self.get_temp_dir()
result = archive.extract_cpio(test_file, test_dir)
assert [] == result
extracted = self.collect_extracted_path(test_dir)
expected = [
'/dotdot/',
'/dotdot/dotdot/',
'/dotdot/dotdot/2folder/',
'/dotdot/dotdot/2folder/3folder/',
'/dotdot/dotdot/2folder/3folder/cpio_relative.cpio',
'/dotdot/dotdot/2folder/3folder/relative_file',
'/dotdot/dotdot/2folder/3folder/relative_file~',
'/dotdot/dotdot/2folder/relative_file',
'/dotdot/dotdot/relative_file'
]
assert expected == extracted
def test_extract_cpio_with_invalidpath(self):
test_file = self.get_test_loc('archive/cpio/cpio-invalidpath.cpio')
test_dir = self.get_temp_dir()
archive.extract_cpio(test_file, test_dir)
result = os.path.join(test_dir, 'backup')
assert os.path.exists(result)
result = os.path.join(test_dir, 'this/that')
assert os.path.exists(result)
def test_extract_cpio_with_weird_filename_extension(self):
test_file = self.get_test_loc('archive/cpio/t.cpio.foo')
test_dir = self.get_temp_dir()
result = archive.extract_cpio(test_file, test_dir)
assert [] == result
extracted = self.collect_extracted_path(test_dir)
expected = ['/t/', '/t/t.txt']
assert expected == extracted
class TestRpm(BaseArchiveTestCase):
def test_extract_rpm_basic_1(self):
test_file = self.get_test_loc('archive/rpm/elfinfo-1.0-1.fc9.src.rpm')
test_dir = self.get_temp_dir()
archive.extract_rpm(test_file, test_dir)
result = os.path.join(test_dir, 'elfinfo-1.0-1.fc9.src.cpio.gz')
assert os.path.exists(result)
def test_extract_rpm_basic_2(self):
test_file = self.get_test_loc('archive/rpm/python-glc-0.7.1-1.src.rpm')
test_dir = self.get_temp_dir()
archive.extract_rpm(test_file, test_dir)
result = os.path.join(test_dir, 'python-glc-0.7.1-1.src.cpio.gz')
assert os.path.exists(result)
def test_extract_rpm_nested_correctly(self):
test_file = self.get_test_loc('archive/rpm/extract_once/libsqueeze0.2_0-0.2.3-8mdv2010.0.i586.rpm')
test_dir = self.get_temp_dir()
archive.extract_rpm(test_file, test_dir)
result = os.path.join(test_dir, 'libsqueeze0.2_0-0.2.3-8mdv2010.0.i586.cpio.lzma')
assert os.path.exists(result)
def test_extract_rpm_with_trailing_data(self):
test_file = self.get_test_loc('archive/rpm/rpm_trailing.rpm')
test_dir = self.get_temp_dir()
result = archive.extract_rpm(test_file, test_dir)
expected = ['elfinfo-1.0-1.fc9.src.cpio.gz']
check_files(test_dir, expected)
assert [] == result
def test_extract_rpm_with_renamed_content(self):
# When the RPM is renamed, we should still be able to find the cpio
test_file = self.get_test_loc('archive/rpm/renamed.rpm')
test_dir = self.get_temp_dir()
result = archive.extract_rpm(test_file, test_dir)
expected = ['python-glc-0.7.1-1.src.cpio.gz']
check_files(test_dir, expected)
assert [] == result
def test_extract_rpm_broken(self):
test_file = self.get_test_loc('archive/rpm/broken.rpm')
test_dir = self.get_temp_dir()
expected = Exception('No error returned')
self.assertRaisesInstance(expected, archive.extract_rpm,
test_file, test_dir)
class TestExtractTwice(BaseArchiveTestCase):
def test_extract_twice_with_rpm_with_xz_compressed_cpio(self):
test_file = self.get_test_loc('archive/rpm/xz-compressed-cpio.rpm')
test_dir = self.get_temp_dir()
# this will return an extractor that extracts twice
extractor = archive.get_extractor(test_file)
result = list(extractor(test_file, test_dir))
assert [] == result
expected = [
'etc/abrt/abrt-action-save-package-data.conf',
'etc/abrt/abrt.conf',
'etc/abrt/gpg_keys',
'etc/dbus-1/system.d/dbus-abrt.conf',
'etc/libreport/events.d/abrt_event.conf',
'etc/libreport/events.d/smart_event.conf',
'etc/rc.d/init.d/abrtd',
'usr/bin/abrt-action-save-package-data',
'usr/bin/abrt-handle-upload',
'usr/libexec/abrt-handle-event',
'usr/libexec/abrt1-to-abrt2',
'usr/sbin/abrt-dbus',
'usr/sbin/abrt-server',
'usr/sbin/abrtd',
'usr/share/dbus-1/system-services/com.redhat.abrt.service',
'usr/share/doc/abrt-2.0.8/COPYING',
'usr/share/doc/abrt-2.0.8/README',
'usr/share/locale/ar/LC_MESSAGES/abrt.mo',
'usr/share/locale/as/LC_MESSAGES/abrt.mo',
'usr/share/locale/ast/LC_MESSAGES/abrt.mo',
'usr/share/locale/bg/LC_MESSAGES/abrt.mo',
'usr/share/locale/bn_IN/LC_MESSAGES/abrt.mo',
'usr/share/locale/ca/LC_MESSAGES/abrt.mo',
'usr/share/locale/cs/LC_MESSAGES/abrt.mo',
'usr/share/locale/da/LC_MESSAGES/abrt.mo',
'usr/share/locale/de/LC_MESSAGES/abrt.mo',
'usr/share/locale/el/LC_MESSAGES/abrt.mo',
'usr/share/locale/en_GB/LC_MESSAGES/abrt.mo',
'usr/share/locale/es/LC_MESSAGES/abrt.mo',
'usr/share/locale/fa/LC_MESSAGES/abrt.mo',
'usr/share/locale/fi/LC_MESSAGES/abrt.mo',
'usr/share/locale/fr/LC_MESSAGES/abrt.mo',
'usr/share/locale/gu/LC_MESSAGES/abrt.mo',
'usr/share/locale/he/LC_MESSAGES/abrt.mo',
'usr/share/locale/hi/LC_MESSAGES/abrt.mo',
'usr/share/locale/hu/LC_MESSAGES/abrt.mo',
'usr/share/locale/id/LC_MESSAGES/abrt.mo',
'usr/share/locale/it/LC_MESSAGES/abrt.mo',
'usr/share/locale/ja/LC_MESSAGES/abrt.mo',
'usr/share/locale/kn/LC_MESSAGES/abrt.mo',
'usr/share/locale/ko/LC_MESSAGES/abrt.mo',
'usr/share/locale/ml/LC_MESSAGES/abrt.mo',
'usr/share/locale/mr/LC_MESSAGES/abrt.mo',
'usr/share/locale/nb/LC_MESSAGES/abrt.mo',
'usr/share/locale/nl/LC_MESSAGES/abrt.mo',
'usr/share/locale/or/LC_MESSAGES/abrt.mo',
'usr/share/locale/pa/LC_MESSAGES/abrt.mo',
'usr/share/locale/pl/LC_MESSAGES/abrt.mo',
'usr/share/locale/pt/LC_MESSAGES/abrt.mo',
'usr/share/locale/pt_BR/LC_MESSAGES/abrt.mo',
'usr/share/locale/ru/LC_MESSAGES/abrt.mo',
'usr/share/locale/sk/LC_MESSAGES/abrt.mo',
'usr/share/locale/sr/LC_MESSAGES/abrt.mo',
'usr/share/locale/sr@latin/LC_MESSAGES/abrt.mo',
'usr/share/locale/sv/LC_MESSAGES/abrt.mo',
'usr/share/locale/ta/LC_MESSAGES/abrt.mo',
'usr/share/locale/te/LC_MESSAGES/abrt.mo',
'usr/share/locale/uk/LC_MESSAGES/abrt.mo',
'usr/share/locale/zh_CN/LC_MESSAGES/abrt.mo',
'usr/share/locale/zh_TW/LC_MESSAGES/abrt.mo',
'usr/share/man/man1/abrt-action-save-package-data.1.gz',
'usr/share/man/man1/abrt-handle-upload.1.gz',
'usr/share/man/man1/abrt-server.1.gz',
'usr/share/man/man5/abrt-action-save-package-data.conf.5.gz',
'usr/share/man/man5/abrt.conf.5.gz',
'usr/share/man/man8/abrt-dbus.8.gz',
'usr/share/man/man8/abrtd.8.gz'
]
check_files(test_dir, expected)
def test_extract_twice_can_extract_to_relative_paths(self):
# The setup is a tad complex because we want a dir relative to the base dir
# where we run tests from, i.e. the scancode-toolkit/ dir.
# To use relative paths, we use our tmp dir at the root of the code tree.
from os.path import dirname, join, abspath, exists
import shutil
import tempfile
test_file = self.get_test_loc('archive/rpm/xz-compressed-cpio.rpm')
# this will return an extractor that extracts twice
extractor = archive.get_extractor(test_file)
scancode_root = dirname(dirname(dirname(__file__)))
scancode_tmp = join(scancode_root, 'tmp')
fileutils.create_dir(scancode_tmp)
scancode_root_abs = abspath(scancode_root)
test_src_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
test_tgt_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
shutil.copy(test_file, test_src_dir)
test_src_file = join(test_src_dir, 'xz-compressed-cpio.rpm')
result = list(extractor(test_src_file, test_tgt_dir))
assert [] == result
assert exists(join(test_tgt_dir, 'usr/sbin/abrt-dbus'))
class TestRar(BaseArchiveTestCase):
def test_extract_rar_basic(self):
test_file = self.get_test_loc('archive/rar/basic.rar')
test_dir = self.get_temp_dir()
archive.extract_rar(test_file, test_dir)
result = os.path.join(test_dir, 'd', 'b', 'a.txt')
assert os.path.exists(result)
def test_extract_rar_with_invalid_path(self):
test_file = self.get_test_loc('archive/rar/rar_invalidpath.rar')
test_dir = self.get_temp_dir()
archive.extract_rar(test_file, test_dir)
result = os.path.join(test_dir, 'this/that')
assert os.path.exists(result)
def test_extract_rar_with_trailing_data(self):
test_file = self.get_test_loc('archive/rar/rar_trailing.rar')
test_dir = self.get_temp_dir()
# trailing data should not prevent extraction
archive.extract_rar(test_file, test_dir)
result = os.path.join(test_dir, 'd', 'b', 'a.txt')
assert os.path.exists(result)
def test_extract_rar_broken(self):
test_file = self.get_test_loc('archive/rar/broken.rar')
test_dir = self.get_temp_dir()
expected = Exception('No error returned')
self.assertRaisesInstance(expected, archive.extract_rar, test_file, test_dir)
def test_extract_rar_with_relative_path(self):
# FIXME: this file may not have a real relative path
test_file = self.get_test_loc('archive/rar/rar_relative.rar', copy=True)
test_dir = self.get_temp_dir()
archive.extract_rar(test_file, test_dir)
result = os.path.abspath(test_file + '/../a_parent_folder.txt')
assert not os.path.exists(result)
result = os.path.join(test_dir, '2folder/relative_file')
assert os.path.exists(result)
result = os.path.join(test_dir, '2folder/3folder/relative_file')
assert os.path.exists(result)
def test_extract_rar_with_absolute_path(self):
# FIXME: this file may not have a real absolute path
assert not os.path.exists('/home/li/Desktop/zip_folder')
test_file = self.get_test_loc('archive/rar/rar_absolute.rar', copy=True)
test_dir = self.get_temp_dir()
archive.extract_rar(test_file, test_dir)
assert not os.path.exists('/home/li/Desktop/absolute_folder')
result = os.path.join(test_dir, 'home/li/Desktop',
'absolute_folder/absolute_file')
assert os.path.exists(result)
def test_extract_rar_with_password(self):
test_file = self.get_test_loc('archive/rar/rar_password.rar')
test_dir = self.get_temp_dir()
expected = Exception('Password protected archive, unable to extract')
self.assertRaisesInstance(expected, archive.extract_rar,
test_file, test_dir)
def test_extract_rar_with_non_ascii_path(self):
test_file = self.get_test_loc('archive/rar/non_ascii_corrupted.rar')
# The bug only occurs if the path was given as Unicode!
test_file = unicode(test_file)
test_dir = self.get_temp_dir()
# raises an exception but still extracts some files
expected = Exception('No error returned')
self.assertRaisesInstance(expected, archive.extract_rar,
test_file, test_dir)
result = os.path.join(test_dir, 'EdoProject_java/WebContent'
'/WEB-INF/lib/cos.jar')
assert os.path.exists(result)
class TestSevenZip(BaseArchiveTestCase):
def test_extract_7z_basic(self):
test_file = self.get_test_loc('archive/7z/z.7z')
test_dir = self.get_temp_dir()
result = archive.extract_7z(test_file, test_dir)
assert [] == result
expected = ['z/a/a.txt', 'z/b/a.txt', 'z/c/a.txt']
check_files(test_dir, expected)
def test_extract_7z_with_trailing_data(self):
test_file = self.get_test_loc('archive/7z/7zip_trailing.7z')
test_dir = self.get_temp_dir()
result = archive.extract_7z(test_file, test_dir)
assert [] == result
expected = ['z/a/a.txt', 'z/b/a.txt', 'z/c/a.txt']
check_files(test_dir, expected)
def test_extract_7z_with_broken_archive_with7z(self):
test_file = self.get_test_loc('archive/7z/corrupted7z.7z')
test_dir = self.get_temp_dir()
msg = 'No error returned'
self.assertRaisesInstance(ExtractErrorFailedToExtract(msg), sevenzip.extract, test_file, test_dir)
def test_extract_7z_with_broken_archive_does_not_fail_when_using_fallback(self):
test_file = self.get_test_loc('archive/7z/corrupted7z.7z')
test_dir = self.get_temp_dir()
msg = 'No error returned'
self.assertRaisesInstance(ExtractErrorFailedToExtract(msg), archive.extract_7z, test_file, test_dir)
def test_extract_7z_with_non_existing_archive(self):
test_file = 'archive/7z/I_DO_NOT_EXIST.zip'
test_dir = self.get_temp_dir()
msg = 'No error returned'
self.assertExceptionContains(msg, sevenzip.extract, test_file, test_dir)
def test_extract_7z_with_invalid_path_using_7z(self):
test_file = self.get_test_loc('archive/7z/7zip_invalidpath.7z')
test_dir = self.get_temp_dir()
result = sevenzip.extract(test_file, test_dir)
assert [] == result
extracted = self.collect_extracted_path(test_dir)
expected = ['/this/', '/this/that']
assert expected == extracted
def test_extract_7z_with_invalid_path(self):
test_file = self.get_test_loc('archive/7z/7zip_invalidpath.7z')
test_dir = self.get_temp_dir()
result = archive.extract_7z(test_file, test_dir)
assert [] == result
extracted = self.collect_extracted_path(test_dir)
expected = ['/this/', '/this/that']
assert expected == extracted
def test_extract_7z_with_relative_path(self):
test_file = self.get_test_loc('archive/7z/7zip_relative.7z')
test_dir = self.get_temp_dir()
result = archive.extract_7z(test_file, test_dir)
non_result = os.path.join(test_dir, '../a_parent_folder.txt')
assert not os.path.exists(non_result)
assert [] == result
extracted = self.collect_extracted_path(test_dir)
expected = [
'/dotdot/',
'/dotdot/2folder/',
'/dotdot/2folder/3folder/',
'/dotdot/2folder/3folder/relative_file',
'/dotdot/2folder/3folder/relative_file~',
'/dotdot/2folder/relative_file',
'/dotdot/relative_file'
]
assert expected == extracted
def test_extract_7z_with_password_with_7z(self):
test_file = self.get_test_loc('archive/7z/7zip_password.7z')
test_dir = self.get_temp_dir()
expected = Exception('Password protected archive, unable to extract')
self.assertRaisesInstance(expected, sevenzip.extract, test_file, test_dir)
def test_extract_7z_with_password(self):
test_file = self.get_test_loc('archive/7z/7zip_password.7z')
test_dir = self.get_temp_dir()
expected = Exception('Password protected archive, unable to extract')
self.assertRaisesInstance(expected, archive.extract_7z, test_file, test_dir)
def test_extract_7zip_native_with_unicode_path_should_extract_without_error(self):
test_file = self.get_test_loc('archive/7z/7zip_unicode.7z')
test_dir = self.get_temp_dir()
result = sevenzip.extract(test_file, test_dir)
assert [] == result
assert 2 == len(os.listdir(os.path.join(test_dir, 'zip')))
def test_extract_7zip_with_fallback_with_unicode_path_should_extract_without_error(self):
test_file = self.get_test_loc('archive/7z/7zip_unicode.7z')
test_dir = self.get_temp_dir()
result = archive.extract_7z(test_file, test_dir)
assert [] == result
assert 2 == len(os.listdir(os.path.join(test_dir, 'zip')))
def test_extract_7zip_libarchive_with_unicode_path_extracts_with_errors(self):
test_file = self.get_test_loc('archive/7z/7zip_unicode.7z')
test_dir = self.get_temp_dir()
try:
archive.extract_7z(test_file, test_dir)
except libarchive2.ArchiveError as e:
assert 'Damaged 7-Zip archive' in e.msg
def test_extract_7z_basic_with_space_in_file_name(self):
test_file = self.get_test_loc('archive/7z/t .7z')
test_dir = self.get_temp_dir()
result = archive.extract_7z(test_file, test_dir)
assert [] == result
expected = ['t/t.txt']
check_files(test_dir, expected)
class TestIso(BaseArchiveTestCase):
def test_extract_iso_basic(self):
test_file = self.get_test_loc('archive/iso/small.iso')
test_dir = self.get_temp_dir()
archive.extract_iso(test_file, test_dir)
extracted = self.collect_extracted_path(test_dir)
expected = [
'/ChangeLog',
'/ChangeLog (copy)',
'/freebase.ABOUT',
'/this/',
'/this/that'
]
assert sorted(expected) == sorted(extracted)
def test_get_extractor_not_iso_text_is_not_mistaken_for_an_iso_image(self):
test_file = self.get_test_loc('archive/iso/ChangeLog')
extractor = archive.get_extractor(test_file)
assert not extractor
def test_extract_iso_basic_with_weird_filename_extension(self):
test_file = self.get_test_loc('archive/iso/t.iso.foo')
test_dir = self.get_temp_dir()
archive.extract_iso(test_file, test_dir)
extracted = self.collect_extracted_path(test_dir)
expected = ['/t/', '/t/t.txt']
assert expected == extracted
class TestXzLzma(BaseArchiveTestCase):
def check_lzma_extract(self, extract_fun, test_file, expected):
"""
Run the 'extract_fun' function using the 'test_file' file as an input
and verifies that the 'expected' file has been extracted correctly.
"""
test_file = self.get_test_loc(test_file)
extract_dir = self.get_temp_dir()
expected_file = os.path.join(extract_dir, expected)
extract_fun(test_file, extract_dir)
assert os.path.exists(expected_file), (
'%(expected_file)s file was not extracted '
'correctly from archive %(test_file)s'
% locals())
def test_extract_archive_tar_xz_1(self):
test_file = 'archive/lzma_xz/basic/texlive-core-patches-20.tar.xz'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected='texlive-core-patches-20.tar')
def test_extract_archive_tar_xz_2(self):
test_file = 'archive/lzma_xz/all/texlive-core-patches-20.tar.xz'
expected = 'texlive-core-patches-20.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_xz_3(self):
test_file = 'archive/lzma_xz/all/binutils-2.22.52.0.3-patches-1.0.tar.xz'
expected = 'binutils-2.22.52.0.3-patches-1.0.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_xz_4(self):
test_file = 'archive/lzma_xz/all/bdsup2sub-4.0.0.tar.xz'
expected = 'bdsup2sub-4.0.0.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_xz_5(self):
test_file = 'archive/lzma_xz/all/desktop-file-utils-0.19.tar.xz'
expected = 'desktop-file-utils-0.19.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_lzma_1(self):
test_file = 'archive/lzma_xz/basic/coreutils-8.5-patches-1.tar.lzma'
expected = 'coreutils-8.5-patches-1.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_lzma_2(self):
test_file = 'archive/lzma_xz/all/orionsocket-1.0.9.tar.lzma'
expected = 'orionsocket-1.0.9.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_lzma_3(self):
test_file = 'archive/lzma_xz/all/MinGW-5.1.6.exe-src.tar.lzma'
expected = 'MinGW-5.1.6.exe-src.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_lzma_4(self):
test_file = 'archive/lzma_xz/all/dnsmasq-2.57.tar.lzma'
expected = 'dnsmasq-2.57.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_lzma_1(self):
test_file = 'archive/lzma_xz/all/cromwell-2.40-r3-cvs-fixes.patch.lzma'
expected = 'cromwell-2.40-r3-cvs-fixes.patch'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_lzma_5(self):
test_file = 'archive/lzma_xz/all/coreutils-8.5-patches-1.tar.lzma'
expected = 'coreutils-8.5-patches-1.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
class TestDia(BaseArchiveTestCase):
def test_extract_dia_basic(self):
test_file = self.get_test_loc('archive/dia/dia.dia')
test_dir = self.get_temp_dir()
archive.uncompress_gzip(test_file, test_dir)
result = os.path.join(test_dir, 'dia.dia-extract')
assert os.path.exists(result)
def test_extract_dia_with_trailing_data(self):
test_file = self.get_test_loc('archive/dia/dia_trailing.dia')
test_dir = self.get_temp_dir()
archive.uncompress_gzip(test_file, test_dir)
result = os.path.join(test_dir, 'dia_trailing.dia-extract')
assert os.path.exists(result)
def test_extract_dia_broken_1(self):
test_file = self.get_test_loc('archive/dia/dia_broken.dia')
test_dir = self.get_temp_dir()
self.assertExceptionContains('CRC check failed',
archive.uncompress_gzip,
test_file,
test_dir)
def test_extract_dia_broken_2(self):
test_file = self.get_test_loc('archive/dia/broken/PublisherUML.dia')
test_dir = self.get_temp_dir()
self.assertExceptionContains('invalid distance too far back',
archive.uncompress_gzip,
test_file,
test_dir)
def test_extract_dia_broken_3(self):
test_file = self.get_test_loc('archive/dia/broken/schedulerClassDiagram.dia')
test_dir = self.get_temp_dir()
self.assertExceptionContains('invalid distance too far back',
archive.uncompress_gzip,
test_file,
test_dir)
def test_extract_dia_broken_4(self):
test_file = self.get_test_loc('archive/dia/broken/ServletProxyGenerator.dia')
test_dir = self.get_temp_dir()
self.assertExceptionContains('invalid distance too far back',
archive.uncompress_gzip,
test_file,
test_dir)
def test_extract_can_get_extractor_and_uncompress_dia_files(self):
test_file = self.get_test_loc('archive/dia/guess/infoset-doc.dia')
test_dir = self.get_temp_dir()
archive.get_extractor(test_file)(test_file, test_dir)
result = os.path.join(test_dir, 'infoset-doc.dia-extract')
assert os.path.exists(result)
class TestTarZ(BaseArchiveTestCase):
def test_extract_tarz_compress_basic(self):
test_file = self.get_test_loc('archive/Z/tkWWW-0.11.tar.Z')
test_dir = self.get_temp_dir()
archive.extract_Z(test_file, test_dir)
result = os.path.join(test_dir, 'tkWWW-0.11.tar')
assert os.path.exists(result)
def test_extract_z_compress_basic(self):
test_file = self.get_test_loc('archive/Z/tr2tex.Z')
test_dir = self.get_temp_dir()
archive.extract_Z(test_file, test_dir)
result = os.path.join(test_dir, 'tr2tex')
assert os.path.exists(result)
class TestXar(BaseArchiveTestCase):
def test_extract_xar_basic(self):
test_file = self.get_test_loc('archive/xar/xar-1.4.xar')
test_dir = self.get_temp_dir()
archive.extract_Z(test_file, test_dir)
result = os.path.join(test_dir, '[TOC].xml')
assert os.path.exists(result)
result = os.path.join(test_dir, 'xar-1.4', 'Makefile.in')
assert os.path.exists(result)
class TestCb7(BaseArchiveTestCase):
def test_get_extractor_cb7(self):
test_file = self.get_test_loc('archive/cb7/t .cb7')
result = archive.get_extractor(test_file)
expected = archive.extract_7z
assert expected == result
def test_extract_cb7_basic_with_space_in_file_name(self):
test_file = self.get_test_loc('archive/cb7/t .cb7')
test_dir = self.get_temp_dir()
archive.extract_7z(test_file, test_dir)
extracted = self.collect_extracted_path(test_dir)
expected = ['/t/', '/t/t.txt']
assert expected == extracted
def test_extract_cb7_basic_with_weird_filename_extension(self):
test_file = self.get_test_loc('archive/cb7/t.cb7.foo')
test_dir = self.get_temp_dir()
archive.extract_7z(test_file, test_dir)
extracted = self.collect_extracted_path(test_dir)
expected = ['/t/', '/t/t.txt']
assert expected == extracted
class TestCab(BaseArchiveTestCase):
def test_get_extractor_cab(self):
test_file = self.get_test_loc('archive/cab/basic.cab')
result = archive.get_extractor(test_file)
expected = archive.extract_cab
assert expected == result
def test_extract_cab_basic(self):
test_file = self.get_test_loc('archive/cab/basic.cab')
test_dir = self.get_temp_dir()
archive.extract_cab(test_file, test_dir)
extracted = self.collect_extracted_path(test_dir)
expected = ['/TREEHELP.TXT']
assert expected == extracted
def test_extract_cab_basic_with_weird_filename_extension(self):
test_file = self.get_test_loc('archive/cab/t.cab.foo')
test_dir = self.get_temp_dir()
archive.extract_cab(test_file, test_dir)
extracted = self.collect_extracted_path(test_dir)
expected = ['/t/', '/t/t.txt']
assert expected == extracted
class TestCbr(BaseArchiveTestCase):
def test_get_extractor_cbr(self):
test_file = self.get_test_loc('archive/cbr/t.cbr')
result = archive.get_extractor(test_file)
expected = archive.extract_rar
assert expected == result
def test_extract_cbr_basic(self):
test_file = self.get_test_loc('archive/cbr/t.cbr')
test_dir = self.get_temp_dir()
archive.extract_cab(test_file, test_dir)
extracted = self.collect_extracted_path(test_dir)
expected = ['/t/', '/t/t.txt']
assert expected == extracted
def test_extract_cbr_basic_with_weird_filename_extension(self):
test_file = self.get_test_loc('archive/cbr/t.cbr.foo')
test_dir = self.get_temp_dir()
archive.extract_cab(test_file, test_dir)
extracted = self.collect_extracted_path(test_dir)
expected = ['/t/', '/t/t.txt']
assert expected == extracted
class TestCbt(BaseArchiveTestCase):
def test_get_extractor_cbt(self):
test_file = self.get_test_loc('archive/cbt/t.cbt')
result = archive.get_extractor(test_file)
expected = archive.extract_tar
assert expected == result
def test_extract_cbt_basic(self):
test_file = self.get_test_loc('archive/cbt/t.cbt')
test_dir = self.get_temp_dir()
archive.extract_tar(test_file, test_dir)
extracted = self.collect_extracted_path(test_dir)
expected = ['/t/', '/t/t.txt']
assert expected == extracted
def test_extract_cbt_basic_with_weird_filename_extension(self):
test_file = self.get_test_loc('archive/cbt/t.cbt.foo')
test_dir = self.get_temp_dir()
archive.extract_tar(test_file, test_dir)
extracted = self.collect_extracted_path(test_dir)
expected = ['/t/', '/t/t.txt']
assert expected == extracted
class TestCbz(BaseArchiveTestCase):
def test_get_extractor_cbz(self):
test_file = self.get_test_loc('archive/cbz/t.cbz')
result = archive.get_extractor(test_file)
expected = archive.extract_zip
assert expected == result
def test_extract_cbz_basic(self):
test_file = self.get_test_loc('archive/cbz/t.cbz')
test_dir = self.get_temp_dir()
archive.extract_zip(test_file, test_dir)
extracted = self.collect_extracted_path(test_dir)
expected = ['/t/', '/t/t.txt']
assert expected == extracted
def test_extract_cbz_basic_with_weird_filename_extension(self):
test_file = self.get_test_loc('archive/cbz/t.cbz.foo')
test_dir = self.get_temp_dir()
archive.extract_zip(test_file, test_dir)
extracted = self.collect_extracted_path(test_dir)
expected = ['/t/', '/t/t.txt']
assert expected == extracted
# Note: this series of tests is not easy to grasp, but unicode archives on multiple OSes
# are hard to test. So we have one test class for each of libarchive and sevenzip on
# each of the three OSes, which makes six test classes, each duplicated with possibly
# different expectations on each OS. Then each test class has a subclass with
# check_only_warnings set to True to test only the possible warnings separately.
# The code tries to avoid too much duplication, but this is at the cost of readability.
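# A minimal sketch of that pattern (class names here are illustrative only, not the
# real test classes defined further below): a concrete per-OS, per-tool class carries
# the expectations, and a subclass flips check_only_warnings to re-run the same tests
# while checking only the returned warnings.
#
#   class _ExampleWeirdNamesWithLibarchiveOnSomeOS(ExtractArchiveWithIllegalFilenamesTestCase):
#       check_only_warnings = False
#
#       def test_extract_tar_with_weird_filenames(self):
#           test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
#           self.check_extract(libarchive2.extract, test_file,
#                              expected_warnings=[], expected_suffix='libarch')
#
#   class _ExampleWeirdNamesWithLibarchiveOnSomeOSWarnings(_ExampleWeirdNamesWithLibarchiveOnSomeOS):
#       check_only_warnings = True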
def is_posixpath(location):
"""
Return True if the `location` path is likely a POSIX-like path using POSIX path
separators (slash or "/") or has no path separator.
Return False if the `location` path is likely a Windows-like path using backslashes
as path separators (e.g. "\").
"""
has_slashes = '/' in location
has_backslashes = '\\' in location
# windows paths with drive
if location:
drive, _ = ntpath.splitdrive(location)
if drive:
return False
# a path is always POSIX unless it contains ONLY backslashes,
# which is a rough approximation (it could still be POSIX)
is_posix = True
if has_backslashes and not has_slashes:
is_posix = False
return is_posix
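# A minimal usage sketch for is_posixpath (illustrative values, not from the test
# suite); the results follow from the ntpath drive check and the backslashes-only
# rule above:
#   is_posixpath('/usr/bin/python')   -> True   (forward slashes only)
#   is_posixpath('C:\\tools\\bin')    -> False  (Windows drive letter)
#   is_posixpath('README')            -> True   (no separator at all)
#   is_posixpath('mixed/path\\file')  -> True   (not made of backslashes only)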
def to_posix(path):
"""
Return a path using the posix path separator given a path that may contain posix
or windows separators, converting \ to /. NB: this path will still be valid in
the windows explorer (except as a UNC or share name). It will be a valid path
everywhere in Python. It will not be valid for windows command line operations.
"""
is_unicode = isinstance(path, unicode)
ntpath_sep = is_unicode and u'\\' or '\\'
posixpath_sep = is_unicode and u'/' or '/'
if is_posixpath(path):
if on_windows:
return path.replace(ntpath_sep, posixpath_sep)
else:
return path
return path.replace(ntpath_sep, posixpath_sep)
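# A minimal usage sketch for to_posix (illustrative values, assumed from the logic
# above): backslash separators are rewritten to forward slashes, and an
# already-POSIX path comes back unchanged on non-Windows platforms:
#   to_posix('dir\\sub\\file.txt')  -> 'dir/sub/file.txt'
#   to_posix('dir/sub/file.txt')    -> 'dir/sub/file.txt'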
class ExtractArchiveWithIllegalFilenamesTestCase(BaseArchiveTestCase):
check_only_warnings = False
def check_extract(self, test_function, test_file, expected_suffix, expected_warnings=None, regen=False):
"""
Run the extraction `test_function` on `test_file`, checking that the paths
listed in the `test_file.expected` file exist in the extracted target
directory. Regenerate the expected file if `regen` is True.
"""
if not isinstance(test_file, unicode):
test_file = unicode(test_file)
test_file = self.get_test_loc(test_file)
test_dir = self.get_temp_dir()
warnings = test_function(test_file, test_dir)
# shortcut if only a check of warnings is requested
if self.check_only_warnings and expected_warnings is not None:
assert sorted(expected_warnings) == sorted(warnings)
return
len_test_dir = len(test_dir)
extracted = sorted(path[len_test_dir:] for path in fileutils.file_iter(test_dir))
extracted = [unicode(p) for p in extracted]
extracted = [to_posix(p) for p in extracted]
if on_linux:
os_suffix = 'linux'
elif on_mac:
os_suffix = 'mac'
elif on_windows:
os_suffix = 'win'
expected_file = test_file + '_' + expected_suffix + '_' + os_suffix + '.expected'
import json
if regen:
with open(expected_file, 'wb') as ef:
ef.write(json.dumps(extracted, indent=2))
expected = json.loads(open(expected_file).read())
expected = [p for p in expected if p.strip()]
assert expected == extracted
@skipIf(not on_linux, 'Run only on Linux because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnLinux(ExtractArchiveWithIllegalFilenamesTestCase):
check_only_warnings = False
def test_extract_7zip_with_weird_filenames_with_libarchive(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.7z')
self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')
def test_extract_ar_with_weird_filenames_with_libarchive(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.ar')
warns = ['COM3.txt: Incorrect file header signature', 'com4: Incorrect file header signature']
self.check_extract(libarchive2.extract, test_file, expected_warnings=warns, expected_suffix='libarch')
def test_extract_cpio_with_weird_filenames_with_libarchive(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.cpio')
self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')
def test_extract_tar_with_weird_filenames_with_libarchive(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')
def test_extract_zip_with_weird_filenames_with_libarchive(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.zip')
self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')
@skipIf(not on_linux, 'Run only on Linux because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnLinuxWarnings(TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnLinux):
check_only_warnings = True
@skipIf(not on_mac, 'Run only on Mac because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnMac(ExtractArchiveWithIllegalFilenamesTestCase):
check_only_warnings = False
def test_extract_7zip_with_weird_filenames_with_libarchive(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.7z')
self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')
def test_extract_ar_with_weird_filenames_with_libarchive(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.ar')
warns = ['COM3.txt: Incorrect file header signature', 'com4: Incorrect file header signature']
self.check_extract(libarchive2.extract, test_file, expected_warnings=warns, expected_suffix='libarch')
def test_extract_cpio_with_weird_filenames_with_libarchive(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.cpio')
self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')
def test_extract_tar_with_weird_filenames_with_libarchive(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')
def test_extract_zip_with_weird_filenames_with_libarchive(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.zip')
self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')
@skipIf(not on_mac, 'Run only on Mac because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnMacWarnings(TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnMac):
check_only_warnings = True
@skipIf(not on_windows, 'Run only on Windows because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnWindows(ExtractArchiveWithIllegalFilenamesTestCase):
check_only_warnings = False
def test_extract_7zip_with_weird_filenames_with_libarchive(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.7z')
self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')
def test_extract_ar_with_weird_filenames_with_libarchive(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.ar')
warns = ['COM3.txt: Incorrect file header signature', 'com4: Incorrect file header signature']
self.check_extract(libarchive2.extract, test_file, expected_warnings=warns, expected_suffix='libarch')
def test_extract_cpio_with_weird_filenames_with_libarchive(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.cpio')
self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')
def test_extract_tar_with_weird_filenames_with_libarchive(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')
def test_extract_zip_with_weird_filenames_with_libarchive(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.zip')
self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')
@skipIf(not on_windows, 'Run only on Windows because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnWindowsWarnings(TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnWindows):
check_only_warnings = True
@skipIf(not on_linux, 'Run only on Linux because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnLinux(ExtractArchiveWithIllegalFilenamesTestCase):
check_only_warnings = False
def test_extract_7zip_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.7z')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
def test_extract_ar_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.ar')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
def test_extract_cpio_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.cpio')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
def test_extract_iso_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.iso')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
def test_extract_rar_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.rar')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
def test_extract_tar_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
def test_extract_zip_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.zip')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
@skipIf(not on_linux, 'Run only on Linux because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnLinuxWarnings(TestExtractArchiveWithIllegalFilenamesWithSevenzipOnLinux):
check_only_warnings = True
@skipIf(not on_linux, 'Run only on Linux because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithPytarOnLinux(ExtractArchiveWithIllegalFilenamesTestCase):
check_only_warnings = False
def test_extract_tar_with_weird_filenames_with_pytar(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
warns = [
'weird_names/win/LPT7.txt: Skipping duplicate file name.',
'weird_names/win/COM5.txt: Skipping duplicate file name.',
'weird_names/win/LPT1.txt: Skipping duplicate file name.',
'weird_names/win/con: Skipping duplicate file name.',
'weird_names/win/COM7.txt: Skipping duplicate file name.',
'weird_names/win/LPT6.txt: Skipping duplicate file name.',
'weird_names/win/com6: Skipping duplicate file name.',
'weird_names/win/nul: Skipping duplicate file name.',
'weird_names/win/com2: Skipping duplicate file name.',
'weird_names/win/com9.txt: Skipping duplicate file name.',
'weird_names/win/LPT8.txt: Skipping duplicate file name.',
'weird_names/win/prn.txt: Skipping duplicate file name.',
'weird_names/win/aux.txt: Skipping duplicate file name.',
'weird_names/win/com9: Skipping duplicate file name.',
'weird_names/win/com8: Skipping duplicate file name.',
'weird_names/win/LPT5.txt: Skipping duplicate file name.',
'weird_names/win/lpt8: Skipping duplicate file name.',
'weird_names/win/COM6.txt: Skipping duplicate file name.',
'weird_names/win/lpt4: Skipping duplicate file name.',
'weird_names/win/lpt5: Skipping duplicate file name.',
'weird_names/win/lpt6: Skipping duplicate file name.',
'weird_names/win/lpt7: Skipping duplicate file name.',
'weird_names/win/com5: Skipping duplicate file name.',
'weird_names/win/lpt1: Skipping duplicate file name.',
'weird_names/win/COM1.txt: Skipping duplicate file name.',
'weird_names/win/lpt9: Skipping duplicate file name.',
'weird_names/win/COM2.txt: Skipping duplicate file name.',
'weird_names/win/COM4.txt: Skipping duplicate file name.',
'weird_names/win/aux: Skipping duplicate file name.',
'weird_names/win/LPT9.txt: Skipping duplicate file name.',
'weird_names/win/LPT2.txt: Skipping duplicate file name.',
'weird_names/win/com1: Skipping duplicate file name.',
'weird_names/win/com3: Skipping duplicate file name.',
'weird_names/win/COM8.txt: Skipping duplicate file name.',
'weird_names/win/COM3.txt: Skipping duplicate file name.',
'weird_names/win/prn: Skipping duplicate file name.',
'weird_names/win/lpt2: Skipping duplicate file name.',
'weird_names/win/com4: Skipping duplicate file name.',
'weird_names/win/nul.txt: Skipping duplicate file name.',
'weird_names/win/LPT3.txt: Skipping duplicate file name.',
'weird_names/win/lpt3: Skipping duplicate file name.',
'weird_names/win/con.txt: Skipping duplicate file name.',
'weird_names/win/LPT4.txt: Skipping duplicate file name.',
'weird_names/win/com7: Skipping duplicate file name.'
]
self.check_extract(tar.extract, test_file, expected_warnings=warns, expected_suffix='pytar')
@skipIf(not on_linux, 'Run only on Linux because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithPytarOnLinuxWarnings(TestExtractArchiveWithIllegalFilenamesWithPytarOnLinux):
check_only_warnings = True
@skipIf(not on_mac, 'Run only on Mac because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMac(ExtractArchiveWithIllegalFilenamesTestCase):
check_only_warnings = False
@expectedFailure # not a problem: we use libarchive for these
def test_extract_7zip_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.7z')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
@expectedFailure # not a problem: we use libarchive for these
def test_extract_ar_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.ar')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
@expectedFailure # not a problem: we use libarchive for these
def test_extract_cpio_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.cpio')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
@expectedFailure # This is a problem
def test_extract_iso_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.iso')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
@expectedFailure # This is a problem, but unrar seems to fail the same way
def test_extract_rar_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.rar')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
@expectedFailure # not a problem: we use libarchive for these
def test_extract_tar_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
@expectedFailure # not a problem: we use libarchive for these
def test_extract_zip_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.zip')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
@skipIf(not on_mac, 'Run only on Mac because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings(TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMac):
check_only_warnings = True
@skipIf(not on_mac, 'Run only on Mac because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithPytarOnMac(ExtractArchiveWithIllegalFilenamesTestCase):
check_only_warnings = False
def test_extract_tar_with_weird_filenames_with_pytar(self):
# This really does not work well, but this is not a problem: we use libarchive
# for these and pytar is not equipped to handle them
test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
warns = [
'weird_names/win/COM1.txt: Skipping duplicate file name.',
'weird_names/win/COM2.txt: Skipping duplicate file name.',
'weird_names/win/COM3.txt: Skipping duplicate file name.',
'weird_names/win/COM4.txt: Skipping duplicate file name.',
'weird_names/win/COM5.txt: Skipping duplicate file name.',
'weird_names/win/COM6.txt: Skipping duplicate file name.',
'weird_names/win/COM7.txt: Skipping duplicate file name.',
'weird_names/win/COM8.txt: Skipping duplicate file name.',
'weird_names/win/LPT1.txt: Skipping duplicate file name.',
'weird_names/win/LPT2.txt: Skipping duplicate file name.',
'weird_names/win/LPT3.txt: Skipping duplicate file name.',
'weird_names/win/LPT4.txt: Skipping duplicate file name.',
'weird_names/win/LPT5.txt: Skipping duplicate file name.',
'weird_names/win/LPT6.txt: Skipping duplicate file name.',
'weird_names/win/LPT7.txt: Skipping duplicate file name.',
'weird_names/win/LPT8.txt: Skipping duplicate file name.',
'weird_names/win/LPT9.txt: Skipping duplicate file name.',
'weird_names/win/aux.txt: Skipping duplicate file name.',
'weird_names/win/aux: Skipping duplicate file name.',
'weird_names/win/com1: Skipping duplicate file name.',
'weird_names/win/com2: Skipping duplicate file name.',
'weird_names/win/com3: Skipping duplicate file name.',
'weird_names/win/com4: Skipping duplicate file name.',
'weird_names/win/com5: Skipping duplicate file name.',
'weird_names/win/com6: Skipping duplicate file name.',
'weird_names/win/com7: Skipping duplicate file name.',
'weird_names/win/com8: Skipping duplicate file name.',
'weird_names/win/com9.txt: Skipping duplicate file name.',
'weird_names/win/com9: Skipping duplicate file name.',
'weird_names/win/con.txt: Skipping duplicate file name.',
'weird_names/win/con: Skipping duplicate file name.',
'weird_names/win/lpt1: Skipping duplicate file name.',
'weird_names/win/lpt2: Skipping duplicate file name.',
'weird_names/win/lpt3: Skipping duplicate file name.',
'weird_names/win/lpt4: Skipping duplicate file name.',
'weird_names/win/lpt5: Skipping duplicate file name.',
'weird_names/win/lpt6: Skipping duplicate file name.',
'weird_names/win/lpt7: Skipping duplicate file name.',
'weird_names/win/lpt8: Skipping duplicate file name.',
'weird_names/win/lpt9: Skipping duplicate file name.',
'weird_names/win/nul.txt: Skipping duplicate file name.',
'weird_names/win/nul: Skipping duplicate file name.',
'weird_names/win/prn.txt: Skipping duplicate file name.',
'weird_names/win/prn: Skipping duplicate file name.'
]
self.check_extract(tar.extract, test_file, expected_warnings=warns, expected_suffix='pytar')
@skipIf(not on_mac, 'Run only on Mac because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithPytarOnMacWarnings(TestExtractArchiveWithIllegalFilenamesWithPytarOnMac):
check_only_warnings = True
@skipIf(not on_windows, 'Run only on Windows because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnWin(ExtractArchiveWithIllegalFilenamesTestCase):
check_only_warnings = False
@expectedFailure # not a problem: we use libarchive for these
def test_extract_7zip_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.7z')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
@expectedFailure # not a problem: we use libarchive for these
def test_extract_ar_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.ar')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
@expectedFailure # not a problem: we use libarchive for these
def test_extract_cpio_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.cpio')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
def test_extract_iso_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.iso')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
def test_extract_rar_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.rar')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
# The results are not correct but not a problem: we use libarchive for these
def test_extract_tar_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
@expectedFailure # not a problem: we use libarchive for these
def test_extract_zip_with_weird_filenames_with_sevenzip(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.zip')
self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
@skipIf(not on_windows, 'Run only on Windows because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnWinWarning(TestExtractArchiveWithIllegalFilenamesWithSevenzipOnWin):
check_only_warnings = True
@skipIf(not on_windows, 'Run only on Windows because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithPytarOnWin(ExtractArchiveWithIllegalFilenamesTestCase):
check_only_warnings = False
@expectedFailure # not a problem: we use libarchive for these and pytar is not equipped to handle these
def test_extract_tar_with_weird_filenames_with_pytar(self):
test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
warns = [
'weird_names/win/LPT7.txt: Skipping duplicate file name.',
'weird_names/win/COM5.txt: Skipping duplicate file name.',
'weird_names/win/LPT1.txt: Skipping duplicate file name.',
'weird_names/win/con: Skipping duplicate file name.',
'weird_names/win/COM7.txt: Skipping duplicate file name.',
'weird_names/win/LPT6.txt: Skipping duplicate file name.',
'weird_names/win/com6: Skipping duplicate file name.',
'weird_names/win/nul: Skipping duplicate file name.',
'weird_names/win/com2: Skipping duplicate file name.',
'weird_names/win/com9.txt: Skipping duplicate file name.',
'weird_names/win/LPT8.txt: Skipping duplicate file name.',
'weird_names/win/prn.txt: Skipping duplicate file name.',
'weird_names/win/aux.txt: Skipping duplicate file name.',
'weird_names/win/com9: Skipping duplicate file name.',
'weird_names/win/com8: Skipping duplicate file name.',
'weird_names/win/LPT5.txt: Skipping duplicate file name.',
'weird_names/win/lpt8: Skipping duplicate file name.',
'weird_names/win/COM6.txt: Skipping duplicate file name.',
'weird_names/win/lpt4: Skipping duplicate file name.',
'weird_names/win/lpt5: Skipping duplicate file name.',
'weird_names/win/lpt6: Skipping duplicate file name.',
'weird_names/win/lpt7: Skipping duplicate file name.',
'weird_names/win/com5: Skipping duplicate file name.',
'weird_names/win/lpt1: Skipping duplicate file name.',
'weird_names/win/COM1.txt: Skipping duplicate file name.',
'weird_names/win/lpt9: Skipping duplicate file name.',
'weird_names/win/COM2.txt: Skipping duplicate file name.',
'weird_names/win/COM4.txt: Skipping duplicate file name.',
'weird_names/win/aux: Skipping duplicate file name.',
'weird_names/win/LPT9.txt: Skipping duplicate file name.',
'weird_names/win/LPT2.txt: Skipping duplicate file name.',
'weird_names/win/com1: Skipping duplicate file name.',
'weird_names/win/com3: Skipping duplicate file name.',
'weird_names/win/COM8.txt: Skipping duplicate file name.',
'weird_names/win/COM3.txt: Skipping duplicate file name.',
'weird_names/win/prn: Skipping duplicate file name.',
'weird_names/win/lpt2: Skipping duplicate file name.',
'weird_names/win/com4: Skipping duplicate file name.',
'weird_names/win/nul.txt: Skipping duplicate file name.',
'weird_names/win/LPT3.txt: Skipping duplicate file name.',
'weird_names/win/lpt3: Skipping duplicate file name.',
'weird_names/win/con.txt: Skipping duplicate file name.',
'weird_names/win/LPT4.txt: Skipping duplicate file name.',
'weird_names/win/com7: Skipping duplicate file name.'
]
self.check_extract(tar.extract, test_file, expected_warnings=warns, expected_suffix='pytar')
@skipIf(not on_windows, 'Run only on Windows because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithPytarOnWinWarnings(TestExtractArchiveWithIllegalFilenamesWithPytarOnWin):
check_only_warnings = True
|
michaelrup/scancode-toolkit
|
tests/extractcode/test_archive.py
|
Python
|
apache-2.0
| 121,251
|
[
"VisIt"
] |
3ff63d7c199d56d7eb017bb8e81072e65fc9f67946a82b5382204287c1005915
|
from sympy import E as e
from sympy import (Symbol, Abs, exp, expint, S, pi, simplify, Interval, erf, erfc, Ne,
EulerGamma, Eq, log, lowergamma, uppergamma, symbols, sqrt, And,
gamma, beta, Piecewise, Integral, sin, cos, tan, sinh, cosh,
besseli, floor, expand_func, Rational, I, re,
im, lambdify, hyper, diff, Or, Mul, sign, Dummy, Sum,
factorial, binomial, N, atan, erfi, besselj)
from sympy.core.compatibility import range
from sympy.external import import_module
from sympy.functions.special.error_functions import erfinv
from sympy.functions.special.hyper import meijerg
from sympy.sets.sets import Intersection, FiniteSet
from sympy.stats import (P, E, where, density, variance, covariance, skewness, kurtosis,
given, pspace, cdf, characteristic_function, moment_generating_function,
ContinuousRV, sample, Arcsin, Benini, Beta, BetaNoncentral, BetaPrime,
Cauchy, Chi, ChiSquared, ChiNoncentral, Dagum, Erlang, ExGaussian,
Exponential, ExponentialPower, FDistribution, FisherZ, Frechet, Gamma,
GammaInverse, Gompertz, Gumbel, Kumaraswamy, Laplace, Logistic,
LogLogistic, LogNormal, Maxwell, Nakagami, Normal, GaussianInverse,
Pareto, QuadraticU, RaisedCosine, Rayleigh, ShiftedGompertz, StudentT,
Trapezoidal, Triangular, Uniform, UniformSum, VonMises, Weibull,
WignerSemicircle, Wald, correlation, moment, cmoment, smoment, quantile)
from sympy.stats.crv_types import (NormalDistribution, GumbelDistribution, GompertzDistribution, LaplaceDistribution,
ParetoDistribution, RaisedCosineDistribution, BeniniDistribution, BetaDistribution,
CauchyDistribution, GammaInverseDistribution, LogNormalDistribution, StudentTDistribution,
QuadraticUDistribution, WignerSemicircleDistribution, ChiDistribution)
from sympy.stats.joint_rv import JointPSpace
from sympy.utilities.pytest import raises, XFAIL, slow, skip
from sympy.utilities.randtest import verify_numerically as tn
oo = S.Infinity
x, y, z = map(Symbol, 'xyz')
def test_single_normal():
mu = Symbol('mu', real=True)
sigma = Symbol('sigma', positive=True)
X = Normal('x', 0, 1)
Y = X*sigma + mu
assert E(Y) == mu
assert variance(Y) == sigma**2
pdf = density(Y)
x = Symbol('x', real=True)
assert (pdf(x) ==
2**S.Half*exp(-(x - mu)**2/(2*sigma**2))/(2*pi**S.Half*sigma))
assert P(X**2 < 1) == erf(2**S.Half/2)
assert quantile(Y)(x) == Intersection(S.Reals, FiniteSet(sqrt(2)*sigma*(sqrt(2)*mu/(2*sigma) + erfinv(2*x - 1))))
assert E(X, Eq(X, mu)) == mu
def test_conditional_1d():
X = Normal('x', 0, 1)
Y = given(X, X >= 0)
z = Symbol('z')
assert density(Y)(z) == 2 * density(X)(z)
assert Y.pspace.domain.set == Interval(0, oo)
assert E(Y) == sqrt(2) / sqrt(pi)
assert E(X**2) == E(Y**2)
def test_ContinuousDomain():
X = Normal('x', 0, 1)
assert where(X**2 <= 1).set == Interval(-1, 1)
assert where(X**2 <= 1).symbol == X.symbol
assert where(And(X**2 <= 1, X >= 0)).set == Interval(0, 1)
raises(ValueError, lambda: where(sin(X) > 1))
Y = given(X, X >= 0)
assert Y.pspace.domain.set == Interval(0, oo)
@slow
def test_multiple_normal():
X, Y = Normal('x', 0, 1), Normal('y', 0, 1)
p = Symbol("p", positive=True)
assert E(X + Y) == 0
assert variance(X + Y) == 2
assert variance(X + X) == 4
assert covariance(X, Y) == 0
assert covariance(2*X + Y, -X) == -2*variance(X)
assert skewness(X) == 0
assert skewness(X + Y) == 0
assert kurtosis(X) == 3
assert kurtosis(X+Y) == 3
assert correlation(X, Y) == 0
assert correlation(X, X + Y) == correlation(X, X - Y)
assert moment(X, 2) == 1
assert cmoment(X, 3) == 0
assert moment(X + Y, 4) == 12
assert cmoment(X, 2) == variance(X)
assert smoment(X*X, 2) == 1
assert smoment(X + Y, 3) == skewness(X + Y)
assert smoment(X + Y, 4) == kurtosis(X + Y)
assert E(X, Eq(X + Y, 0)) == 0
assert variance(X, Eq(X + Y, 0)) == S.Half
assert quantile(X)(p) == sqrt(2)*erfinv(2*p - S.One)
def test_symbolic():
mu1, mu2 = symbols('mu1 mu2', real=True)
s1, s2 = symbols('sigma1 sigma2', positive=True)
rate = Symbol('lambda', positive=True)
X = Normal('x', mu1, s1)
Y = Normal('y', mu2, s2)
Z = Exponential('z', rate)
a, b, c = symbols('a b c', real=True)
assert E(X) == mu1
assert E(X + Y) == mu1 + mu2
assert E(a*X + b) == a*E(X) + b
assert variance(X) == s1**2
assert variance(X + a*Y + b) == variance(X) + a**2*variance(Y)
assert E(Z) == 1/rate
assert E(a*Z + b) == a*E(Z) + b
assert E(X + a*Z + b) == mu1 + a/rate + b
def test_cdf():
X = Normal('x', 0, 1)
d = cdf(X)
assert P(X < 1) == d(1).rewrite(erfc)
assert d(0) == S.Half
d = cdf(X, X > 0) # given X>0
assert d(0) == 0
Y = Exponential('y', 10)
d = cdf(Y)
assert d(-5) == 0
assert P(Y > 3) == 1 - d(3)
raises(ValueError, lambda: cdf(X + Y))
Z = Exponential('z', 1)
f = cdf(Z)
assert f(z) == Piecewise((1 - exp(-z), z >= 0), (0, True))
def test_characteristic_function():
X = Uniform('x', 0, 1)
cf = characteristic_function(X)
assert cf(1) == -I*(-1 + exp(I))
Y = Normal('y', 1, 1)
cf = characteristic_function(Y)
assert cf(0) == 1
assert cf(1) == exp(I - S.Half)
Z = Exponential('z', 5)
cf = characteristic_function(Z)
assert cf(0) == 1
assert cf(1).expand() == Rational(25, 26) + I*Rational(5, 26)
X = GaussianInverse('x', 1, 1)
cf = characteristic_function(X)
assert cf(0) == 1
assert cf(1) == exp(1 - sqrt(1 - 2*I))
X = ExGaussian('x', 0, 1, 1)
cf = characteristic_function(X)
assert cf(0) == 1
assert cf(1) == (1 + I)*exp(Rational(-1, 2))/2
def test_moment_generating_function():
t = symbols('t', positive=True)
# Symbolic tests
a, b, c = symbols('a b c')
mgf = moment_generating_function(Beta('x', a, b))(t)
assert mgf == hyper((a,), (a + b,), t)
mgf = moment_generating_function(Chi('x', a))(t)
assert mgf == sqrt(2)*t*gamma(a/2 + S.Half)*\
hyper((a/2 + S.Half,), (Rational(3, 2),), t**2/2)/gamma(a/2) +\
hyper((a/2,), (S.Half,), t**2/2)
mgf = moment_generating_function(ChiSquared('x', a))(t)
assert mgf == (1 - 2*t)**(-a/2)
mgf = moment_generating_function(Erlang('x', a, b))(t)
assert mgf == (1 - t/b)**(-a)
mgf = moment_generating_function(ExGaussian("x", a, b, c))(t)
assert mgf == exp(a*t + b**2*t**2/2)/(1 - t/c)
mgf = moment_generating_function(Exponential('x', a))(t)
assert mgf == a/(a - t)
mgf = moment_generating_function(Gamma('x', a, b))(t)
assert mgf == (-b*t + 1)**(-a)
mgf = moment_generating_function(Gumbel('x', a, b))(t)
assert mgf == exp(b*t)*gamma(-a*t + 1)
mgf = moment_generating_function(Gompertz('x', a, b))(t)
assert mgf == b*exp(b)*expint(t/a, b)
mgf = moment_generating_function(Laplace('x', a, b))(t)
assert mgf == exp(a*t)/(-b**2*t**2 + 1)
mgf = moment_generating_function(Logistic('x', a, b))(t)
assert mgf == exp(a*t)*beta(-b*t + 1, b*t + 1)
mgf = moment_generating_function(Normal('x', a, b))(t)
assert mgf == exp(a*t + b**2*t**2/2)
mgf = moment_generating_function(Pareto('x', a, b))(t)
assert mgf == b*(-a*t)**b*uppergamma(-b, -a*t)
mgf = moment_generating_function(QuadraticU('x', a, b))(t)
assert str(mgf) == ("(3*(t*(-4*b + (a + b)**2) + 4)*exp(b*t) - "
"3*(t*(a**2 + 2*a*(b - 2) + b**2) + 4)*exp(a*t))/(t**2*(a - b)**3)")
mgf = moment_generating_function(RaisedCosine('x', a, b))(t)
assert mgf == pi**2*exp(a*t)*sinh(b*t)/(b*t*(b**2*t**2 + pi**2))
mgf = moment_generating_function(Rayleigh('x', a))(t)
assert mgf == sqrt(2)*sqrt(pi)*a*t*(erf(sqrt(2)*a*t/2) + 1)\
*exp(a**2*t**2/2)/2 + 1
mgf = moment_generating_function(Triangular('x', a, b, c))(t)
assert str(mgf) == ("(-2*(-a + b)*exp(c*t) + 2*(-a + c)*exp(b*t) + "
"2*(b - c)*exp(a*t))/(t**2*(-a + b)*(-a + c)*(b - c))")
mgf = moment_generating_function(Uniform('x', a, b))(t)
assert mgf == (-exp(a*t) + exp(b*t))/(t*(-a + b))
mgf = moment_generating_function(UniformSum('x', a))(t)
assert mgf == ((exp(t) - 1)/t)**a
mgf = moment_generating_function(WignerSemicircle('x', a))(t)
assert mgf == 2*besseli(1, a*t)/(a*t)
# Numeric tests
mgf = moment_generating_function(Beta('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 1) == hyper((2,), (3,), 1)/2
mgf = moment_generating_function(Chi('x', 1))(t)
assert mgf.diff(t).subs(t, 1) == sqrt(2)*hyper((1,), (Rational(3, 2),), S.Half
)/sqrt(pi) + hyper((Rational(3, 2),), (Rational(3, 2),), S.Half) + 2*sqrt(2)*hyper((2,),
(Rational(5, 2),), S.Half)/(3*sqrt(pi))
mgf = moment_generating_function(ChiSquared('x', 1))(t)
assert mgf.diff(t).subs(t, 1) == I
mgf = moment_generating_function(Erlang('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 0) == 1
mgf = moment_generating_function(ExGaussian("x", 0, 1, 1))(t)
assert mgf.diff(t).subs(t, 2) == -exp(2)
mgf = moment_generating_function(Exponential('x', 1))(t)
assert mgf.diff(t).subs(t, 0) == 1
mgf = moment_generating_function(Gamma('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 0) == 1
mgf = moment_generating_function(Gumbel('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 0) == EulerGamma + 1
mgf = moment_generating_function(Gompertz('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 1) == -e*meijerg(((), (1, 1)),
((0, 0, 0), ()), 1)
mgf = moment_generating_function(Laplace('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 0) == 1
mgf = moment_generating_function(Logistic('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 0) == beta(1, 1)
mgf = moment_generating_function(Normal('x', 0, 1))(t)
assert mgf.diff(t).subs(t, 1) == exp(S.Half)
mgf = moment_generating_function(Pareto('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 0) == expint(1, 0)
mgf = moment_generating_function(QuadraticU('x', 1, 2))(t)
assert mgf.diff(t).subs(t, 1) == -12*e - 3*exp(2)
mgf = moment_generating_function(RaisedCosine('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 1) == -2*e*pi**2*sinh(1)/\
(1 + pi**2)**2 + e*pi**2*cosh(1)/(1 + pi**2)
mgf = moment_generating_function(Rayleigh('x', 1))(t)
assert mgf.diff(t).subs(t, 0) == sqrt(2)*sqrt(pi)/2
mgf = moment_generating_function(Triangular('x', 1, 3, 2))(t)
assert mgf.diff(t).subs(t, 1) == -e + exp(3)
mgf = moment_generating_function(Uniform('x', 0, 1))(t)
assert mgf.diff(t).subs(t, 1) == 1
mgf = moment_generating_function(UniformSum('x', 1))(t)
assert mgf.diff(t).subs(t, 1) == 1
mgf = moment_generating_function(WignerSemicircle('x', 1))(t)
assert mgf.diff(t).subs(t, 1) == -2*besseli(1, 1) + besseli(2, 1) +\
besseli(0, 1)
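# A minimal sketch building on the checks above, with illustrative parameter
# values: the first derivative of the moment generating function at t = 0
# recovers the mean (here for Normal(3, 1)); only names already imported in
# this module are used.
def _demo_mgf_first_moment():
    t = symbols('t', positive=True)
    X = Normal('x', 3, 1)
    mgf = moment_generating_function(X)(t)
    assert diff(mgf, t).subs(t, 0) == 3  # first raw moment equals the mean
    assert E(X) == 3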
def test_sample_continuous():
Z = ContinuousRV(z, exp(-z), set=Interval(0, oo))
assert sample(Z) in Z.pspace.domain.set
sym, val = list(Z.pspace.sample().items())[0]
assert sym == Z and val in Interval(0, oo)
assert density(Z)(-1) == 0
def test_ContinuousRV():
pdf = sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)) # Normal distribution
# X and Y should be equivalent
X = ContinuousRV(x, pdf)
Y = Normal('y', 0, 1)
assert variance(X) == variance(Y)
assert P(X > 0) == P(Y > 0)
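# A minimal sketch along the same lines, reusing the module-level symbol ``z``
# and an illustrative rate of 2: a hand-built exponential density fed to
# ContinuousRV should agree with the prefab Exponential distribution.
def _demo_custom_exponential():
    W = ContinuousRV(z, 2*exp(-2*z), set=Interval(0, oo))
    V = Exponential('v', 2)
    assert density(W)(1) == density(V)(1)  # both 2*exp(-2)
    assert P(W > 1) == P(V > 1)            # both exp(-2)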
def test_arcsin():
from sympy import asin
a = Symbol("a", real=True)
b = Symbol("b", real=True)
X = Arcsin('x', a, b)
assert density(X)(x) == 1/(pi*sqrt((-x + b)*(x - a)))
assert cdf(X)(x) == Piecewise((0, a > x),
(2*asin(sqrt((-a + x)/(-a + b)))/pi, b >= x),
(1, True))
def test_benini():
alpha = Symbol("alpha", positive=True)
beta = Symbol("beta", positive=True)
sigma = Symbol("sigma", positive=True)
X = Benini('x', alpha, beta, sigma)
assert density(X)(x) == ((alpha/x + 2*beta*log(x/sigma)/x)
*exp(-alpha*log(x/sigma) - beta*log(x/sigma)**2))
alpha = Symbol("alpha", nonpositive=True)
raises(ValueError, lambda: Benini('x', alpha, beta, sigma))
beta = Symbol("beta", nonpositive=True)
raises(ValueError, lambda: Benini('x', alpha, beta, sigma))
alpha = Symbol("alpha", positive=True)
raises(ValueError, lambda: Benini('x', alpha, beta, sigma))
beta = Symbol("beta", positive=True)
sigma = Symbol("sigma", nonpositive=True)
raises(ValueError, lambda: Benini('x', alpha, beta, sigma))
def test_beta():
a, b = symbols('alpha beta', positive=True)
B = Beta('x', a, b)
assert pspace(B).domain.set == Interval(0, 1)
assert characteristic_function(B)(x) == hyper((a,), (a + b,), I*x)
assert density(B)(x) == x**(a - 1)*(1 - x)**(b - 1)/beta(a, b)
assert simplify(E(B)) == a / (a + b)
assert simplify(variance(B)) == a*b / (a**3 + 3*a**2*b + a**2 + 3*a*b**2 + 2*a*b + b**3 + b**2)
# Full symbolic solution is too much, test with numeric version
a, b = 1, 2
B = Beta('x', a, b)
assert expand_func(E(B)) == a / S(a + b)
assert expand_func(variance(B)) == (a*b) / S((a + b)**2 * (a + b + 1))
def test_beta_noncentral():
a, b = symbols('a b', positive=True)
c = Symbol('c', nonnegative=True)
_k = Dummy('k')
X = BetaNoncentral('x', a, b, c)
assert pspace(X).domain.set == Interval(0, 1)
dens = density(X)
z = Symbol('z')
res = Sum( z**(_k + a - 1)*(c/2)**_k*(1 - z)**(b - 1)*exp(-c/2)/
(beta(_k + a, b)*factorial(_k)), (_k, 0, oo))
assert dens(z).dummy_eq(res)
    # BetaNoncentral should not raise if the assumptions
    # on the symbols cannot be determined
a, b, c = symbols('a b c')
assert BetaNoncentral('x', a, b, c)
a = Symbol('a', positive=False, real=True)
raises(ValueError, lambda: BetaNoncentral('x', a, b, c))
a = Symbol('a', positive=True)
b = Symbol('b', positive=False, real=True)
raises(ValueError, lambda: BetaNoncentral('x', a, b, c))
a = Symbol('a', positive=True)
b = Symbol('b', positive=True)
c = Symbol('c', nonnegative=False, real=True)
raises(ValueError, lambda: BetaNoncentral('x', a, b, c))
def test_betaprime():
alpha = Symbol("alpha", positive=True)
betap = Symbol("beta", positive=True)
X = BetaPrime('x', alpha, betap)
assert density(X)(x) == x**(alpha - 1)*(x + 1)**(-alpha - betap)/beta(alpha, betap)
alpha = Symbol("alpha", nonpositive=True)
raises(ValueError, lambda: BetaPrime('x', alpha, betap))
alpha = Symbol("alpha", positive=True)
betap = Symbol("beta", nonpositive=True)
raises(ValueError, lambda: BetaPrime('x', alpha, betap))
def test_cauchy():
x0 = Symbol("x0")
gamma = Symbol("gamma", positive=True)
t = Symbol('t')
p = Symbol("p", positive=True)
X = Cauchy('x', x0, gamma)
# Tests the characteristic function
assert characteristic_function(X)(x) == exp(-gamma*Abs(x) + I*x*x0)
assert density(X)(x) == 1/(pi*gamma*(1 + (x - x0)**2/gamma**2))
assert diff(cdf(X)(x), x) == density(X)(x)
assert quantile(X)(p) == gamma*tan(pi*(p - S.Half)) + x0
gamma = Symbol("gamma", nonpositive=True)
raises(ValueError, lambda: Cauchy('x', x0, gamma))
def test_chi():
from sympy import I
k = Symbol("k", integer=True)
X = Chi('x', k)
assert density(X)(x) == 2**(-k/2 + 1)*x**(k - 1)*exp(-x**2/2)/gamma(k/2)
# Tests the characteristic function
assert characteristic_function(X)(x) == sqrt(2)*I*x*gamma(k/2 + S(1)/2)*hyper((k/2 + S(1)/2,),
(S(3)/2,), -x**2/2)/gamma(k/2) + hyper((k/2,), (S(1)/2,), -x**2/2)
# Tests the moment generating function
assert moment_generating_function(X)(x) == sqrt(2)*x*gamma(k/2 + S(1)/2)*hyper((k/2 + S(1)/2,),
(S(3)/2,), x**2/2)/gamma(k/2) + hyper((k/2,), (S(1)/2,), x**2/2)
k = Symbol("k", integer=True, positive=False)
raises(ValueError, lambda: Chi('x', k))
k = Symbol("k", integer=False, positive=True)
raises(ValueError, lambda: Chi('x', k))
def test_chi_noncentral():
k = Symbol("k", integer=True)
l = Symbol("l")
X = ChiNoncentral("x", k, l)
assert density(X)(x) == (x**k*l*(x*l)**(-k/2)*
exp(-x**2/2 - l**2/2)*besseli(k/2 - 1, x*l))
k = Symbol("k", integer=True, positive=False)
raises(ValueError, lambda: ChiNoncentral('x', k, l))
k = Symbol("k", integer=True, positive=True)
l = Symbol("l", nonpositive=True)
raises(ValueError, lambda: ChiNoncentral('x', k, l))
k = Symbol("k", integer=False)
l = Symbol("l", positive=True)
raises(ValueError, lambda: ChiNoncentral('x', k, l))
def test_chi_squared():
k = Symbol("k", integer=True)
X = ChiSquared('x', k)
# Tests the characteristic function
assert characteristic_function(X)(x) == ((-2*I*x + 1)**(-k/2))
assert density(X)(x) == 2**(-k/2)*x**(k/2 - 1)*exp(-x/2)/gamma(k/2)
assert cdf(X)(x) == Piecewise((lowergamma(k/2, x/2)/gamma(k/2), x >= 0), (0, True))
assert E(X) == k
assert variance(X) == 2*k
X = ChiSquared('x', 15)
assert cdf(X)(3) == -14873*sqrt(6)*exp(Rational(-3, 2))/(5005*sqrt(pi)) + erf(sqrt(6)/2)
k = Symbol("k", integer=True, positive=False)
raises(ValueError, lambda: ChiSquared('x', k))
k = Symbol("k", integer=False, positive=True)
raises(ValueError, lambda: ChiSquared('x', k))
def test_dagum():
p = Symbol("p", positive=True)
b = Symbol("b", positive=True)
a = Symbol("a", positive=True)
X = Dagum('x', p, a, b)
assert density(X)(x) == a*p*(x/b)**(a*p)*((x/b)**a + 1)**(-p - 1)/x
assert cdf(X)(x) == Piecewise(((1 + (x/b)**(-a))**(-p), x >= 0),
(0, True))
p = Symbol("p", nonpositive=True)
raises(ValueError, lambda: Dagum('x', p, a, b))
p = Symbol("p", positive=True)
b = Symbol("b", nonpositive=True)
raises(ValueError, lambda: Dagum('x', p, a, b))
b = Symbol("b", positive=True)
a = Symbol("a", nonpositive=True)
raises(ValueError, lambda: Dagum('x', p, a, b))
def test_erlang():
k = Symbol("k", integer=True, positive=True)
l = Symbol("l", positive=True)
X = Erlang("x", k, l)
assert density(X)(x) == x**(k - 1)*l**k*exp(-x*l)/gamma(k)
assert cdf(X)(x) == Piecewise((lowergamma(k, l*x)/gamma(k), x > 0),
(0, True))
def test_exgaussian():
m, z = symbols("m, z")
s, l = symbols("s, l", positive=True)
X = ExGaussian("x", m, s, l)
assert density(X)(z) == l*exp(l*(l*s**2 + 2*m - 2*z)/2) *\
erfc(sqrt(2)*(l*s**2 + m - z)/(2*s))/2
# Note: actual_output simplifies to expected_output.
# Ideally cdf(X)(z) would return expected_output
# expected_output = (erf(sqrt(2)*(l*s**2 + m - z)/(2*s)) - 1)*exp(l*(l*s**2 + 2*m - 2*z)/2)/2 - erf(sqrt(2)*(m - z)/(2*s))/2 + S.Half
u = l*(z - m)
v = l*s
GaussianCDF1 = cdf(Normal('x', 0, v))(u)
GaussianCDF2 = cdf(Normal('x', v**2, v))(u)
actual_output = GaussianCDF1 - exp(-u + (v**2/2) + log(GaussianCDF2))
assert cdf(X)(z) == actual_output
# assert simplify(actual_output) == expected_output
assert variance(X).expand() == s**2 + l**(-2)
assert skewness(X).expand() == 2/(l**3*s**2*sqrt(s**2 + l**(-2)) + l *
sqrt(s**2 + l**(-2)))
def test_exponential():
rate = Symbol('lambda', positive=True)
X = Exponential('x', rate)
p = Symbol("p", positive=True, real=True,finite=True)
assert E(X) == 1/rate
assert variance(X) == 1/rate**2
assert skewness(X) == 2
assert skewness(X) == smoment(X, 3)
assert kurtosis(X) == 9
assert kurtosis(X) == smoment(X, 4)
assert smoment(2*X, 4) == smoment(X, 4)
assert moment(X, 3) == 3*2*1/rate**3
assert P(X > 0) is S.One
assert P(X > 1) == exp(-rate)
assert P(X > 10) == exp(-10*rate)
assert quantile(X)(p) == -log(1-p)/rate
assert where(X <= 1).set == Interval(0, 1)
def test_exponential_power():
mu = Symbol('mu')
z = Symbol('z')
alpha = Symbol('alpha', positive=True)
beta = Symbol('beta', positive=True)
X = ExponentialPower('x', mu, alpha, beta)
assert density(X)(z) == beta*exp(-(Abs(mu - z)/alpha)
** beta)/(2*alpha*gamma(1/beta))
assert cdf(X)(z) == S.Half + lowergamma(1/beta,
(Abs(mu - z)/alpha)**beta)*sign(-mu + z)/\
(2*gamma(1/beta))
def test_f_distribution():
d1 = Symbol("d1", positive=True)
d2 = Symbol("d2", positive=True)
X = FDistribution("x", d1, d2)
assert density(X)(x) == (d2**(d2/2)*sqrt((d1*x)**d1*(d1*x + d2)**(-d1 - d2))
/(x*beta(d1/2, d2/2)))
d1 = Symbol("d1", nonpositive=True)
raises(ValueError, lambda: FDistribution('x', d1, d1))
d1 = Symbol("d1", positive=True, integer=False)
raises(ValueError, lambda: FDistribution('x', d1, d1))
d1 = Symbol("d1", positive=True)
d2 = Symbol("d2", nonpositive=True)
raises(ValueError, lambda: FDistribution('x', d1, d2))
d2 = Symbol("d2", positive=True, integer=False)
raises(ValueError, lambda: FDistribution('x', d1, d2))
def test_fisher_z():
d1 = Symbol("d1", positive=True)
d2 = Symbol("d2", positive=True)
X = FisherZ("x", d1, d2)
assert density(X)(x) == (2*d1**(d1/2)*d2**(d2/2)*(d1*exp(2*x) + d2)
**(-d1/2 - d2/2)*exp(d1*x)/beta(d1/2, d2/2))
def test_frechet():
a = Symbol("a", positive=True)
s = Symbol("s", positive=True)
m = Symbol("m", real=True)
X = Frechet("x", a, s=s, m=m)
assert density(X)(x) == a*((x - m)/s)**(-a - 1)*exp(-((x - m)/s)**(-a))/s
assert cdf(X)(x) == Piecewise((exp(-((-m + x)/s)**(-a)), m <= x), (0, True))
def test_gamma():
k = Symbol("k", positive=True)
theta = Symbol("theta", positive=True)
X = Gamma('x', k, theta)
# Tests characteristic function
assert characteristic_function(X)(x) == ((-I*theta*x + 1)**(-k))
assert density(X)(x) == x**(k - 1)*theta**(-k)*exp(-x/theta)/gamma(k)
assert cdf(X, meijerg=True)(z) == Piecewise(
(-k*lowergamma(k, 0)/gamma(k + 1) +
k*lowergamma(k, z/theta)/gamma(k + 1), z >= 0),
(0, True))
# assert simplify(variance(X)) == k*theta**2 # handled numerically below
assert E(X) == moment(X, 1)
k, theta = symbols('k theta', positive=True)
X = Gamma('x', k, theta)
assert E(X) == k*theta
assert variance(X) == k*theta**2
assert skewness(X).expand() == 2/sqrt(k)
assert kurtosis(X).expand() == 3 + 6/k
def test_gamma_inverse():
a = Symbol("a", positive=True)
b = Symbol("b", positive=True)
X = GammaInverse("x", a, b)
assert density(X)(x) == x**(-a - 1)*b**a*exp(-b/x)/gamma(a)
assert cdf(X)(x) == Piecewise((uppergamma(a, b/x)/gamma(a), x > 0), (0, True))
def test_sampling_gamma_inverse():
scipy = import_module('scipy')
if not scipy:
skip('Scipy not installed. Abort tests for sampling of gamma inverse.')
X = GammaInverse("x", 1, 1)
assert sample(X) in X.pspace.domain.set
def test_gompertz():
b = Symbol("b", positive=True)
eta = Symbol("eta", positive=True)
X = Gompertz("x", b, eta)
assert density(X)(x) == b*eta*exp(eta)*exp(b*x)*exp(-eta*exp(b*x))
assert cdf(X)(x) == 1 - exp(eta)*exp(-eta*exp(b*x))
assert diff(cdf(X)(x), x) == density(X)(x)
def test_gumbel():
beta = Symbol("beta", positive=True)
mu = Symbol("mu")
x = Symbol("x")
y = Symbol("y")
X = Gumbel("x", beta, mu)
Y = Gumbel("y", beta, mu, minimum=True)
assert density(X)(x).expand() == \
exp(mu/beta)*exp(-x/beta)*exp(-exp(mu/beta)*exp(-x/beta))/beta
assert density(Y)(y).expand() == \
exp(-mu/beta)*exp(y/beta)*exp(-exp(-mu/beta)*exp(y/beta))/beta
assert cdf(X)(x).expand() == \
exp(-exp(mu/beta)*exp(-x/beta))
def test_kumaraswamy():
a = Symbol("a", positive=True)
b = Symbol("b", positive=True)
X = Kumaraswamy("x", a, b)
assert density(X)(x) == x**(a - 1)*a*b*(-x**a + 1)**(b - 1)
assert cdf(X)(x) == Piecewise((0, x < 0),
(-(-x**a + 1)**b + 1, x <= 1),
(1, True))
def test_laplace():
mu = Symbol("mu")
b = Symbol("b", positive=True)
X = Laplace('x', mu, b)
#Tests characteristic_function
assert characteristic_function(X)(x) == (exp(I*mu*x)/(b**2*x**2 + 1))
assert density(X)(x) == exp(-Abs(x - mu)/b)/(2*b)
assert cdf(X)(x) == Piecewise((exp((-mu + x)/b)/2, mu > x),
(-exp((mu - x)/b)/2 + 1, True))
def test_logistic():
mu = Symbol("mu", real=True)
s = Symbol("s", positive=True)
p = Symbol("p", positive=True)
X = Logistic('x', mu, s)
#Tests characteristics_function
assert characteristic_function(X)(x) == \
(Piecewise((pi*s*x*exp(I*mu*x)/sinh(pi*s*x), Ne(x, 0)), (1, True)))
assert density(X)(x) == exp((-x + mu)/s)/(s*(exp((-x + mu)/s) + 1)**2)
assert cdf(X)(x) == 1/(exp((mu - x)/s) + 1)
assert quantile(X)(p) == mu - s*log(-S.One + 1/p)
def test_loglogistic():
a, b = symbols('a b')
assert LogLogistic('x', a, b)
a = Symbol('a', negative=True)
b = Symbol('b', positive=True)
raises(ValueError, lambda: LogLogistic('x', a, b))
a = Symbol('a', positive=True)
b = Symbol('b', negative=True)
raises(ValueError, lambda: LogLogistic('x', a, b))
a, b, z, p = symbols('a b z p', positive=True)
X = LogLogistic('x', a, b)
assert density(X)(z) == b*(z/a)**(b - 1)/(a*((z/a)**b + 1)**2)
assert cdf(X)(z) == 1/(1 + (z/a)**(-b))
assert quantile(X)(p) == a*(p/(1 - p))**(1/b)
# Expectation
assert E(X) == Piecewise((S.NaN, b <= 1), (pi*a/(b*sin(pi/b)), True))
b = symbols('b', prime=True) # b > 1
X = LogLogistic('x', a, b)
assert E(X) == pi*a/(b*sin(pi/b))
def test_lognormal():
mean = Symbol('mu', real=True)
std = Symbol('sigma', positive=True)
X = LogNormal('x', mean, std)
# The sympy integrator can't do this too well
#assert E(X) == exp(mean+std**2/2)
#assert variance(X) == (exp(std**2)-1) * exp(2*mean + std**2)
# Right now, only density function and sampling works
for i in range(3):
X = LogNormal('x', i, 1)
assert sample(X) in X.pspace.domain.set
# The sympy integrator can't do this too well
#assert E(X) ==
mu = Symbol("mu", real=True)
sigma = Symbol("sigma", positive=True)
X = LogNormal('x', mu, sigma)
assert density(X)(x) == (sqrt(2)*exp(-(-mu + log(x))**2
/(2*sigma**2))/(2*x*sqrt(pi)*sigma))
# Tests cdf
assert cdf(X)(x) == Piecewise(
(erf(sqrt(2)*(-mu + log(x))/(2*sigma))/2
+ S(1)/2, x > 0), (0, True))
X = LogNormal('x', 0, 1) # Mean 0, standard deviation 1
assert density(X)(x) == sqrt(2)*exp(-log(x)**2/2)/(2*x*sqrt(pi))
def test_maxwell():
a = Symbol("a", positive=True)
X = Maxwell('x', a)
assert density(X)(x) == (sqrt(2)*x**2*exp(-x**2/(2*a**2))/
(sqrt(pi)*a**3))
assert E(X) == 2*sqrt(2)*a/sqrt(pi)
assert variance(X) == -8*a**2/pi + 3*a**2
assert cdf(X)(x) == erf(sqrt(2)*x/(2*a)) - sqrt(2)*x*exp(-x**2/(2*a**2))/(sqrt(pi)*a)
assert diff(cdf(X)(x), x) == density(X)(x)
def test_nakagami():
mu = Symbol("mu", positive=True)
omega = Symbol("omega", positive=True)
X = Nakagami('x', mu, omega)
assert density(X)(x) == (2*x**(2*mu - 1)*mu**mu*omega**(-mu)
*exp(-x**2*mu/omega)/gamma(mu))
assert simplify(E(X)) == (sqrt(mu)*sqrt(omega)
*gamma(mu + S.Half)/gamma(mu + 1))
assert simplify(variance(X)) == (
omega - omega*gamma(mu + S.Half)**2/(gamma(mu)*gamma(mu + 1)))
assert cdf(X)(x) == Piecewise(
(lowergamma(mu, mu*x**2/omega)/gamma(mu), x > 0),
(0, True))
def test_gaussian_inverse():
# test for symbolic parameters
a, b = symbols('a b')
assert GaussianInverse('x', a, b)
# Inverse Gaussian distribution is also known as Wald distribution
# `GaussianInverse` can also be referred by the name `Wald`
a, b, z = symbols('a b z')
X = Wald('x', a, b)
assert density(X)(z) == sqrt(2)*sqrt(b/z**3)*exp(-b*(-a + z)**2/(2*a**2*z))/(2*sqrt(pi))
a, b = symbols('a b', positive=True)
z = Symbol('z', positive=True)
X = GaussianInverse('x', a, b)
assert density(X)(z) == sqrt(2)*sqrt(b)*sqrt(z**(-3))*exp(-b*(-a + z)**2/(2*a**2*z))/(2*sqrt(pi))
assert E(X) == a
assert variance(X).expand() == a**3/b
assert cdf(X)(z) == (S.Half - erf(sqrt(2)*sqrt(b)*(1 + z/a)/(2*sqrt(z)))/2)*exp(2*b/a) +\
erf(sqrt(2)*sqrt(b)*(-1 + z/a)/(2*sqrt(z)))/2 + S.Half
a = symbols('a', nonpositive=True)
raises(ValueError, lambda: GaussianInverse('x', a, b))
a = symbols('a', positive=True)
b = symbols('b', nonpositive=True)
raises(ValueError, lambda: GaussianInverse('x', a, b))
def test_sampling_gaussian_inverse():
scipy = import_module('scipy')
if not scipy:
skip('Scipy not installed. Abort tests for sampling of Gaussian inverse.')
X = GaussianInverse("x", 1, 1)
assert sample(X) in X.pspace.domain.set
def test_pareto():
xm, beta = symbols('xm beta', positive=True)
alpha = beta + 5
X = Pareto('x', xm, alpha)
dens = density(X)
#Tests cdf function
assert cdf(X)(x) == \
Piecewise((-x**(-beta - 5)*xm**(beta + 5) + 1, x >= xm), (0, True))
#Tests characteristic_function
assert characteristic_function(X)(x) == \
((-I*x*xm)**(beta + 5)*(beta + 5)*uppergamma(-beta - 5, -I*x*xm))
assert dens(x) == x**(-(alpha + 1))*xm**(alpha)*(alpha)
assert simplify(E(X)) == alpha*xm/(alpha-1)
# computation of taylor series for MGF still too slow
#assert simplify(variance(X)) == xm**2*alpha / ((alpha-1)**2*(alpha-2))
def test_pareto_numeric():
xm, beta = 3, 2
alpha = beta + 5
X = Pareto('x', xm, alpha)
assert E(X) == alpha*xm/S(alpha - 1)
assert variance(X) == xm**2*alpha / S(((alpha - 1)**2*(alpha - 2)))
# Skewness tests too slow. Try shortcutting function?
def test_raised_cosine():
mu = Symbol("mu", real=True)
s = Symbol("s", positive=True)
X = RaisedCosine("x", mu, s)
#Tests characteristics_function
assert characteristic_function(X)(x) == \
Piecewise((exp(-I*pi*mu/s)/2, Eq(x, -pi/s)), (exp(I*pi*mu/s)/2, Eq(x, pi/s)), (pi**2*exp(I*mu*x)*sin(s*x)/(s*x*(-s**2*x**2 + pi**2)), True))
assert density(X)(x) == (Piecewise(((cos(pi*(x - mu)/s) + 1)/(2*s),
And(x <= mu + s, mu - s <= x)), (0, True)))
def test_rayleigh():
sigma = Symbol("sigma", positive=True)
X = Rayleigh('x', sigma)
#Tests characteristic_function
assert characteristic_function(X)(x) == (-sqrt(2)*sqrt(pi)*sigma*x*(erfi(sqrt(2)*sigma*x/2) - I)*exp(-sigma**2*x**2/2)/2 + 1)
assert density(X)(x) == x*exp(-x**2/(2*sigma**2))/sigma**2
assert E(X) == sqrt(2)*sqrt(pi)*sigma/2
assert variance(X) == -pi*sigma**2/2 + 2*sigma**2
assert cdf(X)(x) == 1 - exp(-x**2/(2*sigma**2))
assert diff(cdf(X)(x), x) == density(X)(x)
def test_shiftedgompertz():
b = Symbol("b", positive=True)
eta = Symbol("eta", positive=True)
X = ShiftedGompertz("x", b, eta)
assert density(X)(x) == b*(eta*(1 - exp(-b*x)) + 1)*exp(-b*x)*exp(-eta*exp(-b*x))
def test_studentt():
nu = Symbol("nu", positive=True)
X = StudentT('x', nu)
assert density(X)(x) == (1 + x**2/nu)**(-nu/2 - S.Half)/(sqrt(nu)*beta(S.Half, nu/2))
assert cdf(X)(x) == S.Half + x*gamma(nu/2 + S.Half)*hyper((S.Half, nu/2 + S.Half),
(Rational(3, 2),), -x**2/nu)/(sqrt(pi)*sqrt(nu)*gamma(nu/2))
def test_trapezoidal():
a = Symbol("a", real=True)
b = Symbol("b", real=True)
c = Symbol("c", real=True)
d = Symbol("d", real=True)
X = Trapezoidal('x', a, b, c, d)
assert density(X)(x) == Piecewise(((-2*a + 2*x)/((-a + b)*(-a - b + c + d)), (a <= x) & (x < b)),
(2/(-a - b + c + d), (b <= x) & (x < c)),
((2*d - 2*x)/((-c + d)*(-a - b + c + d)), (c <= x) & (x <= d)),
(0, True))
X = Trapezoidal('x', 0, 1, 2, 3)
assert E(X) == Rational(3, 2)
assert variance(X) == Rational(5, 12)
assert P(X < 2) == Rational(3, 4)
def test_triangular():
a = Symbol("a")
b = Symbol("b")
c = Symbol("c")
X = Triangular('x', a, b, c)
assert str(density(X)(x)) == ("Piecewise(((-2*a + 2*x)/((-a + b)*(-a + c)), (a <= x) & (c > x)), "
"(2/(-a + b), Eq(c, x)), ((2*b - 2*x)/((-a + b)*(b - c)), (b >= x) & (c < x)), (0, True))")
#Tests moment_generating_function
assert moment_generating_function(X)(x).expand() == \
((-2*(-a + b)*exp(c*x) + 2*(-a + c)*exp(b*x) + 2*(b - c)*exp(a*x))/(x**2*(-a + b)*(-a + c)*(b - c))).expand()
def test_quadratic_u():
a = Symbol("a", real=True)
b = Symbol("b", real=True)
X = QuadraticU("x", a, b)
Y = QuadraticU("x", 1, 2)
# Tests _moment_generating_function
assert moment_generating_function(Y)(1) == -15*exp(2) + 27*exp(1)
assert moment_generating_function(Y)(2) == -9*exp(4)/2 + 21*exp(2)/2
assert density(X)(x) == (Piecewise((12*(x - a/2 - b/2)**2/(-a + b)**3,
And(x <= b, a <= x)), (0, True)))
def test_uniform():
l = Symbol('l', real=True)
w = Symbol('w', positive=True)
X = Uniform('x', l, l + w)
assert E(X) == l + w/2
assert variance(X).expand() == w**2/12
# With numbers all is well
X = Uniform('x', 3, 5)
assert P(X < 3) == 0 and P(X > 5) == 0
assert P(X < 4) == P(X > 4) == S.Half
z = Symbol('z')
p = density(X)(z)
assert p.subs(z, 3.7) == S.Half
assert p.subs(z, -1) == 0
assert p.subs(z, 6) == 0
c = cdf(X)
assert c(2) == 0 and c(3) == 0
assert c(Rational(7, 2)) == Rational(1, 4)
assert c(5) == 1 and c(6) == 1
@XFAIL
def test_uniform_P():
""" This stopped working because SingleContinuousPSpace.compute_density no
longer calls integrate on a DiracDelta but rather just solves directly.
    integrate used to call UniformDistribution.expectation, which special-cased
    substituting out the Min and Max terms that Uniform produces.
    I decided to regress on this class for general cleanliness (and, I suspect,
    speed) of the algorithm.
"""
l = Symbol('l', real=True)
w = Symbol('w', positive=True)
X = Uniform('x', l, l + w)
assert P(X < l) == 0 and P(X > l + w) == 0
def test_uniformsum():
n = Symbol("n", integer=True)
_k = Dummy("k")
x = Symbol("x")
X = UniformSum('x', n)
res = Sum((-1)**_k*(-_k + x)**(n - 1)*binomial(n, _k), (_k, 0, floor(x)))/factorial(n - 1)
assert density(X)(x).dummy_eq(res)
#Tests set functions
assert X.pspace.domain.set == Interval(0, n)
#Tests the characteristic_function
assert characteristic_function(X)(x) == (-I*(exp(I*x) - 1)/x)**n
#Tests the moment_generating_function
assert moment_generating_function(X)(x) == ((exp(x) - 1)/x)**n
def test_von_mises():
mu = Symbol("mu")
k = Symbol("k", positive=True)
X = VonMises("x", mu, k)
assert density(X)(x) == exp(k*cos(x - mu))/(2*pi*besseli(0, k))
def test_weibull():
a, b = symbols('a b', positive=True)
# FIXME: simplify(E(X)) seems to hang without extended_positive=True
# On a Linux machine this had a rapid memory leak...
# a, b = symbols('a b', positive=True)
X = Weibull('x', a, b)
assert E(X).expand() == a * gamma(1 + 1/b)
assert variance(X).expand() == (a**2 * gamma(1 + 2/b) - E(X)**2).expand()
assert simplify(skewness(X)) == (2*gamma(1 + 1/b)**3 - 3*gamma(1 + 1/b)*gamma(1 + 2/b) + gamma(1 + 3/b))/(-gamma(1 + 1/b)**2 + gamma(1 + 2/b))**Rational(3, 2)
assert simplify(kurtosis(X)) == (-3*gamma(1 + 1/b)**4 +\
6*gamma(1 + 1/b)**2*gamma(1 + 2/b) - 4*gamma(1 + 1/b)*gamma(1 + 3/b) + gamma(1 + 4/b))/(gamma(1 + 1/b)**2 - gamma(1 + 2/b))**2
def test_weibull_numeric():
# Test for integers and rationals
a = 1
bvals = [S.Half, 1, Rational(3, 2), 5]
for b in bvals:
X = Weibull('x', a, b)
assert simplify(E(X)) == expand_func(a * gamma(1 + 1/S(b)))
assert simplify(variance(X)) == simplify(
a**2 * gamma(1 + 2/S(b)) - E(X)**2)
# Not testing Skew... it's slow with int/frac values > 3/2
def test_wignersemicircle():
R = Symbol("R", positive=True)
X = WignerSemicircle('x', R)
assert density(X)(x) == 2*sqrt(-x**2 + R**2)/(pi*R**2)
assert E(X) == 0
#Tests ChiNoncentralDistribution
assert characteristic_function(X)(x) == \
Piecewise((2*besselj(1, R*x)/(R*x), Ne(x, 0)), (1, True))
def test_prefab_sampling():
N = Normal('X', 0, 1)
L = LogNormal('L', 0, 1)
E = Exponential('Ex', 1)
P = Pareto('P', 1, 3)
W = Weibull('W', 1, 1)
U = Uniform('U', 0, 1)
B = Beta('B', 2, 5)
G = Gamma('G', 1, 3)
variables = [N, L, E, P, W, U, B, G]
niter = 10
for var in variables:
for i in range(niter):
assert sample(var) in var.pspace.domain.set
def test_input_value_assertions():
a, b = symbols('a b')
p, q = symbols('p q', positive=True)
m, n = symbols('m n', positive=False, real=True)
raises(ValueError, lambda: Normal('x', 3, 0))
raises(ValueError, lambda: Normal('x', m, n))
Normal('X', a, p) # No error raised
raises(ValueError, lambda: Exponential('x', m))
Exponential('Ex', p) # No error raised
for fn in [Pareto, Weibull, Beta, Gamma]:
raises(ValueError, lambda: fn('x', m, p))
raises(ValueError, lambda: fn('x', p, n))
fn('x', p, q) # No error raised
def test_unevaluated():
X = Normal('x', 0, 1)
assert str(E(X, evaluate=False)) == ("Integral(sqrt(2)*x*exp(-x**2/2)/"
"(2*sqrt(pi)), (x, -oo, oo))")
assert str(E(X + 1, evaluate=False)) == ("Integral(sqrt(2)*x*exp(-x**2/2)/"
"(2*sqrt(pi)), (x, -oo, oo)) + 1")
assert str(P(X > 0, evaluate=False)) == ("Integral(sqrt(2)*exp(-_z**2/2)/"
"(2*sqrt(pi)), (_z, 0, oo))")
assert P(X > 0, X**2 < 1, evaluate=False) == S.Half
def test_probability_unevaluated():
T = Normal('T', 30, 3)
assert type(P(T > 33, evaluate=False)) == Integral
def test_density_unevaluated():
X = Normal('X', 0, 1)
Y = Normal('Y', 0, 2)
assert isinstance(density(X+Y, evaluate=False)(z), Integral)
def test_NormalDistribution():
nd = NormalDistribution(0, 1)
x = Symbol('x')
assert nd.cdf(x) == erf(sqrt(2)*x/2)/2 + S.Half
assert isinstance(nd.sample(), float) or nd.sample().is_Number
assert nd.expectation(1, x) == 1
assert nd.expectation(x, x) == 0
assert nd.expectation(x**2, x) == 1
def test_random_parameters():
mu = Normal('mu', 2, 3)
meas = Normal('T', mu, 1)
assert density(meas, evaluate=False)(z)
assert isinstance(pspace(meas), JointPSpace)
#assert density(meas, evaluate=False)(z) == Integral(mu.pspace.pdf *
# meas.pspace.pdf, (mu.symbol, -oo, oo)).subs(meas.symbol, z)
def test_random_parameters_given():
mu = Normal('mu', 2, 3)
meas = Normal('T', mu, 1)
assert given(meas, Eq(mu, 5)) == Normal('T', 5, 1)
def test_conjugate_priors():
mu = Normal('mu', 2, 3)
x = Normal('x', mu, 1)
assert isinstance(simplify(density(mu, Eq(x, y), evaluate=False)(z)),
Mul)
def test_difficult_univariate():
""" Since using solve in place of deltaintegrate we're able to perform
substantially more complex density computations on single continuous random
variables """
x = Normal('x', 0, 1)
assert density(x**3)
assert density(exp(x**2))
assert density(log(x))
def test_issue_10003():
X = Exponential('x', 3)
G = Gamma('g', 1, 2)
assert P(X < -1) is S.Zero
assert P(G < -1) is S.Zero
@slow
def test_precomputed_cdf():
x = symbols("x", real=True)
mu = symbols("mu", real=True)
sigma, xm, alpha = symbols("sigma xm alpha", positive=True)
n = symbols("n", integer=True, positive=True)
distribs = [
Normal("X", mu, sigma),
Pareto("P", xm, alpha),
ChiSquared("C", n),
Exponential("E", sigma),
# LogNormal("L", mu, sigma),
]
for X in distribs:
compdiff = cdf(X)(x) - simplify(X.pspace.density.compute_cdf()(x))
compdiff = simplify(compdiff.rewrite(erfc))
assert compdiff == 0
@slow
def test_precomputed_characteristic_functions():
import mpmath
def test_cf(dist, support_lower_limit, support_upper_limit):
pdf = density(dist)
t = Symbol('t')
# first function is the hardcoded CF of the distribution
cf1 = lambdify([t], characteristic_function(dist)(t), 'mpmath')
# second function is the Fourier transform of the density function
f = lambdify([x, t], pdf(x)*exp(I*x*t), 'mpmath')
cf2 = lambda t: mpmath.quad(lambda x: f(x, t), [support_lower_limit, support_upper_limit], maxdegree=10)
# compare the two functions at various points
for test_point in [2, 5, 8, 11]:
n1 = cf1(test_point)
n2 = cf2(test_point)
assert abs(re(n1) - re(n2)) < 1e-12
assert abs(im(n1) - im(n2)) < 1e-12
test_cf(Beta('b', 1, 2), 0, 1)
test_cf(Chi('c', 3), 0, mpmath.inf)
test_cf(ChiSquared('c', 2), 0, mpmath.inf)
test_cf(Exponential('e', 6), 0, mpmath.inf)
test_cf(Logistic('l', 1, 2), -mpmath.inf, mpmath.inf)
test_cf(Normal('n', -1, 5), -mpmath.inf, mpmath.inf)
test_cf(RaisedCosine('r', 3, 1), 2, 4)
test_cf(Rayleigh('r', 0.5), 0, mpmath.inf)
test_cf(Uniform('u', -1, 1), -1, 1)
test_cf(WignerSemicircle('w', 3), -3, 3)
def test_long_precomputed_cdf():
x = symbols("x", real=True)
distribs = [
Arcsin("A", -5, 9),
Dagum("D", 4, 10, 3),
Erlang("E", 14, 5),
Frechet("F", 2, 6, -3),
Gamma("G", 2, 7),
GammaInverse("GI", 3, 5),
Kumaraswamy("K", 6, 8),
Laplace("LA", -5, 4),
Logistic("L", -6, 7),
Nakagami("N", 2, 7),
StudentT("S", 4)
]
for distr in distribs:
for _ in range(5):
assert tn(diff(cdf(distr)(x), x), density(distr)(x), x, a=0, b=0, c=1, d=0)
US = UniformSum("US", 5)
pdf01 = density(US)(x).subs(floor(x), 0).doit() # pdf on (0, 1)
cdf01 = cdf(US, evaluate=False)(x).subs(floor(x), 0).doit() # cdf on (0, 1)
assert tn(diff(cdf01, x), pdf01, x, a=0, b=0, c=1, d=0)
def test_issue_13324():
X = Uniform('X', 0, 1)
assert E(X, X > S.Half) == Rational(3, 4)
assert E(X, X > 0) == S.Half
def test_FiniteSet_prob():
E = Exponential('E', 3)
N = Normal('N', 5, 7)
assert P(Eq(E, 1)) is S.Zero
assert P(Eq(N, 2)) is S.Zero
assert P(Eq(N, x)) is S.Zero
def test_prob_neq():
E = Exponential('E', 4)
X = ChiSquared('X', 4)
assert P(Ne(E, 2)) == 1
assert P(Ne(X, 4)) == 1
assert P(Ne(X, 5)) == 1
assert P(Ne(E, x)) == 1
def test_union():
N = Normal('N', 3, 2)
assert simplify(P(N**2 - N > 2)) == \
-erf(sqrt(2))/2 - erfc(sqrt(2)/4)/2 + Rational(3, 2)
assert simplify(P(N**2 - 4 > 0)) == \
-erf(5*sqrt(2)/4)/2 - erfc(sqrt(2)/4)/2 + Rational(3, 2)
def test_Or():
N = Normal('N', 0, 1)
assert simplify(P(Or(N > 2, N < 1))) == \
-erf(sqrt(2))/2 - erfc(sqrt(2)/2)/2 + Rational(3, 2)
assert P(Or(N < 0, N < 1)) == P(N < 1)
assert P(Or(N > 0, N < 0)) == 1
def test_conditional_eq():
E = Exponential('E', 1)
assert P(Eq(E, 1), Eq(E, 1)) == 1
assert P(Eq(E, 1), Eq(E, 2)) == 0
assert P(E > 1, Eq(E, 2)) == 1
assert P(E < 1, Eq(E, 2)) == 0
|
kaushik94/sympy
|
sympy/stats/tests/test_continuous_rv.py
|
Python
|
bsd-3-clause
| 44,661
|
[
"Gaussian"
] |
a41750543a74e9ab8d69f34a2e09732de279e36626d3665e1c7e494a3c3737fb
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from __future__ import print_function, division
import warnings
import numpy as np
##############################################################################
# Functions
##############################################################################
def lengths_and_angles_to_box_vectors(a_length, b_length, c_length, alpha, beta, gamma):
"""Convert from the lengths/angles of the unit cell to the box
vectors (Bravais vectors). The angles should be in degrees.
Parameters
----------
a_length : scalar or np.ndarray
length of Bravais unit vector **a**
b_length : scalar or np.ndarray
length of Bravais unit vector **b**
c_length : scalar or np.ndarray
length of Bravais unit vector **c**
alpha : scalar or np.ndarray
angle between vectors **b** and **c**, in degrees.
beta : scalar or np.ndarray
angle between vectors **c** and **a**, in degrees.
gamma : scalar or np.ndarray
angle between vectors **a** and **b**, in degrees.
Returns
-------
a : np.ndarray
        If the inputs are scalar, the vectors will be one-dimensional (length 3).
If the inputs are one dimension, shape=(n_frames, ), then the output
will be (n_frames, 3)
b : np.ndarray
        If the inputs are scalar, the vectors will be one-dimensional (length 3).
If the inputs are one dimension, shape=(n_frames, ), then the output
will be (n_frames, 3)
c : np.ndarray
        If the inputs are scalar, the vectors will be one-dimensional (length 3).
If the inputs are one dimension, shape=(n_frames, ), then the output
will be (n_frames, 3)
Examples
--------
>>> import numpy as np
>>> result = lengths_and_angles_to_box_vectors(1, 1, 1, 90.0, 90.0, 90.0)
Notes
-----
This code is adapted from gyroid, which is licensed under the BSD
http://pythonhosted.org/gyroid/_modules/gyroid/unitcell.html
"""
if np.all(alpha < 2*np.pi) and np.all(beta < 2*np.pi) and np.all(gamma < 2*np.pi):
warnings.warn('All your angles were less than 2*pi. Did you accidentally give me radians?')
alpha = alpha * np.pi / 180
beta = beta * np.pi / 180
gamma = gamma * np.pi / 180
a = np.array([a_length, np.zeros_like(a_length), np.zeros_like(a_length)])
b = np.array([b_length*np.cos(gamma), b_length*np.sin(gamma), np.zeros_like(b_length)])
cx = c_length*np.cos(beta)
cy = c_length*(np.cos(alpha) - np.cos(beta)*np.cos(gamma)) / np.sin(gamma)
cz = np.sqrt(c_length*c_length - cx*cx - cy*cy)
c = np.array([cx,cy,cz])
if not a.shape == b.shape == c.shape:
raise TypeError('Shape is messed up.')
# Make sure that all vector components that are _almost_ 0 are set exactly
# to 0
tol = 1e-6
a[np.logical_and(a>-tol, a<tol)] = 0.0
b[np.logical_and(b>-tol, b<tol)] = 0.0
c[np.logical_and(c>-tol, c<tol)] = 0.0
return a.T, b.T, c.T
def box_vectors_to_lengths_and_angles(a, b, c):
"""Convert box vectors into the lengths and angles defining the box.
Parameters
----------
a : np.ndarray
the vector defining the first edge of the periodic box (length 3), or
an array of this vector in multiple frames, where a[i,:] gives the
length 3 array of vector a in each frame of a simulation
b : np.ndarray
the vector defining the second edge of the periodic box (length 3), or
an array of this vector in multiple frames, where b[i,:] gives the
        length 3 array of vector b in each frame of a simulation
c : np.ndarray
the vector defining the third edge of the periodic box (length 3), or
an array of this vector in multiple frames, where c[i,:] gives the
        length 3 array of vector c in each frame of a simulation
Examples
--------
>>> a = np.array([2,0,0], dtype=float)
>>> b = np.array([0,1,0], dtype=float)
>>> c = np.array([0,1,1], dtype=float)
>>> l1, l2, l3, alpha, beta, gamma = box_vectors_to_lengths_and_angles(a, b, c)
>>> (l1 == 2.0) and (l2 == 1.0) and (l3 == np.sqrt(2))
True
>>> np.abs(alpha - 45) < 1e-6
True
>>> np.abs(beta - 90.0) < 1e-6
True
>>> np.abs(gamma - 90.0) < 1e-6
True
Returns
-------
a_length : scalar or np.ndarray
length of Bravais unit vector **a**
b_length : scalar or np.ndarray
length of Bravais unit vector **b**
c_length : scalar or np.ndarray
length of Bravais unit vector **c**
alpha : scalar or np.ndarray
angle between vectors **b** and **c**, in degrees.
beta : scalar or np.ndarray
angle between vectors **c** and **a**, in degrees.
gamma : scalar or np.ndarray
angle between vectors **a** and **b**, in degrees.
"""
if not a.shape == b.shape == c.shape:
raise TypeError('Shape is messed up.')
if not a.shape[-1] == 3:
raise TypeError('The last dimension must be length 3')
if not (a.ndim in [1,2]):
raise ValueError('vectors must be 1d or 2d (for a vectorized '
'operation on multiple frames)')
last_dim = a.ndim-1
a_length = np.sqrt(np.sum(a*a, axis=last_dim))
b_length = np.sqrt(np.sum(b*b, axis=last_dim))
c_length = np.sqrt(np.sum(c*c, axis=last_dim))
# we allow 2d input, where the first dimension is the frame index
# so we want to do the dot product only over the last dimension
alpha = np.arccos(np.einsum('...i, ...i', b, c) / (b_length * c_length))
beta = np.arccos(np.einsum('...i, ...i', c, a) / (c_length * a_length))
gamma = np.arccos(np.einsum('...i, ...i', a, b) / (a_length * b_length))
# convert to degrees
alpha = alpha * 180.0 / np.pi
beta = beta * 180.0 / np.pi
gamma = gamma * 180.0 / np.pi
return a_length, b_length, c_length, alpha, beta, gamma
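def _demo_box_roundtrip():
    """A minimal round-trip sketch using the two converters above.

    The cubic cell values and the tolerance are illustrative choices only:
    converting lengths/angles to box vectors and back should recover the
    original lengths and angles.
    """
    a, b, c = lengths_and_angles_to_box_vectors(2.0, 2.0, 2.0, 90.0, 90.0, 90.0)
    recovered = box_vectors_to_lengths_and_angles(a, b, c)
    assert np.allclose(recovered, (2.0, 2.0, 2.0, 90.0, 90.0, 90.0), atol=1e-6)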
|
daviddesancho/mdtraj
|
mdtraj/utils/unitcell.py
|
Python
|
lgpl-2.1
| 6,899
|
[
"MDTraj"
] |
ac811877bbb1ea56d6e74445503c9de4a469818174ec0e410cf7a1bd997b38a3
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.anovaglm import H2OANOVAGLMEstimator
# Simple test to check correct frame transformation
def testFrameTransform():
train = h2o.import_file(path=pyunit_utils.locate("smalldata/anovaGlm/Moore.csv"))
answer = h2o.import_file(path=pyunit_utils.locate("smalldata/anovaGlm/MooreTransformed.csv"))
y = 'conformity'
x = ['fcategory', 'partner.status']
model = H2OANOVAGLMEstimator(family='gaussian', lambda_=0, save_transformed_framekeys=True)
model.train(x=x, y=y, training_frame=train)
transformFrame = h2o.get_frame(model._model_json['output']['transformed_columns_key']['name'])
pyunit_utils.compare_frames_local(answer[['fcategory1', 'fcategory2', 'partner.status1',
'fcategory1:partner.status1', 'fcategory2:partner.status1']],
transformFrame[['fcategory_high', 'fcategory_low', 'partner.status_high',
'fcategory_high:partner.status_high',
'fcategory_low:partner.status_high']], prob=1)
if __name__ == "__main__":
pyunit_utils.standalone_test(testFrameTransform)
else:
testFrameTransform()
|
h2oai/h2o-3
|
h2o-py/tests/testdir_algos/anovaglm/pyunit_PUBDEV_8088_transformFrame.py
|
Python
|
apache-2.0
| 1,305
|
[
"Gaussian"
] |
cf0ff3db2bdce4f21d7bc00baa3ed7ac41613519d6312db8bdd8aa8d56d98f54
|
# Copyright 2005 by Jonathan Taylor.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import sys
# Add path to Bio
sys.path.append('../..')
"""This module deals with CAPS markers.
A CAPS marker is a location a DifferentialCutsite as described below and a
set of primers that can be used to visualize this. More information can
be found in the paper `Konieczny and Ausubel (1993)`_ (PMID 8106085).
.. _`Konieczny and Ausubel (1993)`: http://dx.doi.org/10.1046/j.1365-313X.1993.04020403.x
"""
__docformat__ = "restructuredtext en"
class DifferentialCutsite(object):
"""Differential enzyme cutsite in an alignment.
A differential cutsite is a location in an alignment where an enzyme cuts
at least one sequence and also cannot cut at least one other sequence.
Members:
- start - Where it lives in the alignment.
- enzyme - The enzyme that causes this.
- cuts_in - A list of sequences (as indexes into the alignment) the
enzyme cuts in.
- blocked_in - A list of sequences (as indexes into the alignment) the
enzyme is blocked in.
"""
def __init__(self, **kwds):
"""Initialize a DifferentialCutsite.
Each member (as listed in the class description) should be included as a
keyword.
"""
self.start = int(kwds["start"])
self.enzyme = kwds["enzyme"]
self.cuts_in = kwds["cuts_in"]
self.blocked_in = kwds["blocked_in"]
class AlignmentHasDifferentLengthsError(Exception):
pass
class CAPSMap(object):
"""A map of an alignment showing all possible dcuts.
Members:
- alignment - The alignment that is mapped.
- dcuts - A list of possible CAPS markers in the form of
DifferentialCutsites.
"""
def __init__(self, alignment, enzymes = []):
"""Initialize the CAPSMap.
Required:
- alignment - The alignment to be mapped.
Optional:
- enzymes - The enzymes to be used to create the map.
"""
self.sequences = [rec.seq for rec in alignment]
self.size = len(self.sequences)
self.length = len(self.sequences[0])
for seq in self.sequences:
if len(seq) != self.length:
raise AlignmentHasDifferentLengthsError
self.alignment = alignment
self.enzymes = enzymes
# look for dcuts
self._digest()
def _digest_with(self, enzyme):
cuts = [] # list of lists, one per sequence
all = []
# go through each sequence
for seq in self.sequences:
# grab all the cuts in the sequence
seq_cuts = [cut - enzyme.fst5 for cut in enzyme.search(seq)]
# maintain a list of all cuts in all sequences
all.extend(seq_cuts)
cuts.append(seq_cuts)
        # sort the combined cut positions and remove duplicates
        all = sorted(set(all))
# all now has indices for all sequences in the alignment
for cut in all:
# test for dcuts
cuts_in = []
blocked_in = []
for i in range(0, self.size):
seq = self.sequences[i]
if cut in cuts[i]:
cuts_in.append(i)
else:
blocked_in.append(i)
if cuts_in != [] and blocked_in != []:
self.dcuts.append(DifferentialCutsite(start = cut, enzyme = enzyme, cuts_in = cuts_in, blocked_in = blocked_in))
def _digest(self):
self.dcuts = []
for enzyme in self.enzymes:
self._digest_with(enzyme)
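def _demo_caps_map(alignment_path="example_alignment.fasta"):
    """A minimal usage sketch; the alignment path and the enzyme choices are
    placeholders, not part of this module's API.

    Reads an alignment, maps it with two restriction enzymes and reports each
    differential cutsite found.
    """
    from Bio import AlignIO
    from Bio.Restriction import EcoRI, HindIII

    alignment = AlignIO.read(alignment_path, "fasta")
    caps_map = CAPSMap(alignment, enzymes=[EcoRI, HindIII])
    for dcut in caps_map.dcuts:
        print("%s at %d: cuts_in=%s, blocked_in=%s"
              % (dcut.enzyme, dcut.start, dcut.cuts_in, dcut.blocked_in))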
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/CAPS/__init__.py
|
Python
|
gpl-2.0
| 3,889
|
[
"Biopython"
] |
7134bd38305c64fcd2aae98d181c61b2122d9d4ae19548a54b02949de0e56477
|
"""Dynamic Imaging of Coherent Sources (DICS)."""
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Britta Westner <britta.wstnr@gmail.com>
# Susanna Aro <susanna.aro@aalto.fi>
# Roman Goj <roman.goj@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from ..utils import (logger, verbose, warn, _check_one_ch_type,
_check_channels_spatial_filter, _check_rank,
_check_option)
from ..forward import _subject_from_forward
from ..minimum_norm.inverse import combine_xyz, _check_reference
from ..source_estimate import _make_stc, _get_src_type
from ..time_frequency import csd_fourier, csd_multitaper, csd_morlet
from ._compute_beamformer import (_check_proj_match, _prepare_beamformer_input,
_compute_beamformer, _check_src_type,
Beamformer, _compute_power)
@verbose
def make_dics(info, forward, csd, reg=0.05, label=None, pick_ori=None,
rank=None, inversion='single', weight_norm=None,
normalize_fwd=True, real_filter=False, reduce_rank=False,
verbose=None):
"""Compute a Dynamic Imaging of Coherent Sources (DICS) spatial filter.
This is a beamformer filter that can be used to estimate the source power
at a specific frequency range [1]_. It does this by constructing a spatial
filter for each source point. The computation of these filters is very
similar to those of the LCMV beamformer (:func:`make_lcmv`), but instead of
operating on a covariance matrix, the CSD matrix is used. When applying
these filters to a CSD matrix (see :func:`apply_dics_csd`), the source
power can be estimated for each source point.
Parameters
----------
info : instance of Info
Measurement info, e.g. ``epochs.info``.
forward : instance of Forward
Forward operator.
csd : instance of CrossSpectralDensity
The data cross-spectral density (CSD) matrices. A source estimate is
performed for each frequency or frequency-bin defined in the CSD
object.
reg : float
The regularization to apply to the cross-spectral density before
computing the inverse.
label : Label | None
Restricts the solution to a given label.
pick_ori : None | 'normal' | 'max-power'
The source orientation to compute the filter for:
``None`` :
orientations are pooled (Default)
'normal' :
filters are computed for the orientation tangential to the
cortical surface
'max-power' :
            filters are computed for the orientation that maximizes
spectral power.
rank : None | int | 'full'
This controls the effective rank of the covariance matrix when
computing the inverse. The rank can be set explicitly by specifying an
integer value. If ``None``, the rank will be automatically estimated.
Since applying regularization will always make the covariance matrix
full rank, the rank is estimated before regularization in this case. If
'full', the rank will be estimated after regularization and hence
will mean using the full rank, unless ``reg=0`` is used.
The default is None.
.. versionadded:: 0.17
inversion : 'single' | 'matrix'
This determines how the beamformer deals with source spaces in "free"
orientation. Such source spaces define three orthogonal dipoles at each
source point. When ``inversion='single'``, each dipole is considered
as an individual source and the corresponding spatial filter is
computed for each dipole separately. When ``inversion='matrix'``, all
three dipoles at a source vertex are considered as a group and the
spatial filters are computed jointly using a matrix inversion. While
``inversion='single'`` is more stable, ``inversion='matrix'`` is more
precise. See section 5 of [5]_. Defaults to 'single'.
weight_norm : 'unit-noise-gain' | 'nai' | None
If 'unit-noise-gain', the unit-noise gain minimum variance beamformer
will be computed (Borgiotti-Kaplan beamformer) [2]_,
If 'nai', the Neural Activity Index [4]_ will be computed.
Defaults to ``None``, in which case no normalization is performed.
normalize_fwd : bool
Whether to normalize the forward solution. Defaults to ``True``. Note
that this normalization is not required when weight normalization
(``weight_norm``) is used.
real_filter : bool
If ``True``, take only the real part of the cross-spectral-density
matrices to compute real filters. Defaults to ``False``.
reduce_rank : bool
If ``True``, the rank of the forward operator will be reduced by 1 for
each spatial location, prior to inversion. This may be necessary when
you use a single sphere model for MEG and ``mode='vertex'``.
Defaults to ``False``.
%(verbose)s
Returns
-------
filters : instance of Beamformer
Dictionary containing filter weights from DICS beamformer.
Contains the following keys:
'weights' : ndarray, shape (n_frequencies, n_weights)
For each frequency, the filter weights of the beamformer.
'csd' : instance of CrossSpectralDensity
The data cross-spectral density matrices used to compute the
beamformer.
'ch_names' : list of str
Channels used to compute the beamformer.
'proj' : ndarray, shape (n_channels, n_channels)
Projections used to compute the beamformer.
'vertices' : list of ndarray
Vertices for which the filter weights were computed.
'inversion' : 'single' | 'matrix'
Whether the spatial filters were computed for each dipole
separately or jointly for all dipoles at each vertex using a
matrix inversion.
'weight_norm' : None | 'unit-noise-gain'
The normalization of the weights.
'normalize_fwd' : bool
Whether the forward solution was normalized
'n_orient' : int
Number of source orientations defined in the forward model.
'subject' : str
The subject ID.
'src_type' : str
Type of source space.
See Also
--------
apply_dics_csd, tf_dics
Notes
-----
The original reference is [1]_. See [5]_ for a tutorial style paper on the
topic.
The DICS beamformer is very similar to the LCMV (:func:`make_lcmv`)
beamformer and many of the parameters are shared. However,
:func:`make_dics` and :func:`make_lcmv` currently have different defaults
for these parameters, which were settled on separately through extensive
practical use case testing (but not necessarily exhaustive parameter space
searching), and it remains to be seen how functionally interchangeable they
could be.
    The default settings reproduce the DICS beamformer as described in [5]_::
inversion='single', weight_norm=None, normalize_fwd=True
To use the :func:`make_lcmv` defaults, use::
        inversion='matrix', weight_norm='unit-noise-gain', normalize_fwd=False
For more information about ``real_filter``, see the
supplemental information from [3]_.
References
----------
.. [1] Gross et al. (2001) Dynamic imaging of coherent sources: Studying
neural interactions in the human brain. PNAS vol. 98 (2)
pp. 694-699. https://doi.org/10.1073/pnas.98.2.694
.. [2] Sekihara & Nagarajan. Adaptive spatial filters for electromagnetic
brain imaging (2008) Springer Science & Business Media
.. [3] Hipp JF, Engel AK, Siegel M (2011) Oscillatory Synchronization
in Large-Scale Cortical Networks Predicts Perception.
Neuron (2011) vol 69 pp. 387-396.
https://doi.org/10.1016/j.neuron.2010.12.027
.. [4] Van Veen et al. Localization of brain electrical activity via
linearly constrained minimum variance spatial filtering.
Biomedical Engineering (1997) vol. 44 (9) pp. 867--880
.. [5] van Vliet, et al. (2018) Analysis of functional connectivity and
oscillatory power using DICS: from raw MEG data to group-level
statistics in Python. bioRxiv, 245530.
https://doi.org/10.1101/245530
""" # noqa: E501
rank = _check_rank(rank)
_check_option('pick_ori', pick_ori, [None, 'normal', 'max-power'])
_check_option('inversion', inversion, ['single', 'matrix'])
_check_option('weight_norm', weight_norm, ['unit-noise-gain', 'nai', None])
# Leadfield rank and optional rank reduction
# (to deal with problems with complex eigenvalues within the computation
# of the optimal orientation when using pinv if the leadfield was only
# rank 2 (e.g., with the spherical headmodel of the phantom data),
    # see gh-4568 and gh-4628.)
if reduce_rank and not (pick_ori == 'max-power' and inversion == 'matrix'):
raise NotImplementedError(
'The computation of spatial filters with rank reduction using '
'reduce_rank=True is only implemented with pick_ori=="max-power" '
'and inversion="matrix".'
)
frequencies = [np.mean(freq_bin) for freq_bin in csd.frequencies]
n_freqs = len(frequencies)
n_orient = forward['sol']['ncol'] // forward['nsource']
# Determine how to normalize the leadfield
if normalize_fwd:
if inversion == 'single':
if weight_norm == 'unit-noise-gain':
raise ValueError('The computation of a unit-noise-gain '
'beamformer with inversion="single" is not '
'stable with depth normalization, set '
'normalize_fwd to False.')
combine_xyz = False
else:
combine_xyz = 'fro'
exp = 1. # turn on depth weighting with exponent 1
else:
exp = None # turn off depth weighting entirely
combine_xyz = False
_check_one_ch_type('dics', info, forward)
# pick info, get gain matrix, etc.
_, info, proj, vertices, G, _, nn, orient_std = _prepare_beamformer_input(
info, forward, label, pick_ori,
combine_xyz=combine_xyz, exp=exp)
subject = _subject_from_forward(forward)
src_type = _get_src_type(forward['src'], vertices)
del forward
ch_names = list(info['ch_names'])
csd_picks = [csd.ch_names.index(ch) for ch in ch_names]
logger.info('Computing DICS spatial filters...')
Ws = []
for i, freq in enumerate(frequencies):
if n_freqs > 1:
logger.info(' computing DICS spatial filter at %sHz (%d/%d)' %
(freq, i + 1, n_freqs))
Cm = csd.get_data(index=i)
if real_filter:
Cm = Cm.real
# Ensure the CSD is in the same order as the leadfield
Cm = Cm[csd_picks, :][:, csd_picks]
# compute spatial filter
W = _compute_beamformer(G, Cm, reg, n_orient, weight_norm, pick_ori,
reduce_rank, rank=rank, inversion=inversion,
nn=nn, orient_std=orient_std)
Ws.append(W)
Ws = np.array(Ws)
filters = Beamformer(
kind='DICS', weights=Ws, csd=csd, ch_names=ch_names, proj=proj,
vertices=vertices, subject=subject, pick_ori=pick_ori,
inversion=inversion, weight_norm=weight_norm,
normalize_fwd=bool(normalize_fwd), src_type=src_type,
n_orient=n_orient if pick_ori is None else 1)
return filters
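def _dics_power_sketch(epochs, forward, frequencies=(10., 12.)):
    """A minimal usage sketch, assuming ``epochs`` (mne.Epochs) and ``forward``
    (mne.Forward) are prepared by the caller; the frequency values are
    illustrative only.

    Builds a CSD with Morlet wavelets, computes DICS filters with the defaults
    documented above, and applies them back to the CSD to estimate source
    power per frequency.
    """
    csd = csd_morlet(epochs, frequencies=list(frequencies))
    filters = make_dics(epochs.info, forward, csd)
    stc, freqs = apply_dics_csd(csd, filters)
    return stc, freqs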
def _apply_dics(data, filters, info, tmin):
"""Apply DICS spatial filter to data for source reconstruction."""
if isinstance(data, np.ndarray) and data.ndim == 2:
data = [data]
one_epoch = True
else:
one_epoch = False
Ws = filters['weights']
one_freq = len(Ws) == 1
subject = filters['subject']
# compatibility with 0.16, add src_type as None if not present:
filters, warn_text = _check_src_type(filters)
for i, M in enumerate(data):
if not one_epoch:
logger.info("Processing epoch : %d" % (i + 1))
# Apply SSPs
if info['projs']:
_check_proj_match(info, filters)
M = np.dot(filters['proj'], M)
stcs = []
for W in Ws:
# project to source space using beamformer weights
sol = np.dot(W, M)
if filters['n_orient'] > 1:
logger.info('combining the current components...')
sol = combine_xyz(sol)
tstep = 1.0 / info['sfreq']
stcs.append(_make_stc(sol, vertices=filters['vertices'],
src_type=filters['src_type'], tmin=tmin,
tstep=tstep, subject=subject,
warn_text=warn_text))
if one_freq:
yield stcs[0]
else:
yield stcs
logger.info('[done]')
@verbose
def apply_dics(evoked, filters, verbose=None):
"""Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights.
Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights
on evoked data.
.. warning:: The result of this function is meant as an intermediate step
for further processing (such as computing connectivity). If
you are interested in estimating source time courses, use an
LCMV beamformer (:func:`make_lcmv`, :func:`apply_lcmv`)
instead. If you are interested in estimating spectral power at
the source level, use :func:`apply_dics_csd`.
.. warning:: This implementation has not been heavily tested so please
report any issues or suggestions.
Parameters
----------
evoked : Evoked
Evoked data to apply the DICS beamformer weights to.
filters : instance of Beamformer
DICS spatial filter (beamformer weights)
Filter weights returned from :func:`make_dics`.
%(verbose)s
Returns
-------
stc : SourceEstimate | VolSourceEstimate | list
Source time courses. If the DICS beamformer has been computed for more
than one frequency, a list is returned containing for each frequency
the corresponding time courses.
See Also
--------
apply_dics_epochs
apply_dics_csd
""" # noqa: E501
_check_reference(evoked)
info = evoked.info
data = evoked.data
tmin = evoked.times[0]
sel = _check_channels_spatial_filter(evoked.ch_names, filters)
data = data[sel]
stc = _apply_dics(data=data, filters=filters, info=info, tmin=tmin)
return next(stc)
@verbose
def apply_dics_epochs(epochs, filters, return_generator=False, verbose=None):
"""Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights.
Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights
on single trial data.
.. warning:: The result of this function is meant as an intermediate step
for further processing (such as computing connectivity). If
you are interested in estimating source time courses, use an
LCMV beamformer (:func:`make_lcmv`, :func:`apply_lcmv`)
instead. If you are interested in estimating spectral power at
the source level, use :func:`apply_dics_csd`.
.. warning:: This implementation has not been heavily tested so please
                 report any issues or suggestions.
Parameters
----------
epochs : Epochs
Single trial epochs.
filters : instance of Beamformer
DICS spatial filter (beamformer weights)
Filter weights returned from :func:`make_dics`. The DICS filters must
have been computed for a single frequency only.
return_generator : bool
Return a generator object instead of a list. This allows iterating
over the stcs without having to keep them all in memory.
%(verbose)s
Returns
-------
    stc : list | generator of (SourceEstimate | VolSourceEstimate)
The source estimates for all epochs.
See Also
--------
apply_dics
apply_dics_csd
"""
_check_reference(epochs)
if len(filters['weights']) > 1:
raise ValueError(
'This function only works on DICS beamformer weights that have '
'been computed for a single frequency. When calling make_dics(), '
'make sure to use a CSD object with only a single frequency (or '
'frequency-bin) defined.'
)
info = epochs.info
tmin = epochs.times[0]
sel = _check_channels_spatial_filter(epochs.ch_names, filters)
data = epochs.get_data()[:, sel, :]
stcs = _apply_dics(data=data, filters=filters, info=info, tmin=tmin)
if not return_generator:
stcs = list(stcs)
return stcs
@verbose
def apply_dics_csd(csd, filters, verbose=None):
"""Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights.
Apply a previously computed DICS beamformer to a cross-spectral density
(CSD) object to estimate source power in time and frequency windows
specified in the CSD object [1]_.
Parameters
----------
csd : instance of CrossSpectralDensity
The data cross-spectral density (CSD) matrices. A source estimate is
performed for each frequency or frequency-bin defined in the CSD
object.
filters : instance of Beamformer
DICS spatial filter (beamformer weights)
Filter weights returned from :func:`make_dics`.
%(verbose)s
Returns
-------
stc : SourceEstimate
Source power with frequency instead of time.
frequencies : list of float
The frequencies for which the source power has been computed. If the
data CSD object defines frequency-bins instead of exact frequencies,
the mean of each bin is returned.
References
----------
.. [1] Gross et al. Dynamic imaging of coherent sources: Studying neural
interactions in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
""" # noqa: E501
ch_names = filters['ch_names']
vertices = filters['vertices']
n_orient = filters['n_orient']
subject = filters['subject']
n_sources = np.sum([len(v) for v in vertices])
# If CSD is summed over multiple frequencies, take the average frequency
frequencies = [np.mean(dfreq) for dfreq in csd.frequencies]
n_freqs = len(frequencies)
source_power = np.zeros((n_sources, len(csd.frequencies)))
# Ensure the CSD is in the same order as the weights
csd_picks = [csd.ch_names.index(ch) for ch in ch_names]
logger.info('Computing DICS source power...')
for i, freq in enumerate(frequencies):
if n_freqs > 1:
logger.info(' applying DICS spatial filter at %sHz (%d/%d)' %
(freq, i + 1, n_freqs))
Cm = csd.get_data(index=i)
Cm = Cm[csd_picks, :][:, csd_picks]
W = filters['weights'][i]
source_power[:, i] = _compute_power(Cm, W, n_orient)
logger.info('[done]')
# compatibility with 0.16, add src_type as None if not present:
filters, warn_text = _check_src_type(filters)
return (_make_stc(source_power, vertices=vertices,
src_type=filters['src_type'], tmin=0., tstep=1.,
subject=subject, warn_text=warn_text),
frequencies)
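# Editor's note: hedged end-to-end sketch of the workflow this function is
# designed for (CSD -> make_dics -> apply_dics_csd). ``epochs`` and ``fwd``
# are assumed inputs; csd_morlet comes from mne.time_frequency.
def _example_dics_source_power(epochs, fwd):
    from mne.time_frequency import csd_morlet
    csd = csd_morlet(epochs, frequencies=[10., 12.], tmin=0., tmax=0.5)
    filters = make_dics(epochs.info, fwd, csd, reg=0.05)
    power, freqs = apply_dics_csd(csd, filters)
    return power, freqs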
@verbose
def tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths,
subtract_evoked=False, mode='fourier', freq_bins=None,
frequencies=None, n_ffts=None, mt_bandwidths=None,
mt_adaptive=False, mt_low_bias=True, cwt_n_cycles=7, decim=1,
reg=0.05, label=None, pick_ori=None, rank=None, inversion='single',
weight_norm=None, normalize_fwd=True, real_filter=False,
reduce_rank=False, verbose=None):
"""5D time-frequency beamforming based on DICS.
Calculate source power in time-frequency windows using a spatial filter
based on the Dynamic Imaging of Coherent Sources (DICS) beamforming
approach [1]_. For each time window and frequency bin combination,
cross-spectral density (CSD) is computed and used to create a DICS
beamformer spatial filter.
Parameters
----------
epochs : Epochs
Single trial epochs.
forward : dict
Forward operator.
noise_csds : list of instances of CrossSpectralDensity | None
Noise cross-spectral density for each frequency bin. If these are
specified, the DICS filters will be applied to both the signal and
noise CSDs. The source power estimates for each frequency bin will be
scaled by the estimated noise power (signal / noise).
Specifying ``None`` will disable performing noise normalization.
tmin : float
Minimum time instant to consider.
tmax : float
Maximum time instant to consider.
tstep : float
Spacing between consecutive time windows, should be smaller than or
equal to the shortest time window length.
win_lengths : list of float
Time window lengths in seconds. One time window length should be
provided for each frequency bin.
subtract_evoked : bool
If True, subtract the averaged evoked response prior to computing the
tf source grid. Defaults to False.
mode : 'fourier' | 'multitaper' | 'cwt_morlet'
Spectrum estimation mode. Defaults to 'fourier'.
freq_bins : list of tuple of float
Start and end point of frequency bins of interest.
Only used in 'multitaper' or 'fourier' mode. For 'cwt_morlet' mode, use
the ``frequencies`` parameter instead.
frequencies : list of float | list of list of float
The frequencies to compute the source power for. If you want to compute
the average power for multiple frequency bins, specify a list of
lists: each list containing the frequencies for the corresponding bin.
Only used in 'cwt_morlet' mode. In other modes, use the ``freq_bins``
parameter instead.
n_ffts : list | None
Length of the FFT for each frequency bin. If ``None`` (the default),
the exact number of samples between ``tmin`` and ``tmax`` will be used.
Only used in 'multitaper' or 'fourier' mode.
mt_bandwidths : list of float
The bandwidths of the multitaper windowing function in Hz. Only used in
'multitaper' mode. One value should be provided for each frequency bin.
Defaults to None.
mt_adaptive : bool
Use adaptive weights to combine the tapered spectra into CSD. Only used
in 'multitaper' mode. Defaults to False.
mt_low_bias : bool
Only use tapers with more than 90%% spectral concentration within
bandwidth. Only used in 'multitaper' mode. Defaults to True.
cwt_n_cycles : float | list of float | None
Number of cycles to use when constructing Morlet wavelets. Fixed number
or one per frequency. Defaults to 7.
Only used in 'cwt_morlet' mode.
decim : int | slice
To reduce memory usage, decimation factor during time-frequency
decomposition. Defaults to 1 (no decimation).
Only used in 'cwt_morlet' mode.
If `int`, uses tfr[..., ::decim].
If `slice`, uses tfr[..., decim].
reg : float
Regularization to use for the DICS beamformer computation.
Defaults to 0.05.
label : Label | None
Restricts the solution to a given label. Defaults to None.
pick_ori : None | 'normal' | 'max-power'
The source orientation to estimate source power for:
``None`` :
orientations are pooled. (Default)
'normal' :
filters are computed for the orientation tangential to the
cortical surface
'max-power' :
filters are computed for the orientation that maximizes
spectral power.
Defaults to ``None``.
rank : None | int | 'full'
This controls the effective rank of the covariance matrix when
computing the inverse. The rank can be set explicitly by specifying an
integer value. If ``None``, the rank will be automatically estimated.
Since applying regularization will always make the covariance matrix
full rank, the rank is estimated before regularization in this case. If
'full', the rank will be estimated after regularization and hence
will mean using the full rank, unless ``reg=0`` is used.
The default is None.
.. versionadded:: 0.17
inversion : 'single' | 'matrix'
This determines how the beamformer deals with source spaces in "free"
orientation. Such source spaces define three orthogonal dipoles at each
source point. When ``inversion='single'``, each dipole is considered
as an individual source and the corresponding spatial filter is
computed for each dipole separately. When ``inversion='matrix'``, all
three dipoles at a source vertex are considered as a group and the
spatial filters are computed jointly using a matrix inversion. While
``inversion='single'`` is more stable, ``inversion='matrix'`` is more
precise. See Notes of :func:`make_dics`. Defaults to 'single'.
weight_norm : None | 'unit-noise-gain'
How to normalize the beamformer weights. None means no normalization is
performed. If 'unit-noise-gain', the unit-noise gain minimum variance
beamformer will be computed (Borgiotti-Kaplan beamformer) [2]_.
Defaults to ``None``.
normalize_fwd : bool
Whether to normalize the forward solution. Defaults to ``True``. Note
that this normalization is not required when weight normalization
(``weight_norm``) is used.
real_filter : bool
If ``True``, take only the real part of the cross-spectral-density
matrices to compute real filters. Defaults to ``False``.
reduce_rank : bool
If ``True``, the rank of the forward operator will be reduced by 1 for
each spatial location, prior to inversion. This may be necessary when
you use a single sphere model for MEG and ``mode='vertex'``.
Defaults to ``False``.
%(verbose)s
Returns
-------
stcs : list of SourceEstimate | VolSourceEstimate
Source power at each time window. One SourceEstimate object is returned
for each frequency bin.
Notes
-----
Dalal et al. [1]_ used a synthetic aperture magnetometry beamformer (SAM)
in each time-frequency window instead of DICS.
An alternative to using noise CSDs is to normalize the forward solution
(``normalize_fwd``) or the beamformer weights (``weight_norm``). In
this case, ``noise_csds`` may be set to ``None``.
References
----------
.. [1] Dalal et al. Five-dimensional neuroimaging: Localization of the
time-frequency dynamics of cortical activity.
NeuroImage (2008) vol. 40 (4) pp. 1686-1700
.. [2] Sekihara & Nagarajan. Adaptive spatial filters for electromagnetic
brain imaging (2008) Springer Science & Business Media
"""
_check_reference(epochs)
rank = _check_rank(rank)
if mode == 'cwt_morlet' and frequencies is None:
raise ValueError('In "cwt_morlet" mode, the "frequencies" parameter '
'should be used.')
elif mode != 'cwt_morlet' and freq_bins is None:
raise ValueError('In "%s" mode, the "freq_bins" parameter should be '
'used.' % mode)
if frequencies is not None:
# Make sure frequencies are always in the form of a list of lists
frequencies = [np.atleast_1d(f) for f in frequencies]
n_freq_bins = len(frequencies)
else:
n_freq_bins = len(freq_bins)
if len(win_lengths) != n_freq_bins:
raise ValueError('One time window length expected per frequency bin')
if any(win_length < tstep for win_length in win_lengths):
raise ValueError('Time step should not be larger than any of the '
'window lengths')
if noise_csds is not None and len(noise_csds) != n_freq_bins:
raise ValueError('One noise CSD object expected per frequency bin')
if n_ffts is not None and len(n_ffts) != n_freq_bins:
raise ValueError('When specifying number of FFT samples, one value '
'must be provided per frequency bin')
if mt_bandwidths is not None and len(mt_bandwidths) != n_freq_bins:
raise ValueError('When using multitaper mode and specifying '
'multitaper transform bandwidth, one value must be '
'provided per frequency bin')
# Multiplying by 1e3 to avoid numerical issues, e.g. 0.3 // 0.05 == 5
n_time_steps = int(((tmax - tmin) * 1e3) // (tstep * 1e3))
# Subtract evoked response
if subtract_evoked:
epochs = epochs.copy().subtract_evoked()
sol_final = []
# Compute source power for each frequency bin
for i_freq in range(n_freq_bins):
win_length = win_lengths[i_freq]
n_overlap = int((win_length * 1e3) // (tstep * 1e3))
# Scale noise CSD to allow data and noise CSDs to have different length
if noise_csds is not None:
noise_csd = noise_csds[i_freq].copy()
noise_csd._data /= noise_csd.n_fft
if mode == 'cwt_morlet':
freq_bin = frequencies[i_freq]
fmin = np.min(freq_bin)
fmax = np.max(freq_bin)
else:
fmin, fmax = freq_bins[i_freq]
if n_ffts is None:
n_fft = None
else:
n_fft = n_ffts[i_freq]
if mt_bandwidths is None:
mt_bandwidth = None
else:
mt_bandwidth = mt_bandwidths[i_freq]
sol_single = []
sol_overlap = []
for i_time in range(n_time_steps):
win_tmin = tmin + i_time * tstep
win_tmax = win_tmin + win_length
# If in the last step the last time point was not covered in
# previous steps and will not be covered now, a solution needs to
# be calculated for an additional time window
if (i_time == n_time_steps - 1 and
win_tmax - tstep < tmax and
win_tmax >= tmax + (epochs.times[-1] - epochs.times[-2])):
warn('Adding a time window to cover last time points')
win_tmin = tmax - win_length
win_tmax = tmax
if win_tmax < tmax + (epochs.times[-1] - epochs.times[-2]):
# Counteracts unsafe floating point arithmetic ensuring all
# relevant samples will be taken into account when selecting
# data in time windows
logger.info(
'Computing time-frequency DICS beamformer for time '
'window %d to %d ms, in frequency range %d to %d Hz' %
(win_tmin * 1e3, win_tmax * 1e3, fmin, fmax)
)
# Calculating data CSD in current time window
if mode == 'fourier':
csd = csd_fourier(
epochs, fmin=fmin, fmax=fmax, tmin=win_tmin,
tmax=win_tmax, n_fft=n_fft, verbose=False)
elif mode == 'multitaper':
csd = csd_multitaper(
epochs, fmin=fmin, fmax=fmax, tmin=win_tmin,
tmax=win_tmax, n_fft=n_fft, bandwidth=mt_bandwidth,
low_bias=mt_low_bias, verbose=False)
elif mode == 'cwt_morlet':
csd = csd_morlet(
epochs, frequencies=freq_bin, tmin=win_tmin,
tmax=win_tmax, n_cycles=cwt_n_cycles, decim=decim,
verbose=False)
else:
raise ValueError("Invalid mode, choose 'fourier', "
                 "'multitaper' or 'cwt_morlet'")
csd = csd.sum()
# Scale data CSD to allow data and noise CSDs to have different
# length
csd._data /= csd.n_fft
filters = make_dics(epochs.info, forward, csd, reg=reg,
label=label, pick_ori=pick_ori,
rank=rank, inversion=inversion,
weight_norm=weight_norm,
normalize_fwd=normalize_fwd,
reduce_rank=reduce_rank,
real_filter=real_filter, verbose=False)
stc, _ = apply_dics_csd(csd, filters, verbose=False)
if noise_csds is not None:
# Scale signal power by noise power
noise_stc, _ = apply_dics_csd(noise_csd, filters,
verbose=False)
stc /= noise_stc
sol_single.append(stc.data[:, 0])
# Average over all time windows that contain the current time
# point, which is the current time window along with
# n_overlap - 1 previous ones
if i_time - n_overlap < 0:
curr_sol = np.mean(sol_single[0:i_time + 1], axis=0)
else:
curr_sol = np.mean(sol_single[i_time - n_overlap + 1:
i_time + 1], axis=0)
# The final result for the current time point in the current
# frequency bin
sol_overlap.append(curr_sol)
# Gathering solutions for all time points for current frequency bin
sol_final.append(sol_overlap)
sol_final = np.array(sol_final)
# Creating stc objects containing all time points for each frequency bin
stcs = []
# compatibility with 0.16, add src_type as None if not present:
filters, warn_text = _check_src_type(filters)
for i_freq in range(n_freq_bins):
stc = _make_stc(sol_final[i_freq, :, :].T, vertices=stc.vertices,
src_type=filters['src_type'], tmin=tmin, tstep=tstep,
subject=stc.subject, warn_text=warn_text)
stcs.append(stc)
return stcs
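# Editor's note: illustrative call of tf_dics() (values hypothetical). Two
# frequency bins, one window length per bin, a tstep no larger than the
# shortest window, and no noise normalization (noise_csds=None).
def _example_tf_dics(epochs, fwd):
    return tf_dics(epochs, fwd, noise_csds=None, tmin=-0.1, tmax=0.5,
                   tstep=0.05, win_lengths=[0.2, 0.1], mode='multitaper',
                   freq_bins=[(8., 12.), (18., 30.)], reg=0.05)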
|
adykstra/mne-python
|
mne/beamformer/_dics.py
|
Python
|
bsd-3-clause
| 34,894
|
[
"NEURON"
] |
0820cf4b7b66b18511edca7dc682400cf764090a61c08778819b16bfcccd7cf5
|
# Copyright (C) 2010 CAMd
# Copyright (C) 2010 Argonne National Laboratory
# Please see the accompanying LICENSE file for further information.
"""Module for high-level BLACS interface.
Usage
=====
A BLACS grid is a logical grid of processors. To use BLACS, first
create a BLACS grid. If comm contains 8 or more ranks, this example
will work::
from gpaw.mpi import world
from gpaw.blacs import BlacsGrid
grid = BlacsGrid(world, 4, 2)
Use the processor grid to create various descriptors for distributed
arrays::
block_desc = grid.new_descriptor(500, 500, 64, 64)
local_desc = grid.new_descriptor(500, 500, 500, 500)
The first descriptor describes 500 by 500 arrays distributed amongst
the 8 CPUs of the BLACS grid in blocks of 64 by 64 elements (which is
a sensible block size). That means each CPU has many blocks located
all over the array::
print world.rank, block_desc.shape, block_desc.gshape
Here block_desc.shape is the local array shape while gshape is the
global shape. The local array shape varies a bit on each CPU as the
block distribution may be slightly uneven.
The second descriptor, local_desc, has a block size equal to the
global size of the array, and will therefore only have one block.
This block will then reside on the first CPU -- local_desc therefore
represents non-distributed arrays. Let us instantiate some arrays::
H_MM = local_desc.empty()
if world.rank == 0:
assert H_MM.shape == (500, 500)
H_MM[:, :] = calculate_hamiltonian_or_something()
else:
assert H_MM.shape[0] == 0 or H_MM.shape[1] == 0
H_mm = block_desc.empty()
print H_mm.shape # many elements on all CPUs
We can then redistribute the local H_MM into H_mm::
from gpaw.blacs import Redistributor
redistributor = Redistributor(world, local_desc, block_desc)
redistributor.redistribute(H_MM, H_mm)
Now we can run parallel linear algebra on H_mm. This will diagonalize
H_mm, place the eigenvectors in C_mm and the eigenvalues globally in
eps_M::
eps_M = np.empty(500)
C_mm = block_desc.empty()
block_desc.diagonalize_ex(H_mm, C_mm, eps_M)
We can redistribute C_mm back to the master process if we want::
C_MM = local_desc.empty()
redistributor2 = Redistributor(world, block_desc, local_desc)
redistributor2.redistribute(C_mm, C_MM)
If somebody wants to do all this more easily, they will probably write
a function for that.
List of interesting classes
===========================
* BlacsGrid
* BlacsDescriptor
* Redistributor
The other classes in this module are coded specifically for GPAW and
are inconvenient to use otherwise.
The module gpaw.utilities.blacs contains several functions like gemm,
gemv and r2k. These functions may or may not have appropriate
docstrings, and may use Fortran-like variable naming. Also, either
this module or gpaw.utilities.blacs will be renamed at some point.
"""
import numpy as np
from gpaw.mpi import SerialCommunicator, serial_comm
from gpaw.matrix_descriptor import MatrixDescriptor
from gpaw.utilities.scalapack import scalapack_inverse_cholesky, \
scalapack_diagonalize_ex, scalapack_general_diagonalize_ex, \
scalapack_diagonalize_dc, scalapack_general_diagonalize_dc, \
scalapack_diagonalize_mr3, scalapack_general_diagonalize_mr3
import _gpaw
INACTIVE = -1
BLOCK_CYCLIC_2D = 1
class BlacsGrid:
"""Class representing a 2D grid of processors sharing a Blacs context.
A BLACS grid defines a logical M by N ordering of a collection of
CPUs. A BLACS grid can be used to create BLACS descriptors. On
an nprow by npcol BLACS grid, a matrix is distributed amongst the
nprow by npcol CPUs along rows and columns, respectively, while the matrix
shape and blocking properties are determined by the descriptors.
Use the method new_descriptor() to create any number of BLACS
descriptors sharing the same CPU layout.
Most matrix operations require the involved matrices to all be on
the same BlacsGrid. Use a Redistributor to redistribute matrices
from one BLACS grid to another if necessary.
Parameters::
* comm: MPI communicator for CPUs of the BLACS grid or None. A BLACS
grid may use all or some of the CPUs of the communicator.
* nprow: Number of CPU rows.
* npcol: Number of CPU columns.
* order: 'R' or 'C', meaning rows or columns. I'm not sure what this
does, it probably interchanges the meaning of rows and columns. XXX
Complicated stuff
-----------------
It may be useful to know that a BLACS grid is said to be active
and will evaluate to True on any process where comm is not None
*and* comm.rank < nprow * npcol. Otherwise it is considered
inactive and evaluates to False. Ranks where a grid is inactive
never do anything at all.
BLACS identifies each grid by a unique ID number called the
context (frequently abbreviated ConTxt). Grids on inactive ranks
have context -1."""
def __init__(self, comm, nprow, npcol, order='R'):
assert nprow > 0
assert npcol > 0
assert len(order) == 1
assert order in 'CcRr'
# Setting a default value for the context here leads to fewer
# if statements below.
context = INACTIVE
# There are three cases to handle:
# 1. Comm is None: the grid is inactive (default).
# 2. Comm is a legitimate communicator
# 3. DryRun Communicator is now handled by subclass
if comm is not None: # MPI task is part of the communicator
if nprow * npcol > comm.size:
raise ValueError('Impossible: %dx%d Blacs grid with %d CPUs'
% (nprow, npcol, comm.size))
context = _gpaw.new_blacs_context(comm.get_c_object(),
npcol, nprow, order)
assert (context != INACTIVE) == (comm.rank < nprow * npcol)
self.mycol, self.myrow = _gpaw.get_blacs_gridinfo(context,
nprow,
npcol)
self.context = context
self.comm = comm
self.nprow = nprow
self.npcol = npcol
self.ncpus = nprow * npcol
self.order = order
def new_descriptor(self, M, N, mb, nb, rsrc=0, csrc=0):
"""Create a new descriptor from this BLACS grid.
See documentation for BlacsDescriptor.__init__."""
return BlacsDescriptor(self, M, N, mb, nb, rsrc, csrc)
def is_active(self):
"""Whether context is active on this rank."""
return self.context != INACTIVE
def __nonzero__(self):
return self.is_active()
def __str__(self):
classname = self.__class__.__name__
template = '%s[comm:size=%d,rank=%d; context=%d; %dx%d]'
string = template % (classname, self.comm.size, self.comm.rank,
self.context, self.nprow, self.npcol)
return string
def __del__(self):
if self.is_active():
_gpaw.blacs_destroy(self.context)
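# Editor's note: small illustrative helper, not part of the original module.
# ``comm`` is assumed to be an MPI communicator with at least 4 ranks; it
# merely demonstrates the active/inactive behaviour described above.
def _example_grid(comm):
    grid = BlacsGrid(comm, 2, 2)            # logical 2x2 CPU grid
    if grid:                                # False on ranks outside the grid
        desc = grid.new_descriptor(100, 100, 32, 32)
        return desc.shape                   # local shape, differs per rank
    return None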
class DryRunBlacsGrid(BlacsGrid):
def __init__(self, comm, nprow, npcol, order='R'):
assert isinstance(comm, SerialCommunicator) #DryRunCommunicator is subclass
if nprow * npcol > comm.size:
raise ValueError('Impossible: %dx%d Blacs grid with %d CPUs'
% (nprow, npcol, comm.size))
self.context = INACTIVE
self.comm = comm
self.nprow = nprow
self.npcol = npcol
self.ncpus = nprow * npcol
self.mycol, self.myrow = INACTIVE, INACTIVE
self.order = order
#XXX A MAJOR HACK HERE:
from gpaw import dry_run
if dry_run:
BlacsGrid = DryRunBlacsGrid
class BlacsDescriptor(MatrixDescriptor):
"""Class representing a 2D matrix distribution on a blacs grid.
A BlacsDescriptor represents a particular shape and distribution
of matrices. A BlacsDescriptor has a global matrix shape and a
rank-dependent local matrix shape. The local shape is not
necessarily equal on all ranks.
A numpy array is said to be compatible with a BlacsDescriptor if,
on all ranks, the shape of the numpy array is equal to the local
shape of the BlacsDescriptor. Compatible arrays can be created
conveniently with the zeros() and empty() methods.
An array with a global shape of M by N is distributed such that
each process gets a number of distinct blocks of size mb by nb.
The blocks on one process generally reside in very different areas
of the matrix to improve load balance.
The following chart describes how different ranks (there are 4
ranks in this example, 0 through 3) divide the matrix into blocks.
This is called 2D block cyclic distribution::
+--+--+--+--+..+--+
| 0| 1| 0| 1|..| 1|
+--+--+--+--+..+--+
| 2| 3| 2| 3|..| 3|
+--+--+--+--+..+--+
| 0| 1| 0| 1|..| 1|
+--+--+--+--+..+--+
| 2| 3| 2| 3|..| 3|
+--+--+--+--+..+--+
...................
...................
+--+--+--+--+..+--+
| 2| 3| 2| 3|..| 3|
+--+--+--+--+..+--+
Also refer to:
http://acts.nersc.gov/scalapack/hands-on/datadist.html
Parameters:
* blacsgrid: the BLACS grid of processors to distribute matrices.
* M: global row count
* N: global column count
* mb: number of rows per block
* nb: number of columns per block
* rsrc: rank on which the first row is stored
* csrc: rank on which the first column is stored
Complicated stuff
-----------------
If there is trouble with matrix shapes, the below caveats are
probably the reason.
Depending on layout, a descriptor may have a local shape of zero
by N or something similar. If the row blocksize is 7, the global
row count is 10, and the blacs grid contains 3 row processes: The
first process will have 7 rows, the next will have 3, and the last
will have 0. The shapes in this case must still be correctly
given to BLACS functions, which can be confusing.
A blacs descriptor must also give the correct local leading
dimension (lld), which is the local array size along the
memory-contiguous direction in the matrix, and thus equal to the
local column number, *except* when local shape is zero, but the
implementation probably works.
"""
def __init__(self, blacsgrid, M, N, mb, nb, rsrc, csrc):
assert M > 0
assert N > 0
assert 1 <= mb
assert 1 <= nb
if mb > M:
mb = M
if nb > N:
nb = N
assert 0 <= rsrc < blacsgrid.nprow
assert 0 <= csrc < blacsgrid.npcol
self.blacsgrid = blacsgrid
self.M = M # global size 1
self.N = N # global size 2
self.mb = mb # block cyclic distr dim 1
self.nb = nb # and 2. How many rows or columns are on this processor
# more info:
# http://www.netlib.org/scalapack/slug/node75.html
self.rsrc = rsrc
self.csrc = csrc
if blacsgrid.is_active():
locN, locM = _gpaw.get_blacs_local_shape(self.blacsgrid.context,
self.N, self.M,
self.nb, self.mb,
self.csrc, self.rsrc)
self.lld = max(1, locN) # max 1 is nonsensical, but appears
# to be required by PBLAS
else:
# ScaLAPACK has no requirements as to what these values on an
# inactive blacsgrid should be. This seemed reasonable to me
# at the time.
locN, locM = 0, 0
self.lld = 0
# locM, locN is not allowed to be negative. This will cause the
# redistributor to fail. This could happen on active blacsgrid
# which does not contain any piece of the distribute matrix.
# This is why there is a final check on the value of locM, locN.
MatrixDescriptor.__init__(self, max(0, locM), max(0, locN))
# This is the definition of inactive descriptor; can occur
# on an active or inactive blacs grid.
self.active = locM > 0 and locN > 0
self.bshape = (self.mb, self.nb) # Shape of one block
self.gshape = (M, N) # Global shape of array
def asarray(self):
"""Return a nine-element array representing this descriptor.
In the C/Fortran code, a BLACS descriptor is represented by a
special array of arcane nature. The value of asarray() must
generally be passed to BLACS functions in the C code."""
arr = np.array([BLOCK_CYCLIC_2D, self.blacsgrid.context,
self.N, self.M, self.nb, self.mb, self.csrc, self.rsrc,
self.lld], np.int32)
return arr
def __repr__(self):
classname = self.__class__.__name__
template = '%s[context=%d, glob %s, block %s, lld %d, loc %s]'
string = template % (classname, self.blacsgrid.context,
self.gshape,
self.bshape, self.lld, self.shape)
return string
def diagonalize_dc(self, H_nn, C_nn, eps_N, UL='L'):
"""See documentation in gpaw/utilities/blacs.py."""
scalapack_diagonalize_dc(self, H_nn, C_nn, eps_N, UL)
def diagonalize_ex(self, H_nn, C_nn, eps_N, UL='L', iu=None):
"""See documentation in gpaw/utilities/blacs.py."""
scalapack_diagonalize_ex(self, H_nn, C_nn, eps_N, UL, iu=iu)
def diagonalize_mr3(self, H_nn, C_nn, eps_N, UL='L', iu=None):
"""See documentation in gpaw/utilities/blacs.py."""
scalapack_diagonalize_mr3(self, H_nn, C_nn, eps_N, UL, iu=iu)
def general_diagonalize_dc(self, H_mm, S_mm, C_mm, eps_M,
UL='L'):
"""See documentation in gpaw/utilities/blacs.py."""
scalapack_general_diagonalize_dc(self, H_mm, S_mm, C_mm, eps_M,
UL)
def general_diagonalize_ex(self, H_mm, S_mm, C_mm, eps_M,
UL='L', iu=None):
"""See documentation in gpaw/utilities/blacs.py."""
scalapack_general_diagonalize_ex(self, H_mm, S_mm, C_mm, eps_M,
UL, iu=iu)
def general_diagonalize_mr3(self, H_mm, S_mm, C_mm, eps_M,
UL='L', iu=None):
"""See documentation in gpaw/utilities/blacs.py."""
scalapack_general_diagonalize_mr3(self, H_mm, S_mm, C_mm, eps_M,
UL, iu=iu)
def inverse_cholesky(self, S_nn, UL='L'):
"""See documentation in gpaw/utilities/blacs.py."""
scalapack_inverse_cholesky(self, S_nn, UL)
def my_blocks(self, array_mn):
"""Yield the local blocks and their global index limits.
Yields tuples of the form (Mstart, Mstop, Nstart, Nstop, block),
for each locally stored block of the array.
"""
if not self.check(array_mn):
raise ValueError('Bad array shape (%s vs %s)' % (self,
array_mn.shape))
grid = self.blacsgrid
mb = self.mb
nb = self.nb
myrow = grid.myrow
mycol = grid.mycol
nprow = grid.nprow
npcol = grid.npcol
M, N = self.gshape
Mmyblocks = -(-self.shape[0] // mb)
Nmyblocks = -(-self.shape[1] // nb)
for Mblock in range(Mmyblocks):
for Nblock in range(Nmyblocks):
myMstart = Mblock * mb
myNstart = Nblock * nb
Mstart = myrow * mb + Mblock * mb * nprow
Nstart = mycol * nb + Nblock * nb * npcol
Mstop = min(Mstart + mb, M)
Nstop = min(Nstart + nb, N)
block = array_mn[myMstart:myMstart + mb,
myNstart:myNstart + nb]
yield Mstart, Mstop, Nstart, Nstop, block
def as_serial(self):
return self.blacsgrid.new_descriptor(self.M, self.N, self.M, self.N)
def redistribute(self, otherdesc, src_mn, dst_mn=None):
if self.blacsgrid != otherdesc.blacsgrid:
raise ValueError('Cannot redistribute to other BLACS grid. '
'Requires using Redistributor class explicitly')
if dst_mn is None:
dst_mn = otherdesc.empty(dtype=src_mn.dtype)
r = Redistributor(self.blacsgrid.comm, self, otherdesc)
r.redistribute(src_mn, dst_mn)
return dst_mn
def collect_on_master(self, src_mn, dst_mn=None):
desc = self.as_serial()
return self.redistribute(desc, src_mn, dst_mn)
def distribute_from_master(self, src_mn, dst_mn=None):
desc = self.as_serial()
return desc.redistribute(self, src_mn, dst_mn)
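# Editor's note: hedged sketch of the collect/distribute convenience methods
# defined above. ``desc`` is assumed to be a BlacsDescriptor on an active
# grid and ``a_mn`` a compatible distributed array.
def _example_roundtrip(desc, a_mn):
    a_MN = desc.collect_on_master(a_mn)       # whole matrix on the first rank
    return desc.distribute_from_master(a_MN)  # scatter it back out again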
class Redistributor:
"""Class for redistributing BLACS matrices on different contexts."""
def __init__(self, supercomm, srcdescriptor, dstdescriptor, uplo='G'):
"""Create redistributor.
Source and destination descriptors may reside on different
BLACS grids, but the descriptors should describe arrays with
the same number of elements.
The communicators of the BLACS grid of srcdescriptor as well
as that of dstdescriptor *must* both be subcommunicators of
supercomm.
Allowed values of UPLO are: G for general matrix, U for upper
triangular and L for lower triangular. The latter two are useful
for symmetric matrices."""
self.supercomm = supercomm
self.supercomm_bg = BlacsGrid(self.supercomm, self.supercomm.size, 1)
self.srcdescriptor = srcdescriptor
self.dstdescriptor = dstdescriptor
assert uplo in ['G', 'U', 'L']
self.uplo = uplo
def redistribute(self, src_mn, dst_mn,
subM=None, subN=None,
ia=0, ja=0, ib=0, jb=0):
"""Redistribute src_mn into dst_mn.
src_mn and dst_mn must be compatible with source and
destination descriptors of this redistributor.
If subM and subN are given, distribute only a subM by subN
submatrix.
If any ia, ja, ib and jb are given, they denote the global
index (i, j) of the origin of the submatrix inside the source
and destination (a, b) matrices."""
# self.supercomm must be a supercommunicator of the communicators
# corresponding to the context of srcmatrix as well as dstmatrix.
# We should verify this somehow.
dtype = src_mn.dtype
assert dtype == dst_mn.dtype
assert dtype == float or dtype == complex
# Check to make sure the submatrix of the source
# matrix will fit into the destination matrix
# plus standard BLACS matrix checks.
srcdescriptor = self.srcdescriptor
dstdescriptor = self.dstdescriptor
srcdescriptor.checkassert(src_mn)
dstdescriptor.checkassert(dst_mn)
if subM is None:
subM = srcdescriptor.gshape[0]
if subN is None:
subN = srcdescriptor.gshape[1]
assert srcdescriptor.gshape[0] >= subM
assert srcdescriptor.gshape[1] >= subN
assert dstdescriptor.gshape[0] >= subM
assert dstdescriptor.gshape[1] >= subN
# Switch to Fortran conventions
uplo = {'U': 'L', 'L': 'U', 'G': 'G'}[self.uplo]
_gpaw.scalapack_redist(srcdescriptor.asarray(),
dstdescriptor.asarray(),
src_mn, dst_mn,
subN, subM,
ja + 1, ia + 1, jb + 1, ib + 1, # 1-indexing
self.supercomm_bg.context, uplo)
def parallelprint(comm, obj):
import sys
for a in range(comm.size):
if a == comm.rank:
print 'rank=%d' % a
print obj
print
sys.stdout.flush()
comm.barrier()
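# Editor's note: illustrative sketch (not original code) of iterating over the
# locally stored blocks of a distributed array with my_blocks(). ``grid`` is
# assumed to be an active BlacsGrid; the dtype keyword follows the zeros()
# helper mentioned in the module docstring.
def _example_block_sums(grid):
    desc = grid.new_descriptor(500, 500, 64, 64)
    a_mn = desc.zeros(dtype=float)
    sums = []
    for Mstart, Mstop, Nstart, Nstop, block in desc.my_blocks(a_mn):
        # ``block`` is the local view of global rows Mstart:Mstop and
        # columns Nstart:Nstop
        sums.append(block.sum())
    return sums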
|
qsnake/gpaw
|
gpaw/blacs.py
|
Python
|
gpl-3.0
| 20,358
|
[
"GPAW"
] |
e1aca2a1fd4c46738df4fd6c557040a3f4d4537bdd2bd9fcd049dc07f023f8d5
|
#!/usr/bin/python
# http://mcsp.wartburg.edu/zelle/python/graphics.py
# https://mcsp.wartburg.edu/zelle/python/graphics/graphics/index.html
import math
from graphics import *
XSCALE = 2550
YSCALE = 1310
XCENTER = XSCALE / 2
YCENTER = YSCALE / 2
# https://en.wikipedia.org/wiki/Incircle_and_excircles_of_a_triangle#Trilinear_coordinates
# Incenter (Cartesian): ((a*x_a + b*x_b + c*x_c)/(a + b + c),
#                        (a*y_a + b*y_b + c*y_c)/(a + b + c))
# where a, b, c are the side lengths opposite vertices A, B, C.
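# Editor's note: hedged helper illustrating the incenter formula quoted above
# (a, b, c are the side lengths opposite vertices A, B, C). The drawing code
# below works with equal side lengths, so its simple average of the three
# centers gives the same point.
def incenter(ax, ay, bx, by, cx, cy):
    a = math.hypot(cx - bx, cy - by)   # length of side opposite A
    b = math.hypot(cx - ax, cy - ay)   # length of side opposite B
    c = math.hypot(bx - ax, by - ay)   # length of side opposite C
    s = a + b + c
    return ((a * ax + b * bx + c * cx) / s,
            (a * ay + b * by + c * cy) / s)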
def circles5(win, scale):
white1 = color_rgb(255, 255, 255)
black1 = color_rgb(0, 0, 0)
# win.setBackground("black")
win.setBackground(black1)
red1 = color_rgb(255, 0, 0)
green1 = color_rgb(0, 255, 0)
blue1 = color_rgb(0, 0, 255)
print "red1 = %s" % str(red1)
print "green1 = %s" % str(green1)
print "blue1 = %s" % str(blue1)
rb_magenta1 = color_rgb(255, 0, 255)
gb_cyan1 = color_rgb(0, 255, 255)
rg_yellow1 = color_rgb(255, 255, 0)
rm_rose1 = color_rgb(255, 0, 127)
bm_violet1 = color_rgb(127, 0, 255)
bc_azure1 = color_rgb(0, 127, 255)
gc_green1 = color_rgb(0, 255, 127)
gy_chart1 = color_rgb(127, 255, 0)
ry_orange1 = color_rgb(255, 127, 0)
# red magenta blue cyan green yellow
# rose violet azure spring-green chartreuse orange
radius1 = 10 * scale
diameter1 = radius1 * 2
npoints = 6
inc1 = (math.pi * 2) / npoints
theta1 = 0
xs = []
ys = []
xa = XCENTER * diameter1
ya = YCENTER * diameter1
for i1 in range(npoints):
x1 = (math.sin(theta1) * diameter1) + XCENTER
xs.append(x1)
y1 = (math.cos(theta1) * diameter1) + YCENTER
ys.append(y1)
theta1 += inc1
# draw the "bottom" layer first
## rm_rose1 = color_rgb(255, 0, 127)
# bm_violet1 = color_rgb(127, 0, 255)
## bc_azure1 = color_rgb(0, 127, 255)
# gc_green1 = color_rgb(0, 255, 127)
## gy_chart1 = color_rgb(127, 255, 0)
# ry_orange1 = color_rgb(255, 127, 0)
# # red magenta blue cyan green yellow
# # rose violet azure spring-green chartreuse orange
xb4 = xs[5] * diameter1
yb4 = ys[5] * diameter1
xc4 = xs[0] * diameter1
yc4 = ys[0] * diameter1
x4 = (xa + xb4 + xc4) / (3 * diameter1)
y4 = (ya + yb4 + yc4) / (3 * diameter1)
c4 = Circle(Point(x4, y4), 10 * scale)
# c4.setOutline(bm_violet1)
# c4.setOutline(gc_green1)
c4.setOutline(ry_orange1)
c4.setWidth(4)
c4.draw(win)
xb5 = xs[1] * diameter1
yb5 = ys[1] * diameter1
xc5 = xs[2] * diameter1
yc5 = ys[2] * diameter1
x5 = (xa + xb5 + xc5) / (3 * diameter1)
y5 = (ya + yb5 + yc5) / (3 * diameter1)
c5 = Circle(Point(x5, y5), 10 * scale)
c5.setOutline(bm_violet1)
c5.setWidth(4)
c5.draw(win)
xb6 = xs[3] * diameter1
yb6 = ys[3] * diameter1
xc6 = xs[4] * diameter1
yc6 = ys[4] * diameter1
x6 = (xa + xb6 + xc6) / (3 * diameter1)
y6 = (ya + yb6 + yc6) / (3 * diameter1)
c6 = Circle(Point(x6, y6), 10 * scale)
c6.setOutline(gc_green1)
c6.setWidth(4)
c6.draw(win)
# https://en.wikipedia.org/wiki/Color_wheel
# https://en.wikipedia.org/wiki/File:Color_star-en_(tertiary_names).svg
# red purple blue green yellow orange
# magenta, violet, teal, chartreuse, amber, vermilion
# c0.setOutline("red") #FF0000
# c0.setOutline("purple") #A020F0
# c0.setOutline("blue") #0000FF
# c0.setOutline("green") #00FF00
# c0.setOutline("yellow") #FFFF00
# c0.setOutline("orange") #FFA500
# c0.setOutline("magenta") #FF00FF
# c0.setOutline("violet")
# # c0.setOutline("teal") # unknown #008080 https://en.wikipedia.org/wiki/X11_color_names
# c0.setOutline("chartreuse")
# # c0.setOutline("amber") # unknown
# # c0.setOutline("vermilion") # unknown
# https://en.wikipedia.org/wiki/File:RBG_color_wheel.svg
# red magenta blue cyan green yellow
# rose violet azure spring-green chartreuse orange
# c0.setOutline("red") #FF0000
# c0.setOutline("magenta") #FF00FF
# c0.setOutline("blue") #0000FF
# c0.setOutline("cyan") #00FFFF
# c0.setOutline("green") #00FF00
# c0.setOutline("yellow") #FFFF00
# # c0.setOutline("rose") # unknown
# c0.setOutline("pink") #FFC0CB
# c0.setOutline("violet") #EE82EE
# c0.setOutline("azure") #F0FFFF
# c0.setOutline("spring green") #00FF7F
# c0.setOutline("chartreuse") #7FFF00
# c0.setOutline("orange") #FFA500
radius1 = 10 * scale
diameter1 = radius1 * 2
npoints = 6
inc1 = (math.pi * 2) / npoints
theta1 = 0
xs = []
ys = []
# color1 = ["red", "magenta", "blue", "cyan", "green", "yellow"]
color1 = [red1, rb_magenta1, blue1, gb_cyan1, green1, rg_yellow1]
for i1 in range(npoints):
x1 = (math.sin(theta1) * diameter1) + XCENTER
xs.append(x1)
y1 = (math.cos(theta1) * diameter1) + YCENTER
ys.append(y1)
c1 = Circle(Point(x1, y1), 10 * scale)
c1.setOutline(color1[i1])
c1.setWidth(4)
c1.draw(win)
theta1 += inc1
c0 = Circle(Point(XCENTER,YCENTER), 10 * scale)
c0.setWidth(4)
# c0.setOutline("white")
c0.setOutline(white1)
# c0.setWidth(10)
# c0.setOutline(rm_rose1)
# c0.setOutline(bm_violet1)
# c0.setOutline(bc_azure1)
# c0.setOutline(gc_green1)
# c0.setOutline(gy_chart1)
# c0.setOutline(ry_orange1)
c0.draw(win)
xa = XCENTER * diameter1
ya = YCENTER * diameter1
xb1 = xs[0] * diameter1
yb1 = ys[0] * diameter1
xc1 = xs[1] * diameter1
yc1 = ys[1] * diameter1
x1 = (xa + xb1 + xc1) / (3 * diameter1)
y1 = (ya + yb1 + yc1) / (3 * diameter1)
c1 = Circle(Point(x1, y1), 10 * scale)
# c1.setOutline("pink")
c1.setOutline(rm_rose1)
c1.setWidth(4)
c1.draw(win)
xb2 = xs[2] * diameter1
yb2 = ys[2] * diameter1
xc2 = xs[3] * diameter1
yc2 = ys[3] * diameter1
x2 = (xa + xb2 + xc2) / (3 * diameter1)
y2 = (ya + yb2 + yc2) / (3 * diameter1)
c2 = Circle(Point(x2, y2), 10 * scale)
# c2.setOutline("azure")
c2.setOutline(bc_azure1)
# c2.setWidth(10)
c2.setWidth(4)
c2.draw(win)
# red magenta blue cyan green yellow
# rose violet azure spring-green chartreuse orange
xb3 = xs[4] * diameter1
yb3 = ys[4] * diameter1
xc3 = xs[5] * diameter1
yc3 = ys[5] * diameter1
x3 = (xa + xb3 + xc3) / (3 * diameter1)
y3 = (ya + yb3 + yc3) / (3 * diameter1)
c3 = Circle(Point(x3, y3), 10 * scale)
# c3.setOutline(gc_green1)
c3.setOutline(gy_chart1)
c3.setWidth(4)
c3.draw(win)
def circles4(win, scale):
c0 = Circle(Point(XCENTER,YCENTER), 10 * scale)
c0.draw(win)
radius1 = 10 * scale
diameter1 = radius1 * 2
npoints = 6
inc1 = (math.pi * 2) / npoints
theta1 = 0
xs = []
ys = []
for i1 in range(npoints):
x1 = (math.sin(theta1) * diameter1) + XCENTER
xs.append(x1)
y1 = (math.cos(theta1) * diameter1) + YCENTER
ys.append(y1)
c1 = Circle(Point(x1, y1), 10 * scale)
c1.draw(win)
theta1 += inc1
xa = XCENTER * diameter1
ya = YCENTER * diameter1
xb1 = xs[0] * diameter1
yb1 = ys[0] * diameter1
xc1 = xs[1] * diameter1
yc1 = ys[1] * diameter1
x1 = (xa + xb1 + xc1) / (3 * diameter1)
y1 = (ya + yb1 + yc1) / (3 * diameter1)
c1 = Circle(Point(x1, y1), 10 * scale)
c1.draw(win)
xb2 = xs[2] * diameter1
yb2 = ys[2] * diameter1
xc2 = xs[3] * diameter1
yc2 = ys[3] * diameter1
x2 = (xa + xb2 + xc2) / (3 * diameter1)
y2 = (ya + yb2 + yc2) / (3 * diameter1)
c2 = Circle(Point(x2, y2), 10 * scale)
c2.draw(win)
xb3 = xs[4] * diameter1
yb3 = ys[4] * diameter1
xc3 = xs[5] * diameter1
yc3 = ys[5] * diameter1
x3 = (xa + xb3 + xc3) / (3 * diameter1)
y3 = (ya + yb3 + yc3) / (3 * diameter1)
c3 = Circle(Point(x3, y3), 10 * scale)
c3.draw(win)
xb4 = xs[5] * diameter1
yb4 = ys[5] * diameter1
xc4 = xs[0] * diameter1
yc4 = ys[0] * diameter1
x4 = (xa + xb4 + xc4) / (3 * diameter1)
y4 = (ya + yb4 + yc4) / (3 * diameter1)
c4 = Circle(Point(x4, y4), 10 * scale)
c4.draw(win)
xb5 = xs[1] * diameter1
yb5 = ys[1] * diameter1
xc5 = xs[2] * diameter1
yc5 = ys[2] * diameter1
x5 = (xa + xb5 + xc5) / (3 * diameter1)
y5 = (ya + yb5 + yc5) / (3 * diameter1)
c5 = Circle(Point(x5, y5), 10 * scale)
c5.draw(win)
xb6 = xs[3] * diameter1
yb6 = ys[3] * diameter1
xc6 = xs[4] * diameter1
yc6 = ys[4] * diameter1
x6 = (xa + xb6 + xc6) / (3 * diameter1)
y6 = (ya + yb6 + yc6) / (3 * diameter1)
c6 = Circle(Point(x6, y6), 10 * scale)
c6.draw(win)
def circles3(win, scale):
c0 = Circle(Point(XCENTER,YCENTER), 10 * scale)
c0.draw(win)
radius1 = 10 * scale
diameter1 = radius1 * 2
npoints = 6
inc1 = (math.pi * 2) / npoints
theta1 = 0
xs = []
ys = []
for i1 in range(npoints):
x1 = (math.sin(theta1) * diameter1) + XCENTER
xs.append(x1)
y1 = (math.cos(theta1) * diameter1) + YCENTER
ys.append(y1)
c1 = Circle(Point(x1, y1), 10 * scale)
c1.draw(win)
theta1 += inc1
xa = XCENTER * diameter1
ya = YCENTER * diameter1
xb1 = xs[0] * diameter1
yb1 = ys[0] * diameter1
xc1 = xs[1] * diameter1
yc1 = ys[1] * diameter1
x1 = (xa + xb1 + xc1) / (3 * diameter1)
y1 = (ya + yb1 + yc1) / (3 * diameter1)
c1 = Circle(Point(x1, y1), 10 * scale)
c1.draw(win)
xb2 = xs[2] * diameter1
yb2 = ys[2] * diameter1
xc2 = xs[3] * diameter1
yc2 = ys[3] * diameter1
x2 = (xa + xb2 + xc2) / (3 * diameter1)
y2 = (ya + yb2 + yc2) / (3 * diameter1)
c2 = Circle(Point(x2, y2), 10 * scale)
c2.draw(win)
xb3 = xs[4] * diameter1
yb3 = ys[4] * diameter1
xc3 = xs[5] * diameter1
yc3 = ys[5] * diameter1
x3 = (xa + xb3 + xc3) / (3 * diameter1)
y3 = (ya + yb3 + yc3) / (3 * diameter1)
c3 = Circle(Point(x3, y3), 10 * scale)
c3.draw(win)
def circles2(win, scale):
c0 = Circle(Point(XCENTER,YCENTER), 10 * scale)
c0.draw(win)
radius1 = 10 * scale
diameter1 = radius1 * 2
# c1 = Circle(Point(XCENTER + diameter1,YCENTER), 10 * scale)
# c1.draw(win)
# c2 is at 60 degrees, same diameter
npoints = 6
inc1 = (math.pi * 2) / npoints
# inc1 = (math.pi) / npoints
theta1 = 0
# x2 = (math.sin(theta1) * diameter1) + XCENTER
# y2 = (math.cos(theta1) * diameter1) + YCENTER
# c2 = Circle(Point(x2, y2), 10 * scale)
# c2.draw(win)
# theta1 += inc1
# x3 = (math.sin(theta1) * diameter1) + XCENTER
# y3 = (math.cos(theta1) * diameter1) + YCENTER
# c3 = Circle(Point(x3, y3), 10 * scale)
# c3.draw(win)
for i1 in range(npoints):
x1 = (math.sin(theta1) * diameter1) + XCENTER
y1 = (math.cos(theta1) * diameter1) + YCENTER
c1 = Circle(Point(x1, y1), 10 * scale)
c1.draw(win)
theta1 += inc1
#for i1 in range(npoints):
# x1 = (math.sin(theta1) * radius) + xoffset
# y1 = (math.cos(theta1) * radius) + yoffset
# hex1(win, x1, y1, scale)
# theta1 += inc1
def circles1(win, xoffset, yoffset, scale = 1.0):
sxoffset = xoffset * scale + XCENTER
syoffset = yoffset * scale + YCENTER
#p = Polygon(
# Point(-4 * scale + sxoffset, -7 * scale + syoffset),
# Point( 4 * scale + sxoffset, -7 * scale + syoffset),
# Point( 8 * scale + sxoffset, 0 * scale + syoffset),
# Point( 4 * scale + sxoffset, 7 * scale + syoffset),
# Point(-4 * scale + sxoffset, 7 * scale + syoffset),
# Point(-8 * scale + sxoffset, 0 * scale + syoffset))
#p.draw(win)
# c = Circle(Point(50 * SCALE,50 * SCALE), 10 * SCALE)
c = Circle(Point(XCENTER,YCENTER), 10 * scale)
c.draw(win)
c1 = Circle(Point(-4 * scale + sxoffset, -7 * scale + syoffset), 10 * scale)
c1.draw(win)
c2 = Circle(Point( 4 * scale + sxoffset, -7 * scale + syoffset), 10 * scale)
c2.draw(win)
c3 = Circle(Point( 8 * scale + sxoffset, 0 * scale + syoffset), 10 * scale)
c3.draw(win)
c4 = Circle(Point( 4 * scale + sxoffset, 7 * scale + syoffset), 10 * scale)
c4.draw(win)
c5 = Circle(Point(-4 * scale + sxoffset, 7 * scale + syoffset), 10 * scale)
c5.draw(win)
c6 = Circle(Point(-8 * scale + sxoffset, 0 * scale + syoffset), 10 * scale)
c6.draw(win)
def main():
radius = 500.0
# scale = 0.5
scale = 10.0
win = GraphWin("circle1", XSCALE, YSCALE)
win.setCoords(0,0, XSCALE , YSCALE)
# one side is 8 units long
# height of vertical rectangle is 14
# bulge to either side is 4
# 1 -> 1
# layer 0
# center
# circle1(win, 0, 0, scale, radius)
# circles1(win, 0, 0, scale)
# circles2(win, scale)
# circles3(win, scale)
# circles4(win, scale)
circles5(win, scale)
# p0 = Point(XCENTER, YCENTER)
# p0.setFill("red")
# p0.setOutline("red")
# p0.draw(win)
# p1 = Point(XCENTER + 12 * scale, YCENTER + 7 * scale)
# l1 = Line(p0, p1)
# l1.setFill("red")
# l1.draw(win)
# t = Text(Point(XCENTER,YCENTER), "0")
# t.draw(win)
win.getMouse()
win.close()
# https://math.stackexchange.com/questions/260096/find-the-coordinates-of-a-point-on-a-circle
# x = rsin(theta), y = rcos(theta)
def circle1(win, xoffset, yoffset, scale = 1.0, radius = 10.0):
hex1(win, xoffset, yoffset, scale)
# theta is degrees or radians?
npoints = 10
npoints = 1
npoints = 100
inc1 = (math.pi * 2) / npoints
theta1 = 0.0
for i1 in range(npoints):
x1 = (math.sin(theta1) * radius) + xoffset
y1 = (math.cos(theta1) * radius) + yoffset
hex1(win, x1, y1, scale)
theta1 += inc1
# math = <module 'math' from '/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/lib-dynload/math.so'>
# acos acos(x) Return the arc cosine (measured in radians) of x.
# acosh acosh(x) Return the inverse hyperbolic cosine of x.
# asin asin(x) Return the arc sine (measured in radians) of x.
# asinh asinh(x) Return the inverse hyperbolic sine of x.
# atan atan(x) Return the arc tangent (measured in radians) of x.
# atan2 atan2(y, x) Return the arc tangent (measured in radians) of y/x. Unlike atan(y/x), the signs of both x and y are considered.
# atanh atanh(x) Return the inverse hyperbolic tangent of x.
# ceil ceil(x) Return the ceiling of x as a float. This is the smallest integral value >= x.
# copysign copysign(x, y) Return x with the sign of y.
# cos cos(x) Return the cosine of x (measured in radians).
# cosh cosh(x) Return the hyperbolic cosine of x.
# degrees degrees(x) Convert angle x from radians to degrees.
# erf erf(x) Error function at x.
# erfc erfc(x) Complementary error function at x.
# exp exp(x) Return e raised to the power of x.
# expm1 expm1(x) Return exp(x)-1. This function avoids the loss of precision involved in the direct evaluation of exp(x)-1 for small x.
# fabs fabs(x) Return the absolute value of the float x.
# factorial factorial(x) -> Integral Find x!. Raise a ValueError if x is negative or non-integral.
# floor floor(x) Return the floor of x as a float. This is the largest integral value <= x.
# fmod fmod(x, y) Return fmod(x, y), according to platform C. x % y may differ.
# frexp frexp(x) Return the mantissa and exponent of x, as pair (m, e). m is a float and e is an int, such that x = m * 2.**e. If x is 0, m and e are both 0. Else 0.5 <= abs(m) < 1.0.
# fsum fsum(iterable) Return an accurate floating point sum of values in the iterable. Assumes IEEE-754 floating point arithmetic.
# gamma gamma(x) Gamma function at x.
# hypot hypot(x, y) Return the Euclidean distance, sqrt(x*x + y*y).
# isinf isinf(x) -> bool Check if float x is infinite (positive or negative).
# isnan isnan(x) -> bool Check if float x is not a number (NaN).
# ldexp ldexp(x, i) Return x * (2**i).
# lgamma lgamma(x) Natural logarithm of absolute value of Gamma function at x.
# log log(x[, base]) Return the logarithm of x to the given base. If the base not specified, returns the natural logarithm (base e) of x.
# log10 log10(x) Return the base 10 logarithm of x.
# log1p log1p(x) Return the natural logarithm of 1+x (base e). The result is computed in a way which is accurate for x near zero.
# modf modf(x) Return the fractional and integer parts of x. Both results carry the sign of x and are floats.
# pow pow(x, y) Return x**y (x to the power of y).
# radians radians(x) Convert angle x from degrees to radians.
# sin sin(x) Return the sine of x (measured in radians).
# sinh sinh(x) Return the hyperbolic sine of x.
# sqrt sqrt(x) Return the square root of x.
# tan tan(x) Return the tangent of x (measured in radians).
# tanh tanh(x) Return the hyperbolic tangent of x.
# trunc trunc(x:Real) -> Integral Truncates x to the nearest Integral toward 0. Uses the __trunc__ magic method.
# math.pi = 3.14159265359
# math.e = 2.71828182846
# phi = 1.61803398875
def hex1(win, xoffset, yoffset, scale = 1.0):
sxoffset = xoffset * scale + XCENTER
syoffset = yoffset * scale + YCENTER
p = Polygon(
Point(-4 * scale + sxoffset, -7 * scale + syoffset),
Point( 4 * scale + sxoffset, -7 * scale + syoffset),
Point( 8 * scale + sxoffset, 0 * scale + syoffset),
Point( 4 * scale + sxoffset, 7 * scale + syoffset),
Point(-4 * scale + sxoffset, 7 * scale + syoffset),
Point(-8 * scale + sxoffset, 0 * scale + syoffset))
p.draw(win)
def old_main():
scale = 7.7
win = GraphWin("hex2", XSCALE, YSCALE)
win.setCoords(0,0, XSCALE , YSCALE)
# one side is 8 units long
# height of vertical rectangle is 14
# bulge to either side is 4
# 1 -> 1
# layer 0
# center
hex1(win, 0, 0, scale)
# 6 -> 7
# layer 1
# 1.1 upper right -> lastx + 12, lasty + 7
hex1(win, 12, 7, scale)
# 1.2 lower right -> lastx + 12, lasty - 7
hex1(win, 12, -7, scale)
# 1.3 bottom -> lastx , lasty - 14
hex1(win, 0, -14, scale)
# 1.4 lower left -> lastx - 12, lasty - 7
hex1(win, -12, -7, scale)
# 1.5 upper left -> lastx - 12, lasty + 7
hex1(win, -12, 7, scale)
# 1.6 top -> lastx , lasty + 14
hex1(win, 0, 14, scale)
# 12 -> 19
# layer 2
# 2.1 one o'clock
hex1(win, 12, 21, scale)
# 2.2 two o'clock
hex1(win, 24, 14, scale)
# 2.3 three o'clock
hex1(win, 24, 0, scale)
# 2.4 four o'clock
hex1(win, 24, -14, scale)
# 2.5 five o'clock
hex1(win, 12, -21, scale)
# 2.6 six o'clock
hex1(win, 0, -28, scale)
# 2.7 seven o'clock
hex1(win, -12, -21, scale)
# 2.8 eight o'clock
hex1(win, -24, -14, scale)
# 2.9 nine o'clock
hex1(win, -24, 0, scale)
# 2.10 ten o'clock
hex1(win, -24, 14, scale)
# 2.11 eleven o'clock
hex1(win, -12, 21, scale)
# 2.12 twelve o'clock
hex1(win, 0, 28, scale)
# 18 -> 37
# layer 3
# 3.1 above one o'clock
hex1(win, 12, 35, scale)
# 3.2 above two o'clock
hex1(win, 24, 28, scale)
# 3.3 shift one o'clock
hex1(win, 36, 21, scale)
# 3.4 down from 3
hex1(win, 36, 7, scale)
# 3.5 down from 4
hex1(win, 36, -7, scale)
# 3.6 down from 5
hex1(win, 36, -21, scale)
# 3.7 down from four o'clock
hex1(win, 24, -28, scale)
# 3.8 down from five o'clock
hex1(win, 12, -35, scale)
# 3.9 bottom
hex1(win, 0, -42, scale)
# 3.10 down from seven o'clock
hex1(win, -12, -35, scale)
# 3.11 down from eight o'clock
hex1(win, -24, -28, scale)
# 3.12
hex1(win, -36, -21, scale)
# 3.13 up from 12
hex1(win, -36, -7, scale)
# 3.14 up from 13
hex1(win, -36, 7, scale)
# 3.15 up from 14
hex1(win, -36, 21, scale)
# 3.16 up from ten o'clock
hex1(win, -24, 28, scale)
# 3.17 up from eleven o'clock
hex1(win, -12, 35, scale)
# 3.18 top
hex1(win, 0, 42, scale)
# 24 -> 61
# layer 4
# 4.1 above 3.1 must be 40 to 63
hex1(win, 12, 49, scale)
# 4.2 above 3.2 must be 40 to 63
hex1(win, 24, 42, scale)
# 4.3 above 3.3 must be 40 to 63
hex1(win, 36, 35, scale)
# 4.4 must be 44, 45, 46, 47, 60, 61, 62, 63
hex1(win, 48, 28, scale)
# 4.5 down from 4.4
hex1(win, 48, 14, scale)
# 4.6 down from 5
hex1(win, 48, 0, scale)
# 4.7 down from 6
hex1(win, 48, -14, scale)
# 4.8 down from 7 must be 9, 11, 25, 27, 41, 43, 57 or 59
hex1(win, 48, -28, scale)
# 4.9
hex1(win, 36, -35, scale)
# 4.10
hex1(win, 24, -42, scale)
# 4.11
hex1(win, 12, -49, scale)
# 4.12 bottom
hex1(win, 0, -56, scale)
# 4.13
hex1(win, -12, -49, scale)
# 4.14
hex1(win, -24, -42, scale)
# 4.15 must be 17, 21, 25, 29, 49, 53, 57 or 61
hex1(win, -36, -35, scale)
# 4.16
hex1(win, -48, -28, scale)
# 4.17
hex1(win, -48, -14, scale)
# 4.18
hex1(win, -48, 0, scale)
# 4.19
hex1(win, -48, 14, scale)
# 4.20
hex1(win, -48, 28, scale)
# 4.21
hex1(win, -36, 35, scale)
# 4.22
hex1(win, -24, 42, scale)
# 4.23
hex1(win, -12, 49, scale)
# 4.24 top must be 24 to 31
hex1(win, 0, 56, scale)
# 5.10 top must be 63 - 1 = 62
hex1(win, 0, 70, scale)
t = Text(Point(XCENTER,YCENTER + 70 * scale), "62")
t.draw(win)
# 5.20 lower right axis must be 63 - 16 = 47
hex1(win, 60, -35, scale)
t = Text(Point(XCENTER + 60 * scale,YCENTER - 35 * scale), "47")
t.draw(win)
# 5.30 lower left axis must be 63 - 8 = 55
hex1(win, -60, -35, scale)
t = Text(Point(XCENTER - 60 * scale,YCENTER - 35 * scale), "55")
t.draw(win)
# 30 -> 91
# layer 5
# 36 -> 127
# layer 6
# 42 -> 169 64, 128, 192, 256, 320
# layer 6
# 7 48 -> 217
# 8 54 -> 261
p0 = Point(XCENTER, YCENTER)
p0.setFill("red")
p0.setOutline("red")
p0.draw(win)
p1 = Point(XCENTER + 12 * scale, YCENTER + 7 * scale)
l1 = Line(p0, p1)
l1.setFill("red")
l1.draw(win)
t = Text(Point(XCENTER,YCENTER), "0")
t.draw(win)
win.getMouse()
win.close()
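# Editor's note: tiny helper (editor addition) backing the cumulative layer
# counts noted in old_main() above (1, 7, 19, 37, 61, 91, 127, ...): the
# centered hexagonal number covering layers 0..n.
def centered_hex_count(n):
    return 3 * n * (n + 1) + 1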
main()
#
#
# __
#/ \
#\__/
#
# ____
# / \
#/ \
#\ /
# \____/
#
# 5
# __ __
# / \
# 4 3
# / 0 \ 000000
# \ /
# 1 2
# \__ __/
# 0
#
# 5
# __ __
# / \
# 4 3
# / 1 \ 000001
# \ /
# 1 2
# \______/
# 0
#
# 5
# __ __
# / \
# 4 3
# / 2 \ 000010
# \ /
# 1 \ 2
# \__ __/
# 0
#
# 5
# __ __
# / \
# 4 3
# / 3 \ 000011
# \ /
# 1 \ 2
# \______/
# 0
#
# 5
# __ __
# / \
# 4 3
# / 4 \ 000100
# \ /
# 1 / 2
# \__ __/
# 0
#
#
# 5
# ______
# / \
# 4 / \ 3
# / 61 \ 111101
# \ /
# 1 / 2
# \______/
# 0
#
# 5
# ______
# / \
# 4 / \ 3
# / 62 \ 111110
# \ /
# 1 \ / 2
# \__ __/
# 0
#
# 5
# ______
# / \
# 4 / \ 3
# / 63 \ 111111
# \ /
# 1 \ / 2
# \______/
# 0
|
jtraver/dev
|
python/graphics/circles7.py
|
Python
|
mit
| 24,324
|
[
"Amber"
] |
df41fff1a3210666eddb11c9333a49305b5c6c2443e8f8d9242345299e2ca3c3
|
# 03.10.2007, c
filename_mesh = 'database/phono/mesh_circ21.mesh'
# Whole domain $Y$.
region_1000 = {
'name' : 'Y',
'select' : 'all',
}
# Domain $Y_1$.
region_1 = {
'name' : 'Y1',
'select' : 'elements of group 1',
}
# Domain $Y_2$.
region_2 = {
'name' : 'Y2',
'select' : 'elements of group 2',
}
# Domain $Y_3$.
region_3 = {
'name' : 'Y3',
'select' : 'nodes in (x > %f) & (x < %f) & (y > %f) & (y < %f)'\
% (-0.3, 0.3, -0.48, -0.3),
}
wx = wy = 0.499
region_10 = {
'name' : 'Bottom',
'select' : 'nodes in (y < %f)' % -wy,
}
region_11 = {
'name' : 'Top',
'select' : 'nodes in (y > %f)' % wy,
}
material_1 = {
'name' : 'solid',
'mode' : 'here',
'region' : 'Y',
'lame' : {'lambda' : 1e1, 'mu' : 1e0},
'density' : 1e-1,
}
field_1 = {
'name' : '2_displacement',
'dim' : (2,1),
'domain' : 'Y',
'bases' : {'Y' : '2_3_P2'}
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '2_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '2_displacement',
'dual' : 'u',
}
ebc_1 = {
'name' : 'Fix',
'region' : 'Bottom',
'dofs' : {'u.all' : 0.0},
}
ebc_2 = {
'name' : 'Load',
'region' : 'Top',
'dofs' : {'u.all' : 0.2},
}
lcbc_1 = {
'name' : 'rigid1',
'region' : 'Y2',
'dofs' : {'u.all' : 'rigid'},
}
lcbc_2 = {
'name' : 'rigid2',
'region' : 'Y3',
'dofs' : {'u.all' : 'rigid'},
}
integral_1 = {
'name' : 'i1',
'kind' : 'v',
'quadrature' : 'gauss_o2_d2',
}
equations = {
'balance' : """dw_lin_elastic_iso.i1.Y( solid.lame, v, u ) = 0""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.umfpack',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
'eps_r' : 1.0,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp' : 0.001,
'ls_on' : 1.1,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'is_plot' : False,
'lin_solver' : 'umfpack',
'matrix' : 'internal', # 'external' or 'internal'
'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
##
# FE assembling parameters.
fe = {
'chunk_size' : 1000
}
from testsBasic import TestLCBC
output_name = 'test_lcbc_2d.vtk'
##
# 03.10.2007, c
class Test( TestLCBC ):
pass
|
certik/sfepy
|
tests/test_lcbc_2d.py
|
Python
|
bsd-3-clause
| 2,482
|
[
"VTK"
] |
412e2709048076eb728e742072c8158ab2aa062881ccf04b50d549b6ea48f731
|
#!/usr/bin/python
# Python counterpart of rsync written by Vivian De Smedt
# Send any comments or bug reports to vivian@vdesmedt.com.
# I would like to thank William Tan for his support in tuning rsync.py to support unicode paths.
# I would like to thank Luc Saffre for his bug reports and fixes.
#from __future__ import nested_scopes
import os, os.path, shutil, glob, re, sys, getopt, stat, string
#BEGIN POSTOPERATION - PyBackupStore
#ORIGINAL try:
#ORIGINAL import win32file
#ORIGINAL except:
#ORIGINAL win32file = None
win32file = None
#END POSTOPERATION - PyBackupStore
class Cookie:
def __init__(self):
self.sink_root = ""
self.target_root = ""
self.quiet = 0
self.recursive = 0
self.relative = 0
self.dry_run = 0
self.time = 0
self.update = 0
self.cvs_ignore = 0
self.ignore_time = 0
self.delete = 0
self.delete_excluded = 0
self.delete_from_source = 0
self.size_only = 0
self.modify_window = 2
self.existing = 0
self.filters = []
self.case_sensitivity = 0
if os.name == "nt":
self.case_sensitivity = re.I
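# Editor's note: illustrative configuration sketch (editor addition; names and
# patterns hypothetical). It shows how a Cookie is typically filled in before
# walking the sink tree with visit() below; filters are ('+'/'-', regex)
# pairs matched with re.search(). The PyBackupStore patches additionally
# expect a cookie.extra_operation helper object, which is not shown here.
def _example_cookie(sink_root, target_root):
    cookie = Cookie()
    cookie.sink_root = sink_root
    cookie.target_root = target_root
    cookie.recursive = 1
    cookie.delete = 1
    cookie.filters = [('-', r'/CVS/$'), ('+', r'.*')]  # drop CVS folders, keep the rest
    return cookie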
def visit(cookie, dirname, names):
"""Copy files names from sink_root + (dirname - sink_root) to target_root + (dirname - sink_root)"""
if os.path.split(cookie.sink_root)[1]: # Should be tested with (C:\Cvs -> C:\)! (C:\Archives\MyDatas\UltraEdit -> C:\Archives\MyDatas) (Cvs -> "")! (Archives\MyDatas\UltraEdit -> Archives\MyDatas) (\Cvs -> \)! (\Archives\MyDatas\UltraEdit -> Archives\MyDatas)
dirname = dirname[len(cookie.sink_root) + 1:]
else:
dirname = dirname[len(cookie.sink_root):]
target_dir = os.path.join(cookie.target_root, dirname)
if not os.path.isdir(target_dir):
makeDir(cookie, target_dir)
sink_dir = os.path.join(cookie.sink_root, dirname)
filters = []
if cookie.cvs_ignore:
ignore = os.path.join(sink_dir, ".cvsignore")
if os.path.isfile(ignore):
filters = convertPatterns(ignore, "-")
filters = filters + cookie.filters
names_excluded = []
if filters:
# filter sink files (names):
name_index = 0
while name_index < len(names):
name = names[name_index]
path = os.path.join(dirname, name)
path = convertPath(path)
if os.path.isdir(os.path.join(sink_dir, name)):
path = path + "/"
for cfilter in filters:
if re.search(cfilter[1], path, cookie.case_sensitivity):
if cfilter[0] == '-':
sink = os.path.join(sink_dir, name)
if cookie.delete_from_source:
if os.path.isfile(sink):
removeFile(cookie, sink)
elif os.path.isdir(sink):
removeDir(cookie, sink)
else:
logError("Sink %s is neither a file nor a folder (skip removal)" % sink, cookie)
names_excluded += [names[name_index]]
del(names[name_index])
name_index = name_index - 1
break
elif cfilter[0] == '+':
break
name_index = name_index + 1
#BEGIN POSTOPERATION - PyBackupStore
rebuild = False
#END POSTOPERATION - PyBackupStore
if cookie.delete and os.path.isdir(target_dir):
# Delete files and folder in target not present in filtered sink.
#BEGIN POSTOPERATION - PyBackupStore
#ORIGINAL for name in os.listdir(target_dir):
for name in cookie.extra_operation.listdir(target_dir):
#END POSTOPERATION - PyBackupStore
if not cookie.delete_excluded and name in names_excluded:
continue
if not name in names:
target = os.path.join(target_dir, name)
#BEGIN POSTOPERATION - PyBackupStore
#ORIGINAL if os.path.isfile(target):
if name.isFile():
cookie.extra_operation.unCompressData(target_dir)
#END POSTOPERATION - PyBackupStore
removeFile(cookie, target)
rebuild = True
#BEGIN POSTOPERATION - PyBackupStore
#ORIGINAL elif os.path.isdir(target):
elif name.isFolder():
#END POSTOPERATION - PyBackupStore
removeDir(cookie, target)
else:
pass
	#BEGIN POSTOPERATION - PyBackupStore
lstFilesToCommpress = []
#END POSTOPERATION - PyBackupStore
for name in names:
# Copy files and folder from sink to target.
sink = os.path.join(sink_dir, name)
#print sink
target = os.path.join(target_dir, name)
#BEGIN POSTOPERATION - PyBackupStore
obj_target = cookie.extra_operation.getFSObject(target)
lstFilesToCommpress.append(name)
#ORIGINAL if os.path.exists(target):
if obj_target is not None:
#END POSTOPERATION - PyBackupStore
			# When the target already exists:
if os.path.isfile(sink):
#BEGIN POSTOPERATION - PyBackupStore
#ORIGINAL
#if os.path.isfile(target):
# file-file
# if shouldUpdate(cookie, sink, target):
# updateFile(cookie, sink, target)
#ORIGINAL FIN
if obj_target.isFile():
# file-file
if cookie.extra_operation.shouldUpdate(cookie, sink, obj_target):
cookie.extra_operation.unCompressData(target_dir)
if not updateFile(cookie, sink, target):
del lstFilesToCommpress[-1]
rebuild = True
#ORIGINAL if os.path.isfile(target):
#ORIGINAL elif os.path.isdir(target):
elif obj_target.isFolder():
cookie.extra_operation.unCompressData(target_dir)
#END POSTOPERATION - PyBackupStore
# file-folder
removeDir(cookie, target)
if not copyFile(cookie, sink, target):
del lstFilesToCommpress[-1]
#BEGIN POSTOPERATION - PyBackupStore
rebuild = True
#END POSTOPERATION - PyBackupStore
else:
# file-???
logError("Target %s is neither a file nor folder (skip update)" % sink, cookie)
elif os.path.isdir(sink):
#BEGIN POSTOPERATION - PyBackupStore
if obj_target.isFile():
#ORIGINAL if os.path.isfile(target):
cookie.extra_operation.unCompressData(target_dir)
#END POSTOPERATION - PyBackupStore
# folder-file
removeFile(cookie, target)
makeDir(cookie, target)
#BEGIN POSTOPERATION - PyBackupStore
rebuild = True
#END POSTOPERATION - PyBackupStore
else:
# ???-xxx
logError("Sink %s is neither a file nor a folder (skip update)" % sink, cookie)
elif not cookie.existing:
			# When the target doesn't exist:
if os.path.isfile(sink):
#BEGIN POSTOPERATION - PyBackupStore
cookie.extra_operation.unCompressData(target_dir)
#END POSTOPERATION - PyBackupStore
# file
if not copyFile(cookie, sink, target):
del lstFilesToCommpress[-1]
#BEGIN POSTOPERATION - PyBackupStore
rebuild = True
#END POSTOPERATION - PyBackupStore
elif os.path.isdir(sink):
# folder
makeDir(cookie, target)
else:
logError("Sink %s is neither a file nor a folder (skip update)" % sink, cookie)
#BEGIN POSTOPERATION - PyBackupStore
if rebuild:
cookie.extra_operation.compressData(target_dir, lstFilesToCommpress)
#END POSTOPERATION - PyBackupStore
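# EDITORIAL NOTE (not part of the original script): the PyBackupStore
# post-operations wrap the plain rsync logic above -- the target directory is
# uncompressed before any file in it is updated or removed, the names of the
# files still present are collected in lstFilesToCommpress, and the directory
# is recompressed in a single pass at the end when rebuild is set.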
def log(cookie, message):
#BEGIN POSTOPERATION - PyBackupStore
cookie.extra_operation.log(message)
#END POSTOPERATION - PyBackupStore
if not cookie.quiet:
sys.stderr.write(message + "\n")
try:
print message
except UnicodeEncodeError:
print message.encode("utf8")
def logError(message, cookie=None):
try:
#BEGIN POSTOPERATION - PyBackupStore
if cookie:
cookie.extra_operation.log(message, error=True)
#END POSTOPERATION - PyBackupStore
sys.stderr.write(message + "\n")
except UnicodeEncodeError:
sys.stderr.write(message.encode("utf8") + "\n")
def shouldUpdate(cookie, sink, target):
try:
sink_st = os.stat(sink)
sink_sz = sink_st.st_size
sink_mt = sink_st.st_mtime
except:
logError("Fail to retrieve information about sink %s (skip update)" % sink, cookie)
return 0
try:
target_st = target.stat
target_sz = target_st.st_size
target_mt = target_st.st_mtime
except:
logError("Fail to retrieve information about target %s (skip update)" % target, cookie)
return 0
if cookie.update:
return target_mt < sink_mt - cookie.modify_window
if cookie.ignore_time:
return 1
if target_sz != sink_sz:
return 1
if cookie.size_only:
return 0
return abs(target_mt - sink_mt) > cookie.modify_window
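# EDITORIAL NOTE (not part of the original script): the checks in shouldUpdate
# cascade -- with --update a copy happens only when the target is older than
# the sink by more than the modify window; --ignore-times forces a copy; a
# size difference always copies; --size-only stops at the size check; otherwise
# the two modification times are compared within the modify window (default
# 2 seconds, which tolerates the coarse timestamp resolution of FAT filesystems).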
def copyFile(cookie, sink, target):
log(cookie, "copy: %s to: %s" % (sink, target))
noError = True
if not cookie.dry_run:
try:
shutil.copyfile(sink, target)
except:
logError("Fail to copy %s" % sink, cookie)
noError = False
if cookie.time:
try:
s = os.stat(sink)
os.utime(target, (s.st_atime, s.st_mtime));
except:
logError("Fail to copy timestamp of %s" % sink, cookie)
#BEGIN POSTOPERATION - PyBackupStore
if noError:
cookie.extra_operation.updateFile(target)
#END POSTOPERATION - PyBackupStore
return noError
def updateFile(cookie, sink, target):
log(cookie, "update: %s to: %s" % (sink, target))
noError = True
if not cookie.dry_run:
		# Read-only, hidden and system files cannot be overwritten.
try:
try:
if win32file:
filemode = win32file.GetFileAttributesW(target)
win32file.SetFileAttributesW(target, filemode & ~win32file.FILE_ATTRIBUTE_READONLY & ~win32file.FILE_ATTRIBUTE_HIDDEN & ~win32file.FILE_ATTRIBUTE_SYSTEM)
else:
os.chmod(target, stat.S_IWUSR)
except:
#logError("Fail to allow override of %s" % target, cookie)
pass
shutil.copyfile(sink, target)
if cookie.time:
try:
s = os.stat(sink)
os.utime(target, (s.st_atime, s.st_mtime));
except:
logError("Fail to copy timestamp of %s" % sink, cookie) # The utime api of the 2.3 version of python is not unicode compliant.
except:
logError("Fail to override %s" % sink, cookie)
noError = False
#if win32file:
# win32file.SetFileAttributesW(target, filemode)
#BEGIN POSTOPERATION - PyBackupStore
if noError:
if win32file:
win32file.SetFileAttributesW(target, filemode)
cookie.extra_operation.updateFile(target)
#END POSTOPERATION - PyBackupStore
return noError
def prepareRemoveFile(path):
if win32file:
filemode = win32file.GetFileAttributesW(path)
win32file.SetFileAttributesW(path, filemode & ~win32file.FILE_ATTRIBUTE_READONLY & ~win32file.FILE_ATTRIBUTE_HIDDEN & ~win32file.FILE_ATTRIBUTE_SYSTEM)
else:
os.chmod(path, stat.S_IWUSR)
def removeFile(cookie, target):
	# Read-only files cannot be deleted.
log(cookie, "remove: %s" % target)
if not cookie.dry_run:
try:
try:
prepareRemoveFile(target)
except:
#logError("Fail to allow removal of %s" % target, cookie)
pass
os.remove(target)
#BEGIN POSTOPERATION - PyBackupStore
cookie.extra_operation.removeFile(target)
#END POSTOPERATION - PyBackupStore
except:
logError("Fail to remove %s" % target, cookie)
def makeDir(cookie, target):
log(cookie, "make dir: %s" % target)
if not cookie.dry_run:
try:
os.makedirs(target)
except:
logError("Fail to make dir %s" % target, cookie)
#BEGIN POSTOPERATION - PyBackupStore
cookie.extra_operation.makeDir(target)
#END POSTOPERATION - PyBackupStore
def visitForPrepareRemoveDir(arg, dirname, names):
for name in names:
path = os.path.join(dirname, name)
prepareRemoveFile(path)
def prepareRemoveDir(path):
prepareRemoveFile(path)
os.path.walk(path, visitForPrepareRemoveDir, None)
def OnRemoveDirError(func, path, excinfo):
logError("Fail to remove %s" % path)
def removeDir(cookie, target):
	# Read-only directories cannot be deleted.
log(cookie, "remove dir: %s" % target)
if not cookie.dry_run:
prepareRemoveDir(target)
try:
#BEGIN POSTOPERATION - PyBackupStore
cookie.extra_operation.preRemoveTree(target)
#END POSTOPERATION - PyBackupStore
shutil.rmtree(target, False, OnRemoveDirError)
#BEGIN POSTOPERATION - PyBackupStore
cookie.extra_operation.postRemoveTree()
#END POSTOPERATION - PyBackupStore
except:
logError("Fail to remove dir %s" % target, cookie)
def convertPath(path):
# Convert windows, mac path to unix version.
separator = os.path.normpath("/")
if separator != "/":
path = re.sub(re.escape(separator), "/", path)
	# Prefix with '/' so a file or folder pattern can match the whole file or folder name.
path = "/" + path
return path
def convertPattern(pattern, sign):
"""Convert a rsync pattern that match against a path to a filter that match against a converted path."""
# Check for include vs exclude patterns.
if pattern[:2] == "+ ":
pattern = pattern[2:]
sign = "+"
elif pattern[:2] == "- ":
pattern = pattern[2:]
sign = "-"
# Express windows, mac patterns in unix patterns (rsync.py extension).
separator = os.path.normpath("/")
if separator != "/":
pattern = re.sub(re.escape(separator), "/", pattern)
# If pattern contains '/' it should match from the start.
temp = pattern
if pattern[0] == "/":
pattern = pattern[1:]
if temp[-1] == "/":
temp = temp[:-1]
# Convert pattern rules: ** * ? to regexp rules.
pattern = re.escape(pattern)
pattern = string.replace(pattern, "\\?", ".")
pattern = string.replace(pattern, "\\*\\*", ".*")
pattern = string.replace(pattern, "\\*", "[^/]*")
pattern = string.replace(pattern, "\\*", ".*")
if "/" in temp:
# If pattern contains '/' it should match from the start.
pattern = "^\\/" + pattern
else:
		# Else the pattern should match the whole file or folder name.
pattern = "\\/" + pattern
if pattern[-2:] != "\\/" and pattern[-2:] != ".*":
		# File patterns should also match folders.
pattern = pattern + "\\/?"
# Pattern should match till the end.
pattern = pattern + "$"
return (sign, pattern)
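# EDITORIAL EXAMPLE (not part of the original script): a minimal sketch of how
# convertPattern and convertPath cooperate.  An rsync-style pattern becomes a
# (sign, regexp) filter that is matched against the '/'-prefixed,
# slash-normalised path; the file names below are invented for illustration.
def _exampleConvertPattern():
    sign, regexp = convertPattern("*.pyc", "-")            # exclude *.pyc files
    hits_pyc = re.search(regexp, convertPath("cache.pyc")) is not None
    hits_py = re.search(regexp, convertPath("rsync.py")) is not None
    return sign, hits_pyc, hits_py                          # -> ("-", True, False)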
def convertPatterns(path, sign):
"""Read the files for pattern and return a vector of filters"""
filters = []
f = open(path, "r")
while 1:
pattern = f.readline()
if not pattern:
break
if pattern[-1] == "\n":
pattern = pattern[:-1]
if re.match("[\t ]*$", pattern):
continue
if pattern[0] == "#":
continue
filters = filters + [convertPattern(pattern, sign)]
f.close()
return filters
def printUsage():
"""Print the help string that should printed by rsync.py -h"""
print "usage: rsync.py [options] source target"
print """
-q, --quiet decrease verbosity
-r, --recursive recurse into directories
-R, --relative use relative path names
-u, --update update only (don't overwrite newer files)
-t, --times preserve times
-n, --dry-run show what would have been transferred
--existing only update files that already exist
--delete delete files that don't exist on the sending side
--delete-excluded also delete excluded files on the receiving side
 --delete-from-source   delete excluded files and folders from the source (sending side)
-I, --ignore-times don't exclude files that match length and time
--size-only only use file size when determining if a file should
be transferred
--modify-window=NUM timestamp window (seconds) for file match (default=2)
-C, --cvs-exclude auto ignore files in the same way CVS does
--exclude=PATTERN exclude files matching PATTERN
--exclude-from=FILE exclude patterns listed in FILE
--include=PATTERN don't exclude files matching PATTERN
--include-from=FILE don't exclude patterns listed in FILE
--version print version number
-h, --help show this help screen
See http://www.vdesmedt.com/~vds2212/rsync.html for information and updates.
Send an email to vivian@vdesmedt.com for comments and bug reports."""
def printVersion():
print "rsync.py version 2.0.1"
def main(args):
cookie = Cookie()
opts, args = getopt.getopt(args, "qrRntuCIh", ["quiet", "recursive", "relative", "dry-run", "time", "update", "cvs-ignore", "ignore-times", "help", "delete", "delete-excluded", "delete-from-source", "existing", "size-only", "modify-window=", "exclude=", "exclude-from=", "include=", "include-from=", "version"])
for o, v in opts:
if o in ["-q", "--quiet"]:
cookie.quiet = 1
if o in ["-r", "--recursive"]:
cookie.recursive = 1
if o in ["-R", "--relative"]:
cookie.relative = 1
elif o in ["-n", "--dry-run"]:
cookie.dry_run = 1
elif o in ["-t", "--times", "--time"]: # --time is there to guaranty backward compatibility with previous buggy version.
cookie.time = 1
elif o in ["-u", "--update"]:
cookie.update = 1
elif o in ["-C", "--cvs-ignore"]:
cookie.cvs_ignore = 1
elif o in ["-I", "--ignore-time"]:
cookie.ignore_time = 1
elif o == "--delete":
cookie.delete = 1
elif o == "--delete-excluded":
cookie.delete = 1
cookie.delete_excluded = 1
elif o == "--delete-from-source":
cookie.delete_from_source = 1
elif o == "--size-only":
cookie.size_only = 1
elif o == "--modify-window":
cookie.modify_window = int(v)
elif o == "--existing":
cookie.existing = 1
elif o == "--exclude":
cookie.filters = cookie.filters + [convertPattern(v, "-")]
elif o == "--exclude-from":
cookie.filters = cookie.filters + convertPatterns(v, "-")
elif o == "--include":
cookie.filters = cookie.filters + [convertPattern(v, "+")]
elif o == "--include-from":
cookie.filters = cookie.filters + convertPatterns(v, "+")
elif o == "--version":
printVersion()
return 0
elif o in ["-h", "--help"]:
printUsage()
return 0
if len(args) <= 1:
printUsage()
return 1
#print cookie.filters
target_root = args[1]
try: # In order to allow compatibility below 2.3.
pass
if os.path.__dict__.has_key("supports_unicode_filenames") and os.path.supports_unicode_filenames:
target_root = unicode(target_root, sys.getfilesystemencoding())
finally:
cookie.target_root = target_root
sinks = glob.glob(args[0])
if not sinks:
return 0
sink_families = {}
for sink in sinks:
try: # In order to allow compatibility below 2.3.
if os.path.__dict__.has_key("supports_unicode_filenames") and os.path.supports_unicode_filenames:
sink = unicode(sink, sys.getfilesystemencoding())
except:
pass
sink_name = ""
sink_root = sink
sink_drive, sink_root = os.path.splitdrive(sink)
while not sink_name:
if sink_root == os.path.sep:
sink_name = "."
break
sink_root, sink_name = os.path.split(sink_root)
sink_root = sink_drive + sink_root
if not sink_families.has_key(sink_root):
sink_families[sink_root] = []
sink_families[sink_root] = sink_families[sink_root] + [sink_name]
for sink_root in sink_families.keys():
if cookie.relative:
cookie.sink_root = ""
else:
cookie.sink_root = sink_root
global y # In order to allow compatibility below 2.1 (nested scope where used before).
y = sink_root
files = filter(lambda x: os.path.isfile(os.path.join(y, x)), sink_families[sink_root])
if files:
visit(cookie, sink_root, files)
#global y # In order to allow compatibility below 2.1 (nested scope where used before).
y = sink_root
folders = filter(lambda x: os.path.isdir(os.path.join(y, x)), sink_families[sink_root])
for folder in folders:
folder_path = os.path.join(sink_root, folder)
if not cookie.recursive:
visit(cookie, folder_path, os.listdir(folder_path))
else:
os.path.walk(folder_path, visit, cookie)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
quanticio/backupstore
|
backupstore/src/core/common/rsync.py
|
Python
|
gpl-2.0
| 19,060
|
[
"VisIt"
] |
8d4dc939fd3e2a73f744dae98d2ab69144991b8857df604873e2e39d140a48dd
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2012-2014 Michal Kalewski <mkalewski at cs.put.poznan.pl>
#
# This file is a part of the Simple Network Simulator (sim2net) project.
# USE, MODIFICATION, COPYING AND DISTRIBUTION OF THIS SOFTWARE IS SUBJECT TO
# THE TERMS AND CONDITIONS OF THE MIT LICENSE. YOU SHOULD HAVE RECEIVED A COPY
# OF THE MIT LICENSE ALONG WITH THIS SOFTWARE; IF NOT, YOU CAN DOWNLOAD A COPY
# FROM HTTP://WWW.OPENSOURCE.ORG/.
#
# For bug reports, feature and support requests please visit
# <https://github.com/mkalewski/sim2net/issues>.
"""
This package provides modules for the **sim2net** simulator.
The :mod:`sim2net.simulator.Sim2Net` class is the main entry point for
conducting simulations, and the :mod:`sim2net.application.Application` abstract
class defines the interface for simulation applications.
"""
__title__ = 'sim2net'
__author__ = 'Michal Kalewski'
__copyright__ = 'Copyright (c) 2012-2014 Michal Kalewski'
__license__ = 'MIT'
|
mkalewski/sim2net
|
sim2net/__init__.py
|
Python
|
mit
| 981
|
[
"VisIt"
] |
68c90201602a7133a3285bc9a36c2e04e7a789969fee4b3dad3218a70709def8
|
import pytest
from ispyb.cli import last_data_collections_on
_header = "------Date------ Beamline --DCID-- ---Visit---\n"
def test_basic(capsys, testconfig):
last_data_collections_on.main(["i03", f"--credentials={testconfig}"])
captured = capsys.readouterr()
assert not captured.err
assert captured.out.startswith(_header)
assert (
"2016-01-14 12:40 i03 993677 cm14451-1 3600 images /dls/i03/data/2016/cm14451-1/20160114/tlys_jan_4/tlys_jan_4_1_####.cbf"
in captured.out
)
assert len(captured.out.split("\n")) >= 5
def test_limit(capsys, testconfig):
last_data_collections_on.main(["i03", "-n", "2", f"--credentials={testconfig}"])
captured = capsys.readouterr()
assert not captured.err
assert captured.out.startswith(_header)
assert len(captured.out.strip().split("\n")) == 3
@pytest.mark.parametrize(
"synchweb_url", ["https://ispyb.diamond.ac.uk", "https://wls.ac.uk"]
)
def test_link(synchweb_url, capsys, testconfig):
last_data_collections_on.main(
["i03", "--link", f"--credentials={testconfig}", "--synchweb-url", synchweb_url]
)
captured = capsys.readouterr()
assert not captured.err
assert captured.out.startswith(_header)
lines = captured.out[len(_header) :].strip().split("\n")
n_lines = len(lines)
n_urls = sum(1 for line in lines if f"{synchweb_url}/dc/visit" in line)
assert n_urls == n_lines / 2
def test_no_results(capsys, testconfig):
last_data_collections_on.main(["i04", f"--credentials={testconfig}"])
captured = capsys.readouterr()
assert not captured.err
assert captured.out == _header
def test_help(capsys):
with pytest.raises(SystemExit) as e:
last_data_collections_on.main(["-h"])
assert e.value.code == 0
captured = capsys.readouterr()
assert not captured.err
assert "usage: ispyb.last_data_collections_on [beamline]" in captured.out
def test_no_beamline(capsys):
with pytest.raises(SystemExit) as e:
last_data_collections_on.main([])
assert e.value.code != 0
captured = capsys.readouterr()
assert not captured.out
assert "error: the following arguments are required: beamline" in captured.err
|
DiamondLightSource/ispyb-api
|
tests/cli/test_last_data_collections_on.py
|
Python
|
apache-2.0
| 2,229
|
[
"VisIt"
] |
1db1f9386a948ed278ca45c473c76a81c760ac160630383759f36ab4fa6a68c0
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# gen.filters.rules/Person/_HasAssociation.py
# $Id$
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
#
# HasAssociation
#
#-------------------------------------------------------------------------
class HasAssociation(Rule):
"""Rule that checks for a person with a personal association"""
labels = [ _('Number of instances:'), _('Number must be:')]
name = _('People with <count> associations')
description = _("Matches people with a certain number of associations")
category = _('General filters')
def prepare(self, db):
# things we want to do just once, not for every handle
if self.list[1] == 'lesser than':
self.count_type = 0
elif self.list[1] == 'greater than':
self.count_type = 2
else:
self.count_type = 1 # "equal to"
self.userSelectedCount = int(self.list[0])
    def apply(self, db, person):
        count = len(person.get_person_ref_list())
        if self.count_type == 0:    # "lesser than"
            return count < self.userSelectedCount
        elif self.count_type == 2:  # "greater than"
            return count > self.userSelectedCount
        # "equal to"
        return count == self.userSelectedCount
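# EDITORIAL NOTE (not part of the original rule): self.list holds the two
# user-supplied values in the order of the labels above -- [0] the number of
# associations and [1] one of 'lesser than' / 'greater than' / anything else
# meaning 'equal to' -- and apply() compares the person's number of personal
# associations (person references) against that count.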
|
arunkgupta/gramps
|
gramps/gen/filters/rules/person/_hasassociation.py
|
Python
|
gpl-2.0
| 2,577
|
[
"Brian"
] |
5d9a930bf603ccd50b42dfde2f3030cc02b236ffb371fd69be33b891623de0cb
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
from stoqlib.gui.editors.shortcutseditor import ShortcutsEditor
from stoqlib.gui.test.uitestutils import GUITest
class TestShortcutsEditor(GUITest):
def test_show(self):
editor = ShortcutsEditor()
self.check_dialog(editor, 'dialog-shortcuts-show')
|
andrebellafronte/stoq
|
stoqlib/gui/test/test_shortcutseditor.py
|
Python
|
gpl-2.0
| 1,162
|
[
"VisIt"
] |
544096cdb9675820ee2368c9230a735178241aa2b23c8738c9f1fa59c86f781c
|
# setBonusGuristas
#
# Used by:
# Implants named like: grade Crystal (18 of 18)
runTime = "early"
type = "passive"
def handler(fit, implant, context):
fit.appliedImplants.filteredItemMultiply(lambda mod: mod.item.group.name == "Cyberimplant",
"shieldBoostMultiplier", implant.getModifiedItemAttr("implantSetGuristas"))
|
Ebag333/Pyfa
|
eos/effects/setbonusguristas.py
|
Python
|
gpl-3.0
| 370
|
[
"CRYSTAL"
] |
2dfb09382598b3f1e0c1eda0ffac2e733031f41c90beb427b072ba37842803eb
|
#!/usr/bin/python2
# coding=UTF-8
#
# Samsung-Tools
#
# Part of the 'Linux On My Samsung' project - <http://loms.voria.org>
#
# Copyleft (C) 2010 by
# Fortunato Ventre - <vorione@gmail.com> - <http://www.voria.org>
#
# 'Samsung-Tools' is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
# <http://www.gnu.org/licenses/gpl.txt>
import os
import sys
import dbus
import gettext
_ = gettext.gettext
gettext.bindtextdomain("samsung-tools")
gettext.textdomain("samsung-tools")
WORK_DIRECTORY = "/usr/share/samsung-tools"
sys.path.append(WORK_DIRECTORY)
from backends.globals import *
from backends.session.util.locales import *
quiet = False
# Fix encoding for piping
import codecs
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
class Backlight():
def __init__(self, option):
self.option = option
success = False
retry = 3
while retry > 0 and not success:
try:
bus = dbus.SessionBus()
proxy = bus.get_object(
SESSION_INTERFACE_NAME,
SESSION_OBJECT_PATH_BACKLIGHT)
self.interface = dbus.Interface(proxy, SESSION_INTERFACE_NAME)
success = True
except:
retry = retry - 1
if retry == 0:
print unicode(_("Backlight control: unable to connect to session service!"), "utf-8")
sys.exit(1)
def __on(self):
return self.interface.Enable()
def __off(self):
return self.interface.Disable()
def __status(self):
return self.interface.IsEnabled()
def __toggle(self):
return self.interface.Toggle()
def apply(self):
if self.option is None:
return
if self.option == "on":
result = self.__on()
if not quiet:
if result:
print BACKLIGHT_ENABLED
else:
print BACKLIGHT_ENABLING_ERROR
if self.option == "off":
result = self.__off()
if not quiet:
if result:
print BACKLIGHT_DISABLED
else:
print BACKLIGHT_DISABLING_ERROR
if self.option == "toggle":
result = self.__toggle()
if not quiet:
if result:
status = self.__status()
if status:
print BACKLIGHT_ENABLED
else:
print BACKLIGHT_DISABLED
else:
print BACKLIGHT_TOGGLING_ERROR
if self.option == "hotkey":
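            # EDITORIAL NOTE (not part of the original): "hotkey" mode, here and
            # in the classes below, debounces repeated key events -- each
            # invocation drops a per-user, per-PID marker file in /tmp for about
            # half a second and only toggles when no other marker is already
            # present (the Cpu class uses a longer window so that a second press
            # within it cycles the fan mode instead of just showing the status).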
from time import sleep
from subprocess import Popen, PIPE
tempfiles = ".samsung-tools-backlight-" + str(os.getuid()) + "-"
tempfile = "/tmp/" + tempfiles + str(os.getpid())
toggle = True
try:
ls = Popen(['ls /tmp/' + tempfiles + '*'],
stdout=PIPE, stderr=PIPE, shell=True)
if len(ls.communicate()[0]) != 0:
toggle = False
except:
pass
if toggle:
Backlight("toggle").apply()
try:
file = open(tempfile, "w").close() # create temp file
except:
pass
sleep(0.5)
try:
os.remove(tempfile)
except:
pass
if self.option == "status":
result = self.__status()
if not quiet:
if result:
print BACKLIGHT_STATUS_ENABLED
else:
print BACKLIGHT_STATUS_DISABLED
class Bluetooth():
def __init__(self, option, use_notify=False):
self.option = option
self.use_notify = use_notify
success = False
retry = 3
while retry > 0 and not success:
try:
bus = dbus.SessionBus()
proxy = bus.get_object(
SESSION_INTERFACE_NAME,
SESSION_OBJECT_PATH_BLUETOOTH)
self.interface = dbus.Interface(proxy, SESSION_INTERFACE_NAME)
success = True
except:
retry = retry - 1
if retry == 0:
print unicode(_("Bluetooth control: unable to connect to session service!"), "utf-8")
sys.exit(1)
def __is_available(self):
return self.interface.IsAvailable()
def __on(self):
return self.interface.Enable(self.use_notify)
def __off(self):
return self.interface.Disable(self.use_notify)
def __toggle(self):
return self.interface.Toggle(self.use_notify)
def __status(self):
return self.interface.IsEnabled(self.use_notify)
def apply(self):
if self.option is None:
return
if not self.__is_available():
if not quiet:
print BLUETOOTH_NOT_AVAILABLE
self.__status() # needed to show notification
return
if self.option == "on":
result = self.__on()
if not quiet:
if result:
print BLUETOOTH_ENABLED
else:
print BLUETOOTH_ENABLING_ERROR
if self.option == "off":
result = self.__off()
if not quiet:
if result:
print BLUETOOTH_DISABLED
else:
print BLUETOOTH_DISABLING_ERROR
if self.option == "toggle":
result = self.__toggle()
if not quiet:
if result:
                    # Temporarily disable notifications
n = self.use_notify
self.use_notify = False
status = self.__status()
self.use_notify = n
# Notification re-enabled
if status:
print BLUETOOTH_ENABLED
else:
print BLUETOOTH_DISABLED
else:
print BLUETOOTH_TOGGLING_ERROR
if self.option == "hotkey":
from time import sleep
from subprocess import Popen, PIPE
tempfiles = ".samsung-tools-bluetooth-" + str(os.getuid()) + "-"
tempfile = "/tmp/" + tempfiles + str(os.getpid())
toggle = True
try:
ls = Popen(['ls /tmp/' + tempfiles + '*'],
stdout=PIPE, stderr=PIPE, shell=True)
if len(ls.communicate()[0]) != 0:
toggle = False
except:
pass
if toggle:
Bluetooth("toggle", self.use_notify).apply()
try:
file = open(tempfile, "w").close() # create temp file
except:
pass
sleep(0.5)
try:
os.remove(tempfile)
except:
pass
if self.option == "status":
result = self.__status()
if not quiet:
if result:
print BLUETOOTH_STATUS_ENABLED
else:
print BLUETOOTH_STATUS_DISABLED
class Cpu():
def __init__(self, option, use_notify=False):
self.option = option
self.use_notify = use_notify
success = False
retry = 3
while retry > 0 and not success:
try:
bus = dbus.SessionBus()
proxy = bus.get_object(
SESSION_INTERFACE_NAME,
SESSION_OBJECT_PATH_CPU)
self.interface = dbus.Interface(proxy, SESSION_INTERFACE_NAME)
success = True
except:
retry = retry - 1
if retry == 0:
print unicode(_("CPU fan control: unable to connect to session service!"), "utf-8")
sys.exit(1)
def __is_temperature_available(self):
return self.interface.IsTemperatureAvailable()
def __is_fan_available(self):
return self.interface.IsFanAvailable()
def __temp(self):
return self.interface.GetTemperature()
def __normal(self):
return self.interface.SetFanNormal(self.use_notify)
def __silent(self):
return self.interface.SetFanSilent(self.use_notify)
def __overclock(self):
return self.interface.SetFanOverclock(self.use_notify)
def __cycle(self):
return self.interface.Cycle(self.use_notify)
def __status(self):
return self.interface.Status(self.use_notify)
def apply(self):
if self.option is None:
return
if self.__is_temperature_available() and self.option != "hotkey" and not quiet:
print CPU_TEMPERATURE + " " + self.__temp() + unicode(" °C", "utf8")
if not self.__is_fan_available():
if not quiet:
print FAN_NOT_AVAILABLE
self.__status() # needed to show notification
return
if self.option == "normal":
result = self.__normal()
if not quiet:
if result:
print FAN_STATUS_NORMAL
else:
print FAN_SWITCHING_ERROR
if self.option == "silent":
result = self.__silent()
if not quiet:
if result:
print FAN_STATUS_SILENT
else:
print FAN_SWITCHING_ERROR
if self.option == "overclock":
result = self.__overclock()
if not quiet:
if result:
print FAN_STATUS_OVERCLOCK
else:
print FAN_SWITCHING_ERROR
if self.option == "cycle":
result = self.__cycle()
if not quiet:
if result:
                    # Temporarily disable notifications
n = self.use_notify
self.use_notify = False
mode = self.__status()
self.use_notify = n
# Notification re-enabled
if mode == 0:
print FAN_STATUS_NORMAL
if mode == 1:
print FAN_STATUS_SILENT
if mode == 2:
print FAN_STATUS_OVERCLOCK
if mode == 3:
print FAN_STATUS_ERROR
else:
print FAN_SWITCHING_ERROR
if self.option == "hotkey":
from time import sleep
from subprocess import Popen, PIPE
tempfiles = ".samsung-tools-cpu-" + str(os.getuid()) + "-"
tempfile = "/tmp/" + tempfiles + str(os.getpid())
hotkey = True
try:
ls = Popen(['ls /tmp/' + tempfiles + '*'],
stdout=PIPE, stderr=PIPE, shell=True)
if len(ls.communicate()[0]) != 0:
hotkey = False
except:
pass
if hotkey:
Cpu("hotkey2", self.use_notify).apply()
try:
file = open(tempfile, "w").close() # create temp file
except:
pass
sleep(0.5)
try:
os.remove(tempfile)
except:
pass
if self.option == "hotkey2":
from time import sleep
from subprocess import Popen, PIPE
tempfiles = ".samsung-tools-cpufan-" + str(os.getuid()) + "-"
tempfile = "/tmp/" + tempfiles + str(os.getpid())
action = "status"
try:
ls = Popen(['ls /tmp/' + tempfiles + '*'],
stdout=PIPE, stderr=PIPE, shell=True)
if len(ls.communicate()[0]) != 0:
action = "cycle"
except:
pass
Cpu(action, self.use_notify).apply()
try:
file = open(tempfile, "w").close() # create temp file
except:
pass
sleep(9.5)
try:
os.remove(tempfile)
except:
pass
if self.option == "status":
result = self.__status()
if not quiet:
if result == 0:
print FAN_STATUS_NORMAL
if result == 1:
print FAN_STATUS_SILENT
if result == 2:
print FAN_STATUS_OVERCLOCK
if result == 3:
print FAN_STATUS_ERROR
class Webcam():
def __init__(self, option, use_notify=False):
self.option = option
self.use_notify = use_notify
success = False
retry = 3
while retry > 0 and not success:
try:
bus = dbus.SessionBus()
proxy = bus.get_object(
SESSION_INTERFACE_NAME,
SESSION_OBJECT_PATH_WEBCAM)
self.interface = dbus.Interface(proxy, SESSION_INTERFACE_NAME)
success = True
except:
retry = retry - 1
if retry == 0:
print unicode(_("Webcam control: unable to connect to session service!"), "utf-8")
sys.exit(1)
def __is_available(self):
return self.interface.IsAvailable()
def __on(self):
return self.interface.Enable(self.use_notify)
def __off(self):
return self.interface.Disable(self.use_notify)
def __toggle(self):
return self.interface.Toggle(self.use_notify)
def __status(self):
return self.interface.IsEnabled(self.use_notify)
def apply(self):
if self.option is None:
return
if not self.__is_available():
if not quiet:
print WEBCAM_NOT_AVAILABLE
self.__status() # needed to show notification
return
if self.option == "on":
result = self.__on()
if not quiet:
if result:
print WEBCAM_ENABLED
else:
print WEBCAM_ENABLING_ERROR
if self.option == "off":
result = self.__off()
if not quiet:
if result:
print WEBCAM_DISABLED
else:
print WEBCAM_DISABLING_ERROR
if self.option == "toggle":
result = self.__toggle()
if not quiet:
if result:
                    # Temporarily disable notifications
n = self.use_notify
self.use_notify = False
status = self.__status()
self.use_notify = n
# Notification re-enabled
if status:
print WEBCAM_ENABLED
else:
print WEBCAM_DISABLED
else:
print WEBCAM_TOGGLING_ERROR
if self.option == "hotkey":
from time import sleep
from subprocess import Popen, PIPE
tempfiles = ".samsung-tools-webcam-" + str(os.getuid()) + "-"
tempfile = "/tmp/" + tempfiles + str(os.getpid())
toggle = True
try:
ls = Popen(['ls /tmp/' + tempfiles + '*'],
stdout=PIPE, stderr=PIPE, shell=True)
if len(ls.communicate()[0]) != 0:
toggle = False
except:
pass
if toggle:
Webcam("toggle", self.use_notify).apply()
try:
file = open(tempfile, "w").close() # create temp file
except:
pass
sleep(0.5)
try:
os.remove(tempfile)
except:
pass
if self.option == "status":
result = self.__status()
if not quiet:
if result:
print WEBCAM_STATUS_ENABLED
else:
print WEBCAM_STATUS_DISABLED
class Wireless():
def __init__(self, option, use_notify=False):
self.option = option
self.use_notify = use_notify
success = False
retry = 3
while retry > 0 and not success:
try:
bus = dbus.SessionBus()
proxy = bus.get_object(
SESSION_INTERFACE_NAME,
SESSION_OBJECT_PATH_WIRELESS)
self.interface = dbus.Interface(proxy, SESSION_INTERFACE_NAME)
success = True
except:
retry = retry - 1
if retry == 0:
print unicode(_("Wireless control: unable to connect to session service!"), "utf-8")
sys.exit(1)
def __is_available(self):
return self.interface.IsAvailable()
def __on(self):
return self.interface.Enable(self.use_notify)
def __off(self):
return self.interface.Disable(self.use_notify)
def __toggle(self):
return self.interface.Toggle(self.use_notify)
def __status(self):
return self.interface.IsEnabled(self.use_notify)
def apply(self):
if self.option is None:
return
if not self.__is_available():
if not quiet:
print WIRELESS_NOT_AVAILABLE
self.__status() # needed to show notification
return
if self.option == "on":
result = self.__on()
if not quiet:
if result:
print WIRELESS_ENABLED
else:
print WIRELESS_ENABLING_ERROR
if self.option == "off":
result = self.__off()
if not quiet:
if result:
print WIRELESS_DISABLED
else:
print WIRELESS_DISABLING_ERROR
if self.option == "toggle":
result = self.__toggle()
if not quiet:
if result:
                    # Temporarily disable notifications
n = self.use_notify
self.use_notify = False
status = self.__status()
self.use_notify = n
# Notification re-enabled
if status:
print WIRELESS_ENABLED
else:
print WIRELESS_DISABLED
else:
print WIRELESS_TOGGLING_ERROR
if self.option == "hotkey":
from time import sleep
from subprocess import Popen, PIPE
tempfiles = ".samsung-tools-wireless-" + str(os.getuid()) + "-"
tempfile = "/tmp/" + tempfiles + str(os.getpid())
toggle = True
try:
ls = Popen(['ls /tmp/' + tempfiles + '*'],
stdout=PIPE, stderr=PIPE, shell=True)
if len(ls.communicate()[0]) != 0:
toggle = False
except:
pass
if toggle:
Wireless("toggle", self.use_notify).apply()
try:
file = open(tempfile, "w").close() # create temp file
except:
pass
sleep(0.5)
try:
os.remove(tempfile)
except:
pass
if self.option == "status":
result = self.__status()
if not quiet:
if result:
print WIRELESS_STATUS_ENABLED
else:
print WIRELESS_STATUS_DISABLED
def usage(option=None, opt=None, value=None, parser=None):
print "Samsung Tools", APP_VERSION, "-",
print unicode(_("Command Line Utility"), "utf-8")
print
print unicode(_("Usage: %s <interface> <option> ...") % os.path.basename(sys.argv[0]), "utf-8")
print
print unicode(_("Backlight:"), "utf-8")
print "\t" + unicode(_("Interface"), "utf-8") + ":\t-b | --backlight"
print "\t" + unicode(_("Options"), "utf-8") + ":\ton | off | toggle | hotkey | status"
print unicode(_("Bluetooth:"), "utf-8")
print "\t" + unicode(_("Interface"), "utf-8") + ":\t-B | --bluetooth"
print "\t" + unicode(_("Options"), "utf-8") + ":\ton | off | toggle | hotkey | status"
print unicode(_("CPU fan:"), "utf-8")
print "\t" + unicode(_("Interface"), "utf-8") + ":\t-c | --cpu"
print "\t" + unicode(_("Options"), "utf-8") + ":\tnormal | silent | overclock | cycle | hotkey | status"
print unicode(_("Webcam:"), "utf-8")
print "\t" + unicode(_("Interface"), "utf-8") + ":\t-w | --webcam"
print "\t" + unicode(_("Options"), "utf-8") + ":\ton | off | toggle | hotkey | status"
print unicode(_("Wireless:"), "utf-8")
print "\t" + unicode(_("Interface"), "utf-8") + ":\t-W | --wireless"
print "\t" + unicode(_("Options"), "utf-8") + ":\ton | off | toggle | hotkey | status"
print
print unicode(_("Other options:"), "utf-8")
print " -a | --status\t\t" + unicode(_("Show status for all devices."), "utf-8")
print " -n | --show-notify\t" + unicode(_("Show graphical notifications."), "utf-8")
print " -q | --quiet\t\t" + unicode(_("Do not print messages on standard output."), "utf-8")
print " -i | --interface\t" + unicode(_("Show the control interface currently in use."), "utf-8")
print " -s | --stop-session\t" + unicode(_("Stop the session service."), "utf-8")
print " -S | --stop-system\t" + unicode(_("Stop the system service."), "utf-8")
print
print unicode(_("Examples of use:"), "utf-8")
print unicode(_(" - Toggle backlight:"), "utf-8")
print " %s --backlight toggle" % os.path.basename(sys.argv[0])
print
print unicode(_(" - Toggle wireless and set CPU fan mode to 'silent':"), "utf-8")
print " %s --wireless toggle --cpu silent" % os.path.basename(sys.argv[0])
print
print unicode(_(" - Disable bluetooth, webcam and wireless:"), "utf-8")
print " %s -B off -w off -W off" % os.path.basename(sys.argv[0])
print
print unicode(_("For more informations, visit the 'Linux On My Samsung' forum:"), "utf-8")
print
print " - http://loms.voria.org"
print
print "Copyleft by: Fortunato Ventre - vorione@gmail.com"
print unicode(_("Released under GPLv3 license"), "utf-8") + "."
sys.exit(0)
def main():
if len(sys.argv) == 1:
print unicode(_("No action(s) specified."), "utf-8")
print unicode(_("Use --help for instructions."), "utf-8")
sys.exit(1)
from optparse import OptionParser
usage_string = unicode(_("Usage: %s <interface> <option> ...") %
os.path.basename(sys.argv[0]), "utf-8")
parser = OptionParser(usage_string, add_help_option=False)
parser.add_option('-h', '--help',
action="callback",
callback=usage)
parser.add_option('-b', '--backlight',
dest="backlight",
type="choice",
choices=['on', 'off', 'toggle', 'hotkey', 'status'])
parser.add_option('-B', '--bluetooth',
dest="bluetooth",
type="choice",
choices=['on', 'off', 'toggle', 'hotkey', 'status'])
parser.add_option(
'-c',
'--cpu',
dest="cpu",
type="choice",
choices=[
'normal',
'silent',
'overclock',
'cycle',
'hotkey',
'status'])
parser.add_option('-w', '--webcam',
dest="webcam",
type="choice",
choices=['on', 'off', 'toggle', 'hotkey', 'status'])
parser.add_option('-W', '--wireless',
dest="wireless",
type="choice",
choices=['on', 'off', 'toggle', 'hotkey', 'status'])
parser.add_option('-n', '--show-notify',
action="store_true",
dest="show_notify",
default=False)
parser.add_option('-q', '--quiet',
action="store_true",
dest="quiet",
default=False)
parser.add_option('-i', '--interface',
action="store_true",
dest="interface",
default=False)
parser.add_option('-s', '--stop-session',
action="store_true",
dest="stopsession",
default=False)
parser.add_option('-S', '--stop-system',
action="store_true",
dest="stopsystem",
default=False)
parser.add_option('-a', '--status',
action="store_true",
dest="status",
default=False)
(options, args) = parser.parse_args()
global quiet
quiet = options.quiet
if options.status:
options.backlight = "status"
options.bluetooth = "status"
options.cpu = "status"
options.webcam = "status"
options.wireless = "status"
if os.getuid() == 0:
print unicode(_("This program is intended to be used only by non-privileged users."), "utf-8")
sys.exit(1)
if len(args) != 0:
print unicode(_("Wrong argument(s)."), "utf-8")
print unicode(_("Use --help for instructions."), "utf-8")
sys.exit(1)
# Check if the dbus daemon is running. If not, start it.
if "DBUS_SESSION_BUS_ADDRESS" not in os.environ:
try:
from subprocess import Popen, PIPE, STDOUT
p = Popen(
'dbus-launch --exit-with-session',
shell=True,
stdout=PIPE,
stderr=STDOUT)
for var in p.stdout:
sp = var.split('=', 1)
os.environ[sp[0]] = sp[1][:-1]
except:
print unicode(_("Unable to start a DBus daemon!"), "utf-8")
sys.exit(1)
Backlight(options.backlight).apply()
Bluetooth(options.bluetooth, options.show_notify).apply()
Cpu(options.cpu, options.show_notify).apply()
Webcam(options.webcam, options.show_notify).apply()
Wireless(options.wireless, options.show_notify).apply()
if options.interface and not quiet:
try:
bus = dbus.SystemBus()
proxy = bus.get_object(
SYSTEM_INTERFACE_NAME,
SYSTEM_OBJECT_PATH_OPTIONS)
opts = dbus.Interface(proxy, SYSTEM_INTERFACE_NAME)
ci = opts.GetControlInterface()
print unicode(_("Control interface:"), "utf-8"),
if ci == "esdm":
print "easy-slow-down-manager"
elif ci == "sl":
print "samsung-laptop"
else:
print "-"
except:
print unicode(_("Control interface: unable to connect to system service!"), "utf-8")
pass
if options.stopsession:
try:
bus = dbus.SessionBus()
proxy = bus.get_object(
SESSION_INTERFACE_NAME,
SESSION_OBJECT_PATH_GENERAL)
general = dbus.Interface(proxy, SESSION_INTERFACE_NAME)
general.Exit()
if not quiet:
print unicode(_("Session service stopped"), "utf-8")
except:
if not quiet:
print unicode(_("Cannot stop session service"), "utf-8")
pass
if options.stopsystem:
try:
bus = dbus.SystemBus()
proxy = bus.get_object(
SYSTEM_INTERFACE_NAME,
SYSTEM_OBJECT_PATH_GENERAL)
general = dbus.Interface(proxy, SYSTEM_INTERFACE_NAME)
general.Exit()
if not quiet:
print unicode(_("System service stopped"), "utf-8")
except:
if not quiet:
print unicode(_("Cannot stop system service"), "utf-8")
pass
if __name__ == "__main__":
main()
|
voria/samsung-tools
|
samsung-tools.py
|
Python
|
gpl-3.0
| 28,993
|
[
"VisIt"
] |
83b7d49c55b793cc9381b4ba86a1d65605bff6484e33deaef4d247e75bb8e343
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
res = 6
plane = vtk.vtkPlaneSource()
plane.SetResolution(res,res)
colors = vtk.vtkElevationFilter()
colors.SetInputConnection(plane.GetOutputPort())
colors.SetLowPoint(-0.25,-0.25,-0.25)
colors.SetHighPoint(0.25,0.25,0.25)
planeMapper = vtk.vtkPolyDataMapper()
planeMapper.SetInputConnection(colors.GetOutputPort())
planeActor = vtk.vtkActor()
planeActor.SetMapper(planeMapper)
planeActor.GetProperty().SetRepresentationToWireframe()
# create simple poly data so we can apply glyph
squad = vtk.vtkSuperquadricSource()
squadColors = vtk.vtkElevationFilter()
squadColors.SetInputConnection(squad.GetOutputPort())
squadColors.SetLowPoint(-0.25,-0.25,-0.25)
squadColors.SetHighPoint(0.25,0.25,0.25)
squadCaster = vtk.vtkCastToConcrete()
squadCaster.SetInputConnection(squadColors.GetOutputPort())
squadTransform = vtk.vtkTransform()
transformSquad = vtk.vtkTransformPolyDataFilter()
transformSquad.SetInputConnection(squadColors.GetOutputPort())
transformSquad.SetTransform(squadTransform)
transformSquad.Update()
# procedure for generating glyphs
def Glyph (__vtk__temp0=0,__vtk__temp1=0):
    # Called by vtkProgrammableGlyphFilter once per input point: place the
    # shared superquadric at the point and drive its shape from the point's
    # x/y coordinates (toroidal on the plane's diagonal, ellipsoidal elsewhere).
    global res
    ptId = glypher.GetPointId()
    pd = glypher.GetPointData()
    xyz = glypher.GetPoint()
    x = xyz[0]
    y = xyz[1]
    length = glypher.GetInput(0).GetLength()
    scale = length / (2.0 * res)
    squadTransform.Identity()
    if x == y:
        squad.ToroidalOn()
        squadTransform.Translate(xyz)
        squadTransform.RotateX(90)
    else:
        squadTransform.Translate(xyz)
        squad.ToroidalOff()
    squadTransform.Scale(scale, scale, scale)
    squad.SetPhiRoundness(abs(x) * 5.0)
    squad.SetThetaRoundness(abs(y) * 5.0)
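# EDITORIAL NOTE (not part of the original test): vtkProgrammableGlyphFilter
# invokes the function registered with SetGlyphMethod once for every input
# point; the function may reconfigure the source pipeline (here the shared
# superquadric and its transform), and the filter then copies the source
# output as the glyph placed at that point.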
glypher = vtk.vtkProgrammableGlyphFilter()
glypher.SetInputConnection(colors.GetOutputPort())
glypher.SetSourceConnection(transformSquad.GetOutputPort())
glypher.SetGlyphMethod(Glyph)
glypher.SetColorModeToColorBySource()
glyphMapper = vtk.vtkPolyDataMapper()
glyphMapper.SetInputConnection(glypher.GetOutputPort())
glyphActor = vtk.vtkActor()
glyphActor.SetMapper(glyphMapper)
# Create the rendering stuff
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(planeActor)
ren1.AddActor(glyphActor)
ren1.SetBackground(1,1,1)
renWin.SetSize(450,450)
renWin.Render()
ren1.GetActiveCamera().Zoom(1.3)
# Get handles to some useful objects
#
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Filters/Programmable/Testing/Python/progGlyphsBySource.py
|
Python
|
bsd-3-clause
| 2,931
|
[
"VTK"
] |
63eefaeb56879efffe889f5c463b1091600b7879d2d76985e125c70493253365
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGostats(RPackage):
"""A set of tools for interacting with GO and microarray data.
A variety of basic manipulation tools for graphs, hypothesis
testing and other simple calculations."""
homepage = "https://www.bioconductor.org/packages/GOstats/"
url = "https://git.bioconductor.org/packages/GOstats"
version('2.42.0', git='https://git.bioconductor.org/packages/GOstats', commit='8b29709064a3b66cf1d963b2be0c996fb48c873e')
depends_on('r@3.4.1:3.4.9', when='@2.42.0')
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-category', type=('build', 'run'))
depends_on('r-go-db', type=('build', 'run'))
depends_on('r-graph', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-rbgl', type=('build', 'run'))
depends_on('r-annotate', type=('build', 'run'))
depends_on('r-annotationforge', type=('build', 'run'))
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-gostats/package.py
|
Python
|
lgpl-2.1
| 2,181
|
[
"Bioconductor"
] |
af76ca9b4add81b37edc2028665da41dcfb9a861ab316ad33dcfff5f944c24e4
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Ruggero Marchei <ruggero.marchei@daemonzone.net>
# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
# Copyright: (c) 2016-2017, Konstantin Shalygin <k0ste@k0ste.ru>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: find
author: Brian Coca (@bcoca)
version_added: "2.0"
short_description: Return a list of files based on specific criteria
description:
- Return a list of files based on specific criteria. Multiple criteria are AND'd together.
- For Windows targets, use the M(win_find) module instead.
options:
age:
description:
- Select files whose age is equal to or greater than the specified time.
- Use a negative age to find files equal to or less than the specified time.
- You can choose seconds, minutes, hours, days, or weeks by specifying the
first letter of any of those words (e.g., "1w").
type: str
patterns:
default: '*'
description:
- One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
- The patterns restrict the list of files to be returned to those whose basenames match at
least one of the patterns specified. Multiple patterns can be specified using a list.
- The pattern is matched against the file base name, excluding the directory.
- When using regexen, the pattern MUST match the ENTIRE file name, not just parts of it. So
if you are looking to match all files ending in .default, you'd need to use '.*\.default'
as a regexp and not just '\.default'.
- This parameter expects a list, which can be either comma separated or YAML. If any of the
patterns contain a comma, make sure to put them in a list to avoid splitting the patterns
in undesirable ways.
type: list
aliases: [ pattern ]
elements: str
excludes:
description:
- One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
- Items whose basenames match an C(excludes) pattern are culled from C(patterns) matches.
Multiple patterns can be specified using a list.
type: list
aliases: [ exclude ]
version_added: "2.5"
elements: str
contains:
description:
- A regular expression or pattern which should be matched against the file content.
type: str
paths:
description:
- List of paths of directories to search. All paths must be fully qualified.
type: list
required: true
aliases: [ name, path ]
elements: str
file_type:
description:
- Type of file to select.
- The 'link' and 'any' choices were added in Ansible 2.3.
type: str
choices: [ any, directory, file, link ]
default: file
recurse:
description:
- If target is a directory, recursively descend into the directory looking for files.
type: bool
default: no
size:
description:
- Select files whose size is equal to or greater than the specified size.
- Use a negative size to find files equal to or less than the specified size.
- Unqualified values are in bytes but b, k, m, g, and t can be appended to specify
bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
- Size is not evaluated for directories.
type: str
age_stamp:
description:
- Choose the file property against which we compare age.
type: str
choices: [ atime, ctime, mtime ]
default: mtime
hidden:
description:
- Set this to C(yes) to include hidden files, otherwise they will be ignored.
type: bool
default: no
follow:
description:
- Set this to C(yes) to follow symlinks in path for systems with python 2.6+.
type: bool
default: no
get_checksum:
description:
- Set this to C(yes) to retrieve a file's SHA1 checksum.
type: bool
default: no
use_regex:
description:
- If C(no), the patterns are file globs (shell).
- If C(yes), they are python regexes.
type: bool
default: no
depth:
description:
- Set the maximum number of levels to descend into.
- Setting recurse to C(no) will override this value, which is effectively depth 1.
- Default is unlimited depth.
type: int
version_added: "2.6"
seealso:
- module: win_find
'''
EXAMPLES = r'''
- name: Recursively find /tmp files older than 2 days
find:
paths: /tmp
age: 2d
recurse: yes
- name: Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
find:
paths: /tmp
age: 4w
size: 1m
recurse: yes
- name: Recursively find /var/tmp files with last access time greater than 3600 seconds
find:
paths: /var/tmp
age: 3600
age_stamp: atime
recurse: yes
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
find:
paths: /var/log
patterns: '*.old,*.log.gz'
size: 10m
# Note that YAML double quotes require escaping backslashes but yaml single quotes do not.
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
find:
paths: /var/log
patterns: "^.*?\\.(?:old|log\\.gz)$"
size: 10m
use_regex: yes
- name: Find /var/log all directories, exclude nginx and mysql
find:
paths: /var/log
recurse: no
file_type: directory
excludes: 'nginx,mysql'
# When using patterns that contain a comma, make sure they are formatted as lists to avoid splitting the pattern
- name: Use a single pattern that contains a comma formatted as a list
find:
paths: /var/log
file_type: file
use_regex: yes
patterns: ['^_[0-9]{2,4}_.*.log$']
- name: Use multiple patterns that contain a comma formatted as a YAML list
find:
paths: /var/log
file_type: file
use_regex: yes
patterns:
- '^_[0-9]{2,4}_.*.log$'
- '^[a-z]{1,5}_.*log$'
'''
RETURN = r'''
files:
description: All matches found with the specified criteria (see stat module for full output of each dictionary)
returned: success
type: list
sample: [
{ path: "/var/tmp/test1",
mode: "0644",
"...": "...",
checksum: 16fac7be61a6e4591a33ef4b729c5c3302307523
},
{ path: "/var/tmp/test2",
"...": "..."
},
]
matched:
description: Number of matches
returned: success
type: int
sample: 14
examined:
description: Number of filesystem objects looked at
returned: success
type: int
sample: 34
'''
import fnmatch
import grp
import os
import pwd
import re
import stat
import time
from ansible.module_utils.basic import AnsibleModule
def pfilter(f, patterns=None, excludes=None, use_regex=False):
'''filter using glob patterns'''
if patterns is None and excludes is None:
return True
if use_regex:
if patterns and excludes is None:
for p in patterns:
r = re.compile(p)
if r.match(f):
return True
elif patterns and excludes:
for p in patterns:
r = re.compile(p)
if r.match(f):
for e in excludes:
r = re.compile(e)
if r.match(f):
return False
return True
else:
if patterns and excludes is None:
for p in patterns:
if fnmatch.fnmatch(f, p):
return True
elif patterns and excludes:
for p in patterns:
if fnmatch.fnmatch(f, p):
for e in excludes:
if fnmatch.fnmatch(f, e):
return False
return True
return False
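# EDITORIAL EXAMPLE (not part of the module): a minimal sketch of how pfilter
# combines include and exclude patterns in glob mode; the file names below are
# invented for illustration only.
def _pfilter_example():
    keep = pfilter('error.log', patterns=['*.log'], excludes=['debug.*'])
    cull = pfilter('debug.log', patterns=['*.log'], excludes=['debug.*'])
    return keep, cull  # -> (True, False)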
def agefilter(st, now, age, timestamp):
'''filter files older than age'''
if age is None:
return True
elif age >= 0 and now - st.__getattribute__("st_%s" % timestamp) >= abs(age):
return True
elif age < 0 and now - st.__getattribute__("st_%s" % timestamp) <= abs(age):
return True
return False
def sizefilter(st, size):
'''filter files greater than size'''
if size is None:
return True
elif size >= 0 and st.st_size >= abs(size):
return True
elif size < 0 and st.st_size <= abs(size):
return True
return False
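# EDITORIAL NOTE (not part of the module): agefilter and sizefilter share a
# sign convention -- a positive threshold keeps items at least that old/large,
# a negative threshold keeps items at most abs(threshold) old/large, and None
# disables the check (matching the age/size option documentation above).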
def contentfilter(fsname, pattern):
"""
Filter files which contain the given expression
:arg fsname: Filename to scan for lines matching a pattern
:arg pattern: Pattern to look for inside of line
:rtype: bool
:returns: True if one of the lines in fsname matches the pattern. Otherwise False
"""
if pattern is None:
return True
prog = re.compile(pattern)
try:
with open(fsname) as f:
for line in f:
if prog.match(line):
return True
except Exception:
pass
return False
def statinfo(st):
pw_name = ""
gr_name = ""
try: # user data
pw_name = pwd.getpwuid(st.st_uid).pw_name
except Exception:
pass
try: # group data
gr_name = grp.getgrgid(st.st_gid).gr_name
except Exception:
pass
return {
'mode': "%04o" % stat.S_IMODE(st.st_mode),
'isdir': stat.S_ISDIR(st.st_mode),
'ischr': stat.S_ISCHR(st.st_mode),
'isblk': stat.S_ISBLK(st.st_mode),
'isreg': stat.S_ISREG(st.st_mode),
'isfifo': stat.S_ISFIFO(st.st_mode),
'islnk': stat.S_ISLNK(st.st_mode),
'issock': stat.S_ISSOCK(st.st_mode),
'uid': st.st_uid,
'gid': st.st_gid,
'size': st.st_size,
'inode': st.st_ino,
'dev': st.st_dev,
'nlink': st.st_nlink,
'atime': st.st_atime,
'mtime': st.st_mtime,
'ctime': st.st_ctime,
'gr_name': gr_name,
'pw_name': pw_name,
'wusr': bool(st.st_mode & stat.S_IWUSR),
'rusr': bool(st.st_mode & stat.S_IRUSR),
'xusr': bool(st.st_mode & stat.S_IXUSR),
'wgrp': bool(st.st_mode & stat.S_IWGRP),
'rgrp': bool(st.st_mode & stat.S_IRGRP),
'xgrp': bool(st.st_mode & stat.S_IXGRP),
'woth': bool(st.st_mode & stat.S_IWOTH),
'roth': bool(st.st_mode & stat.S_IROTH),
'xoth': bool(st.st_mode & stat.S_IXOTH),
'isuid': bool(st.st_mode & stat.S_ISUID),
'isgid': bool(st.st_mode & stat.S_ISGID),
}
def main():
module = AnsibleModule(
argument_spec=dict(
paths=dict(type='list', required=True, aliases=['name', 'path'], elements='str'),
patterns=dict(type='list', default=['*'], aliases=['pattern'], elements='str'),
excludes=dict(type='list', aliases=['exclude'], elements='str'),
contains=dict(type='str'),
file_type=dict(type='str', default="file", choices=['any', 'directory', 'file', 'link']),
age=dict(type='str'),
age_stamp=dict(type='str', default="mtime", choices=['atime', 'ctime', 'mtime']),
size=dict(type='str'),
recurse=dict(type='bool', default=False),
hidden=dict(type='bool', default=False),
follow=dict(type='bool', default=False),
get_checksum=dict(type='bool', default=False),
use_regex=dict(type='bool', default=False),
depth=dict(type='int'),
),
supports_check_mode=True,
)
params = module.params
filelist = []
if params['age'] is None:
age = None
else:
# convert age to seconds:
m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
if m:
age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
else:
module.fail_json(age=params['age'], msg="failed to process age")
if params['size'] is None:
size = None
else:
# convert size to bytes:
m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
if m:
size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
else:
module.fail_json(size=params['size'], msg="failed to process size")
now = time.time()
msg = ''
looked = 0
for npath in params['paths']:
npath = os.path.expanduser(os.path.expandvars(npath))
if os.path.isdir(npath):
for root, dirs, files in os.walk(npath, followlinks=params['follow']):
looked = looked + len(files) + len(dirs)
for fsobj in (files + dirs):
fsname = os.path.normpath(os.path.join(root, fsobj))
if params['depth']:
wpath = npath.rstrip(os.path.sep) + os.path.sep
depth = int(fsname.count(os.path.sep)) - int(wpath.count(os.path.sep)) + 1
if depth > params['depth']:
continue
if os.path.basename(fsname).startswith('.') and not params['hidden']:
continue
try:
st = os.lstat(fsname)
except Exception:
msg += "%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
continue
r = {'path': fsname}
if params['file_type'] == 'any':
if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
r.update(statinfo(st))
if stat.S_ISREG(st.st_mode) and params['get_checksum']:
r['checksum'] = module.sha1(fsname)
filelist.append(r)
elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
r.update(statinfo(st))
filelist.append(r)
elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and \
agefilter(st, now, age, params['age_stamp']) and \
sizefilter(st, size) and contentfilter(fsname, params['contains']):
r.update(statinfo(st))
if params['get_checksum']:
r['checksum'] = module.sha1(fsname)
filelist.append(r)
elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
r.update(statinfo(st))
filelist.append(r)
if not params['recurse']:
break
else:
msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath
matched = len(filelist)
module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)
if __name__ == '__main__':
main()
| indrajitr/ansible | lib/ansible/modules/find.py | Python | gpl-3.0 | 16,313 | ["Brian"] | 0ccef32e9fa6c1c6a7acd02f792f51b37090a21aabf6910a4cffd8e4071d1d09 |
# Author: Travis Oliphant, 2002
#
# Further updates and enhancements by many SciPy developers.
#
from __future__ import division, print_function, absolute_import
import math
import warnings
from collections import namedtuple
import numpy as np
from numpy import (isscalar, r_, log, sum, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d,
sqrt, ceil, floor, array, poly1d, compress,
pi, exp, ravel, angle, count_nonzero)
from numpy.testing.decorators import setastest
from scipy._lib.six import string_types
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
]
Mean = namedtuple('Mean', ('statistic', 'minmax'))
Variance = namedtuple('Variance', ('statistic', 'minmax'))
Std_dev = namedtuple('Std_dev', ('statistic', 'minmax'))
def bayes_mvs(data, alpha=0.90):
"""
Bayesian confidence intervals for the mean, var, and std.
Parameters
----------
data : array_like
Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
Requires 2 or more data points.
alpha : float, optional
Probability that the returned confidence interval contains
the true parameter.
Returns
-------
mean_cntr, var_cntr, std_cntr : tuple
The three results are for the mean, variance and standard deviation,
respectively. Each result is a tuple of the form::
(center, (lower, upper))
with `center` the mean of the conditional pdf of the value given the
data, and `(lower, upper)` a confidence interval, centered on the
median, containing the estimate to a probability ``alpha``.
Notes
-----
Each tuple of mean, variance, and standard deviation estimates represent
the (center, (lower, upper)) with center the mean of the conditional pdf
of the value given the data and (lower, upper) is a confidence interval
centered on the median, containing the estimate to a probability
``alpha``.
Converts data to 1-D and assumes all data has the same mean and variance.
Uses Jeffrey's prior for variance and std.
    Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(data))``
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://hdl.handle.net/1877/438, 2006.
"""
m, v, s = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
% alpha)
m_res = Mean(m.mean(), m.interval(alpha))
v_res = Variance(v.mean(), v.interval(alpha))
s_res = Std_dev(s.mean(), s.interval(alpha))
return m_res, v_res, s_res
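# --- Illustrative sketch (not part of the original SciPy source) -------------
# bayes_mvs() is a thin wrapper around mvsdist(); the equivalence stated in the
# docstring can be checked directly (the helper name is hypothetical):
def _demo_bayes_mvs():
    data = [6, 9, 12, 7, 8, 8, 13]
    mean_res, var_res, std_res = bayes_mvs(data, alpha=0.9)
    m, v, s = mvsdist(data)
    assert np.allclose(mean_res.statistic, m.mean())
    assert np.allclose(mean_res.minmax, m.interval(0.9))
    assert np.allclose(var_res.statistic, v.mean())
    assert np.allclose(std_res.statistic, s.mean())
    return mean_res, var_res, std_res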
def mvsdist(data):
"""
'Frozen' distributions for mean, variance, and standard deviation of data.
Parameters
----------
data : array_like
Input array. Converted to 1-D using ravel.
Requires 2 or more data-points.
Returns
-------
mdist : "frozen" distribution object
Distribution object representing the mean of the data
vdist : "frozen" distribution object
Distribution object representing the variance of the data
sdist : "frozen" distribution object
Distribution object representing the standard deviation of the data
Notes
-----
    The return values from bayes_mvs(data) are equivalent to
``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
on the three distribution objects returned from this function will give
the same results that are returned from `bayes_mvs`.
Examples
--------
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.mvsdist(data)
We now have frozen distribution objects "mean", "var" and "std" that we can
examine:
>>> mean.mean()
9.0
>>> mean.interval(0.95)
(6.6120585482655692, 11.387941451734431)
>>> mean.std()
1.1952286093343936
"""
x = ravel(data)
n = len(x)
if n < 2:
raise ValueError("Need at least 2 data-points.")
xbar = x.mean()
C = x.var()
if n > 1000: # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C)
else:
nm1 = n - 1
fac = n * C / 2.
val = nm1 / 2.
mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1))
sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac))
vdist = distributions.invgamma(val, scale=fac)
return mdist, vdist, sdist
def kstat(data, n=2):
"""
Return the nth k-statistic (1<=n<=4 so far).
The nth k-statistic is the unique symmetric unbiased estimator of the nth
cumulant kappa_n.
Parameters
----------
data : array_like
Input array.
n : int, {1, 2, 3, 4}, optional
Default is equal to 2.
Returns
-------
kstat : float
The nth k-statistic.
See Also
--------
kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
Notes
-----
The cumulants are related to central moments but are specifically defined
using a power series expansion of the logarithm of the characteristic
function (which is the Fourier transform of the PDF).
In particular let phi(t) be the characteristic function, then::
        ln phi(t) = sum_{n=0}^{inf} kappa_n * (it)^n / n!
The first few cumulants (kappa_n) in terms of central moments (mu_n) are::
kappa_1 = mu_1
kappa_2 = mu_2
kappa_3 = mu_3
kappa_4 = mu_4 - 3*mu_2**2
kappa_5 = mu_5 - 10*mu_2 * mu_3
References
----------
http://mathworld.wolfram.com/k-Statistic.html
http://mathworld.wolfram.com/Cumulant.html
"""
if n > 4 or n < 1:
raise ValueError("k-statistics only supported for 1<=n<=4")
n = int(n)
S = zeros(n + 1, 'd')
data = ravel(data)
N = len(data)
for k in range(1, n + 1):
S[k] = sum(data**k, axis=0)
if n == 1:
return S[1] * 1.0/N
elif n == 2:
return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
elif n == 3:
return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
elif n == 4:
return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
(N*(N-1.0)*(N-2.0)*(N-3.0)))
else:
raise ValueError("Should not be here.")
def kstatvar(data, n=2):
"""
Returns an unbiased estimator of the variance of the k-statistic.
See `kstat` for more details of the k-statistic.
Parameters
----------
data : array_like
Input array.
n : int, {1, 2}, optional
Default is equal to 2.
Returns
-------
kstatvar : float
The nth k-statistic variance.
See Also
--------
kstat
"""
data = ravel(data)
N = len(data)
if n == 1:
return kstat(data, n=2) * 1.0/N
elif n == 2:
k2 = kstat(data, n=2)
k4 = kstat(data, n=4)
return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
else:
raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(x):
"""See Notes section of `probplot` for details."""
N = len(x)
osm_uniform = np.zeros(N, dtype=np.float64)
osm_uniform[-1] = 0.5**(1.0 / N)
osm_uniform[0] = 1 - osm_uniform[-1]
i = np.arange(2, N)
osm_uniform[1:-1] = (i - 0.3175) / (N + 0.365)
return osm_uniform
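# --- Illustrative sketch (not part of the original SciPy source) -------------
# The positions above are Filliben's estimate: 0.5**(1/N) for the largest
# order statistic, 1 - 0.5**(1/N) for the smallest, and (i - 0.3175)/(N + 0.365)
# in between. A small, hypothetical check for N = 5:
def _demo_order_statistic_medians():
    osm = _calc_uniform_order_statistic_medians(np.empty(5))
    assert np.isclose(osm[-1], 0.5 ** (1.0 / 5))
    assert np.isclose(osm[0], 1 - 0.5 ** (1.0 / 5))
    assert np.isclose(osm[2], (3 - 0.3175) / (5 + 0.365))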
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, string_types):
try:
dist = getattr(distributions, dist)
except AttributeError:
raise ValueError("%s is not a valid distribution name" % dist)
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
def _add_axis_labels_title(plot, xlabel, ylabel, title):
"""Helper function to add axes labels and a title to stats plots"""
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title(title)
plot.set_xlabel(xlabel)
plot.set_ylabel(ylabel)
else:
# matplotlib.pyplot module
plot.title(title)
plot.xlabel(xlabel)
plot.ylabel(ylabel)
except:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
def probplot(x, sparams=(), dist='norm', fit=True, plot=None):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
        quantiles = dist.ppf(val), where
        val = 0.5**(1/n),                  for i = n
              (i - 0.3175) / (n + 0.365),  for i = 2, ..., n-1
              1 - 0.5**(1/n),              for i = 1
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
    ...                    size=(nsample//2, 2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
_perform_fit = fit or (plot is not None)
if x.size == 0:
if _perform_fit:
return (x, x), (np.nan, np.nan, 0.0)
else:
return x, x
osm_uniform = _calc_uniform_order_statistic_medians(x)
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if _perform_fit:
# perform a linear least squares fit.
slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
_add_axis_labels_title(plot, xlabel='Quantiles',
ylabel='Ordered Values',
title='Probability Plot')
# Add R^2 value to the plot as text
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
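# --- Illustrative sketch (not part of the original SciPy source) -------------
# probplot() is usable without matplotlib: with plot=None it only returns the
# theoretical quantiles, the sorted data and the least-squares fit
# (the helper name, demo data and seed below are arbitrary):
def _demo_probplot():
    np.random.seed(0)
    x = np.random.normal(size=200)
    (osm, osr), (slope, intercept, r) = probplot(x, dist='norm', fit=True)
    # For roughly normal data, slope ~ 1, intercept ~ 0 and r close to 1.
    return slope, intercept, r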
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
"""Returns the shape parameter that maximizes the probability plot
correlation coefficient for the given data to a one-parameter
family of distributions.
See also ppcc_plot
"""
dist = _parse_dist_kw(dist)
osm_uniform = _calc_uniform_order_statistic_medians(x)
osr = sort(x)
# this function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation)
# and returns 1-r so that a minimization function maximizes the
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
"""
Calculate and optionally plot probability plot correlation coefficient.
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
distributions. It cannot be used for distributions without shape parameters
(like the normal distribution) or with multiple shape parameters.
By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A
Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed
distributions via an approximately normal one, and is therefore particularly
useful in practice.
Parameters
----------
x : array_like
Input array.
a, b: scalar
Lower and upper bounds of the shape parameter to use.
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
plot : object, optional
If given, plots PPCC against the shape parameter.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`a` to `b`).
Returns
-------
svals : ndarray
The shape values for which `ppcc` was calculated.
ppcc : ndarray
The calculated probability plot correlation coefficient values.
See also
--------
ppcc_max, probplot, boxcox_normplot, tukeylambda
References
----------
J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
First we generate some random data from a Tukey-Lambda distribution,
with shape parameter -0.7:
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234567)
>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter -0.7 used
above):
>>> fig = plt.figure(figsize=(12, 4))
>>> ax1 = fig.add_subplot(131)
>>> ax2 = fig.add_subplot(132)
>>> ax3 = fig.add_subplot(133)
>>> stats.probplot(x, plot=ax1)
>>> stats.boxcox_normplot(x, -5, 5, plot=ax2)
>>> stats.ppcc_plot(x, -5, 5, plot=ax3)
>>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value')
>>> plt.show()
"""
if b <= a:
raise ValueError("`b` has to be larger than `a`.")
svals = np.linspace(a, b, num=N)
ppcc = np.empty_like(svals)
for k, sval in enumerate(svals):
_, r2 = probplot(x, sval, dist=dist, fit=True)
ppcc[k] = r2[-1]
if plot is not None:
plot.plot(svals, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='Shape Values',
ylabel='Prob Plot Corr. Coef.',
title='(%s) PPCC Plot' % dist)
return svals, ppcc
def boxcox_llf(lmb, data):
r"""The boxcox log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Box-Cox transformation. See `boxcox` for details.
data : array_like
Data to calculate Box-Cox log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float or ndarray
Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
an array otherwise.
See Also
--------
boxcox, probplot, boxcox_normplot, boxcox_normmax
Notes
-----
The Box-Cox log-likelihood function is defined here as
.. math::
llf = (\lambda - 1) \sum_i(\log(x_i)) -
N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
where ``y`` is the Box-Cox transformed input data ``x``.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
>>> np.random.seed(1245)
Generate some random variates and calculate Box-Cox log-likelihood values
for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=np.float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.boxcox_llf(lmbda, x)
Also find the optimal lmbda value with `boxcox`:
>>> x_most_normal, lmbda_optimal = stats.boxcox(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Box-Cox log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `boxcox` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.boxcox(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title('$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
N = data.shape[0]
if N == 0:
return np.nan
y = boxcox(data, lmb)
y_mean = np.mean(y, axis=0)
llf = (lmb - 1) * np.sum(np.log(data), axis=0)
llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0))
return llf
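# --- Illustrative sketch (not part of the original SciPy source) -------------
# The lambda returned by boxcox() is expected to (approximately) maximize
# boxcox_llf(); the helper name, data and offsets below are arbitrary:
def _demo_boxcox_llf():
    np.random.seed(1234)
    x = np.random.exponential(size=200) + 1
    _, lmbda_opt = boxcox(x)
    # The middle value should be the largest of the three.
    return (boxcox_llf(lmbda_opt - 0.5, x),
            boxcox_llf(lmbda_opt, x),
            boxcox_llf(lmbda_opt + 0.5, x))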
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
r"""
Return a positive dataset transformed by a Box-Cox power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : {None, scalar}, optional
If `lmbda` is not None, do the transformation for that value.
If `lmbda` is None, find the lambda that maximizes the log-likelihood
function and return it as the second output argument.
alpha : {None, float}, optional
If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence
interval for `lmbda` as the third output argument.
Must be between 0.0 and 1.0.
Returns
-------
boxcox : ndarray
Box-Cox power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
(min_ci, max_ci) : tuple of float, optional
If `lmbda` parameter is None and ``alpha`` is not None, this returned
tuple of floats represents the minimum and maximum confidence limits
given ``alpha``.
See Also
--------
probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
Notes
-----
The Box-Cox transform is given by::
y = (x**lmbda - 1) / lmbda, for lmbda > 0
log(x), for lmbda = 0
`boxcox` requires the input data to be positive. Sometimes a Box-Cox
transformation provides a shift parameter to achieve this; `boxcox` does
not. Such a shift parameter is equivalent to adding a positive constant to
`x` before calling `boxcox`.
The confidence limits returned when ``alpha`` is provided give the interval
where:
.. math::
llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
function.
References
----------
G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `boxcox` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, _ = stats.boxcox(x)
>>> stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Box-Cox transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if any(x <= 0):
raise ValueError("Data must be positive.")
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle')
y = boxcox(x, lmax)
if alpha is None:
return y, lmax
else:
# Find confidence interval
interval = _boxcox_conf_interval(x, lmax, alpha)
return y, lmax, interval
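# --- Illustrative sketch (not part of the original SciPy source) -------------
# The three calling conventions of boxcox(), shown on arbitrary skewed,
# strictly positive data (the helper name is hypothetical):
def _demo_boxcox():
    np.random.seed(0)
    x = np.random.exponential(size=100) + 1
    y_log = boxcox(x, lmbda=0.0)              # fixed lambda: a log transform
    y_mle, lmax = boxcox(x)                   # lambda fitted by MLE
    _, _, (lo, hi) = boxcox(x, alpha=0.05)    # additionally a 95% CI for lambda
    return y_log, y_mle, lmax, (lo, hi)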
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Minimizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234) # make this example reproducible
Generate some data and determine optimal ``lmbda`` in various ways:
>>> x = stats.loggamma.rvs(5, size=30) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
7.177...
>>> lmax_pearsonr
7.916...
>>> stats.boxcox_normmax(x, method='all')
array([ 7.91667384, 7.17718692])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
"""
def _pearsonr(x, brack):
osm_uniform = _calc_uniform_order_statistic_medians(x)
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
def _mle(x, brack):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return optimize.brent(_eval_mle, brack=brack, args=(x,))
def _all(x, brack):
maxlog = np.zeros(2, dtype=np.float)
maxlog[0] = _pearsonr(x, brack)
maxlog[1] = _mle(x, brack)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if method not in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
return optimfunc(x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox normality plot, optionally show it.
A Box-Cox normality plot shows graphically what the best transformation
parameter is to use in `boxcox` to obtain a distribution that is close
to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
for Box-Cox transformations. These are also the limits of the
horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Box-Cox transform was done.
ppcc : ndarray
        Probability Plot Correlation Coefficient, as obtained from `probplot`
when fitting the Box-Cox transformed input `x` against a normal
distribution.
See Also
--------
probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
    should be used after calling `boxcox_normplot`.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Box-Cox plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> stats.boxcox_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.boxcox(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the correlation coefficient of transformed x
z = boxcox(x, lmbda=val)
_, r2 = probplot(z, dist='norm', fit=True)
ppcc[i] = r2[-1]
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
        _add_axis_labels_title(plot, xlabel=r'$\lambda$',
ylabel='Prob Plot Corr. Coef.',
title='Box-Cox Normality Plot')
return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
"""
Perform the Shapiro-Wilk test for normality.
The Shapiro-Wilk test tests the null hypothesis that the
data was drawn from a normal distribution.
Parameters
----------
x : array_like
Array of sample data.
a : array_like, optional
Array of internal parameters used in the calculation. If these
are not given, they will be computed internally. If x has length
n, then a must have length n/2.
reta : bool, optional
Whether or not to return the internally computed a values. The
default is False.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
a : array_like, optional
If `reta` is True, then these are the internally computed "a"
values that may be passed into this function on future calls.
See Also
--------
anderson : The Anderson-Darling test for normality
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
"""
N = len(x)
if N < 3:
raise ValueError("Data must be at least length 3.")
if a is None:
a = zeros(N, 'f')
init = 0
else:
if len(a) != N // 2:
raise ValueError("len(a) must equal len(x)/2")
init = 1
y = sort(x)
a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
if ifault not in [0, 2]:
warnings.warn(str(ifault))
if N > 5000:
warnings.warn("p-value may not be accurate for N > 5000.")
if reta:
return w, pw, a
else:
return w, pw
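# --- Illustrative sketch (not part of the original SciPy source) -------------
# shapiro() returns (W, p-value); a large p-value is consistent with normality,
# a tiny one rejects it (helper name, seed and sample sizes are arbitrary):
def _demo_shapiro():
    np.random.seed(12345678)
    w_norm, p_norm = shapiro(np.random.normal(size=100))
    w_exp, p_exp = shapiro(np.random.exponential(size=100))
    # p_norm is typically large, p_exp very small.
    return (w_norm, p_norm), (w_exp, p_exp)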
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
AndersonResult = namedtuple('AndersonResult', ('statistic',
'critical_values',
'significance_level'))
def anderson(x, dist='norm'):
"""
Anderson-Darling test for data coming from a particular distribution
The Anderson-Darling test is a modification of the Kolmogorov-
Smirnov test `kstest` for the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for normal, exponential, logistic, or Gumbel (Extreme Value
Type I) distributions.
Parameters
----------
x : array_like
array of sample data
dist : {'norm','expon','logistic','gumbel','extreme1'}, optional
the type of distribution to test against. The default is 'norm'
and 'extreme1' is a synonym for 'gumbel'
Returns
-------
statistic : float
The Anderson-Darling test statistic
critical_values : list
The critical values for this distribution
significance_level : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
Notes
-----
Critical values provided are for the following significance levels:
    normal/exponential
15%, 10%, 5%, 2.5%, 1%
logistic
25%, 10%, 5%, 2.5%, 1%, 0.5%
Gumbel
25%, 10%, 5%, 2.5%, 1%
If A2 is larger than these critical values then for the corresponding
significance level, the null hypothesis that the data come from the
chosen distribution can be rejected.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
           to Tests for Exponentiality, Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
"""
if dist not in ['norm', 'expon', 'gumbel', 'extreme1', 'logistic']:
raise ValueError("Invalid distribution; dist must be 'norm', "
"'expon', 'gumbel', 'extreme1' or 'logistic'.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'norm':
s = np.std(x, ddof=1, axis=0)
w = (y - xbar) / s
z = distributions.norm.cdf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
elif dist == 'expon':
w = y / xbar
z = distributions.expon.cdf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
elif dist == 'logistic':
def rootfunc(ab, xj, N):
a, b = ab
tmp = (xj - a) / b
tmp2 = exp(tmp)
val = [sum(1.0/(1+tmp2), axis=0) - 0.5*N,
sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
return array(val)
sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
w = (y - sol[0]) / sol[1]
z = distributions.logistic.cdf(w)
sig = array([25, 10, 5, 2.5, 1, 0.5])
critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
else: # (dist == 'gumbel') or (dist == 'extreme1'):
xbar, s = distributions.gumbel_l.fit(x)
w = (y - xbar) / s
z = distributions.gumbel_l.cdf(w)
sig = array([25, 10, 5, 2.5, 1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
i = arange(1, N + 1)
A2 = -N - sum((2*i - 1.0) / N * (log(z) + log(1 - z[::-1])), axis=0)
return AndersonResult(A2, critical, sig)
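# --- Illustrative sketch (not part of the original SciPy source) -------------
# The returned statistic is compared against the tabulated critical values;
# exceeding the value for a significance level rejects the candidate
# distribution at that level (helper name and data below are arbitrary):
def _demo_anderson():
    np.random.seed(0)
    x = np.random.normal(loc=5.0, scale=2.0, size=200)
    result = anderson(x, dist='norm')
    rejected_at = [sl for sl, cv in zip(result.significance_level,
                                        result.critical_values)
                   if result.statistic > cv]
    return result.statistic, rejected_at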
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(np.float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
Anderson_ksampResult = namedtuple('Anderson_ksampResult',
('statistic', 'critical_values',
'significance_level'))
def anderson_ksamp(samples, midrank=True):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample data in arrays.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
Returns
-------
statistic : float
Normalized k-sample Anderson-Darling test statistic.
critical_values : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
significance_level : float
An approximate significance level at which the null hypothesis for the
provided samples can be rejected.
Raises
------
ValueError
If less than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
[1]_ Defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(314159)
The null hypothesis that the two random samples come from the same
distribution can be rejected at the 5% level because the returned
test value is greater than the critical value for 5% (1.961) but
not at the 2.5% level. The interpolation gives an approximate
significance level of 3.1%:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(loc=0.5, size=30)])
(2.4615796189876105,
array([ 0.325, 1.226, 1.961, 2.718, 3.752]),
0.03134990135800783)
The null hypothesis cannot be rejected for three samples from an
identical distribution. The approximate p-value (87%) has to be
computed by extrapolation and may not be very accurate:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(size=30), np.random.normal(size=20)])
(-0.73091722665244196,
array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]),
0.8789283903979661)
"""
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
else:
A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
H = (1. / n).sum()
hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
h = hs_cs[-1] + 1
g = (hs_cs / arange(2, N)).sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
# The b_i values are the interpolation coefficients from Table 2
# of Scholz and Stephens 1987
b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
critical = b0 + b1 / math.sqrt(m) + b2 / m
pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
if A2 < critical.min() or A2 > critical.max():
warnings.warn("approximate p-value will be computed by extrapolation")
p = math.exp(np.polyval(pf, A2))
return Anderson_ksampResult(A2, critical, p)
AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
def ansari(x, y):
"""
Perform the Ansari-Bradley test for equal scale parameters
The Ansari-Bradley test is a non-parametric test for the equality
of the scale parameter of the distributions from which two
samples were drawn.
Parameters
----------
x, y : array_like
arrays of sample data
Returns
-------
statistic : float
The Ansari-Bradley test statistic
pvalue : float
The p-value of the hypothesis test
See Also
--------
fligner : A non-parametric test for the equality of k variances
mood : A non-parametric test for the equality of two scale parameters
Notes
-----
The p-value given is exact when the sample sizes are both less than
55 and there are no ties, otherwise a normal approximation for the
p-value is used.
References
----------
.. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
"""
x, y = asarray(x), asarray(y)
n = len(x)
m = len(y)
if m < 1:
raise ValueError("Not enough other observations.")
if n < 1:
raise ValueError("Not enough test observations.")
N = m + n
xy = r_[x, y] # combine
rank = stats.rankdata(xy)
symrank = amin(array((rank, N - rank + 1)), 0)
AB = sum(symrank[:n], axis=0)
uxy = unique(xy)
repeats = (len(uxy) != len(xy))
exact = ((m < 55) and (n < 55) and not repeats)
if repeats and (m < 55 or n < 55):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
astart, a1, ifault = statlib.gscale(n, m)
ind = AB - astart
total = sum(a1, axis=0)
if ind < len(a1)/2.0:
cind = int(ceil(ind))
if ind == cind:
pval = 2.0 * sum(a1[:cind+1], axis=0) / total
else:
pval = 2.0 * sum(a1[:cind], axis=0) / total
else:
find = int(floor(ind))
if ind == floor(ind):
pval = 2.0 * sum(a1[find:], axis=0) / total
else:
pval = 2.0 * sum(a1[find+1:], axis=0) / total
return AnsariResult(AB, min(1.0, pval))
# otherwise compute normal approximation
if N % 2: # N odd
mnAB = n * (N+1.0)**2 / 4.0 / N
varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
else:
mnAB = n * (N+2.0) / 4.0
varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
if repeats: # adjust variance estimates
# compute sum(tj * rj**2,axis=0)
fac = sum(symrank**2, axis=0)
if N % 2: # N odd
varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
else: # N even
varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
z = (AB - mnAB) / sqrt(varAB)
pval = distributions.norm.sf(abs(z)) * 2.0
return AnsariResult(AB, pval)
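# --- Illustrative sketch (not part of the original SciPy source) -------------
# ansari() compares the spread of two samples; with fewer than 55 observations
# per sample and no ties the exact distribution is used, otherwise the normal
# approximation above (helper name and data are arbitrary):
def _demo_ansari():
    np.random.seed(0)
    x = np.random.normal(scale=1.0, size=40)
    y = np.random.normal(scale=3.0, size=40)
    ab, p = ansari(x, y)
    # A small p-value suggests the two scale parameters differ.
    return ab, p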
BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))
def bartlett(*args):
"""
Perform Bartlett's test for equal variances
Bartlett's test tests the null hypothesis that all input samples
are from populations with equal variances. For samples
from significantly non-normal populations, Levene's test
`levene` is more robust.
Parameters
----------
sample1, sample2,... : array_like
arrays of sample data. May be different lengths.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value of the test.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
Methods, Eighth Edition, Iowa State University Press.
"""
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
ssq = zeros(k, 'd')
for j in range(k):
Ni[j] = len(args[j])
ssq[j] = np.var(args[j], ddof=1)
Ntot = sum(Ni, axis=0)
spsq = sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k))
numer = (Ntot*1.0 - k) * log(spsq) - sum((Ni - 1.0)*log(ssq), axis=0)
denom = 1.0 + 1.0/(3*(k - 1)) * ((sum(1.0/(Ni - 1.0), axis=0)) -
1.0/(Ntot - k))
T = numer / denom
pval = distributions.chi2.sf(T, k - 1) # 1 - cdf
return BartlettResult(T, pval)
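# --- Illustrative sketch (not part of the original SciPy source) -------------
# bartlett() takes each sample as a separate positional argument; samples of
# different lengths are fine (helper name and values below are arbitrary):
def _demo_bartlett():
    np.random.seed(0)
    a = np.random.normal(scale=1.0, size=50)
    b = np.random.normal(scale=1.0, size=60)
    c = np.random.normal(scale=3.0, size=40)
    stat_eq, p_eq = bartlett(a, b)      # similar variances: large p expected
    stat_ne, p_ne = bartlett(a, b, c)   # c has a larger variance: small p expected
    return (stat_eq, p_eq), (stat_ne, p_ne)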
LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
def levene(*args, **kwds):
"""
Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the test.
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
      * 'median' : Recommended for skewed (non-normal) distributions.
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
Yci = zeros(k, 'd')
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
+ "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(args[j])
Yci[j] = func(args[j])
Ntot = sum(Ni, axis=0)
# compute Zij's
Zij = [None] * k
for i in range(k):
Zij[i] = abs(asarray(args[i]) - Yci[i])
# compute Zbari
Zbari = zeros(k, 'd')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i] * Ni[i]
Zbar /= Ntot
numer = (Ntot - k) * sum(Ni * (Zbari - Zbar)**2, axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += sum((Zij[i] - Zbari[i])**2, axis=0)
denom = (k - 1.0) * dvar
W = numer / denom
pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf
return LeveneResult(W, pval)
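# --- Illustrative sketch (not part of the original SciPy source) -------------
# The `center` keyword selects the Levene variant; 'median' (the default) is
# the Brown-Forsythe form recommended for skewed data. The helper name, data
# and trimming proportion below are arbitrary:
def _demo_levene():
    np.random.seed(0)
    a = np.random.exponential(size=50)
    b = np.random.exponential(scale=3.0, size=50)
    w_med, p_med = levene(a, b)                           # default center='median'
    w_trim, p_trim = levene(a, b, center='trimmed',
                            proportiontocut=0.1)
    return (w_med, p_med), (w_trim, p_trim)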
@setastest(False)
def binom_test(x, n=None, p=0.5):
"""
Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
x : integer or array_like
the number of successes, or if x has length 2, it is the
number of successes and the number of failures.
n : integer
the number of trials. This is ignored if x gives both the
number of successes and failures
p : float, optional
The hypothesized probability of success. 0 <= p <= 1. The
default value is p = 0.5
Returns
-------
p-value : float
The p-value of the hypothesis test
References
----------
.. [1] http://en.wikipedia.org/wiki/Binomial_test
"""
x = atleast_1d(x).astype(np.integer)
if len(x) == 2:
n = x[1] + x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
if (p > 1.0) or (p < 0.0):
raise ValueError("p must be in range [0,1]")
d = distributions.binom.pmf(x, n, p)
rerr = 1 + 1e-7
if x == p * n:
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif x < p * n:
i = np.arange(np.ceil(p * n), n+1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(x, n, p) +
distributions.binom.sf(n - y, n, p))
else:
i = np.arange(np.floor(p*n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(y-1, n, p) +
distributions.binom.sf(x-1, n, p))
return min(1.0, pval)
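# --- Illustrative sketch (not part of the original SciPy source) -------------
# binom_test() accepts either (successes, n=trials) or a [successes, failures]
# pair; both calls below describe 7 successes in 20 trials:
def _demo_binom_test():
    p1 = binom_test(7, n=20, p=0.5)
    p2 = binom_test([7, 13], p=0.5)
    assert np.isclose(p1, p2)
    return p1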
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = []
for k in range(len(g) - 1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
def fligner(*args, **kwds):
"""
Perform Fligner's test for equal variances.
Fligner's test tests the null hypothesis that all input samples
are from populations with equal variances. Fligner's test is
non-parametric in contrast to Bartlett's test `bartlett` and
Levene's test `levene`.
Parameters
----------
sample1, sample2, ... : array_like
Arrays of sample data. Need not be the same length.
center : {'mean', 'median', 'trimmed'}, optional
Keyword argument controlling which function of the data is used in
computing the test statistic. The default is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
Xsq : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
Notes
-----
As with Levene's test there are three variants of Fligner's test that
differ by the measure of central tendency used in the test. See `levene`
for more information.
References
----------
.. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
.. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
tests for scale. 'Journal of the American Statistical Association.'
71(353), 210-213.
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("fligner() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
+ "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
func = lambda x: np.mean(x, axis=0)
Ni = asarray([len(args[j]) for j in range(k)])
Yci = asarray([func(args[j]) for j in range(k)])
Ntot = sum(Ni, axis=0)
# compute Zij's
Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
allZij = []
g = [0]
for i in range(k):
allZij.extend(list(Zij[i]))
g.append(len(allZij))
ranks = stats.rankdata(allZij)
a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)
# compute Aibar
Aibar = _apply_func(a, g, sum) / Ni
anbar = np.mean(a, axis=0)
varsq = np.var(a, axis=0, ddof=1)
Xsq = sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf
return Xsq, pval
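# --- Illustrative sketch (not part of the original SciPy source) -------------
# fligner() is the rank-based (non-parametric) counterpart of bartlett() and
# levene(); it is called the same way (helper name and data are arbitrary):
def _demo_fligner():
    np.random.seed(0)
    a = np.random.exponential(size=50)
    b = np.random.exponential(scale=2.0, size=60)
    c = np.random.exponential(scale=0.5, size=40)
    xsq, p = fligner(a, b, c)
    return xsq, p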
def mood(x, y, axis=0):
"""
Perform Mood's test for equal scale parameters.
Mood's two-sample test for scale parameters is a non-parametric
test for the null hypothesis that two samples are drawn from the
same distribution with the same scale parameter.
Parameters
----------
x, y : array_like
Arrays of sample data.
axis : int, optional
The axis along which the samples are tested. `x` and `y` can be of
different length along `axis`.
If `axis` is None, `x` and `y` are flattened and the test is done on
all values in the flattened arrays.
Returns
-------
z : scalar or ndarray
The z-score for the hypothesis test. For 1-D inputs a scalar is
returned.
    p-value : scalar or ndarray
The p-value for the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
ansari : A non-parametric test for the equality of 2 variances
bartlett : A parametric test for equality of k variances in normal samples
levene : A parametric test for equality of k variances
Notes
-----
The data are assumed to be drawn from probability distributions ``f(x)``
and ``f(x/s) / s`` respectively, for some probability density function f.
The null hypothesis is that ``s == 1``.
For multi-dimensional arrays, if the inputs are of shapes
``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
resulting z and p values will have shape ``(n0, n2, n3)``. Note that
``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
Examples
--------
>>> from scipy import stats
>>> x2 = np.random.randn(2, 45, 6, 7)
>>> x1 = np.random.randn(2, 30, 6, 7)
>>> z, p = stats.mood(x1, x2, axis=1)
>>> p.shape
(2, 6, 7)
Find the number of points where the difference in scale is not significant:
>>> (p > 0.1).sum()
74
Perform the test with different scales:
>>> x1 = np.random.randn(2, 30)
>>> x2 = np.random.randn(2, 35) * 10.0
>>> stats.mood(x1, x2, axis=1)
(array([-5.84332354, -5.6840814 ]), array([5.11694980e-09, 1.31517628e-08]))
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if axis is None:
x = x.flatten()
y = y.flatten()
axis = 0
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
"should match")
n = x.shape[axis]
m = y.shape[axis]
N = m + n
if N < 3:
raise ValueError("Not enough observations.")
xy = np.concatenate((x, y), axis=axis)
if axis != 0:
xy = np.rollaxis(xy, axis)
xy = xy.reshape(xy.shape[0], -1)
# Generalized to the n-dimensional case by adding the axis argument, and
# using for loops, since rankdata is not vectorized. For improving
# performance consider vectorizing rankdata function.
all_ranks = np.zeros_like(xy)
for j in range(xy.shape[1]):
all_ranks[:, j] = stats.rankdata(xy[:, j])
Ri = all_ranks[:n]
M = sum((Ri - (N + 1.0) / 2)**2, axis=0)
# Approx stat.
mnM = n * (N * N - 1.0) / 12
varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
z = (M - mnM) / sqrt(varM)
# sf for right tail, cdf for left tail. Factor 2 for two-sidedness
z_pos = z > 0
pval = np.zeros_like(z)
pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
if res_shape == ():
# Return scalars, not 0-D arrays
z = z[0]
pval = pval[0]
else:
z.shape = res_shape
pval.shape = res_shape
return z, pval
WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue'))
def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
"""
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
The first set of measurements.
y : array_like, optional
The second set of measurements. If `y` is not given, then the `x`
array is considered to be the differences between the two sets of
measurements.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt":
Pratt treatment: includes zero-differences in the ranking process
(more conservative)
"wilcox":
Wilcox treatment: discards all zero-differences
"zsplit":
            Zero rank split: just like Pratt, but splitting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
Returns
-------
statistic : float
The sum of the ranks of the differences above or below zero, whichever
is smaller.
pvalue : float
The two-sided p-value for the test.
Notes
-----
Because the normal approximation is used for the calculations, the
samples used should be large. A typical rule is to require that
n > 20.
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
"""
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError("Zero method should be either 'wilcox' "
"or 'pratt' or 'zsplit'")
if y is None:
d = x
else:
x, y = map(asarray, (x, y))
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxon. Aborting.')
d = x - y
if zero_method == "wilcox":
# Keep all non-zero differences
d = compress(np.not_equal(d, 0), d, axis=-1)
count = len(d)
if count < 10:
warnings.warn("Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = sum((d > 0) * r, axis=0)
r_minus = sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
T = min(r_plus, r_minus)
mn = count * (count + 1.) * 0.25
se = count * (count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = sqrt(se / 24)
correction = 0.5 * int(bool(correction)) * np.sign(T - mn)
z = (T - mn - correction) / se
prob = 2. * distributions.norm.sf(abs(z))
return WilcoxonResult(T, prob)
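# --- Illustrative usage sketch (editor's addition, not part of the original
# scipy source): paired measurements before/after a treatment.  The numbers
# below are made up purely for demonstration.
def _example_wilcoxon_usage():
    before = np.array([125., 115., 130., 140., 140., 115., 140., 125., 140., 135.,
                       120., 130., 128., 127., 133., 134., 129., 131., 136., 138.,
                       124., 126.])
    after = before + np.array([-5., 3., -8., -10., 2., 4., -6., -3., -7., -2.,
                               1., -4., -9., 0., -1., -5., 2., -3., -6., -4.,
                               3., -2.])
    # One pair has a zero difference, so "pratt" keeps it in the ranking.
    result = wilcoxon(before, after, zero_method="pratt")
    return result.statistic, result.pvalue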
@setastest(False)
def median_test(*args, **kwds):
"""
Mood's median test.
Test that two or more samples come from populations with the same median.
Let ``n = len(args)`` be the number of samples. The "grand median" of
all the data is computed, and a contingency table is formed by
classifying the values in each sample as being above or below the grand
median. The contingency table, along with `correction` and `lambda_`,
are passed to `scipy.stats.chi2_contingency` to compute the test statistic
and p-value.
Parameters
----------
sample1, sample2, ... : array_like
The set of samples. There must be at least two samples.
Each sample must be a one-dimensional sequence containing at least
one value. The samples are not required to have the same length.
ties : str, optional
Determines how values equal to the grand median are classified in
the contingency table. The string must be one of::
"below":
Values equal to the grand median are counted as "below".
"above":
Values equal to the grand median are counted as "above".
"ignore":
Values equal to the grand median are not counted.
The default is "below".
correction : bool, optional
If True, *and* there are just two samples, apply Yates' correction
for continuity when computing the test statistic associated with
the contingency table. Default is True.
lambda_ : float or str, optional.
By default, the statistic computed in this test is Pearson's
chi-squared statistic. `lambda_` allows a statistic from the
Cressie-Read power divergence family to be used instead. See
`power_divergence` for details.
Default is 1 (Pearson's chi-squared statistic).
Returns
-------
stat : float
The test statistic. The statistic that is returned is determined by
`lambda_`. The default is Pearson's chi-squared statistic.
p : float
The p-value of the test.
m : float
The grand median.
table : ndarray
The contingency table. The shape of the table is (2, n), where
n is the number of samples. The first row holds the counts of the
values above the grand median, and the second row holds the counts
of the values below the grand median. The table allows further
analysis with, for example, `scipy.stats.chi2_contingency`, or with
`scipy.stats.fisher_exact` if there are two samples, without having
to recompute the table.
See Also
--------
kruskal : Compute the Kruskal-Wallis H-test for independent samples.
mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.
Notes
-----
.. versionadded:: 0.15.0
References
----------
.. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
(1950), pp. 394-399.
.. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
See Sections 8.12 and 10.15.
Examples
--------
A biologist runs an experiment in which there are three groups of plants.
Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
Each plant produces a number of seeds. The seed counts for each group
are::
Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84
The following code applies Mood's median test to these samples.
>>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
>>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
>>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
>>> from scipy.stats import median_test
>>> stat, p, med, tbl = median_test(g1, g2, g3)
The median is
>>> med
34.0
and the contingency table is
>>> tbl
array([[ 5, 10, 7],
[11, 5, 10]])
`p` is too large to conclude that the medians are not the same:
>>> p
0.12609082774093244
The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
`median_test`.
>>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
>>> p
0.12224779737117837
The median occurs several times in the data, so we'll get a different
result if, for example, ``ties="above"`` is used:
>>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
>>> p
0.063873276069553273
>>> tbl
array([[ 5, 11, 9],
[11, 4, 8]])
This example demonstrates that if the data set is not large and there
are values equal to the median, the p-value can be sensitive to the
choice of `ties`.
"""
ties = kwds.pop('ties', 'below')
correction = kwds.pop('correction', True)
lambda_ = kwds.pop('lambda_', None)
if len(kwds) > 0:
        bad_kwd = list(kwds.keys())[0]
raise TypeError("median_test() got an unexpected keyword "
"argument %r" % bad_kwd)
if len(args) < 2:
raise ValueError('median_test requires two or more samples.')
ties_options = ['below', 'above', 'ignore']
if ties not in ties_options:
raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
"of: %s" % (ties, str(ties_options)[1:-1]))
data = [np.asarray(arg) for arg in args]
# Validate the sizes and shapes of the arguments.
for k, d in enumerate(data):
if d.size == 0:
raise ValueError("Sample %d is empty. All samples must "
"contain at least one value." % (k + 1))
if d.ndim != 1:
raise ValueError("Sample %d has %d dimensions. All "
"samples must be one-dimensional sequences." %
(k + 1, d.ndim))
grand_median = np.median(np.concatenate(data))
# Create the contingency table.
table = np.zeros((2, len(data)), dtype=np.int64)
for k, sample in enumerate(data):
nabove = count_nonzero(sample > grand_median)
nbelow = count_nonzero(sample < grand_median)
nequal = sample.size - (nabove + nbelow)
table[0, k] += nabove
table[1, k] += nbelow
if ties == "below":
table[1, k] += nequal
elif ties == "above":
table[0, k] += nequal
# Check that no row or column of the table is all zero.
# Such a table can not be given to chi2_contingency, because it would have
# a zero in the table of expected frequencies.
rowsums = table.sum(axis=1)
if rowsums[0] == 0:
raise ValueError("All values are below the grand median (%r)." %
grand_median)
if rowsums[1] == 0:
raise ValueError("All values are above the grand median (%r)." %
grand_median)
if ties == "ignore":
# We already checked that each sample has at least one value, but it
# is possible that all those values equal the grand median. If `ties`
# is "ignore", that would result in a column of zeros in `table`. We
# check for that case here.
zero_cols = np.where((table == 0).all(axis=0))[0]
if len(zero_cols) > 0:
msg = ("All values in sample %d are equal to the grand "
"median (%r), so they are ignored, resulting in an "
"empty sample." % (zero_cols[0] + 1, grand_median))
raise ValueError(msg)
stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
correction=correction)
return stat, p, grand_median, table
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None] * N
plist[0] = poly1d(1)
for n in range(1, N):
plist[n] = plist[n-1].deriv() - poly1d([1, 0]) * plist[n-1]
return plist
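# --- Illustrative sketch (editor's addition, not part of the original scipy
# source): the first few polynomials produced by _hermnorm(), evaluated at 0.
def _example_hermnorm():
    p0, p1, p2 = _hermnorm(3)
    # Under the recursion above: p0(x) = 1, p1(x) = -x, p2(x) = x**2 - 1.
    return p0(0.0), p1(0.0), p2(0.0)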
# Note: when removing pdf_fromgamma, also remove the _hermnorm support function
@np.deprecate(message="scipy.stats.pdf_fromgamma is deprecated in scipy 0.16.0 "
"in favour of statsmodels.distributions.ExpandedNormal.")
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
if g4 is None:
g4 = 3 * g2**2
sigsq = 1.0 / g2
sig = sqrt(sigsq)
mu = g1 * sig**3.0
p12 = _hermnorm(13)
for k in range(13):
p12[k] /= sig**k
# Add all of the terms to polynomial
totp = (p12[0] - g1/6.0*p12[3] +
g2/24.0*p12[4] + g1**2/72.0 * p12[6] -
g3/120.0*p12[5] - g1*g2/144.0*p12[7] - g1**3.0/1296.0*p12[9] +
g4/720*p12[6] + (g2**2/1152.0 + g1*g3/720)*p12[8] +
g1**2 * g2/1728.0*p12[10] + g1**4.0 / 31104.0*p12[12])
# Final normalization
totp = totp / sqrt(2*pi) / sig
def thefunc(x):
xn = (x - mu) / sig
return totp(xn) * exp(-xn**2 / 2.)
return thefunc
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high - low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for circular mean range. Default is ``2*pi``.
low : float or int, optional
Low boundary for circular mean range. Default is 0.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
Returns
-------
circmean : float
Circular mean.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = angle(np.mean(exp(1j * ang), axis=axis))
mask = res < 0
if mask.ndim > 0:
res[mask] += 2*pi
elif mask:
res += 2*pi
return res*(high - low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular variance for samples assumed to be in a range
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular variance range. Default is 0.
high : float or int, optional
High boundary for circular variance range. Default is ``2*pi``.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
Returns
-------
circvar : float
Circular variance.
Notes
-----
This uses a definition of circular variance that in the limit of small
angles returns a number close to the 'linear' variance.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j * ang), axis=axis)
R = abs(res)
return ((high - low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular standard deviation for samples assumed to be in the
range [low to high].
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular standard deviation range. Default is 0.
high : float or int, optional
High boundary for circular standard deviation range.
Default is ``2*pi``.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
Returns
-------
circstd : float
Circular standard deviation.
Notes
-----
This uses a definition of circular standard deviation that in the limit of
small angles returns a number close to the 'linear' standard deviation.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j * ang), axis=axis)
R = abs(res)
return ((high - low)/2.0/pi) * sqrt(-2*log(R))
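# --- Illustrative sketch (editor's addition, not part of the original scipy
# source): circular statistics for compass headings (degrees) that straddle
# the 0/360 wrap-around, where the ordinary mean would be misleading.
def _example_circular_stats():
    headings = np.array([355., 5., 2., 359., 10.])
    mean = circmean(headings, high=360, low=0)
    var = circvar(headings, high=360, low=0)
    std = circstd(headings, high=360, low=0)
    # The circular mean lands near 2 degrees, not near the arithmetic mean of ~146.
    return mean, var, std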
# Tests to include (from R) -- some of these already in stats.
########
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial
# Y Pearson's Chi-squared (stats.chisquare)
# Y Association Between Paired samples (stats.pearsonr, stats.spearmanr)
# stats.kendalltau) -- these need work though
# Fisher's exact test
# X Fligner-Killeen Test
# Y Friedman Rank Sum (stats.friedmanchisquare?)
# Y Kruskal-Wallis
# Y Kolmogorov-Smirnov
# Cochran-Mantel-Haenszel Chi-Squared for Count
# McNemar's Chi-squared for Count
# X Mood Two-Sample
# X Test For Equal Means in One-Way Layout (see stats.ttest also)
# Pairwise Comparisons of proportions
# Pairwise t tests
# Tabulate p values for pairwise comparisons
# Pairwise Wilcoxon rank sum tests
# Power calculations two sample test of prop.
# Power calculations for one and two sample t tests
# Equal or Given Proportions
# Trend in Proportions
# Quade Test
# Y Student's T Test
# Y F Test to compare two variances
# XY Wilcoxon Rank Sum and Signed Rank Tests
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/scipy/stats/morestats.py
|
Python
|
gpl-2.0
| 82,400
|
[
"Gaussian"
] |
e6523048d1035ffcfe6c439996cf2a076f8ae64eb40d554cb8ba1b1c51d45478
|
import sys
sys.path.append('/usr/local/lib/python2.7/dist-packages')
from flask import Flask, jsonify, render_template, request, abort
import nest
import lib.helpers as hh
app = Flask(__name__)
trusted_proxies = ('127.0.0.1','132.230.177.59')
@app.before_request
def limit_remote_addr():
if request.remote_addr not in trusted_proxies:
abort(403) # Forbidden
@app.template_filter('stringify')
def stringify_filter(s):
return s.replace('_',' ')
@app.route('/')
def init():
# import pdb;pdb.set_trace()
nest.ResetKernel()
global neuron
neuron = nest.Create('iaf_neuron', params={'C_m': 250., 'tau_m': 10.})
global input
input = nest.Create('noise_generator', params={'mean':250., 'std':250.})
nest.Connect(input,neuron)
global vm
vm = nest.Create('voltmeter')
nest.Connect(vm,neuron)
nest.Simulate(1000.)
events = nest.GetStatus(vm,'events')[0]
nest.SetStatus(vm, {'n_events': 0})
# return render_template('iaf_neuron_d3-slider.html', data=hh.prep_single([events['times'], events['V_m']]))
return render_template('iaf_neuron_jquery-slider.html', data=hh.prep_single([events['times'], events['V_m']]))
@app.route('/simulate/', methods=['POST'])
def simulate():
values = request.get_json()
nest.SetStatus(input, {'mean':float(values['mean']),'std':float(values['std'])})
nest.SetStatus(neuron, {'C_m':float(values['C_m']),'tau_m':float(values['tau_m'])})
nest.Simulate(1.)
events = nest.GetStatus(vm,'events')[0]
nest.SetStatus(vm, {'n_events': 0})
return jsonify(data=hh.prep_single([events['times'], events['V_m']]))
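# --- Illustrative client sketch (editor's addition, not part of the original
# app): shows the JSON payload the /simulate/ endpoint expects.  The `requests`
# dependency, the function name and the localhost URL/port are assumptions made
# only for demonstration.
def example_simulate_request():
    import requests
    payload = {'mean': 300.0, 'std': 200.0, 'C_m': 250.0, 'tau_m': 10.0}
    response = requests.post('http://127.0.0.1:5000/simulate/', json=payload)
    # The endpoint returns {"data": ...} produced by hh.prep_single().
    return response.json()['data']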
if __name__ == '__main__':
if len(sys.argv) > 1:
app.run(sys.argv[1])
else:
app.run()
|
babsey/nest-webapp
|
iaf_neuron.py
|
Python
|
mit
| 1,747
|
[
"NEURON"
] |
b1be39f1d22f2c7e21b1dc8454ff84b0d8a936aed5ec0d0de0a4461d806c45c1
|
from aloe import step, world
from aloe_webdriver.util import find_any_field, find_field_by_value
from aloe_webdriver import TEXT_FIELDS
from selenium.common.exceptions import NoSuchElementException
from user.models import UserProfile, CATEGORY, ENGINEERING, Settings
from booking.models import Booking, Place, BookTime, Building, date_range, Tag
from django.contrib.auth.models import User
from django.test import Client
from django.core.management import call_command
from datetime import timedelta, datetime
from dateutil import parser
from sas.basic import Configuration
@step(r'I type in "(.*)" to "(.*)"')
def fill_bootstrap_field(step, text, field):
words_list = field.lower().split()
words_list.insert(0, "id")
id_field = "_".join(words_list)
date_field = find_any_field(world.browser, TEXT_FIELDS, id_field)
date_field.send_keys(text)
@step(r'I type in "(.*)" to id "(.*)"')
def fill_bootstrap_field(step, text, id_field):
date_field = find_any_field(world.browser, TEXT_FIELDS, id_field)
date_field.send_keys(text)
@step(r'I click on an element with id of "(.*)"')
def click_on_element_by_id(step, id):
try:
elem = world.browser.find_element_by_id(id)
except NoSuchElementException:
raise AssertionError("Element with ID '{}' not found.".format(id))
elem.click()
@step(r'I click on an element "(.*)" called "(.*)"')
def click_on_element_by_value(step, value, typeelement):
try:
text = find_field_by_value(world.browser, typeelement)
except NoSuchElementException:
raise AssertionError("Element not found.")
text.click()
@step(r'I register the user "(.*)" with the password "(.*)" and registration number "(.*)" and engineering "(.*)" and category "(.*)"')
def register_user(step, username, password, registration_number, engineering, category):
user = UserProfile()
user.user = User()
user.registration_number = registration_number
user.user.email = username
user.user.username = username
user.user.first_name = "Usuário"
user.user.set_password(password)
user.save()
user.make_as_academic_staff()
for number,engineering_type in ENGINEERING:
if engineering_type == engineering:
user.engineering = engineering
for number,category_type in CATEGORY:
if category_type == category:
user.category = number
user.save()
@step(r'I register the user "(.*)" with the password "(.*)" and registration number "(.*)"')
def register_user(step, username, password, registration_number):
user = UserProfile()
user.user = User()
user.registration_number = registration_number
user.user.email = username
user.user.username = username
user.user.first_name = "Usuário"
user.user.set_password(password)
user.save()
user.make_as_academic_staff()
@step(r'I register the user "(.*)" with the password "(.*)" and registration number "(.*)" and category "(.*)"')
def register_user(step, username, password, registration_number, category):
user = UserProfile()
user.user = User()
user.registration_number = registration_number
user.user.email = username
user.user.username = username
user.user.first_name = "Usuário"
user.user.set_password(password)
user.save()
user.make_as_academic_staff()
for number,category_type in CATEGORY:
if category_type == category:
user.category = number
user.save()
@step(r'I load a semester')
def register_semester(step):
settings = Settings()
settings.start_semester = datetime.strptime("21092017", "%d%m%Y")
settings.end_semester = datetime.strptime("22092018", "%d%m%Y")
settings.save()
@step(r'I register an admin with email "(.*)" and password "(.*)" and registration number "(.*)" and category "(.*)"')
def register_admin(step, username, password, registration_number, category):
user = UserProfile()
user.user = User()
user.registration_number = registration_number
user.user.email = username
user.user.username = username
user.user.first_name = "Usuário"
user.user.set_password(password)
user.save()
for number, category_type in CATEGORY:
if category_type == category:
user.category = number
user.make_as_admin()
user.save()
@step(r'I register the booking "(.*)" with the building "(.*)" with the place name "(.*)" and start_date "(.*)" and end_date "(.*)" of user "(.*)"')
def new_booking(step, booking_name, building, place_name, start_date, end_date, username):
booking = Booking()
booking.user = User()
booking.user = User.objects.get(username=username)
booking.name = booking_name
booking.start_date = start_date
booking.end_date = end_date
booking.place = Place()
booking.place.name = place_name
booking.place.building = Building()
booking.place.building.name = building
booking.save()
for day in range(0, 10):
book = BookTime()
book.date_booking = parser.parse(start_date) + timedelta(days=day)
book.start_hour = "20:00"
book.end_hour = "22:00"
book.save()
booking.time.add(book)
booking.save()
@step(r'I register the booking "(.*)" with the building "(.*)" with the place name "(.*)" and start_date "(.*)" and end_date "(.*)" of responsible "(.*)"')
def new_booking(step, booking_name, building, place_name, start_date, end_date, responsible):
booking = Booking()
booking.user = User()
booking.user = User.objects.get(username=responsible)
booking.name = booking_name
booking.start_date = start_date
booking.end_date = end_date
booking.place = Place()
booking.place.name = place_name
booking.place.building = Building()
booking.place.building.name = building
booking.responsible = responsible
booking.save()
tag_o = Tag(name="Software")
tag_o.save()
booking.tags.add(tag_o)
for day in range(0, 10):
book = BookTime()
book.date_booking = parser.parse(start_date) + timedelta(days=day)
book.start_hour = "20:00"
book.end_hour = "22:00"
book.save()
booking.time.add(book)
booking.save()
@step(r'I register the tagged booking "(.*)" with the building "(.*)" with the place name "(.*)" and start_date "(.*)" and end_date "(.*)" of user "(.*)" and tag "(.*)"')
def new_tagged_booking(step, booking_name, building, place_name, start_date, end_date, username, tag):
booking = Booking()
booking.user = User()
booking.user = User.objects.get(username=username)
booking.name = booking_name
booking.start_date = start_date
booking.end_date = end_date
booking.place = Place()
booking.place.name = place_name
booking.place.building = Building()
booking.place.building.name = building
booking.responsible = username
booking.save()
tag_o = Tag(name=tag)
tag_o.save()
booking.tags.add(tag_o)
for day in range(0, 10):
book = BookTime()
book.date_booking = parser.parse(start_date) + timedelta(days=day)
book.start_hour = "20:00"
book.end_hour = "22:00"
book.save()
booking.time.add(book)
booking.save()
@step(r'I login in with email "(.*)" and password "(.*)"')
def login_user(step, email, password):
step.given("I visit site page \"/\"")
c = Client()
response = c.login(username=email, password=password)
cookies = {}
for co in c.cookies.values():
cookies['name'] = co.key
cookies['value'] = co.value
world.browser.add_cookie(cookies)
world.browser.refresh()
@step(r'I run loaddata to populate dropdowns')
def run_command_line(step):
call_command('loaddata', 'buildings', 'places')
call_command('loaddata', 'user/fixtures/group.json')
call_command('loaddata', 'user/fixtures/users.json')
call_command('loaddata', 'user/fixtures/userProfiles.json')
call_command('loaddata', 'booking/fixtures/bookTimes.json')
call_command('loaddata', 'booking/fixtures/bookings.json')
@step(r'I create bookings')
def create_bookings(step):
for b in Booking.objects.all():
b.delete()
conf = Configuration()
conf.create_bookings()
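# --- Illustrative scenario sketch (editor's addition, not part of the original
# steps file): how the step definitions above could be combined in a Gherkin
# feature.  The element id "save_booking" and the field label "Booking Name"
# are hypothetical; the step wording must match the regexes registered with
# @step above.
#
#   Scenario: Book a place as a registered user
#     Given I register the user "ana@unb.br" with the password "pass123" and registration number "140000000"
#     And I login in with email "ana@unb.br" and password "pass123"
#     And I load a semester
#     When I type in "Study group" to "Booking Name"
#     And I click on an element with id of "save_booking"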
|
fga-gpp-mds/2016.2-SAS_FGA
|
sas/sas/steps/steps.py
|
Python
|
gpl-3.0
| 8,231
|
[
"VisIt"
] |
4bae468d41546aa09a2c53d056b678189bb656bc20f5a29307ead79fd55e635f
|
""" Contains unit tests of NetworkAgent module
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import sys
import DIRAC.AccountingSystem.Agent.NetworkAgent as module
from mock.mock import MagicMock
__RCSID__ = "$Id$"
MQURI1 = "mq.dirac.net::Topics::perfsonar.summary.packet-loss-rate"
MQURI2 = "mq.dirac.net::Queues::perfsonar.summary.histogram-owdelay"
ROOT_PATH = "/Resources/Sites"
SITE1 = "LCG.Dirac.net"
SITE2 = "LCG.DiracToRemove.net"
SITE3 = "VAC.DiracToAdd.org"
SITE1_HOST1 = "perfsonar.diracold.net"
SITE1_HOST2 = "perfsonar-to-disable.diracold.net"
SITE2_HOST1 = "perfsonar.diractoremove.net"
SITE3_HOST1 = "perfsonar.diractoadd.org"
INITIAL_CONFIG = {
"%s/LCG/%s/Network/%s/Enabled" % (ROOT_PATH, SITE1, SITE1_HOST1): "True",
"%s/LCG/%s/Network/%s/Enabled" % (ROOT_PATH, SITE1, SITE1_HOST2): "True",
"%s/LCG/%s/Network/%s/Enabled" % (ROOT_PATH, SITE2, SITE2_HOST1): "True",
}
UPDATED_CONFIG = {
"%s/LCG/%s/Network/%s/Enabled" % (ROOT_PATH, SITE1, SITE1_HOST1): "True",
"%s/LCG/%s/Network/%s/Enabled" % (ROOT_PATH, SITE1, SITE1_HOST2): "False",
"%s/LCG/%s/Network/%s/Enabled" % (ROOT_PATH, SITE3, SITE3_HOST1): "True",
}
class NetworkAgentSuccessTestCase(unittest.TestCase):
"""Test class to check success scenarios."""
def setUp(self):
# external dependencies
module.datetime = MagicMock()
# internal dependencies
module.S_ERROR = MagicMock()
module.S_OK = MagicMock()
module.gLogger = MagicMock()
module.AgentModule = MagicMock()
module.Network = MagicMock()
module.gConfig = MagicMock()
module.CSAPI = MagicMock()
module.createConsumer = MagicMock()
# prepare test object
module.NetworkAgent.__init__ = MagicMock(return_value=None)
module.NetworkAgent.am_getOption = MagicMock(return_value=100) # buffer timeout
self.agent = module.NetworkAgent()
self.agent.initialize()
@classmethod
def tearDownClass(cls):
sys.modules.pop("DIRAC.AccountingSystem.Agent.NetworkAgent")
def test_updateNameDictionary(self):
module.gConfig.getConfigurationTree.side_effect = [
{"OK": True, "Value": INITIAL_CONFIG},
{"OK": True, "Value": UPDATED_CONFIG},
]
# check if name dictionary is empty
self.assertFalse(self.agent.nameDictionary)
self.agent.updateNameDictionary()
self.assertEqual(self.agent.nameDictionary[SITE1_HOST1], SITE1)
self.assertEqual(self.agent.nameDictionary[SITE1_HOST2], SITE1)
self.assertEqual(self.agent.nameDictionary[SITE2_HOST1], SITE2)
self.agent.updateNameDictionary()
self.assertEqual(self.agent.nameDictionary[SITE1_HOST1], SITE1)
self.assertEqual(self.agent.nameDictionary[SITE3_HOST1], SITE3)
        # check if hosts were removed from the dictionary
self.assertRaises(KeyError, lambda: self.agent.nameDictionary[SITE1_HOST2])
self.assertRaises(KeyError, lambda: self.agent.nameDictionary[SITE2_HOST1])
def test_agentExecute(self):
module.NetworkAgent.am_getOption.return_value = "%s, %s" % (MQURI1, MQURI2)
module.gConfig.getConfigurationTree.return_value = {"OK": True, "Value": INITIAL_CONFIG}
# first run
result = self.agent.execute()
self.assertTrue(result["OK"])
# second run (simulate new messages)
self.agent.messagesCount += 10
result = self.agent.execute()
self.assertTrue(result["OK"])
# third run (no new messages - restart consumers)
result = self.agent.execute()
self.assertTrue(result["OK"])
if __name__ == "__main__":
suite = unittest.defaultTestLoader.loadTestsFromTestCase(NetworkAgentSuccessTestCase)
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
|
ic-hep/DIRAC
|
src/DIRAC/AccountingSystem/Agent/test/Test_NetworkAgent.py
|
Python
|
gpl-3.0
| 3,928
|
[
"DIRAC"
] |
9237d38d7961ca8ebd894bcf4eaa4d8fa1cfe5a1ed21100a80b414a006be2969
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import JTensor
from bigdl.nn.layer import Layer
import numpy as np
if sys.version >= '3':
long = int
unicode = str
class Criterion(JavaValue):
"""
    Criterion is helpful when training a neural network. Given an input and a
    target, it computes a gradient according to a given loss function.
"""
def __init__(self, jvalue, bigdl_type, *args):
self.value = jvalue if jvalue else callBigDlFunc(
bigdl_type, JavaValue.jvm_class_constructor(self), *args)
self.bigdl_type = bigdl_type
def __str__(self):
return self.value.toString()
def forward(self, input, target):
"""
NB: It's for debug only, please use optimizer.optimize() in production.
Takes an input object, and computes the corresponding loss of the criterion,
compared with `target`
:param input: ndarray or list of ndarray
:param target: ndarray or list of ndarray
:return: value of loss
"""
jinput, input_is_table = Layer.check_input(input)
jtarget, target_is_table = Layer.check_input(target)
output = callBigDlFunc(self.bigdl_type,
"criterionForward",
self.value,
jinput,
input_is_table,
jtarget,
target_is_table)
return output
def backward(self, input, target):
"""
NB: It's for debug only, please use optimizer.optimize() in production.
Performs a back-propagation step through the criterion, with respect to the given input.
:param input: ndarray or list of ndarray
:param target: ndarray or list of ndarray
:return: ndarray
"""
jinput, input_is_table = Layer.check_input(input)
jtarget, target_is_table = Layer.check_input(target)
output = callBigDlFunc(self.bigdl_type,
"criterionBackward",
self.value,
jinput,
input_is_table,
jtarget,
target_is_table)
return Layer.convert_output(output)
@classmethod
def of(cls, jcriterion, bigdl_type="float"):
"""
Create a python Criterion by a java criterion object
:param jcriterion: A java criterion object which created by Py4j
:return: a criterion.
"""
criterion = Criterion(bigdl_type, jcriterion)
criterion.value = jcriterion
criterion.bigdl_type = bigdl_type
return criterion
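# --- Illustrative sketch (editor's addition, not part of the original BigDL
# source): forward/backward on a concrete criterion.  It assumes a BigDL-enabled
# SparkContext has been created and init_engine() has been called; MSECriterion
# is defined further below in this module.
def _example_criterion_forward_backward():
    prediction = np.array([0.1, 0.7, 0.2])
    target = np.array([0.0, 1.0, 0.0])
    mse = MSECriterion()
    loss = mse.forward(prediction, target)    # scalar loss value
    grad = mse.backward(prediction, target)   # gradient w.r.t. the prediction
    return loss, grad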
class ClassNLLCriterion(Criterion):
'''
The negative log likelihood criterion. It is useful to train a classification problem with n
classes. If provided, the optional argument weights should be a 1D Tensor assigning weight to
each of the classes. This is particularly useful when you have an unbalanced training set.
The input given through a forward() is expected to contain log-probabilities/probabilities of
each class: input has to be a 1D Tensor of size n. Obtaining log-probabilities/probabilities
in a neural network is easily achieved by adding a LogSoftMax/SoftMax layer in the last layer
of your neural network. You may use CrossEntropyCriterion instead, if you prefer not to add an
extra layer to your network. This criterion expects a class index (1 to the number of class) as
target when calling forward(input, target) and backward(input, target).
In the log-probabilities case,
The loss can be described as:
loss(x, class) = -x[class]
or in the case of the weights argument it is specified as follows:
loss(x, class) = -weights[class] * x[class]
Due to the behaviour of the backend code, it is necessary to set sizeAverage to false when
calculating losses in non-batch mode.
Note that if the target is `-1`, the training process will skip this sample.
    In other words, the forward pass will return zero output and the backward
    pass will also return zero `gradInput`.
By default, the losses are averaged over observations for each minibatch. However, if the field
sizeAverage is set to false, the losses are instead summed for each minibatch.
    In particular, when weights=None, size_average=True and logProbAsInput=False, this is the same
    as the `sparse_categorical_crossentropy` loss in keras.
:param weights: weights of each class
:param size_average: whether to average or not
:param logProbAsInput: indicating whether to accept log-probabilities or probabilities as input.
>>> np.random.seed(123)
>>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
>>> classNLLCriterion = ClassNLLCriterion(weights, True, True)
creating: createClassNLLCriterion
>>> classNLLCriterion = ClassNLLCriterion()
creating: createClassNLLCriterion
'''
def __init__(self,
weights=None,
size_average=True,
logProbAsInput=True,
bigdl_type="float"):
super(ClassNLLCriterion, self).__init__(None, bigdl_type,
JTensor.from_ndarray(weights),
size_average, logProbAsInput)
class MSECriterion(Criterion):
'''
Creates a criterion that measures the mean squared error between n elements
in the input x and output y:
```
loss(x, y) = 1/n \sum |x_i - y_i|^2
```
If x and y are d-dimensional Tensors with a total of n elements,
the sum operation still operates over all the elements, and divides by n.
The two Tensors must have the same number of elements (but their sizes might be different).
The division by n can be avoided if one sets the internal variable sizeAverage to false.
By default, the losses are averaged over observations for each minibatch. However,
if the field sizeAverage is set to false, the losses are instead summed.
>>> mSECriterion = MSECriterion()
creating: createMSECriterion
'''
def __init__(self, bigdl_type="float"):
super(MSECriterion, self).__init__(None, bigdl_type)
class AbsCriterion(Criterion):
'''
    measures the mean absolute value of the element-wise difference between input and target
>>> absCriterion = AbsCriterion(True)
creating: createAbsCriterion
'''
def __init__(self,
size_average=True,
bigdl_type="float"):
super(AbsCriterion, self).__init__(None, bigdl_type,
size_average)
class ClassSimplexCriterion(Criterion):
'''
ClassSimplexCriterion implements a criterion for classification.
It learns an embedding per class, where each class' embedding is a
point on an (N-1)-dimensional simplex, where N is the number of classes.
:param nClasses: the number of classes.
>>> classSimplexCriterion = ClassSimplexCriterion(2)
creating: createClassSimplexCriterion
'''
def __init__(self,
n_classes,
bigdl_type="float"):
super(ClassSimplexCriterion, self).__init__(None, bigdl_type,
n_classes)
class CosineDistanceCriterion(Criterion):
"""
Creates a criterion that measures the loss given an input and target,
Loss = 1 - cos(x, y)
>>> cosineDistanceCriterion = CosineDistanceCriterion(True)
creating: createCosineDistanceCriterion
>>> cosineDistanceCriterion.forward(np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
... np.array([5.0, 4.0, 3.0, 2.0, 1.0]))
0.07272728
"""
def __init__(self,
size_average=True,
bigdl_type="float"):
super(CosineDistanceCriterion, self).__init__(None, bigdl_type,
size_average)
class CosineEmbeddingCriterion(Criterion):
"""
Creates a criterion that measures the loss given an input x = {x1, x2},
a table of two Tensors, and a Tensor label y with values 1 or -1.
:param margin: a number from -1 to 1, 0 to 0.5 is suggested
>>> cosineEmbeddingCriterion = CosineEmbeddingCriterion(1e-5, True)
creating: createCosineEmbeddingCriterion
>>> cosineEmbeddingCriterion.forward([np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
... np.array([5.0, 4.0, 3.0, 2.0, 1.0])],
... [np.ones(5)])
0.0
"""
def __init__(self,
margin=0.0,
size_average=True,
bigdl_type="float"):
super(CosineEmbeddingCriterion, self).__init__(None, bigdl_type,
margin,
size_average)
class DistKLDivCriterion(Criterion):
'''
The Kullback-Leibler divergence criterion
:param sizeAverage:
>>> distKLDivCriterion = DistKLDivCriterion(True)
creating: createDistKLDivCriterion
'''
def __init__(self,
size_average=True,
bigdl_type="float"):
super(DistKLDivCriterion, self).__init__(None, bigdl_type,
size_average)
class CategoricalCrossEntropy(Criterion):
"""
    This criterion is the same as the cross entropy criterion, except that it takes a one-hot
    format target tensor
>>> cce = CategoricalCrossEntropy()
creating: createCategoricalCrossEntropy
"""
def __init__(self, bigdl_type="float"):
super(CategoricalCrossEntropy, self).__init__(None, bigdl_type)
class HingeEmbeddingCriterion(Criterion):
'''
Creates a criterion that measures the loss given an
input x which is a 1-dimensional vector and a label y (1 or -1).
This is usually used for measuring whether two inputs are similar
or dissimilar,
e.g. using the L1 pairwise distance, and is typically used for
learning nonlinear embeddings or semi-supervised learning.
If x and y are n-dimensional Tensors, the sum operation still operates
over all the elements, and divides by n (this can be avoided if one sets
the internal variable sizeAverage to false). The margin has a default
value of 1, or can be set in the constructor.
>>> hingeEmbeddingCriterion = HingeEmbeddingCriterion(1e-5, True)
creating: createHingeEmbeddingCriterion
'''
def __init__(self,
margin=1.0,
size_average=True,
bigdl_type="float"):
super(HingeEmbeddingCriterion, self).__init__(None, bigdl_type,
margin,
size_average)
class L1HingeEmbeddingCriterion(Criterion):
'''
Creates a criterion that measures the loss given an input x = {x1, x2},
a table of two Tensors, and a label y (1 or -1):
:param margin:
>>> l1HingeEmbeddingCriterion = L1HingeEmbeddingCriterion(1e-5)
creating: createL1HingeEmbeddingCriterion
>>> l1HingeEmbeddingCriterion = L1HingeEmbeddingCriterion()
creating: createL1HingeEmbeddingCriterion
>>> input1 = np.array([2.1, -2.2])
>>> input2 = np.array([-0.55, 0.298])
>>> input = [input1, input2]
>>> target = np.array([1.0])
>>> result = l1HingeEmbeddingCriterion.forward(input, target)
>>> (result == 5.148)
True
'''
def __init__(self,
margin=1.0,
bigdl_type="float"):
super(L1HingeEmbeddingCriterion, self).__init__(None, bigdl_type,
margin)
class MarginCriterion(Criterion):
'''
Creates a criterion that optimizes a two-class classification hinge loss (margin-based loss)
between input x (a Tensor of dimension 1) and output y.
When margin = 1, size_average = True and squared = False, this is the same as hinge loss in keras;
When margin = 1, size_average = False and squared = True, this is the same as squared_hinge loss in keras.
:param margin: if unspecified, is by default 1.
:param size_average: size average in a mini-batch
:param squared: whether to calculate the squared hinge loss
>>> marginCriterion = MarginCriterion(1e-5, True, False)
creating: createMarginCriterion
'''
def __init__(self,
margin=1.0,
size_average=True,
squared=False,
bigdl_type="float"):
super(MarginCriterion, self).__init__(None, bigdl_type,
margin,
size_average,
squared)
class MarginRankingCriterion(Criterion):
'''
Creates a criterion that measures the loss given an input x = {x1, x2},
a table of two Tensors of size 1 (they contain only scalars), and a label y (1 or -1).
In batch mode, x is a table of two Tensors of size batchsize, and y is a Tensor of size
batchsize containing 1 or -1 for each corresponding pair of elements in the input Tensor.
    If y == 1 then it is assumed the first input should be ranked higher (have a larger value) than
the second input, and vice-versa for y == -1.
:param margin:
>>> marginRankingCriterion = MarginRankingCriterion(1e-5, True)
creating: createMarginRankingCriterion
'''
def __init__(self,
margin=1.0,
size_average=True,
bigdl_type="float"):
super(MarginRankingCriterion, self).__init__(None, bigdl_type,
margin,
size_average)
class MultiCriterion(Criterion):
'''
a weighted sum of other criterions each applied to the same input and target
>>> multiCriterion = MultiCriterion()
creating: createMultiCriterion
>>> mSECriterion = MSECriterion()
creating: createMSECriterion
>>> multiCriterion = multiCriterion.add(mSECriterion)
>>> multiCriterion = multiCriterion.add(mSECriterion)
'''
def __init__(self,
bigdl_type="float"):
super(MultiCriterion, self).__init__(None, bigdl_type)
def add(self, criterion, weight=1.0):
self.value.add(criterion.value, weight)
return self
class MultiLabelMarginCriterion(Criterion):
'''
Creates a criterion that optimizes a multi-class multi-classification hinge loss (
margin-based loss) between input x and output y (which is a Tensor of target class indices)
:param size_average: size average in a mini-batch
>>> multiLabelMarginCriterion = MultiLabelMarginCriterion(True)
creating: createMultiLabelMarginCriterion
'''
def __init__(self,
size_average=True,
bigdl_type="float"):
super(MultiLabelMarginCriterion, self).__init__(None, bigdl_type,
size_average)
class ParallelCriterion(Criterion):
'''
ParallelCriterion is a weighted sum of other criterions each applied to a different input
and target. Set repeatTarget = true to share the target for criterions.
Use add(criterion[, weight]) method to add criterion. Where weight is a scalar(default 1).
:param repeat_target: Whether to share the target for all criterions.
>>> parallelCriterion = ParallelCriterion(True)
creating: createParallelCriterion
>>> mSECriterion = MSECriterion()
creating: createMSECriterion
>>> parallelCriterion = parallelCriterion.add(mSECriterion)
>>> parallelCriterion = parallelCriterion.add(mSECriterion)
'''
def __init__(self,
repeat_target=False,
bigdl_type="float"):
super(ParallelCriterion, self).__init__(None, bigdl_type,
repeat_target)
def add(self, criterion, weight=1.0):
self.value.add(criterion.value, weight)
return self
class KLDCriterion(Criterion):
'''
Computes the KL-divergence of the input normal distribution to a standard normal distribution.
The input has to be a table. The first element of input is the mean of the distribution,
the second element of input is the log_variance of the distribution. The input distribution is
assumed to be diagonal.
>>> KLDCriterion = KLDCriterion(True)
creating: createKLDCriterion
'''
def __init__(self, size_average=True, bigdl_type="float"):
super(KLDCriterion, self).__init__(None, bigdl_type, size_average)
class GaussianCriterion(Criterion):
'''
Computes the log-likelihood of a sample x given a Gaussian distribution p.
>>> GaussianCriterion = GaussianCriterion()
creating: createGaussianCriterion
'''
def __init__(self, bigdl_type="float"):
super(GaussianCriterion, self).__init__(None, bigdl_type)
class SmoothL1Criterion(Criterion):
'''
Creates a criterion that can be thought of as a smooth version of the AbsCriterion.
It uses a squared term if the absolute element-wise error falls below 1.
It is less sensitive to outliers than the MSECriterion and in some
cases prevents exploding gradients (e.g. see "Fast R-CNN" paper by Ross Girshick).
```
| 0.5 * (x_i - y_i)^2^, if |x_i - y_i| < 1
loss(x, y) = 1/n \sum |
| |x_i - y_i| - 0.5, otherwise
```
If x and y are d-dimensional Tensors with a total of n elements,
the sum operation still operates over all the elements, and divides by n.
The division by n can be avoided if one sets the internal variable sizeAverage to false
:param size_average: whether to average the loss
>>> smoothL1Criterion = SmoothL1Criterion(True)
creating: createSmoothL1Criterion
'''
def __init__(self,
size_average=True,
bigdl_type="float"):
super(SmoothL1Criterion, self).__init__(None, bigdl_type,
size_average)
class SmoothL1CriterionWithWeights(Criterion):
'''
a smooth version of the AbsCriterion
It uses a squared term if the absolute element-wise error falls below 1.
It is less sensitive to outliers than the MSECriterion and in some cases
prevents exploding gradients (e.g. see "Fast R-CNN" paper by Ross Girshick).
```
d = (x - y) * w_in
loss(x, y, w_in, w_out)
| 0.5 * (sigma * d_i)^2 * w_out if |d_i| < 1 / sigma / sigma
= 1/n \sum |
| (|d_i| - 0.5 / sigma / sigma) * w_out otherwise
```
>>> smoothL1CriterionWithWeights = SmoothL1CriterionWithWeights(1e-5, 1)
creating: createSmoothL1CriterionWithWeights
'''
def __init__(self,
sigma,
num=0,
bigdl_type="float"):
super(SmoothL1CriterionWithWeights, self).__init__(None, bigdl_type,
sigma,
num)
class SoftmaxWithCriterion(Criterion):
'''
Computes the multinomial logistic loss for a one-of-many classification task,
passing real-valued predictions through a softmax to get a probability distribution over classes.
It should be preferred over separate SoftmaxLayer + MultinomialLogisticLossLayer
as its gradient computation is more numerically stable.
    :param ignoreLabel: (optional) Specify a label value that should be ignored when computing the loss.
:param normalizeMode: How to normalize the output loss.
>>> softmaxWithCriterion = SoftmaxWithCriterion()
creating: createSoftmaxWithCriterion
>>> softmaxWithCriterion = SoftmaxWithCriterion(1, "FULL")
creating: createSoftmaxWithCriterion
'''
def __init__(self,
ignore_label=None,
normalize_mode="VALID",
bigdl_type="float"):
super(SoftmaxWithCriterion, self).__init__(None, bigdl_type,
ignore_label,
normalize_mode)
class TimeDistributedMaskCriterion(Criterion):
'''
This class is intended to support inputs with 3 or more dimensions.
Apply Any Provided Criterion to every temporal slice of an input.
In addition, it supports padding mask.
eg. if the target is [ [-1, 1, 2, 3, -1], [5, 4, 3, -1, -1] ],
and set the paddingValue property to -1, then the loss of -1 would not
    be accumulated and the loss is only divided by 6 (not counting the number of
    -1 entries; in this case, we are only interested in 1, 2, 3, 5, 4, 3)
:param criterion: embedded criterion
:param padding_value: padding value
>>> td = TimeDistributedMaskCriterion(ClassNLLCriterion())
creating: createClassNLLCriterion
creating: createTimeDistributedMaskCriterion
'''
def __init__(self, criterion, padding_value=0, bigdl_type="float"):
super(TimeDistributedMaskCriterion, self).__init__(
None, bigdl_type, criterion, padding_value)
class TimeDistributedCriterion(Criterion):
'''
This class is intended to support inputs with 3 or more dimensions.
Apply Any Provided Criterion to every temporal slice of an input.
:param criterion: embedded criterion
:param size_average: whether to divide the sequence length
>>> td = TimeDistributedCriterion(ClassNLLCriterion())
creating: createClassNLLCriterion
creating: createTimeDistributedCriterion
'''
def __init__(self, criterion, size_average=False, dimension=2, bigdl_type="float"):
super(TimeDistributedCriterion, self).__init__(
None, bigdl_type, criterion, size_average, dimension)
class CrossEntropyCriterion(Criterion):
"""
This criterion combines LogSoftMax and ClassNLLCriterion in one single class.
:param weights: A tensor assigning weight to each of the classes
>>> np.random.seed(123)
>>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
>>> cec = CrossEntropyCriterion(weights)
creating: createCrossEntropyCriterion
>>> cec = CrossEntropyCriterion()
creating: createCrossEntropyCriterion
"""
def __init__(self,
weights=None,
size_average=True,
bigdl_type="float"):
super(CrossEntropyCriterion, self).__init__(None, bigdl_type,
JTensor.from_ndarray(
weights),
size_average)
class BCECriterion(Criterion):
'''
Creates a criterion that measures the Binary Cross Entropy
between the target and the output
:param weights: weights for each class
:param sizeAverage: whether to average the loss or not
>>> np.random.seed(123)
>>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
>>> bCECriterion = BCECriterion(weights)
creating: createBCECriterion
>>> bCECriterion = BCECriterion()
creating: createBCECriterion
'''
def __init__(self,
weights=None,
size_average=True,
bigdl_type="float"):
super(BCECriterion, self).__init__(None, bigdl_type,
JTensor.from_ndarray(weights),
size_average)
class MultiLabelSoftMarginCriterion(Criterion):
'''
A MultiLabel multiclass criterion based on sigmoid:
the loss is:
```
l(x,y) = - sum_i y[i] * log(p[i]) + (1 - y[i]) * log (1 - p[i])
```
where p[i] = exp(x[i]) / (1 + exp(x[i]))
and with weights:
```
l(x,y) = - sum_i weights[i] (y[i] * log(p[i]) + (1 - y[i]) * log (1 - p[i]))
```
>>> np.random.seed(123)
>>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
>>> multiLabelSoftMarginCriterion = MultiLabelSoftMarginCriterion(weights)
creating: createMultiLabelSoftMarginCriterion
>>> multiLabelSoftMarginCriterion = MultiLabelSoftMarginCriterion()
creating: createMultiLabelSoftMarginCriterion
'''
def __init__(self,
weights=None,
size_average=True,
bigdl_type="float"):
super(MultiLabelSoftMarginCriterion, self).__init__(None, bigdl_type,
JTensor.from_ndarray(weights),
size_average)
class MultiMarginCriterion(Criterion):
'''
Creates a criterion that optimizes a multi-class classification hinge loss (margin-based loss)
between input x and output y (which is a target class index).
:param p:
:param weights:
:param margin:
:param size_average:
>>> np.random.seed(123)
>>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
>>> multiMarginCriterion = MultiMarginCriterion(1,weights)
creating: createMultiMarginCriterion
>>> multiMarginCriterion = MultiMarginCriterion()
creating: createMultiMarginCriterion
'''
def __init__(self,
p=1,
weights=None,
margin=1.0,
size_average=True,
bigdl_type="float"):
super(MultiMarginCriterion, self).__init__(None, bigdl_type,
p,
JTensor.from_ndarray(weights),
margin,
size_average)
class SoftMarginCriterion(Criterion):
"""
Creates a criterion that optimizes a two-class classification logistic loss
between input x (a Tensor of dimension 1) and output y (which is a tensor
containing either 1s or -1s).
```
loss(x, y) = sum_i (log(1 + exp(-y[i]*x[i]))) / x:nElement()
```
    :param size_average: The normalization by the number of elements in the input can be disabled by setting this to False.
>>> softMarginCriterion = SoftMarginCriterion(False)
creating: createSoftMarginCriterion
>>> softMarginCriterion = SoftMarginCriterion()
creating: createSoftMarginCriterion
"""
def __init__(self,
size_average=True,
bigdl_type="float"):
super(SoftMarginCriterion, self).__init__(None, bigdl_type, size_average)
class DiceCoefficientCriterion(Criterion):
'''
The Dice-Coefficient criterion
input: Tensor,target: Tensor
```
return: 2 * (input intersection target)
1 - ----------------------------------
input union target
```
>>> diceCoefficientCriterion = DiceCoefficientCriterion(size_average = True, epsilon = 1.0)
creating: createDiceCoefficientCriterion
>>> diceCoefficientCriterion = DiceCoefficientCriterion()
creating: createDiceCoefficientCriterion
'''
def __init__(self,
size_average=True,
epsilon=1.0,
bigdl_type="float"):
super(DiceCoefficientCriterion, self).__init__(None, bigdl_type,
size_average,
epsilon)
class L1Cost(Criterion):
'''
compute L1 norm for input, and sign of input
>>> l1Cost = L1Cost()
creating: createL1Cost
'''
def __init__(self,
bigdl_type="float"):
super(L1Cost, self).__init__(None, bigdl_type)
class CosineProximityCriterion(Criterion):
'''
compute the negative of the mean cosine proximity between predictions and targets.
```
x'(i) = x(i) / sqrt(max(sum(x(i)^2), 1e-12))
y'(i) = y(i) / sqrt(max(sum(x(i)^2), 1e-12))
cosine_proximity(x, y) = sum_i(-1 * x'(i) * y'(i))
```
>>> cosineProximityCriterion = CosineProximityCriterion()
creating: createCosineProximityCriterion
'''
def __init__(self,
bigdl_type="float"):
super(CosineProximityCriterion, self).__init__(None, bigdl_type)
class MeanAbsolutePercentageCriterion(Criterion):
'''
    This method is the same as the `mean_absolute_percentage_error` loss in keras.
    It calculates diff = K.abs((y - x) / K.clip(K.abs(y), K.epsilon(), Double.MaxValue))
    and returns 100 * K.mean(diff) as output. Here, x and y may or may not include a batch dimension.
>>> error = MeanAbsolutePercentageCriterion()
creating: createMeanAbsolutePercentageCriterion
'''
def __init__(self,
bigdl_type="float"):
super(MeanAbsolutePercentageCriterion, self).__init__(None, bigdl_type)
class MeanSquaredLogarithmicCriterion(Criterion):
'''
    This method is the same as the `mean_squared_logarithmic_error` loss in keras.
    It calculates: first_log = K.log(K.clip(y, K.epsilon(), Double.MaxValue) + 1.)
    second_log = K.log(K.clip(x, K.epsilon(), Double.MaxValue) + 1.)
    and outputs K.mean(K.square(first_log - second_log)). Here, x and y may or may not include a batch dimension.
>>> error = MeanSquaredLogarithmicCriterion()
creating: createMeanSquaredLogarithmicCriterion
'''
def __init__(self,
bigdl_type="float"):
super(MeanSquaredLogarithmicCriterion, self).__init__(None, bigdl_type)
class KullbackLeiblerDivergenceCriterion(Criterion):
'''
    Compute the Kullback-Leibler divergence error for input and target.
    This method is the same as the `kullback_leibler_divergence` loss in keras. The loss is calculated as:
y_true = K.clip(input, K.epsilon(), 1)
y_pred = K.clip(target, K.epsilon(), 1)
    and outputs K.sum(y_true * K.log(y_true / y_pred), axis=-1)
>>> error = KullbackLeiblerDivergenceCriterion()
creating: createKullbackLeiblerDivergenceCriterion
'''
def __init__(self,
bigdl_type="float"):
super(KullbackLeiblerDivergenceCriterion, self).__init__(None, bigdl_type)
class PoissonCriterion(Criterion):
'''
compute Poisson error for input and target, loss calculated as:
mean(input - target * K.log(input + K.epsilon()), axis=-1)
>>> error = PoissonCriterion()
creating: createPoissonCriterion
'''
def __init__(self,
bigdl_type="float"):
super(PoissonCriterion, self).__init__(None, bigdl_type)
class TransformerCriterion(Criterion):
'''
The criterion that takes two modules to transform input and target, and take
one criterion to compute the loss with the transformed input and target.
This criterion can be used to construct complex criterion. For example, the
`inputTransformer` and `targetTransformer` can be pre-trained CNN networks,
and we can use the networks' output to compute the high-level feature
reconstruction loss, which is commonly used in areas like neural style transfer
(https://arxiv.org/abs/1508.06576), texture synthesis (https://arxiv.org/abs/1505.07376),
.etc.
>>> trans = TransformerCriterion(MSECriterion())
creating: createMSECriterion
creating: createTransformerCriterion
'''
def __init__(self,
criterion,
input_transformer = None,
target_transformer = None,
bigdl_type="float"):
super(TransformerCriterion, self).__init__(None,
bigdl_type,
criterion,
input_transformer,
target_transformer)
class DotProductCriterion(Criterion):
'''
Compute the dot product of input and target tensor.
Input and target are required to have the same size.
    :param size_average: whether to average over each observation in the same batch
>>> dp =DotProductCriterion(False)
creating: createDotProductCriterion
'''
def __init__(self,
size_average = False,
bigdl_type="float"):
super(DotProductCriterion, self).__init__(None,
bigdl_type,
size_average)
class PGCriterion(Criterion):
'''
The Criterion to compute the negative policy gradient given a
multinomial distribution and the sampled action and reward.
The input to this criterion should be a 2-D tensor representing
a batch of multinomial distribution, the target should also be
    a 2-D tensor of the same size as the input, representing the sampled
    action and reward/advantage: the index of the non-zero element in the
    vector represents the sampled action and the non-zero element itself
    represents the reward. If the action space is large, you should consider
    using a SparseTensor for the target.
The loss computed is simple the standard policy gradient,
loss = - 1/n * sum(R_{n} dot_product log(P_{n}))
where R_{n} is the reward vector, and P_{n} is the input distribution.
    :param sizeAverage: whether to average over each observation in the same batch
>>> pg = PGCriterion()
creating: createPGCriterion
'''
def __init__(self,
sizeAverage = False,
bigdl_type="float"):
super(PGCriterion, self).__init__(None,
bigdl_type,
sizeAverage)
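# --- Hedged usage sketch (added for illustration; not part of the original API) ---
# For PGCriterion the target encodes both the sampled action and its reward: a
# (batch, n_actions) matrix that is zero everywhere except at the sampled action
# index, where it holds the reward/advantage. The helper below builds such a
# target with numpy; actually evaluating the loss via
# PGCriterion().forward(distribution, target) additionally requires an
# initialized BigDL/Spark session.
def _pg_target(actions, rewards, n_actions):
    """Return a (batch, n_actions) numpy array with rewards at the sampled action indices."""
    import numpy as np
    target = np.zeros((len(actions), n_actions), dtype="float32")
    target[np.arange(len(actions)), actions] = rewards
    return target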
def _test():
import doctest
from pyspark import SparkContext
from bigdl.nn import criterion
from bigdl.util.common import init_engine
from bigdl.util.common import create_spark_conf
globs = criterion.__dict__.copy()
sc = SparkContext(master="local[4]", appName="test criterion",
conf=create_spark_conf())
globs['sc'] = sc
init_engine()
(failure_count, test_count) = doctest.testmod(globs=globs,
optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
wzhongyuan/BigDL
|
pyspark/bigdl/nn/criterion.py
|
Python
|
apache-2.0
| 35,190
|
[
"Gaussian"
] |
d56097baa3540cb854babfafa36c584662e21f20c4696ecb9ea6269e8ef98087
|
""" The CS! (Configuration Service)
"""
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.ConfigurationSystem.private.ServiceInterface import ServiceInterface
from DIRAC.Core.DISET.RequestHandler import RequestHandler, getServiceOption
from DIRAC.WorkloadManagementSystem.Utilities.PilotCStoJSONSynchronizer import PilotCStoJSONSynchronizer
gServiceInterface = False
__RCSID__ = "$Id$"
def initializeConfigurationHandler( serviceInfo ):
global gServiceInterface
gServiceInterface = ServiceInterface( serviceInfo[ 'URL' ] )
return S_OK()
class ConfigurationHandler( RequestHandler ):
""" The CS handler
"""
@classmethod
def initializeHandler( cls, _serviceInfo ):
"""
Handler class initialization
"""
# Check the flag for updating the pilot 3 JSON file
cls.updatePilotJSONFile = cls.srv_getCSOption( 'UpdatePilotCStoJSONFile', False )
if cls.updatePilotJSONFile:
cls.paramDict = {}
cls.paramDict['pilotFileServer'] = getServiceOption( _serviceInfo, "pilotFileServer", '' )
cls.paramDict['pilotRepo'] = getServiceOption( _serviceInfo, "pilotRepo", '' )
cls.paramDict['pilotVORepo'] = getServiceOption( _serviceInfo, "pilotVORepo", '' )
cls.paramDict['projectDir'] = getServiceOption( _serviceInfo, "projectDir", '' )
cls.paramDict['pilotVOScriptPath'] = getServiceOption( _serviceInfo, "pilotVOScriptPath", '' )
cls.paramDict['pilotScriptsPath'] = getServiceOption( _serviceInfo, "pilotScriptsPath", '' )
return S_OK( 'Initialization went well' )
types_getVersion = []
def export_getVersion( self ):
return S_OK( gServiceInterface.getVersion() )
types_getCompressedData = []
def export_getCompressedData( self ):
sData = gServiceInterface.getCompressedConfigurationData()
return S_OK( sData )
types_getCompressedDataIfNewer = [ basestring ]
def export_getCompressedDataIfNewer( self, sClientVersion ):
sVersion = gServiceInterface.getVersion()
retDict = { 'newestVersion' : sVersion }
if sClientVersion < sVersion:
retDict[ 'data' ] = gServiceInterface.getCompressedConfigurationData()
return S_OK( retDict )
types_publishSlaveServer = [ basestring ]
def export_publishSlaveServer( self, sURL ):
gServiceInterface.publishSlaveServer( sURL )
return S_OK()
types_commitNewData = [ basestring ]
def export_commitNewData( self, sData ):
credDict = self.getRemoteCredentials()
if not 'DN' in credDict or not 'username' in credDict:
return S_ERROR( "You must be authenticated!" )
res = gServiceInterface.updateConfiguration( sData, credDict[ 'username' ] )
if self.updatePilotJSONFile:
if not res['OK']:
return res
return PilotCStoJSONSynchronizer( self.paramDict ).sync()
else:
return res
types_writeEnabled = []
def export_writeEnabled( self ):
return S_OK( gServiceInterface.isMaster() )
types_getCommitHistory = []
def export_getCommitHistory( self, limit = 100 ):
if limit > 100:
limit = 100
history = gServiceInterface.getCommitHistory()
if limit:
history = history[ :limit ]
return S_OK( history )
types_getVersionContents = [ list ]
def export_getVersionContents( self, versionList ):
contentsList = []
for version in versionList:
retVal = gServiceInterface.getVersionContents( version )
if retVal[ 'OK' ]:
contentsList.append( retVal[ 'Value' ] )
else:
return S_ERROR( "Can't get contents for version %s: %s" % ( version, retVal[ 'Message' ] ) )
return S_OK( contentsList )
types_rollbackToVersion = [ basestring ]
def export_rollbackToVersion( self, version ):
retVal = gServiceInterface.getVersionContents( version )
if not retVal[ 'OK' ]:
return S_ERROR( "Can't get contents for version %s: %s" % ( version, retVal[ 'Message' ] ) )
credDict = self.getRemoteCredentials()
if not 'DN' in credDict or not 'username' in credDict:
return S_ERROR( "You must be authenticated!" )
return gServiceInterface.updateConfiguration( retVal[ 'Value' ],
credDict[ 'username' ],
updateVersionOption = True )
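# --- Hedged usage sketch (added for illustration; not part of the original service) ---
# A minimal client-side view of this handler through the standard DISET RPC
# client: the "export_" prefix is stripped on the wire, so export_getVersion is
# called as getVersion(). The service name "Configuration/Server" and a fully
# configured DIRAC installation are assumptions made only for this example.
def _exampleConfigurationClient():
  from DIRAC.Core.DISET.RPCClient import RPCClient
  rpc = RPCClient( "Configuration/Server" )
  res = rpc.getVersion()
  if not res[ 'OK' ]:
    return res
  # only pull the compressed CS data if the server holds a newer version
  return rpc.getCompressedDataIfNewer( res[ 'Value' ] )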
|
hgiemza/DIRAC
|
ConfigurationSystem/Service/ConfigurationHandler.py
|
Python
|
gpl-3.0
| 4,258
|
[
"DIRAC"
] |
9e3378d9878787ebf85e15279a6c96131deaa1ef06d6be0fc668752e8df6faad
|
from BoloMass.Arnett82 import tau_0, Lambda, A82LC_full, A82LC_gp
from RetroSpect.Plotting import color_ramp
import sys
import numpy as np
import matplotlib.pyplot as pypl
# Shared time grid in days for the plotting tests below. test_A82_Lambda and the
# A82LC_full tests reference `t` without defining it, so it is defined here at
# module level (mirroring the grid built locally in test_A82LC_gp).
t = np.arange(0.0, 120.1, 0.5)
def test_A82_Lambda():
"""Test plots for Lambda"""
y = np.arange(0.7, 1.41, 0.1)
c = color_ramp(len(y))
for yi, ci in zip(y, c):
pypl.semilogy(t, Lambda(t, yi), color=ci)
pypl.show()
def test_A82LC_full_01():
"""Test plots for A82LC_full"""
y, tg, MNi, Eth0 = 1.0, 40.0, 0.6, 0.0e+51
R0 = np.array([0.0, 0.1, 0.3, 1.0, 3.0, 10.0]) * 1e+14
c = color_ramp(len(R0))
for R0i, ci in zip(R0, c):
td = tau_0(R0i, 0.1, 2.8e+33)
L0, w = Eth0/(td * 86400), y*17.6/td
print "R0, tau0, L0, w =", R0i, td, L0, w
pypl.subplot(2, 1, 1)
pypl.plot(t, A82LC_full(t, y, w, tg, MNi, Eth0), color=ci)
pypl.subplot(2, 1, 2)
pypl.semilogy(t, A82LC_full(t, y, w, tg, MNi, Eth0), color=ci)
pypl.show()
def test_A82LC_full_02():
"""More test plots for A82LC_full"""
y, tg, MNi, R0 = 1.0, 40.0, 0.6, 1e+13
Eth0 = np.arange(0.0, 0.51, 0.1) * 1e+51
c = color_ramp(len(Eth0))
for Ethi, ci in zip(Eth0, c):
td = tau_0(R0, 0.1, 2.8e+33)
L0, w = Ethi/(td * 86400), y*17.6/td
print "R0, tau0, L0, w =", R0, td, L0, w
pypl.subplot(2, 1, 1)
pypl.plot(t, A82LC_full(t, y, w, tg, MNi, Ethi), color=ci)
pypl.subplot(2, 1, 2)
pypl.semilogy(t, A82LC_full(t, y, w, tg, MNi, Ethi), color=ci)
pypl.show()
def test_A82LC_gp():
"""Test plots for the Gaussian process stuff"""
# Set up a Gaussian process interpolator
gpint = A82LC_gp("a82lcgp_4d_alt.pkl")
t = np.arange(0.0, 120.1, 0.5)
test_resids = True
def my_plot_set(p, c, l):
res = [ ]
for pi, ci, li in zip(p, c, l):
gpfit = gpint(t, pi)
pypl.semilogy(t, gpfit, color=ci, label=li)
if test_resids:
orig = A82LC_full(t, *pi)
else:
orig = gpfit
pypl.semilogy(t, orig, color=ci, ls='--')
# calculate residuals
res.append((orig - gpfit)/orig)
res = np.array(res).ravel()
res = res[abs(res) < np.inf]
print "nmad, rms, max resids = {0:.4f}, {1:.4f}, {2:.4f};".format(
np.median(np.abs(res)), res.std(), np.abs(res).max()),
nok, ntot = np.sum(np.abs(res.ravel()) > 0.02), len(res.ravel())
fok = nok / (1.0*ntot)
print "fvals(res > 2\%) = {0}/{1} = {2:.2f}\%".format(
nok, ntot, 100.0*fok)
sys.stdout.flush()
pypl.legend()
pypl.show()
# Vary y
y = np.arange(0.7, 1.41, 0.05)
pars = [(yi, 0.0, 40.0, 0.6, 0.0) for yi in y]
colors = color_ramp(len(pars))
labels = ["y = {0:.2f}".format(yi) for yi in y]
print "varying y:",
my_plot_set(pars, colors, labels)
# Vary w with Eth0 = 0
w = np.arange(0.0, 0.26, 0.05)
pars = [(1.0, wi, 40.0, 0.6, 0.0) for wi in w]
colors = color_ramp(len(pars))
labels = ["w = {0:.2f}".format(wi) for wi in w]
print "varying w:",
my_plot_set(pars, colors, labels)
# Vary w with Eth0 = 0.5e+51 erg
w = np.arange(0.0, 0.26, 0.05)
pars = [(1.0, wi, 40.0, 0.6, 0.5e+51) for wi in w]
colors = color_ramp(len(pars))
labels = ["w = {0:.2f}".format(wi) for wi in w]
print "varying w:",
my_plot_set(pars, colors, labels)
# Vary tg
tg = np.arange(20.0, 70.1, 5.0)
pars = [(1.0, 0.0, tgi, 0.6, 0.0) for tgi in tg]
colors = color_ramp(len(pars))
labels = ["t$_\gamma$ = {0:.0f} days".format(tgi) for tgi in tg]
print "varying tg:",
my_plot_set(pars, colors, labels)
# test_A82_Lambda()
# test_A82LC_full_01()
# test_A82LC_full_02()
test_A82LC_gp()
|
rscalzo/pyBoloSN
|
Tests/test_A82.py
|
Python
|
mit
| 3,804
|
[
"Gaussian"
] |
650d2aa508c7ae014e5e3d42214f5f401e6445268e69ca1fecea80d2a3b241c9
|
import os
from setuptools import setup, find_packages
import distutils.command.build_py
from ece2cmor3 import __version__
name = "ece2cmor3"
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# Utility function to get the git hash, possibly with local changes flag
def get_git_hash():
import git
try:
repo = git.Repo(search_parent_directories=True)
sha = str(repo.head.object.hexsha)
if repo.is_dirty():
sha += "-changes"
except git.exc.InvalidGitRepositoryError:
sha = "local unknown branch"
return sha
# Overridden build command, appending the git hash to the version python file
class add_sha(distutils.command.build_py.build_py):
def run(self):
distutils.command.build_py.build_py.run(self)
if not self.dry_run:
filepath = os.path.join(self.build_lib, name, "__version__.py")
with open(filepath, "a") as version_file:
version_file.write("sha = \"{hash}\"\n".format(hash=get_git_hash()))
setup(name=name,
version=__version__.version,
author="Gijs van den Oord",
author_email="g.vandenoord@esciencecenter.nl",
description="CMORize and post-process EC-Earth output data",
license="Apache License, Version 2.0",
url="https://github.com/EC-Earth/ece2cmor3",
packages=find_packages(exclude=("tests", "examples")),
package_data={"ece2cmor3": ["resources/*.json",
"resources/*.xlsx",
"resources/*.txt",
"resources/tables/*.json",
"resources/lpjg-grid-content/*",
"resources/b2share-data/nemo-vertices-ORCA*.nc",
"resources/b2share-data/fx-sftlf-EC-Earth3-T*.nc",
"resources/metadata-templates/*.json",
"scripts/create-nemo-only-list/*.xlsx",
"resources/xios-nemo-file_def-files/basic*-file_def_nemo.xml",
"resources/miscellaneous-data-requests/*/*",
"resources/lists-of-omitted-variables/*.xlsx"]},
include_package_data=True,
long_description=read("README.md"),
entry_points={"console_scripts": [
"ece2cmor = ece2cmor3.ece2cmor:main",
"checkvars = ece2cmor3.scripts.checkvars:main",
"drq2ppt = ece2cmor3.scripts.drq2ppt:main",
"drq2file_def = ece2cmor3.scripts.drq2file_def:main",
"drq2ins = ece2cmor3.scripts.drq2ins:main",
"drq2varlist = ece2cmor3.scripts.drq2varlist:main",
"estimate_tm5_volume = ece2cmor3.scripts.estimate_tm5_volume:main",
"convert_component_to_flat_json = ece2cmor3.scripts.convert_component_to_flat_json:main",
"fixmonths = ece2cmor3.scripts.fixmonths:main",
"splitbalance = ece2cmor3.scripts.splitbalance:main"
]},
scripts=["ece2cmor3/scripts/genecec-per-mip-experiment.sh"],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Programming Language :: Python",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Atmospheric Science",
"License :: OSI Approved :: Apache Software License"
],
cmdclass={"build_py": add_sha}
)
|
goord/ece2cmor3
|
setup.py
|
Python
|
apache-2.0
| 3,554
|
[
"ORCA"
] |
462478c5e541dbbeaeebe1476aea9c279271ddc449a66828a1caab513786d760
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from monty.json import MSONable
from enum import Enum, unique
import numpy as np
"""
This module provides core classes needed to define the electronic structure,
such as Spin, Orbital, etc.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
@unique
class Spin(Enum):
"""
Enum type for Spin. Only up and down.
Usage: Spin.up, Spin.down.
"""
up, down = (1, -1)
def __int__(self):
return self.value
def __float__(self):
return float(self.value)
def __str__(self):
return str(self.value)
@unique
class OrbitalType(Enum):
"""
Enum type for orbital type. Indices are basically the azimuthal quantum
number, l.
"""
s = 0
p = 1
d = 2
f = 3
def __str__(self):
return self.name
@unique
class Orbital(Enum):
"""
    Enum type for specific orbitals. The indices are basically the order in
    which the orbitals are reported in VASP and have no special meaning.
"""
s = 0
py = 1
pz = 2
px = 3
dxy = 4
dyz = 5
dz2 = 6
dxz = 7
dx2 = 8
f_3 = 9
f_2 = 10
f_1 = 11
f0 = 12
f1 = 13
f2 = 14
f3 = 15
def __int__(self):
return self.value
def __str__(self):
return self.name
@property
def orbital_type(self):
"""
Returns OrbitalType of an orbital.
"""
return OrbitalType[self.name[0]]
class Magmom(MSONable):
"""
New class in active development. Use with caution, feedback is
appreciated.
Class to handle magnetic moments. Defines the magnetic moment of a
site or species relative to a spin quantization axis. Designed for
use in electronic structure calculations.
* For the general case, Magmom can be specified by a vector,
e.g. m = Magmom([1.0, 1.0, 2.0]), and subscripts will work as
expected, e.g. m[0] gives 1.0
* For collinear calculations, Magmom can assumed to be scalar-like,
e.g. m = Magmom(5.0) will work as expected, e.g. float(m) gives 5.0
Both of these cases should be safe and shouldn't give any surprises,
but more advanced functionality is available if required.
There also exist useful static methods for lists of magmoms:
* Magmom.are_collinear(magmoms) - if true, a collinear electronic
structure calculation can be safely initialized, with float(Magmom)
giving the expected scalar magnetic moment value
* Magmom.get_consistent_set_and_saxis(magmoms) - for non-collinear
electronic structure calculations, a global, consistent spin axis
has to be used. This method returns a list of Magmoms which all
share a common spin axis, along with the global spin axis.
All methods that take lists of magmoms will accept magmoms either as
Magmom objects or as scalars/lists and will automatically convert to
a Magmom representation internally.
The following methods are also particularly useful in the context of
VASP calculations:
* Magmom.get_xyz_magmom_with_001_saxis()
* Magmom.get_00t_magmom_with_xyz_saxis()
See VASP documentation for more information:
https://cms.mpi.univie.ac.at/wiki/index.php/SAXIS
"""
def __init__(self, moment, saxis=(0, 0, 1)):
"""
:param moment: magnetic moment, supplied as float or list/np.ndarray
:param saxis: spin axis, supplied as list/np.ndarray, parameter will
be converted to unit vector (default is [0, 0, 1])
:return: Magmom object
"""
# to init from another Magmom instance
if isinstance(moment, Magmom):
saxis = moment.saxis
moment = moment.moment
moment = np.array(moment, dtype='d')
if moment.ndim == 0:
moment = moment * [0, 0, 1]
self.moment = moment
saxis = np.array(saxis, dtype='d')
self.saxis = saxis / np.linalg.norm(saxis)
@classmethod
def from_global_moment_and_saxis(cls, global_moment, saxis):
"""
Convenience method to initialize Magmom from a given global
magnetic moment, i.e. magnetic moment with saxis=(0,0,1), and
provided saxis.
Method is useful if you do not know the components of your
magnetic moment in frame of your desired saxis.
:param global_moment:
:param saxis: desired saxis
:return:
"""
magmom = Magmom(global_moment)
return cls(magmom.get_moment(saxis=saxis), saxis=saxis)
def _get_transformation_matrix(self, saxis):
saxis = saxis / np.linalg.norm(saxis)
alpha = np.arctan2(saxis[1], saxis[0])
beta = np.arctan2(np.sqrt(saxis[0]**2 + saxis[1]**2), saxis[2])
cos_a = np.cos(alpha)
cos_b = np.cos(beta)
sin_a = np.sin(alpha)
sin_b = np.sin(beta)
m = [[cos_b*cos_a, -sin_a, sin_b*cos_a],
[cos_b*sin_a, cos_a, sin_b*sin_a],
[-sin_b, 0, cos_b]]
return m
def _get_transformation_matrix_inv(self, saxis):
saxis = saxis / np.linalg.norm(saxis)
alpha = np.arctan2(saxis[1], saxis[0])
beta = np.arctan2(np.sqrt(saxis[0]**2 + saxis[1]**2), saxis[2])
cos_a = np.cos(alpha)
cos_b = np.cos(beta)
sin_a = np.sin(alpha)
sin_b = np.sin(beta)
m = [[cos_b*cos_a, cos_b*sin_a, -sin_b],
[-sin_a, cos_a, 0],
[sin_b*cos_a, sin_b*sin_a, cos_b]]
return m
def get_moment(self, saxis=(0, 0, 1)):
"""
Get magnetic moment relative to a given spin quantization axis.
If no axis is provided, moment will be given relative to the
Magmom's internal spin quantization axis, i.e. equivalent to
Magmom.moment
        :param saxis: (list/numpy array) spin quantization axis
:return: np.ndarray of length 3
"""
# transform back to moment with spin axis [0, 0, 1]
m_inv = self._get_transformation_matrix_inv(self.saxis)
moment = np.matmul(self.moment, m_inv)
# transform to new saxis
m = self._get_transformation_matrix(saxis)
moment = np.matmul(moment, m)
# round small values to zero
moment[np.abs(moment) < 1e-8] = 0
return moment
@property
def global_moment(self):
"""
Get the magnetic moment defined in an arbitrary global reference frame.
:return: np.ndarray of length 3
"""
return self.get_moment()
@property
def projection(self):
"""
Projects moment along spin quantisation axis. Useful for obtaining
collinear approximation for slightly non-collinear magmoms.
:return: float
"""
return np.dot(self.moment, self.saxis)
def get_xyz_magmom_with_001_saxis(self):
"""
Returns a Magmom in the default setting of saxis = [0, 0, 1] and
the magnetic moment rotated as required.
:return: Magmom
"""
return Magmom(self.get_moment())
def get_00t_magmom_with_xyz_saxis(self):
"""
For internal implementation reasons, in non-collinear calculations
VASP prefers:
MAGMOM = 0 0 total_magnetic_moment
SAXIS = x y z
to an equivalent:
MAGMOM = x y z
SAXIS = 0 0 1
This method returns a Magmom object with magnetic moment [0, 0, t],
where t is the total magnetic moment, and saxis rotated as required.
        A consistent direction of saxis is applied such that t might be positive
        or negative depending on the direction of the initial moment. This is useful
        in the case of collinear structures, rather than constraining t to always
        be positive.
:return: Magmom
"""
# reference direction gives sign of moment
# entirely arbitrary, there will always be a pathological case
# where a consistent sign is not possible if the magnetic moments
# are aligned along the reference direction, but in practice this
# is unlikely to happen
ref_direction = np.array([1.01, 1.02, 1.03])
t = abs(self)
if t != 0:
new_saxis = self.moment/np.linalg.norm(self.moment)
if np.dot(ref_direction, new_saxis) < 0:
t = -t
new_saxis = -new_saxis
return Magmom([0, 0, t], saxis=new_saxis)
else:
return Magmom(self)
@staticmethod
def have_consistent_saxis(magmoms):
"""
This method checks that all Magmom objects in a list have a
consistent spin quantization axis. To write MAGMOM tags to a
VASP INCAR, a global SAXIS value for all magmoms has to be used.
        If the saxis values are inconsistent, a consistent set can be created with:
        Magmom.get_consistent_set_and_saxis(magmoms)
:param magmoms: list of magmoms (Magmoms, scalars or vectors)
:return: bool
"""
magmoms = [Magmom(magmom) for magmom in magmoms]
ref_saxis = magmoms[0].saxis
match_ref = [magmom.saxis == ref_saxis for magmom in magmoms]
if np.all(match_ref):
return True
else:
return False
@staticmethod
def get_consistent_set_and_saxis(magmoms, saxis=None):
"""
Method to ensure a list of magmoms use the same spin axis.
Returns a tuple of a list of Magmoms and their global spin axis.
:param magmoms: list of magmoms (Magmoms, scalars or vectors)
:param saxis: can provide a specific global spin axis
:return: (list of Magmoms, global spin axis) tuple
"""
magmoms = [Magmom(magmom) for magmom in magmoms]
if saxis is None:
saxis = Magmom.get_suggested_saxis(magmoms)
else:
saxis = saxis/np.linalg.norm(saxis)
magmoms = [magmom.get_moment(saxis=saxis) for magmom in magmoms]
return (magmoms, saxis)
@staticmethod
def get_suggested_saxis(magmoms):
"""
This method returns a suggested spin axis for a set of magmoms,
taking the largest magnetic moment as the reference. For calculations
with collinear spins, this would give a sensible saxis for a ncl
calculation.
:param magmoms: list of magmoms (Magmoms, scalars or vectors)
:return: np.ndarray of length 3
"""
# heuristic, will pick largest magmom as reference
# useful for creating collinear approximations of
# e.g. slightly canted magnetic structures
# for fully collinear structures, will return expected
# result
magmoms = [Magmom(magmom) for magmom in magmoms]
# filter only non-zero magmoms
magmoms = [magmom for magmom in magmoms if abs(magmom)]
magmoms.sort(reverse=True)
if len(magmoms) > 0:
return magmoms[0].get_00t_magmom_with_xyz_saxis().saxis
else:
return np.array([0, 0, 1], dtype="d")
@staticmethod
def are_collinear(magmoms):
"""
Method checks to see if a set of magnetic moments are collinear
with each other.
:param magmoms: list of magmoms (Magmoms, scalars or vectors)
:return: bool
"""
magmoms = [Magmom(magmom) for magmom in magmoms]
        if not Magmom.have_consistent_saxis(magmoms):
            magmoms, _ = Magmom.get_consistent_set_and_saxis(magmoms)
# convert to numpy array for convenience
magmoms = np.array([list(magmom) for magmom in magmoms])
magmoms = magmoms[np.any(magmoms, axis=1)] # remove zero magmoms
if len(magmoms) == 0:
return True
# use first moment as reference to compare against
ref_magmom = magmoms[0]
# magnitude of cross products != 0 if non-collinear with reference
num_ncl = np.count_nonzero(np.linalg.norm(np.cross(ref_magmom, magmoms), axis=1))
if num_ncl > 0:
return False
else:
return True
@classmethod
def from_moment_relative_to_crystal_axes(cls, moment, lattice):
"""
Obtaining a Magmom object from a magnetic moment provided
relative to crystal axes.
Used for obtaining moments from magCIF file.
:param magmom: list of floats specifying vector magmom
:param lattice: Lattice
:return: Magmom
"""
# get matrix representing unit lattice vectors
unit_m = lattice.matrix / np.linalg.norm(lattice.matrix, axis=1)[:, None]
moment = np.matmul(list(moment), unit_m)
# round small values to zero
moment[np.abs(moment) < 1e-8] = 0
return cls(moment)
def get_moment_relative_to_crystal_axes(self, lattice):
"""
If scalar magmoms, moments will be given arbitrarily along z.
Used for writing moments to magCIF file.
:param magmom: Magmom
:param lattice: Lattice
:return: vector as list of floats
"""
# get matrix representing unit lattice vectors
unit_m = lattice.matrix / np.linalg.norm(lattice.matrix, axis=1)[:, None]
# note np.matmul() requires numpy version >= 1.10
moment = np.matmul(self.global_moment, np.linalg.inv(unit_m))
# round small values to zero
moment[np.abs(moment) < 1e-8] = 0
return moment
def __getitem__(self, key):
return self.moment[key]
def __iter__(self):
return iter(self.moment)
def __abs__(self):
return np.linalg.norm(self.moment)
def __eq__(self, other):
"""
Equal if 'global' magnetic moments are the same, saxis can differ.
"""
other = Magmom(other)
return np.allclose(self.global_moment, other.global_moment)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return abs(self) < abs(other)
def __neg__(self):
return Magmom(-self.moment, saxis=self.saxis)
def __hash__(self):
return (tuple(self.moment)+tuple(self.saxis)).__hash__()
def __float__(self):
"""
Returns magnitude of magnetic moment with a sign with respect to
an arbitrary direction.
Should give unsurprising output if Magmom is treated like a
scalar or if a set of Magmoms describes a collinear structure.
Implemented this way rather than simpler abs(self) so that
moments will have a consistent sign in case of e.g.
antiferromagnetic collinear structures without additional
user intervention.
However, should be used with caution for non-collinear
structures and might give non-sensical results except in the case
of only slightly non-collinear structures (e.g. small canting).
        This approach is also used to obtain the "diff" VolumetricData
        in pymatgen.io.vasp.outputs.VolumetricData when processing
        Chgcars from SOC calculations.
"""
return float(self.get_00t_magmom_with_xyz_saxis()[2])
def __str__(self):
return str(float(self))
def __repr__(self):
if np.allclose(self.saxis, (0, 0, 1)):
return 'Magnetic moment {0}'.format(self.moment, self.saxis)
else:
return 'Magnetic moment {0} (spin axis = {1})'.format(self.moment,
self.saxis)
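# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A short example exercising the Magmom behaviours described in the class
# docstring: scalar-like collinear moments, vector moments, and building a
# consistent set with a common global spin axis. It only uses names defined above.
def _example_magmom_usage():
    collinear = [Magmom(5.0), Magmom(-5.0), Magmom(0.0)]
    assert Magmom.are_collinear(collinear)
    vector = Magmom([1.0, 1.0, 2.0])
    assert abs(float(vector[0]) - 1.0) < 1e-8  # subscripts index the moment vector
    # rewrite a mixed set so that every moment shares one global spin axis
    moments, saxis = Magmom.get_consistent_set_and_saxis([vector, [0, 0, 3]])
    return moments, saxis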
|
dongsenfo/pymatgen
|
pymatgen/electronic_structure/core.py
|
Python
|
mit
| 15,795
|
[
"CRYSTAL",
"VASP",
"pymatgen"
] |
af14e8e0a0701d8aad7105357aef96012c86487ab8f86c1a10dfd7ff9fa1bce5
|
# coding: utf-8
from __future__ import unicode_literals
import unittest
import os
import json
from io import open
from pymatgen.electronic_structure.bandstructure import Kpoint
from pymatgen import Lattice
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class KpointTest(unittest.TestCase):
def setUp(self):
self.lattice = Lattice.cubic(10.0)
self.kpoint = Kpoint([0.1, 0.4, -0.5], self.lattice, label="X")
def test_properties(self):
self.assertEqual(self.kpoint.frac_coords[0], 0.1)
self.assertEqual(self.kpoint.frac_coords[1], 0.4)
self.assertEqual(self.kpoint.frac_coords[2], -0.5)
self.assertEqual(self.kpoint.a, 0.1)
self.assertEqual(self.kpoint.b, 0.4)
self.assertEqual(self.kpoint.c, -0.5)
self.assertEqual(self.lattice, Lattice.cubic(10.0))
self.assertEqual(self.kpoint.cart_coords[0], 1.0)
self.assertEqual(self.kpoint.cart_coords[1], 4.0)
self.assertEqual(self.kpoint.cart_coords[2], -5.0)
self.assertEqual(self.kpoint.label, "X")
class BandStructureSymmLine_test(unittest.TestCase):
def setUp(self):
with open(os.path.join(test_dir, "Cu2O_361_bandstructure.json"),
"r", encoding='utf-8') as f:
d = json.load(f)
self.bs = BandStructureSymmLine.from_dict(d)
self.assertListEqual(self.bs._projections[Spin.up][10][12][Orbital.s], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "wrong projections")
self.assertListEqual(self.bs._projections[Spin.up][25][0][Orbital.dyz], [0.0, 0.0, 0.0011, 0.0219, 0.0219, 0.069], "wrong projections")
self.assertAlmostEqual(self.bs.get_projection_on_elements()[Spin.up][25][10]['O'], 0.0328)
self.assertAlmostEqual(self.bs.get_projection_on_elements()[Spin.up][22][25]['Cu'], 0.8327)
self.assertAlmostEqual(self.bs.get_projections_on_elts_and_orbitals({'Cu':['s','d']})[Spin.up][25][0]['Cu']['s'], 0.0027)
self.assertAlmostEqual(self.bs.get_projections_on_elts_and_orbitals({'Cu':['s','d']})[Spin.up][25][0]['Cu']['d'], 0.8495999999999999)
with open(os.path.join(test_dir, "CaO_2605_bandstructure.json"), "r",
encoding='utf-8') as f:
d = json.load(f)
#print d.keys()
self.bs = BandStructureSymmLine.from_dict(d)
#print self.bs.as_dict().keys()
#this doesn't really test as_dict() -> from_dict very well
#self.assertEqual(self.bs.as_dict().keys(), d.keys())
self.one_kpoint = self.bs.kpoints[31]
self.assertEqual(self.bs._nb_bands, 16)
self.assertAlmostEqual(self.bs._bands[Spin.up][5][10], 0.5608)
self.assertAlmostEqual(self.bs._bands[Spin.up][5][10], 0.5608)
self.assertEqual(self.bs._branches[5]['name'], "L-U")
self.assertEqual(self.bs._branches[5]['start_index'], 80)
self.assertEqual(self.bs._branches[5]['end_index'], 95)
self.assertAlmostEqual(self.bs._distance[70], 4.2335127528765737)
with open(os.path.join(test_dir, "NiO_19009_bandstructure.json"),
"r", encoding='utf-8') as f:
d = json.load(f)
self.bs_spin = BandStructureSymmLine.from_dict(d)
#this doesn't really test as_dict() -> from_dict very well
#self.assertEqual(self.bs_spin.as_dict().keys(), d.keys())
self.assertEqual(self.bs_spin._nb_bands, 27)
self.assertAlmostEqual(self.bs_spin._bands[Spin.up][5][10], 0.262)
self.assertAlmostEqual(self.bs_spin._bands[Spin.down][5][10],
1.6156)
def test_properties(self):
self.assertEqual(self.one_kpoint.frac_coords[0], 0.5)
self.assertEqual(self.one_kpoint.frac_coords[1], 0.25)
self.assertEqual(self.one_kpoint.frac_coords[2], 0.75)
self.assertAlmostEqual(self.one_kpoint.cart_coords[0], 0.64918757)
self.assertAlmostEqual(self.one_kpoint.cart_coords[1], 1.29837513)
self.assertAlmostEqual(self.one_kpoint.cart_coords[2], 0.0)
self.assertEqual(self.one_kpoint.label, "W")
self.assertAlmostEqual(self.bs.efermi, 2.6211967, "wrong fermi energy")
def test_get_branch(self):
self.assertAlmostEqual(self.bs.get_branch(110)[0]['name'], "U-W")
def test_is_metal(self):
self.assertFalse(self.bs.is_metal(), "wrong metal assignment")
self.assertFalse(self.bs_spin.is_metal(), "wrong metal assignment")
def test_get_cbm(self):
cbm = self.bs.get_cbm()
self.assertAlmostEqual(cbm['energy'], 5.8709, "wrong CBM energy")
self.assertEqual(cbm['band_index'][Spin.up][0], 8, "wrong CBM band index")
self.assertEqual(cbm['kpoint_index'][0], 15, "wrong CBM kpoint index")
self.assertEqual(cbm['kpoint'].frac_coords[0], 0.5, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].frac_coords[2], 0.5, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].label, "X", "wrong CBM kpoint label")
cbm_spin = self.bs_spin.get_cbm()
self.assertAlmostEqual(cbm_spin['energy'], 8.0458, "wrong CBM energy")
self.assertEqual(cbm_spin['band_index'][Spin.up][0], 12, "wrong CBM band index")
self.assertEqual(len(cbm_spin['band_index'][Spin.down]), 0, "wrong CBM band index")
self.assertEqual(cbm_spin['kpoint_index'][0], 0, "wrong CBM kpoint index")
self.assertEqual(cbm_spin['kpoint'].frac_coords[0], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].frac_coords[2], 0.0, "wrong CBM kpoint frac coords")
        self.assertEqual(cbm_spin['kpoint'].label, "\\Gamma", "wrong CBM kpoint label")
def test_get_vbm(self):
vbm = self.bs.get_vbm()
self.assertAlmostEqual(vbm['energy'], 2.2361, "wrong VBM energy")
self.assertEqual(len(vbm['band_index'][Spin.up]), 3, "wrong VBM number of bands")
self.assertEqual(vbm['band_index'][Spin.up][0], 5, "wrong VBM band index")
self.assertEqual(vbm['kpoint_index'][0], 0, "wrong VBM kpoint index")
self.assertEqual(vbm['kpoint'].frac_coords[0], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].frac_coords[1], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].frac_coords[2], 0.0, "wrong VBM kpoint frac coords")
        self.assertEqual(vbm['kpoint'].label, "\\Gamma", "wrong VBM kpoint label")
vbm_spin = self.bs_spin.get_vbm()
self.assertAlmostEqual(vbm_spin['energy'], 5.731, "wrong VBM energy")
self.assertEqual(len(vbm_spin['band_index'][Spin.up]), 2, "wrong VBM number of bands")
self.assertEqual(len(vbm_spin['band_index'][Spin.down]), 0, "wrong VBM number of bands")
self.assertEqual(vbm_spin['band_index'][Spin.up][0], 10, "wrong VBM band index")
self.assertEqual(vbm_spin['kpoint_index'][0], 79, "wrong VBM kpoint index")
self.assertEqual(vbm_spin['kpoint'].frac_coords[0], 0.5, "wrong VBM kpoint frac coords")
self.assertEqual(vbm_spin['kpoint'].frac_coords[1], 0.5, "wrong VBM kpoint frac coords")
self.assertEqual(vbm_spin['kpoint'].frac_coords[2], 0.5, "wrong VBM kpoint frac coords")
self.assertEqual(vbm_spin['kpoint'].label, "L", "wrong VBM kpoint label")
def test_get_band_gap(self):
bg = self.bs.get_band_gap()
self.assertAlmostEqual(bg['energy'], 3.6348, "wrong gap energy")
self.assertEqual(bg['transition'], "\\Gamma-X", "wrong kpoint transition")
self.assertFalse(bg['direct'], "wrong nature of the gap")
bg_spin = self.bs_spin.get_band_gap()
self.assertAlmostEqual(bg_spin['energy'], 2.3148, "wrong gap energy")
self.assertEqual(bg_spin['transition'], "L-\\Gamma", "wrong kpoint transition")
self.assertFalse(bg_spin['direct'], "wrong nature of the gap")
if __name__ == '__main__':
unittest.main()
|
Dioptas/pymatgen
|
pymatgen/electronic_structure/tests/test_bandstructure.py
|
Python
|
mit
| 8,363
|
[
"pymatgen"
] |
597b27e6e3e1bd2eb8f97cb2aab5d017ee1cf704610cba60a8fed3160654d713
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from scipy.constants import N_A
from pymatgen.core.periodic_table import Element
from pymatgen.core.units import Charge, Time
from pymatgen.analysis.reaction_calculator import BalancedReaction
from pymatgen.core.composition import Composition
from pymatgen.apps.battery.battery_abc import AbstractElectrode, \
AbstractVoltagePair
from pymatgen.analysis.phase_diagram import PhaseDiagram
from monty.json import MontyDecoder
"""
This module contains the classes to build a ConversionElectrode.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Feb 1, 2012"
__status__ = "Beta"
class ConversionElectrode(AbstractElectrode):
"""
Class representing a ConversionElectrode.
"""
def __init__(self, voltage_pairs, working_ion_entry, initial_comp):
"""
General constructor for ConversionElectrode. However, it is usually
easier to construct a ConversionElectrode using one of the static
constructors provided.
Args:
voltage_pairs: The voltage pairs making up the Conversion
Electrode.
working_ion_entry: A single ComputedEntry or PDEntry
representing the element that carries charge across the
battery, e.g. Li.
initial_comp: Starting composition for ConversionElectrode.
"""
self._composition = initial_comp
self._working_ion_entry = working_ion_entry
ion_el = self._working_ion_entry.composition.elements[0]
self._working_ion = ion_el.symbol
self._vpairs = voltage_pairs
@staticmethod
def from_composition_and_pd(comp, pd, working_ion_symbol="Li"):
"""
Convenience constructor to make a ConversionElectrode from a
composition and a phase diagram.
Args:
comp:
Starting composition for ConversionElectrode, e.g.,
Composition("FeF3")
pd:
A PhaseDiagram of the relevant system (e.g., Li-Fe-F)
working_ion_symbol:
Element symbol of working ion. Defaults to Li.
"""
working_ion = Element(working_ion_symbol)
entry = None
working_ion_entry = None
for e in pd.stable_entries:
if e.composition.reduced_formula == comp.reduced_formula:
entry = e
elif e.is_element and \
e.composition.reduced_formula == working_ion_symbol:
working_ion_entry = e
if not entry:
raise ValueError("Not stable compound found at composition {}."
.format(comp))
profile = pd.get_element_profile(working_ion, comp)
# Need to reverse because voltage goes form most charged to most
# discharged.
profile.reverse()
if len(profile) < 2:
return None
working_ion_entry = working_ion_entry
working_ion = working_ion_entry.composition.elements[0].symbol
normalization_els = {}
for el, amt in comp.items():
if el != Element(working_ion):
normalization_els[el] = amt
vpairs = [ConversionVoltagePair.from_steps(profile[i], profile[i + 1],
normalization_els)
for i in range(len(profile) - 1)]
return ConversionElectrode(vpairs, working_ion_entry, comp)
@staticmethod
def from_composition_and_entries(comp, entries_in_chemsys,
working_ion_symbol="Li"):
"""
Convenience constructor to make a ConversionElectrode from a
composition and all entries in a chemical system.
Args:
comp: Starting composition for ConversionElectrode, e.g.,
Composition("FeF3")
entries_in_chemsys: Sequence containing all entries in a
chemical system. E.g., all Li-Fe-F containing entries.
working_ion_symbol: Element symbol of working ion. Defaults to Li.
"""
pd = PhaseDiagram(entries_in_chemsys)
return ConversionElectrode.from_composition_and_pd(comp, pd,
working_ion_symbol)
def get_sub_electrodes(self, adjacent_only=True):
"""
If this electrode contains multiple voltage steps, then it is possible
to use only a subset of the voltage steps to define other electrodes.
For example, an LiTiO2 electrode might contain three subelectrodes:
[LiTiO2 --> TiO2, LiTiO2 --> Li0.5TiO2, Li0.5TiO2 --> TiO2]
This method can be used to return all the subelectrodes with some
options
Args:
adjacent_only: Only return electrodes from compounds that are
adjacent on the convex hull, i.e. no electrodes returned
will have multiple voltage steps if this is set true
Returns:
A list of ConversionElectrode objects
"""
if adjacent_only:
return [self.__class__(self._vpairs[i:i + 1],
self._working_ion_entry, self._composition)
for i in range(len(self._vpairs))]
sub_electrodes = []
for i in range(len(self._vpairs)):
for j in range(i, len(self._vpairs)):
sub_electrodes.append(self.__class__(self._vpairs[i:j + 1],
self._working_ion_entry,
self._composition))
return sub_electrodes
@property
def composition(self):
return self._composition
@property
def working_ion(self):
"""
The working ion as an Element object
"""
return self._working_ion_entry.composition.elements[0]
@property
def working_ion_entry(self):
return self._working_ion_entry
@property
def voltage_pairs(self):
return self._vpairs
def is_super_electrode(self, conversion_electrode):
"""
Checks if a particular conversion electrode is a sub electrode of the
current electrode. Starting from a more lithiated state may result in
a subelectrode that is essentially on the same path. For example, a
ConversionElectrode formed by starting from an FePO4 composition would
be a super_electrode of a ConversionElectrode formed from an LiFePO4
composition.
"""
for pair1 in conversion_electrode:
rxn1 = pair1.rxn
all_formulas1 = set([rxn1.all_comp[i].reduced_formula
for i in range(len(rxn1.all_comp))
if abs(rxn1.coeffs[i]) > 1e-5])
for pair2 in self:
rxn2 = pair2.rxn
all_formulas2 = set([rxn2.all_comp[i].reduced_formula
for i in range(len(rxn2.all_comp))
if abs(rxn2.coeffs[i]) > 1e-5])
if all_formulas1 == all_formulas2:
break
else:
return False
return True
def __eq__(self, conversion_electrode):
"""
Check if two electrodes are exactly the same:
"""
if len(self) != len(conversion_electrode):
return False
for pair1 in conversion_electrode:
rxn1 = pair1.rxn
all_formulas1 = set([rxn1.all_comp[i].reduced_formula
for i in range(len(rxn1.all_comp))
if abs(rxn1.coeffs[i]) > 1e-5])
for pair2 in self:
rxn2 = pair2.rxn
all_formulas2 = set([rxn2.all_comp[i].reduced_formula
for i in range(len(rxn2.all_comp))
if abs(rxn2.coeffs[i]) > 1e-5])
if all_formulas1 == all_formulas2:
break
else:
return False
return True
def __hash__(self):
return 7
def __str__(self):
return self.__repr__()
def __repr__(self):
output = ["Conversion electrode with formula {} and nsteps {}"
.format(self._composition.reduced_formula, self.num_steps),
"Avg voltage {} V, min voltage {} V, max voltage {} V"
.format(self.get_average_voltage(), self.min_voltage,
self.max_voltage),
"Capacity (grav.) {} mAh/g, capacity (vol.) {} Ah/l"
.format(self.get_capacity_grav(),
self.get_capacity_vol()),
"Specific energy {} Wh/kg, energy density {} Wh/l"
.format(self.get_specific_energy(),
self.get_energy_density())]
return "\n".join(output)
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
return cls(dec.process_decoded(d["voltage_pairs"]),
dec.process_decoded(d["working_ion_entry"]),
Composition(d["initial_comp"]))
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"voltage_pairs": [v.as_dict() for v in self._vpairs],
"working_ion_entry": self.working_ion_entry.as_dict(),
"initial_comp": self._composition.as_dict()}
def get_summary_dict(self, print_subelectrodes=True):
"""
Args:
print_subelectrodes:
Also print data on all the possible subelectrodes
Returns:
            a summary of this electrode's properties in dictionary format
"""
d = {}
framework_comp = Composition({k: v
for k, v in self._composition.items()
if k.symbol != self.working_ion.symbol})
d["framework"] = framework_comp.to_data_dict
d["framework_pretty"] = framework_comp.reduced_formula
d["average_voltage"] = self.get_average_voltage()
d["max_voltage"] = self.max_voltage
d["min_voltage"] = self.min_voltage
d["max_delta_volume"] = self.max_delta_volume
d["max_instability"] = 0
d["max_voltage_step"] = self.max_voltage_step
d["nsteps"] = self.num_steps
d["capacity_grav"] = self.get_capacity_grav()
d["capacity_vol"] = self.get_capacity_vol()
d["energy_grav"] = self.get_specific_energy()
d["energy_vol"] = self.get_energy_density()
d["working_ion"] = self.working_ion.symbol
d["reactions"] = []
d["reactant_compositions"] = []
comps = []
frac = []
for pair in self._vpairs:
rxn = pair.rxn
frac.append(pair.frac_charge)
frac.append(pair.frac_discharge)
d["reactions"].append(str(rxn))
for i in range(len(rxn.coeffs)):
if abs(rxn.coeffs[i]) > 1e-5 and rxn.all_comp[i] not in comps:
comps.append(rxn.all_comp[i])
if abs(rxn.coeffs[i]) > 1e-5 and \
rxn.all_comp[i].reduced_formula != d["working_ion"]:
reduced_comp = rxn.all_comp[i].reduced_composition
comp_dict = reduced_comp.as_dict()
d["reactant_compositions"].append(comp_dict)
d["fracA_charge"] = min(frac)
d["fracA_discharge"] = max(frac)
d["nsteps"] = self.num_steps
if print_subelectrodes:
f_dict = lambda c: c.get_summary_dict(print_subelectrodes=False)
d["adj_pairs"] = map(f_dict,
self.get_sub_electrodes(adjacent_only=True))
d["all_pairs"] = map(f_dict,
self.get_sub_electrodes(adjacent_only=False))
return d
class ConversionVoltagePair(AbstractVoltagePair):
"""
A VoltagePair representing a Conversion Reaction with a defined voltage.
Typically not initialized directly but rather used by ConversionElectrode.
Args:
balanced_rxn (BalancedReaction): BalancedReaction for the step
voltage (float): Voltage for the step
mAh (float): Capacity of the step
vol_charge (float): Volume of charged state
vol_discharge (float): Volume of discharged state
mass_charge (float): Mass of charged state
mass_discharge (float): Mass of discharged state
frac_charge (float): Fraction of working ion in the charged state
frac_discharge (float): Fraction of working ion in the discharged state
entries_charge ([ComputedEntry]): Entries in the charged state
entries_discharge ([ComputedEntry]): Entries in discharged state
working_ion_entry (ComputedEntry): Entry of the working ion.
"""
def __init__(self, balanced_rxn, voltage, mAh, vol_charge, vol_discharge,
mass_charge, mass_discharge, frac_charge, frac_discharge,
entries_charge, entries_discharge, working_ion_entry):
self._working_ion_entry = working_ion_entry
working_ion = self._working_ion_entry.composition.elements[0].symbol
self._voltage = voltage
self._mAh = mAh
self._vol_charge = vol_charge
self._mass_charge = mass_charge
self._mass_discharge = mass_discharge
self._vol_discharge = vol_discharge
self._frac_charge = frac_charge
self._frac_discharge = frac_discharge
self._rxn = balanced_rxn
self._working_ion = working_ion
self._entries_charge = entries_charge
self._entries_discharge = entries_discharge
@staticmethod
def from_steps(step1, step2, normalization_els):
"""
Creates a ConversionVoltagePair from two steps in the element profile
from a PD analysis.
Args:
step1: Starting step
step2: Ending step
normalization_els: Elements to normalize the reaction by. To
ensure correct capacities.
"""
working_ion_entry = step1["element_reference"]
working_ion = working_ion_entry.composition.elements[0].symbol
working_ion_valence = max(Element(working_ion).oxidation_states)
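        # Added explanatory comments (illustration only): the step voltage follows
        # from V = -(mu_ion - E_ion_per_atom) / z, where mu_ion is the working-ion
        # chemical potential of the step and z its (maximum) oxidation state; the
        # capacity converts the change in ion "evolution" between the two steps to
        # mAh via the Faraday constant (e * N_A), a seconds-to-hours factor, and z.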
voltage = (-step1["chempot"] + working_ion_entry.energy_per_atom)/working_ion_valence
mAh = (step2["evolution"] - step1["evolution"]) \
* Charge(1, "e").to("C") * Time(1, "s").to("h") * N_A * 1000*working_ion_valence
licomp = Composition(working_ion)
prev_rxn = step1["reaction"]
reactants = {comp: abs(prev_rxn.get_coeff(comp))
for comp in prev_rxn.products if comp != licomp}
curr_rxn = step2["reaction"]
products = {comp: abs(curr_rxn.get_coeff(comp))
for comp in curr_rxn.products if comp != licomp}
reactants[licomp] = (step2["evolution"] - step1["evolution"])
rxn = BalancedReaction(reactants, products)
for el, amt in normalization_els.items():
if rxn.get_el_amount(el) > 1e-6:
rxn.normalize_to_element(el, amt)
break
prev_mass_dischg = sum([prev_rxn.all_comp[i].weight
* abs(prev_rxn.coeffs[i])
for i in range(len(prev_rxn.all_comp))]) / 2
vol_charge = sum([abs(prev_rxn.get_coeff(e.composition))
* e.structure.volume
for e in step1["entries"]
if e.composition.reduced_formula != working_ion])
mass_discharge = sum([curr_rxn.all_comp[i].weight
* abs(curr_rxn.coeffs[i])
for i in range(len(curr_rxn.all_comp))]) / 2
mass_charge = prev_mass_dischg
mass_discharge = mass_discharge
vol_discharge = sum([abs(curr_rxn.get_coeff(e.composition))
* e.structure.volume
for e in step2["entries"]
if e.composition.reduced_formula != working_ion])
totalcomp = Composition({})
for comp in prev_rxn.products:
if comp.reduced_formula != working_ion:
totalcomp += comp * abs(prev_rxn.get_coeff(comp))
frac_charge = totalcomp.get_atomic_fraction(Element(working_ion))
totalcomp = Composition({})
for comp in curr_rxn.products:
if comp.reduced_formula != working_ion:
totalcomp += comp * abs(curr_rxn.get_coeff(comp))
frac_discharge = totalcomp.get_atomic_fraction(Element(working_ion))
rxn = rxn
entries_charge = step2["entries"]
entries_discharge = step1["entries"]
return ConversionVoltagePair(rxn, voltage, mAh, vol_charge,
vol_discharge, mass_charge,
mass_discharge,
frac_charge, frac_discharge,
entries_charge, entries_discharge,
working_ion_entry)
@property
def working_ion(self):
return self._working_ion
@property
def entries_charge(self):
return self._entries_charge
@property
def entries_discharge(self):
return self._entries_discharge
@property
def frac_charge(self):
return self._frac_charge
@property
def frac_discharge(self):
return self._frac_discharge
@property
def rxn(self):
return self._rxn
@property
def voltage(self):
return self._voltage
@property
def mAh(self):
return self._mAh
@property
def mass_charge(self):
return self._mass_charge
@property
def mass_discharge(self):
return self._mass_discharge
@property
def vol_charge(self):
return self._vol_charge
@property
def vol_discharge(self):
return self._vol_discharge
@property
def working_ion_entry(self):
return self._working_ion_entry
def __repr__(self):
output = ["Conversion voltage pair with working ion {}"
.format(self._working_ion_entry.composition.reduced_formula),
"Reaction : {}".format(self._rxn),
"V = {}, mAh = {}".format(self.voltage, self.mAh),
"frac_charge = {}, frac_discharge = {}"
.format(self.frac_charge, self.frac_discharge),
"mass_charge = {}, mass_discharge = {}"
.format(self.mass_charge, self.mass_discharge),
"vol_charge = {}, vol_discharge = {}"
.format(self.vol_charge, self.vol_discharge)]
return "\n".join(output)
def __str__(self):
return self.__repr__()
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
working_ion_entry = dec.process_decoded(d["working_ion_entry"])
balanced_rxn = dec.process_decoded(d["balanced_rxn"])
entries_charge = dec.process_decoded(d["entries_charge"])
entries_discharge = dec.process_decoded(d["entries_discharge"])
return ConversionVoltagePair(balanced_rxn, d["voltage"], d["mAh"],
d["vol_charge"], d["vol_discharge"],
d["mass_charge"], d["mass_discharge"],
d["frac_charge"], d["frac_discharge"],
entries_charge, entries_discharge,
working_ion_entry)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"working_ion_entry": self._working_ion_entry.as_dict(),
"voltage": self._voltage, "mAh": self._mAh,
"vol_charge": self._vol_charge,
"mass_charge": self._mass_charge,
"mass_discharge": self._mass_discharge,
"vol_discharge": self._vol_discharge,
"frac_charge": self._frac_charge,
"frac_discharge": self._frac_discharge,
"balanced_rxn": self._rxn.as_dict(),
"entries_charge": [e.as_dict() for e in self._entries_charge],
"entries_discharge": [e.as_dict() for e in
self._entries_discharge]}
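# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The intended entry point is the from_composition_and_entries constructor: given
# a starting composition and every computed entry of the corresponding chemical
# system, it builds the phase diagram internally and extracts the conversion
# voltage profile. `entries` below is a placeholder for such a list (e.g. all
# Li-Fe-F entries from a database); it is an assumption of this sketch, not
# something provided by this module.
def _example_conversion_electrode(entries):
    comp = Composition("FeF3")
    electrode = ConversionElectrode.from_composition_and_entries(
        comp, entries, working_ion_symbol="Li")
    if electrode is None:  # fewer than two steps in the element profile
        return None
    # adjacent_only=True would instead split the profile into one electrode per step
    return electrode.get_summary_dict(print_subelectrodes=True)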
|
dongsenfo/pymatgen
|
pymatgen/apps/battery/conversion_battery.py
|
Python
|
mit
| 20,849
|
[
"pymatgen"
] |
0ab9e2c615798ae5eab05cdd1bc7969042670803252556f2df9045b71d58f8e5
|
import gdb
def exit_handler(event):
"""Quit GDB as soon as the program exited. """
# print("Program exited with exit code: %d" % event.exit_code)
# gdb.execute('quit')
gdb.events.exited.connect(exit_handler)
gdb.execute('set pagination off')
gdb.execute('file gzip')
gdb.execute('set args ' + '-cdk test.txt.gz')
function_info = gdb.execute('info functions', False, True)
g_variable_info = gdb.execute('info variables', False, True)
function_list = []
variable_list = []
# Routine to get function names
for line in function_info.splitlines():
if line.startswith("All") or line.startswith("File"):
continue
else:
for word in line.split():
if "(" in word:
function_list.append(word.split("(")[0])
else:
continue
# Routine to get local variables and store them in the variable list
for func in function_list:
local_variables = []
temp_b = gdb.Breakpoint(str(func))
gdb.execute('run')
try:
local_info = gdb.execute('info locals', False, True)
except gdb.error:
temp_b.delete()
continue
for line in local_info.splitlines():
if "No locals." in line or "<optimized out>" in line:
continue
variable_list.append('{}\t{}'.format(line.split()[0], func))
temp_b.delete()
# Routine to get global variables and store them in the variable list
# TODO: Add arrays in variable list
for line in g_variable_info.splitlines():
if line.startswith("All") or line.startswith("File") or line.startswith("Non") or line.startswith("0x"):
continue
else:
for word in line.split():
if ")" in word or "[" in word:
continue
elif ";" in word:
variable_list.append(word.split(";")[0])
else:
continue
# Generating input spec files for fault injection script
for index, variable in enumerate(variable_list):
file_name = 'input_spec{}.cfg'.format(index)
with open(file_name, 'w') as f_input_config:
f_input_config.writelines(
'executable /home/han/GDrive/projects/Playground/GDB/Experiments/gzip/gzip\n')
f_input_config.writelines(
'arguments -cdk /home/han/GDrive/projects/Playground/GDB/Experiments/gzip/test.txt.gz > gzip{}.log\n'.format(index))
f_input_config.writelines('fault\t{}\t0\t1\tBIT_FLIPS\n'.format(variable))
gdb.execute('continue')
gdb.execute('quit')
|
timtian090/Playground
|
GDB/Experiments/gzip/get_variables.py
|
Python
|
mit
| 2,482
|
[
"CDK"
] |
4cb61f63a66563a53cc49d13334384d2f946f07a79a903af530568bf87141b16
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 3. Use proper functions provided by PySCF
# * Switch between df.incore and df.outcore according to system memory
# * (Koh: Is there an identical function in outcore? Which one, incore or outcore, is used when more memory is needed?)
# * Use get_veff of scf object instead of get_vxc
# * (Koh: get_vxc cannot generate correct J,K matrix from complex density matrix)
#
import numpy as np
import scipy, time
import scipy.linalg
from pyscf import gto, dft, df
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import diis
import sys
FSPERAU = 0.0241888
def transmat(M,U,inv = 1):
if inv == 1:
# U.t() * M * U
Mtilde = np.dot(np.dot(U.T.conj(),M),U)
elif inv == -1:
# U * M * U.t()
Mtilde = np.dot(np.dot(U,M),U.T.conj())
return Mtilde
def trdot(A,B):
C = np.trace(np.dot(A,B))
return C
def matrixpower(A,p,PrintCondition=False):
""" Raise a Hermitian Matrix to a possibly fractional power. """
u,s,v = np.linalg.svd(A)
if (PrintCondition):
print("matrixpower: Minimal Eigenvalue =", np.min(s))
for i in range(len(s)):
if (abs(s[i]) < np.power(10.0,-14.0)):
s[i] = np.power(10.0,-14.0)
return np.dot(u,np.dot(np.diag(np.power(s,p)),v))
class RTTDSCF(lib.StreamObject):
"""
RT-TDSCF base object.
Other types of propagations may inherit from this.
Calling this class starts the propagation
Attributes:
verbose: int
Print level. Default value equals to :class:`ks.verbose`
conv_tol: float
            convergence threshold. Default value equals to :class:`ks.conv_tol`
auxbas: str
            auxiliary basis for 2c/3c eri. Default is weigend
prm: str
string object with |variable value| on each line
Saved results
output: str
name of the file with result of propagation
"""
def __init__(self,ks,prm=None,output = "log.dat", auxbas = "weigend"):
self.stdout = sys.stdout
self.verbose = ks.verbose
self.enuc = ks.energy_nuc()
self.conv_tol = ks.conv_tol
self.auxbas = auxbas
self.hyb = ks._numint.hybrid_coeff(ks.xc, spin=(ks.mol.spin>0)+1)
self.adiis = None
self.ks = ks
self.eri3c = None
self.eri2c = None
self.s = ks.mol.intor_symmetric('int1e_ovlp')
self.x = matrixpower(self.s,-1./2.)
self._keys = set(self.__dict__.keys())
fmat, c_am, v_lm, rho = self.initialcondition(prm)
start = time.time()
self.prop(fmat, c_am, v_lm, rho, output)
end = time.time()
logger.info(self,"Propagation time: %f", end-start)
logger.warn(self, 'RT-TDSCF is an experimental feature. It is '
'still in testing.\nFeatures and APIs may be changed '
'in the future.')
def auxmol_set(self, mol, auxbas = "weigend"):
"""
        Generate the 2-center/3-center electron integrals (eri2c, eri3c)
        used for the density-fitting Coulomb and exchange builds.
Args:
mol: Mole class
Default is ks.mol
Kwargs:
auxbas: str
                auxiliary basis for 2c/3c eri. Default is weigend
Returns:
eri3c: float
3 center eri. shape: (AO,AO,AUX)
eri2c: float
2 center eri. shape: (AUX,AUX)
"""
auxmol = gto.Mole()
auxmol.atom = mol.atom
auxmol.basis = auxbas
auxmol.build()
self.auxmol = auxmol
nao = mol.nao_nr()
naux = auxmol.nao_nr()
atm, bas, env = gto.conc_env(mol._atm, mol._bas, mol._env, auxmol._atm,\
auxmol._bas, auxmol._env)
eri3c = df.incore.aux_e2(mol, auxmol, intor="cint3c2e_sph", aosym="s1",\
comp=1 )
eri2c = df.incore.fill_2c2e(mol,auxmol)
self.eri3c = eri3c.copy()
self.eri2c = eri2c.copy()
return eri3c, eri2c
def fockbuild(self,dm_lao,it = -1):
"""
Updates Fock matrix
Args:
dm_lao: float or complex
Lowdin AO density matrix.
it: int
iterator for SCF DIIS
Returns:
fmat: float or complex
Fock matrix in Lowdin AO basis
jmat: float or complex
Coulomb matrix in AO basis
kmat: float or complex
Exact Exchange in AO basis
"""
if self.params["Model"] == "TDHF":
Pt = 2.0*transmat(dm_lao,self.x,-1)
jmat,kmat = self.get_jk(Pt)
veff = 0.5*(jmat+jmat.T.conj()) - 0.5*(0.5*(kmat + kmat.T.conj()))
if self.adiis and it > 0:
return transmat(self.adiis.update(self.s,Pt,self.h + veff),\
self.x), jmat, kmat
else:
return transmat(self.h + veff,self.x), jmat, kmat
elif self.params["Model"] == "TDDFT":
Pt = 2 * transmat(dm_lao,self.x,-1)
jmat = self.J = self.get_j(Pt)
Veff = self.J.astype(complex)
Vxc, excsum, kmat = self.get_vxc(Pt)
Veff += Vxc
if self.adiis and it > 0:
return transmat(self.adiis.update(self.s,Pt,self.h + Veff),\
self.x), jmat, kmat
else:
return transmat(self.h + Veff,self.x), jmat, kmat
def get_vxc(self,dm):
"""
Update exchange matrices and energy
Args:
dm: float or complex
AO density matrix.
Returns:
vxc: float or complex
exchange-correlation matrix in AO basis
excsum: float
exchange-correlation energy
kmat: float or complex
Exact Exchange in AO basis
"""
nelec, excsum, vxc = self.ks._numint.nr_vxc(self.ks.mol, \
self.ks.grids, self.ks.xc, dm)
self.exc = excsum
vxc = vxc.astype(complex)
if(self.hyb > 0.01):
kmat = self.get_k(dm)
vxc += -0.5 * self.hyb * kmat
else:
kmat = None
return vxc, excsum, kmat
def get_jk(self, dm):
"""
Update Coulomb and Exact Exchange Matrix
Args:
dm: float or complex
AO density matrix.
Returns:
jmat: float or complex
Coulomb matrix in AO basis
kmat: float or complex
Exact Exchange in AO basis
"""
jmat = self.get_j(dm)
kmat = self.get_k(dm)
return jmat, kmat
def get_j(self,dm):
"""
Update Coulomb Matrix
Args:
dm: float or complex
AO density matrix.
Returns:
jmat: float or complex
Coulomb matrix in AO basis
"""
rho = np.einsum("ijp,ij->p", self.eri3c, dm)
rho = np.linalg.solve(self.eri2c, rho)
jmat = np.einsum("p,ijp->ij", rho, self.eri3c)
return jmat
def get_k(self,dm):
"""
Update Exact Exchange Matrix
Args:
dm: float or complex
AO density matrix.
Returns:
kmat: float or complex
Exact Exchange in AO basis
"""
naux = self.auxmol.nao_nr()
nao = self.ks.mol.nao_nr()
kpj = np.einsum("ijp,jk->ikp", self.eri3c, dm)
pik = np.linalg.solve(self.eri2c, kpj.reshape(-1,naux).T.conj())
kmat = np.einsum("pik,kjp->ij", pik.reshape(naux,nao,nao), self.eri3c)
return kmat
def initialcondition(self,prm):
"""
Prepare the variables/Matrices needed for propagation
        The SCF is done here to build matrices that are not accessible from pyscf.scf
Args:
prm: str
string object with |variable value| on each line
Returns:
fmat: float or complex
Fock matrix in Lowdin AO basis
c_am: float
Transformation Matrix |AO><MO|
v_lm: float
Transformation Matrix |LAO><MO|
rho: float or complex
Initial MO density matrix.
"""
from pyscf.rt import tdfields
self.auxmol_set(self.ks.mol, auxbas = self.auxbas)
self.params = dict()
logger.log(self,"""
===================================
| Realtime TDSCF module |
===================================
| J. Parkhill, T. Nguyen |
| J. Koh, J. Herr, K. Yao |
===================================
| Refs: 10.1021/acs.jctc.5b00262 |
| 10.1063/1.4916822 |
===================================
""")
n_ao = self.ks.mol.nao_nr()
n_occ = int(sum(self.ks.mo_occ)/2)
logger.log(self,"n_ao: %d n_occ: %d", n_ao,\
n_occ)
self.readparams(prm)
fmat, c_am, v_lm = self.initfockbuild() # updates self.C
rho = 0.5*np.diag(self.ks.mo_occ).astype(complex)
self.field = tdfields.FIELDS(self, self.params)
self.field.initializeexpectation(rho, c_am)
return fmat, c_am, v_lm, rho
def readparams(self,prm):
"""
Set Defaults, Read the file and fill the params dictionary
Args:
prm: str
string object with |variable value| on each line
"""
self.params["Model"] = "TDDFT"
self.params["Method"] = "MMUT"
self.params["BBGKY"]=0
self.params["TDCIS"]=1
self.params["dt"] = 0.02
self.params["MaxIter"] = 15000
self.params["ExDir"] = 1.0
self.params["EyDir"] = 1.0
self.params["EzDir"] = 1.0
self.params["FieldAmplitude"] = 0.01
self.params["FieldFreq"] = 0.9202
self.params["Tau"] = 0.07
self.params["tOn"] = 7.0*self.params["Tau"]
self.params["ApplyImpulse"] = 1
self.params["ApplyCw"] = 0
self.params["StatusEvery"] = 5000
self.params["Print"]=0
# Here they should be read from disk.
if(prm != None):
for line in prm.splitlines():
s = line.split()
if len(s) > 1:
if s[0] == "MaxIter" or s[0] == str("ApplyImpulse") or \
s[0] == str("ApplyCw") or s[0] == str("StatusEvery"):
self.params[s[0]] = int(s[1])
elif s[0] == "Model" or s[0] == "Method":
self.params[s[0]] = s[1].upper()
else:
self.params[s[0]] = float(s[1])
logger.log(self,"=============================")
logger.log(self," Parameters")
logger.log(self,"=============================")
logger.log(self,"Model: " + self.params["Model"].upper())
logger.log(self,"Method: "+ self.params["Method"].upper())
logger.log(self,"dt: %.2f", self.params["dt"])
logger.log(self,"MaxIter: %d", self.params["MaxIter"])
logger.log(self,"ExDir: %.2f", self.params["ExDir"])
logger.log(self,"EyDir: %.2f", self.params["EyDir"])
logger.log(self,"EzDir: %.2f", self.params["EzDir"])
logger.log(self,"FieldAmplitude: %.4f", self.params["FieldAmplitude"])
logger.log(self,"FieldFreq: %.4f", self.params["FieldFreq"])
logger.log(self,"Tau: %.2f", self.params["Tau"])
logger.log(self,"tOn: %.2f", self.params["tOn"])
logger.log(self,"ApplyImpulse: %d", self.params["ApplyImpulse"])
logger.log(self,"ApplyCw: %d", self.params["ApplyCw"])
logger.log(self,"StatusEvery: %d", self.params["StatusEvery"])
logger.log(self,"=============================\n\n")
return
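    # Example of a parameter string accepted by readparams (illustrative only;
    # any key given a default above may appear, one "key value" pair per line):
    #
    #     prm = ("Model  TDDFT\n"
    #            "Method MMUT\n"
    #            "dt 0.02\n"
    #            "MaxIter 10000\n")
    #
    # MaxIter, ApplyImpulse, ApplyCw and StatusEvery are parsed as int,
    # Model/Method are upper-cased strings, and everything else is read as float.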
def initfockbuild(self):
"""
        Using Roothaan's equation to build an initial Fock matrix and
Transformation Matrices
Returns:
fmat: float or complex
Fock matrix in Lowdin AO basis
c_am: float
Transformation Matrix |AO><MO|
v_lm: float
Transformation Matrix |LAO><MO|
"""
start = time.time()
n_occ = int(sum(self.ks.mo_occ)/2)
err = 100
it = 0
self.h = self.ks.get_hcore()
s = self.s.copy()
x = self.x.copy()
sx = np.dot(s,x)
dm_lao = 0.5*transmat(self.ks.get_init_guess(self.ks.mol, \
self.ks.init_guess), sx).astype(complex)
if isinstance(self.ks.diis, lib.diis.DIIS):
self.adiis = self.ks.diis
elif self.ks.diis:
self.adiis = diis.SCF_DIIS(self.ks, self.ks.diis_file)
self.adiis.space = self.ks.diis_space
self.adiis.rollback = self.ks.diis_space_rollback
else:
self.adiis = None
fmat, jmat, kmat = self.fockbuild(dm_lao)
dm_lao_old = dm_lao
etot = self.energy(dm_lao,fmat, jmat, kmat)+ self.enuc
while (err > self.conv_tol):
# Diagonalize F in the lowdin basis
eigs, v_lm = np.linalg.eig(fmat)
idx = eigs.argsort()
eigs.sort()
v_lm = v_lm[:,idx].copy()
# Fill up the density in the MO basis and then Transform back
rho = 0.5*np.diag(self.ks.mo_occ).astype(complex)
dm_lao = transmat(rho,v_lm,-1)
etot_old = etot
etot = self.energy(dm_lao,fmat, jmat, kmat)
fmat, jmat, kmat = self.fockbuild(dm_lao,it)
err = abs(etot-etot_old)
logger.debug(self, "Ne: %f", np.trace(rho))
logger.debug(self, "Iteration: %d Energy: %.11f \
Error = %.11f", it, etot, err)
it += 1
if it > self.ks.max_cycle:
logger.log(self, "Max cycle of SCF reached: %d\n Exiting TDSCF. Please raise ks.max_cycle", it)
quit()
rho = 0.5*np.diag(self.ks.mo_occ).astype(complex)
dm_lao = transmat(rho,v_lm,-1)
c_am = np.dot(self.x,v_lm)
logger.log(self, "Ne: %f", np.trace(rho))
logger.log(self, "Converged Energy: %f", etot)
# logger.log(self, "Eigenvalues: %f", eigs.real)
# print "Eigenvalues: ", eigs.real
end = time.time()
logger.info(self, "Initial Fock Built time: %f", end-start)
return fmat, c_am, v_lm
def split_rk4_step_mmut(self, w, v , oldrho , tnow, dt ,IsOn):
Ud = np.exp(w*(-0.5j)*dt);
U = transmat(np.diag(Ud),v,-1)
RhoHalfStepped = transmat(oldrho,U,-1)
# If any TCL propagation occurs...
# DontDo=
# SplitLiouvillian( RhoHalfStepped, k1,tnow,IsOn);
# v2 = (dt/2.0) * k1;
# v2 += RhoHalfStepped;
# SplitLiouvillian( v2, k2,tnow+(dt/2.0),IsOn);
# v3 = (dt/2.0) * k2;
# v3 += RhoHalfStepped;
# SplitLiouvillian( v3, k3,tnow+(dt/2.0),IsOn);
# v4 = (dt) * k3;
# v4 += RhoHalfStepped;
# SplitLiouvillian( v4, k4,tnow+dt,IsOn);
# newrho = RhoHalfStepped;
# newrho += dt*(1.0/6.0)*k1;
# newrho += dt*(2.0/6.0)*k2;
# newrho += dt*(2.0/6.0)*k3;
# newrho += dt*(1.0/6.0)*k4;
# newrho = U*newrho*U.t();
#
newrho = transmat(RhoHalfStepped,U,-1)
return newrho
def tddftstep(self,fmat, c_am, v_lm, rho, rhom12, tnow):
"""
Take dt step in propagation
updates matrices and rho to next timestep
Args:
fmat: float or complex
Fock matrix in Lowdin AO basis
c_am: float or complex
Transformation Matrix |AO><MO|
v_lm: float or complex
Transformation Matrix |LAO><MO|
rho: complex
MO density matrix.
rhom12: complex
tnow: float
current time in A.U.
Returns:
n_rho: complex
MO density matrix.
n_rhom12: complex
n_c_am: complex
Transformation Matrix |AO><MO|
n_v_lm: complex
Transformation Matrix |LAO><MO|
n_fmat: complex
Fock matrix in Lowdin AO basis
n_jmat: complex
Coulomb matrix in AO basis
n_kmat: complex
Exact Exchange in AO basis
"""
if (self.params["Method"] == "MMUT"):
fmat, n_jmat, n_kmat = self.fockbuild(transmat(rho,v_lm,-1))
n_fmat = fmat.copy()
fmat_c = np.conj(fmat)
fmat_prev = transmat(fmat_c, v_lm)
eigs, rot = np.linalg.eig(fmat_prev)
idx = eigs.argsort()
eigs.sort()
rot = rot[:,idx].copy()
rho = transmat(rho, rot)
rhoM12 = transmat(rhom12, rot)
v_lm = np.dot(v_lm , rot)
c_am = np.dot(self.x , v_lm)
n_v_lm = v_lm.copy()
n_c_am = c_am.copy()
fmat_mo = np.diag(eigs).astype(complex)
fmatfield, IsOn = self.field.applyfield(fmat_mo,c_am,tnow)
w,v = scipy.linalg.eig(fmatfield)
            NewRhoM12 = self.split_rk4_step_mmut(w, v, rhoM12, tnow, \
                self.params["dt"], IsOn)
NewRho = self.split_rk4_step_mmut(w, v, NewRhoM12, tnow,\
self.params["dt"]/2.0, IsOn)
n_rho = 0.5*(NewRho+(NewRho.T.conj()));
n_rhom12 = 0.5*(NewRhoM12+(NewRhoM12.T.conj()))
return n_rho, n_rhom12, n_c_am, n_v_lm, n_fmat, n_jmat, n_kmat
else:
raise Exception("Unknown Method...")
return
def dipole(self, rho, c_am):
"""
Args:
c_am: float or complex
Transformation Matrix |AO><MO|
rho: complex
MO density matrix.
Returns:
dipole: float
xyz component of dipole of a molecule. [x y z]
"""
return self.field.expectation(rho, c_am)
def energy(self,dm_lao,fmat,jmat,kmat):
"""
Args:
dm_lao: complex
Density in LAO basis.
fmat: complex
Fock matrix in Lowdin AO basis
jmat: complex
Coulomb matrix in AO basis
kmat: complex
Exact Exchange in AO basis
Returns:
e_tot: float
Total Energy of a system
"""
if (self.params["Model"] == "TDHF"):
hlao = transmat(self.h,self.x)
e_tot = (self.enuc+np.trace(np.dot(dm_lao,hlao+fmat))).real
return e_tot
elif self.params["Model"] == "TDDFT":
dm = transmat(dm_lao,self.x,-1)
exc = self.exc
if(self.hyb > 0.01):
exc -= 0.5 * self.hyb * trdot(dm,kmat)
# if not using auxmol
eh = trdot(dm,2*self.h)
ej = trdot(dm,jmat)
e_tot = (eh + ej + exc + self.enuc).real
return e_tot
def loginstant(self, rho, c_am, v_lm, fmat, jmat, kmat, tnow, it):
"""
time is logged in atomic units.
Args:
rho: complex
MO density matrix.
c_am: complex
Transformation Matrix |AO><MO|
v_lm: complex
Transformation Matrix |LAO><MO|
fmat: complex
Fock matrix in Lowdin AO basis
jmat: complex
Coulomb matrix in AO basis
kmat: complex
Exact Exchange in AO basis
tnow: float
Current time in propagation in A.U.
it: int
Number of iteration of propagation
Returns:
tore: str
|t, dipole(x,y,z), energy|
"""
np.set_printoptions(precision = 7)
tore = str(tnow)+" "+str(self.dipole(rho, c_am).real).rstrip("]").lstrip("[")+\
" " +str(self.energy(transmat(rho,v_lm,-1),fmat, jmat, kmat))
if it%self.params["StatusEvery"] ==0 or it == self.params["MaxIter"]-1:
logger.log(self, "t: %f fs Energy: %f a.u. Total Density: %f",\
tnow*FSPERAU,self.energy(transmat(rho,v_lm,-1),fmat, jmat, kmat), \
2*np.trace(rho))
logger.log(self, "Dipole moment(X, Y, Z, au): %8.5f, %8.5f, %8.5f",\
self.dipole(rho, c_am).real[0],self.dipole(rho, c_am).real[1],\
self.dipole(rho, c_am).real[2])
return tore
def prop(self, fmat, c_am, v_lm, rho, output):
"""
The main tdscf propagation loop.
Args:
fmat: complex
Fock matrix in Lowdin AO basis
c_am: complex
Transformation Matrix |AO><MO|
v_lm: complex
Transformation Matrix |LAO><MO|
rho: complex
MO density matrix.
output: str
name of the file with result of propagation
Saved results:
f: file
output file with |t, dipole(x,y,z), energy|
"""
it = 0
tnow = 0
rhom12 = rho.copy()
n_occ = int(sum(self.ks.mo_occ)/2)
f = open(output,"a")
logger.log(self,"\n\nPropagation Begins")
start = time.time()
while (it<self.params["MaxIter"]):
rho, rhom12, c_am, v_lm, fmat, jmat, kmat = self.tddftstep(fmat, c_am, v_lm, rho, rhom12, tnow)
# rho = newrho.copy()
# rhom12 = newrhom12.copy()
#self.log.append(self.loginstant(it))
f.write(self.loginstant(rho, c_am, v_lm, fmat, jmat, kmat, tnow, it)+"\n")
# Do logging.
tnow = tnow + self.params["dt"]
if it%self.params["StatusEvery"] ==0 or \
it == self.params["MaxIter"]-1:
end = time.time()
logger.log(self, "%f hr/ps", \
(end - start)/(60*60*tnow * FSPERAU * 0.001))
it = it + 1
f.close()
|
gkc1000/pyscf
|
pyscf/rt/tdscf.py
|
Python
|
apache-2.0
| 22,845
|
[
"PySCF"
] |
b8b13a294a3f1d0baff6078d36bef6b60908147e8ca58fc4e529e4fa5ae8050e
|
"""This demo program solves the incompressible Navier-Stokes equations
on an L-shaped domain using Chorin's splitting method."""
# Copyright (C) 2010-2011 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Mikael Mortensen 2011
#
# First added: 2010-08-30
# Last changed: 2011-06-30
#
# SC14 Paraview's Catalyst tutorial
#
# Step 6 : Add field data arrays to VTK grid
#
# [SC14-Catalyst] we need a python environment that enables import of both Dolfin and ParaView
execfile("simulation-env.py")
# [SC14-Catalyst] import paraview, vtk and paraview's simple API
import sys
import paraview
import paraview.vtk as vtk
import paraview.simple as pvsimple
# [SC14-Catalyst] check for command line arguments
if len(sys.argv) != 3:
print "command is 'python",sys.argv[0],"<script name> <number of time steps>'"
sys.exit(1)
# [SC14-Catalyst] initialize and read input parameters
paraview.options.batch = True
paraview.options.symmetric = True
# [SC14-Catalyst] import user co-processing script
import vtkPVCatalystPython
import os
scriptpath, scriptname = os.path.split(sys.argv[1])
sys.path.append(scriptpath)
if scriptname.endswith(".py"):
print 'script name is ', scriptname
scriptname = scriptname[0:len(scriptname)-3]
try:
cpscript = __import__(scriptname)
except:
print sys.exc_info()
print 'Cannot find ', scriptname, ' -- no coprocessing will be performed.'
sys.exit(1)
# [SC14-Catalyst] Co-Processing routine to be called at the end of each simulation time step
def coProcess(grid, time, step):
# initialize data description
datadescription = vtkPVCatalystPython.vtkCPDataDescription()
datadescription.SetTimeData(time, step)
datadescription.AddInput("input")
cpscript.RequestDataDescription(datadescription)
inputdescription = datadescription.GetInputDescriptionByName("input")
if inputdescription.GetIfGridIsNecessary() == False:
return
if grid != None:
# attach VTK data set to pipeline input
inputdescription.SetGrid(grid)
# execute catalyst processing
cpscript.DoCoProcessing(datadescription)
# [SC14-Catalyst] convert dolfin mesh to a VTK unstructured grid
def Mesh2VTKUGrid(mesh):
vtkcelltypes=((),(vtk.VTK_EMPTY_CELL,vtk.VTK_VERTEX,vtk.VTK_LINE),(vtk.VTK_EMPTY_CELL,vtk.VTK_VERTEX,vtk.VTK_LINE,vtk.VTK_TRIANGLE,vtk.VTK_QUAD,vtk.VTK_POLYGON,vtk.VTK_POLYGON),(vtk.VTK_EMPTY_CELL,vtk.VTK_VERTEX,vtk.VTK_LINE,vtk.VTK_TRIANGLE,vtk.VTK_TETRA,vtk.VTK_CONVEX_POINT_SET,vtk.VTK_CONVEX_POINT_SET,vtk.VTK_CONVEX_POINT_SET,vtk.VTK_HEXAHEDRON))
npoints=mesh.num_vertices()
geom=mesh.geometry()
pts=vtk.vtkPoints()
pts.SetNumberOfPoints(npoints)
for i in xrange(npoints):
p=geom.point(i)
pts.SetPoint(i,p.x(),p.y(),p.z())
dim = mesh.topology().dim()
ncells=mesh.num_cells()
cells=vtk.vtkCellArray()
cellTypes=vtk.vtkUnsignedCharArray()
cellTypes.SetNumberOfTuples(ncells)
cellLocations=vtk.vtkIdTypeArray()
cellLocations.SetNumberOfTuples(ncells)
loc=0
for (cell,i) in zip(mesh.cells(),xrange(ncells)) :
ncellpoints=len(cell)
cells.InsertNextCell(ncellpoints)
for cpoint in cell:
cells.InsertCellPoint(cpoint)
cellTypes.SetTuple1(i,vtkcelltypes[dim][ncellpoints])
cellLocations.SetTuple1(i,loc)
loc+=1+ncellpoints
ugrid = vtk.vtkUnstructuredGrid()
ugrid.SetPoints(pts)
ugrid.SetCells(cellTypes,cellLocations,cells)
return ugrid
# [SC14-Catalyst] convert a flattened sequence of values to VTK double array
def Values2VTKArray(values,n,name):
ncomps=len(values)/n
array=vtk.vtkDoubleArray()
array.SetNumberOfComponents(ncomps)
array.SetNumberOfTuples(n)
for i in range(n):
a = []
for j in range(ncomps):
a.append(values[i+j*n])
array.SetTupleValue(i, a)
array.SetName(name)
return array
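# Note on the layout expected by Values2VTKArray above: the flattened sequence
# stores each component contiguously, i.e. for a 2-component field on n points
# it is [x_0 ... x_{n-1}, y_0 ... y_{n-1}] (hence values[i + j*n]), which is
# the ordering compute_vertex_values() is expected to provide further below.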
def AddFieldData(ugrid, pointArrays, cellArrays ):
# add Point data fields
npoints = ugrid.GetNumberOfPoints()
for (name,values) in pointArrays:
ugrid.GetPointData().AddArray( Values2VTKArray(values,npoints,name) )
# add Cell data fields
ncells = ugrid.GetNumberOfCells()
for (name,values) in cellArrays:
ugrid.GetCellData().AddArray( Values2VTKArray(values,ncells,name) )
# Begin demo
from dolfin import *
# Print log messages only from the root process in parallel
parameters["std_out_all_processes"] = False;
# Load mesh from file
mesh = Mesh(DOLFIN_EXAMPLE_DATA_DIR+"/lshape.xml.gz")
# Define function spaces (P2-P1)
V = VectorFunctionSpace(mesh, "Lagrange", 2)
Q = FunctionSpace(mesh, "Lagrange", 1)
# Define trial and test functions
u = TrialFunction(V)
p = TrialFunction(Q)
v = TestFunction(V)
q = TestFunction(Q)
# Set parameter values
dt = 0.01
T = 3
nu = 0.01
# Define time-dependent pressure boundary condition
p_in = Expression("sin(3.0*t)", t=0.0)
# Define boundary conditions
noslip = DirichletBC(V, (0, 0),
"on_boundary && \
(x[0] < DOLFIN_EPS | x[1] < DOLFIN_EPS | \
(x[0] > 0.5 - DOLFIN_EPS && x[1] > 0.5 - DOLFIN_EPS))")
inflow = DirichletBC(Q, p_in, "x[1] > 1.0 - DOLFIN_EPS")
outflow = DirichletBC(Q, 0, "x[0] > 1.0 - DOLFIN_EPS")
bcu = [noslip]
bcp = [inflow, outflow]
# Create functions
u0 = Function(V)
u1 = Function(V)
p1 = Function(Q)
# Define coefficients
k = Constant(dt)
f = Constant((0, 0))
# Tentative velocity step
F1 = (1/k)*inner(u - u0, v)*dx + inner(grad(u0)*u0, v)*dx + \
nu*inner(grad(u), grad(v))*dx - inner(f, v)*dx
a1 = lhs(F1)
L1 = rhs(F1)
# Pressure update
a2 = inner(grad(p), grad(q))*dx
L2 = -(1/k)*div(u1)*q*dx
# Velocity update
a3 = inner(u, v)*dx
L3 = inner(u1, v)*dx - k*inner(grad(p1), v)*dx
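# Summary of the three variational problems above (Chorin's splitting):
#   (1) tentative velocity u*:  (u* - u0)/k + (grad u0) u0 - nu*laplace(u*) = f
#   (2) pressure Poisson:       laplace(p1) = (1/k) div(u*)
#   (3) velocity correction:    u1 = u* - k*grad(p1)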
# Assemble matrices
A1 = assemble(a1)
A2 = assemble(a2)
A3 = assemble(a3)
# Use amg preconditioner if available
prec = "amg" if has_krylov_solver_preconditioner("amg") else "default"
# Create files for storing solution
ufile = File("results/velocity.pvd")
pfile = File("results/pressure.pvd")
# Time-stepping
maxtimestep = int(sys.argv[2])
tstep = 0
t = dt
while tstep < maxtimestep:
# Update pressure boundary condition
p_in.t = t
# Compute tentative velocity step
begin("Computing tentative velocity")
b1 = assemble(L1)
[bc.apply(A1, b1) for bc in bcu]
solve(A1, u1.vector(), b1, "gmres", "default")
end()
# Pressure correction
begin("Computing pressure correction")
b2 = assemble(L2)
[bc.apply(A2, b2) for bc in bcp]
solve(A2, p1.vector(), b2, "gmres", prec)
end()
# Velocity correction
begin("Computing velocity correction")
b3 = assemble(L3)
[bc.apply(A3, b3) for bc in bcu]
solve(A3, u1.vector(), b3, "gmres", "default")
end()
# Plot solution [SC14-Catalyst] Not anymore
# plot(p1, title="Pressure", rescale=True)
# plot(u1, title="Velocity", rescale=True)
# Save to file [SC14-Catalyst] Not anymore
# ufile << u1
# pfile << p1
# [SC14-Catalyst] convert solution to VTK grid
ugrid = Mesh2VTKUGrid( u1.function_space().mesh() )
# [SC14-Catalyst] add field data to the VTK grid
velocity = u1.compute_vertex_values()
pressure = p1.compute_vertex_values()
AddFieldData( ugrid, [ ("Velocity",velocity) , ("Pressure",pressure) ] , [] )
# [SC14-Catalyst] trigger catalyst execution
coProcess(ugrid,t,tstep)
# Move to next time step
u0.assign(u1)
t += dt
tstep += 1
print "t =", t, "step =",tstep
# Hold plot [SC14-Catalyst] Not anymore
# interactive()
|
mathstuf/ParaViewCatalystExampleCode
|
PythonDolfinExample/simulation-catalyst-step6.py
|
Python
|
bsd-3-clause
| 8,110
|
[
"ParaView",
"VTK"
] |
c750f04f53ceeae9bb81c55fcdd5a51562e393a5cb89ffa29199078345f5e484
|
import sys
class Node(object):
def children(self):
""" A sequence of all children that are Nodes
"""
pass
def show(self, buf=sys.stdout, offset=0, _my_node_name=None):
""" Pretty print the Node and all its attributes and
children (recursively) to a buffer.
:param buf:
Open IO buffer into which the Node is printed.
:param offset:
Initial offset (amount of leading spaces)
:param _my_node_name:
name of node
"""
lead = ' ' * offset
if _my_node_name is not None:
buf.write(lead + self.__class__.__name__ + ' <' + _my_node_name + '>: ')
else:
buf.write(lead + self.__class__.__name__ + ' <top>: ')
# if self.__class__ == Number:
# print ": " + self.value
nvlist = [(n, getattr(self, n)) for n in self.attr_names]
attrstr = ', '.join('%s=%s' % nv for nv in nvlist)
buf.write(attrstr)
buf.write('\n')
for (child_name, child) in self.children():
## print child
child.show(
buf,
offset=offset + 2,
_my_node_name=child_name)
class NodeVisitor(object):
current_parent = None
def __init__(self):
self.current_child = None
def visit(self, node):
""" Visit a node.
"""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
retval = visitor(node)
return retval
def generic_visit(self, node):
""" Called if no explicit visitor function exists for a
node. Implements preorder visiting of the node.
"""
oldparent = NodeVisitor.current_parent
NodeVisitor.current_parent = node
for c_name, c in node.children():
self.current_child = c_name
self.visit(c)
NodeVisitor.current_parent = oldparent
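# Sketch of a concrete visitor built on the dispatch above (illustrative only,
# not used elsewhere in this module; it assumes the Id node defined below):
#
#   class IdCollector(NodeVisitor):
#       def __init__(self):
#           super(IdCollector, self).__init__()
#           self.names = []
#       def visit_Id(self, node):
#           self.names.append(node.name)
#
#   collector = IdCollector()
#   collector.visit(tree)    # collector.names now lists every Id name in tree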
class FileAST(Node):
def __init__(self, ext, coord=None):
self.ext = ext
self.coord = coord
def __repr__(self):
return "FileAST(%r)" % self.ext
def children(self):
nodelist = []
for i, child in enumerate(self.ext or []):
nodelist.append(("ext[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class Comment(Node):
def __init__(self, value, coord=None):
self.value = value
self.coord = coord
def __repr__(self):
return "Comment(%r)" % self.value
def children(self):
nodelist = []
return tuple(nodelist)
attr_names = ('value',)
class ArrayInit(Node):
def __init__(self, values, coord=None):
self.values = values
self.coord = coord
def __repr__(self):
return "ArrayInit(%r)" % self.values
def children(self):
nodelist = []
for n in self.values:
nodelist.append(n)
return tuple(nodelist)
attr_names = ()
class Constant(Node):
def __init__(self, value, coord=None):
self.value = value
self.coord = coord
def __repr__(self):
return "Constant(%r)" % self.value
def children(self):
nodelist = []
return tuple(nodelist)
attr_names = ('value',)
class Increment(Node):
def __init__(self, name, op, coord=None):
self.name = name
self.op = op
self.coord = coord
def __repr__(self):
return "Increment(%r%r)" % (self.name, self.op)
def children(self):
nodelist = []
nodelist.append(("name", self.name))
return tuple(nodelist)
attr_names = ("op",)
class UnaryBefore(Node):
def __init__(self, op, expr, coord=None):
self.op = op
self.expr = expr
self.coord = coord
def __repr__(self):
return "UnaryBefore(%r %r)" % (self.op, self.expr)
def children(self):
nodelist = []
nodelist.append(("expr", self.expr))
return tuple(nodelist)
attr_names = ("op",)
class Id(Node):
def __init__(self, name, coord=None):
self.name = name
self.coord = coord
def __repr__(self):
return "Id(%r)" % self.name
def children(self):
nodelist = []
return tuple(nodelist)
attr_names = ('name',)
class Include(Node):
def __init__(self, name, coord=None):
self.name = name
self.coord = coord
def __repr__(self):
return "Include(%r)" % self.name
def children(self):
nodelist = []
return tuple(nodelist)
attr_names = ('name',)
class TypeId(Node):
def __init__(self, type, name, coord=None):
self.type = type
self.name = name
self.coord = coord
def __repr__(self):
return "TypeId(%r %r)" % (self.type, self.name)
def children(self):
nodelist = [("name", self.name)]
return tuple(nodelist)
attr_names = ('type',)
class ArrayTypeId(Node):
def __init__(self, type, name, subscript, coord=None):
self.type = type
self.name = name
self.subscript = subscript
self.coord = coord
def __repr__(self):
return "ArrayTypeId(%r %r % r)" % (self.type, self.name, self.subscript)
def children(self):
nodelist = [("name", self.name)]
for count, i in enumerate(self.subscript):
nodelist.append(("subscript %r" % count, i))
return tuple(nodelist)
attr_names = ('type',)
class Assignment(Node):
def __init__(self, lval, rval, op='=', coord=None):
self.lval = lval
self.op = op
self.rval = rval
self.coord = coord
def __repr__(self):
return "Assignment(%r %r %r)" % (self.lval, self.op, self.rval)
def children(self):
nodelist = [("lval", self.lval), ("rval", self.rval)]
return tuple(nodelist)
attr_names = ("op",)
# Special class for grouping statements (no newlines in between)
class GroupCompound(Node):
def __init__(self, statements, coord=None):
self.statements = statements
self.coord = coord
def __repr__(self):
return "GroupCompound({%r})" % self.statements
def children(self):
nodelist = []
count = 0
for i in self.statements:
nodelist.append(("stmt[%r]" % count, i))
count += 1
return tuple(nodelist)
attr_names = ()
class Compound(Node):
def __init__(self, statements, coord=None):
self.statements = statements
self.coord = coord
def __repr__(self):
return "Compound({%r})" % self.statements
def children(self):
nodelist = []
count = 0
for i in self.statements:
nodelist.append(("stmt[%r]" % count, i))
count += 1
return tuple(nodelist)
attr_names = ()
class ArgList(Node):
def __init__(self, arglist, coord=None):
self.arglist = arglist
self.coord = coord
def __repr__(self):
return "ArgList(%r)" % self.arglist
def children(self):
nodelist = []
count = 0
for i in self.arglist:
nodelist.append(("arglist[%r]" % count, i))
count += 1
return tuple(nodelist)
attr_names = ()
class ArrayRef(Node):
def __init__(self, name, subscript, coord=None, extra=dict()):
self.name = name
self.subscript = subscript
self.coord = coord
self.extra = extra
def __repr__(self):
return "ArrayRef(%r%r)" % (self.name, self.subscript)
def children(self):
nodelist = [("name", self.name)]
count = 0
for i in self.subscript:
nodelist.append(("subscript %r" % count, i))
count += 1
return tuple(nodelist)
attr_names = ()
class BinOp(Node):
def __init__(self, lval, op, rval, coord=None):
self.lval = lval
self.rval = rval
self.op = op
self.coord = coord
def __repr__(self):
return "BinOp(%r %r %r)" % (self.lval, self.op, self.rval)
def children(self):
nodelist = []
nodelist.append(("lval", self.lval))
nodelist.append(("rval", self.rval))
return tuple(nodelist)
attr_names = ("op",)
class FuncDecl(Node):
def __init__(self, typeid, arglist, compound, coord=None):
self.typeid = typeid
self.arglist = arglist
self.compound = compound
self.coord = coord
def __repr__(self):
return "FuncDecl(%r %r %r)" % (self.typeid,
self.arglist,
self.compound)
def children(self):
nodelist = []
nodelist.append(("typeid", self.typeid))
nodelist.append(("arglist", self.arglist))
nodelist.append(("compound", self.compound))
return tuple(nodelist)
attr_names = ()
class FuncCall(Node):
def __init__(self, id, arglist, coord=None):
self.id = id
self.arglist = arglist
self.coord = coord
def __repr__(self):
return "FuncCall(%r %r)" % (self.id,
self.arglist)
def children(self):
nodelist = []
nodelist.append(("id", self.id))
nodelist.append(("arglist", self.arglist))
return tuple(nodelist)
attr_names = ()
class ClassMemberFuncCall(Node):
def __init__(self, classname, name, arglist, coord=None):
self.classname = classname
self.name = name
self.arglist = arglist
self.coord = coord
def __repr__(self):
return "ClassMemberFuncCall(%r %r %r)" % (self.classname, self.name, self.arglist)
def children(self):
nodelist = [("classname", self.classname), ("name", self.name), ("arglist", self.arglist)]
return tuple(nodelist)
attr_names = ()
class ForLoop(Node):
def __init__(self, init, cond, inc, compound, coord=None):
self.init = init
self.cond = cond
self.inc = inc
self.compound = compound
def __repr__(self):
return "\nForLoop(%r, %r, %r, %r) " % (self.init.lval.name,
self.cond,
self.inc,
self.compound
)
def children(self):
nodelist = []
nodelist.append(("init", self.init))
nodelist.append(("cond", self.cond))
nodelist.append(("inc", self.inc))
nodelist.append(("compound", self.compound))
return tuple(nodelist)
attr_names = ()
class IfThen(Node):
def __init__(self, cond, compound_list, coord=None):
self.cond = cond
self.compound = GroupCompound(compound_list)
def __repr__(self):
return "If(%r) then {%r}" % (self.cond,
self.compound)
def children(self):
nodelist = []
nodelist.append(("cond", self.cond))
nodelist.append(("compound", self.compound))
return tuple(nodelist)
attr_names = ()
class IfThenElse(Node):
def __init__(self, cond, compound1_list, compound2_list, coord=None):
self.cond = cond
self.compound1 = GroupCompound(compound1_list)
self.compound2 = GroupCompound(compound2_list)
def __repr__(self):
return "If(%r) then {%r} else {%r}" % (self.cond,
self.compound1,
self.compound2)
def children(self):
nodelist = []
nodelist.append(("cond", self.cond))
nodelist.append(("compoundthen", self.compound1))
nodelist.append(("compoundelse", self.compound2))
return tuple(nodelist)
attr_names = ()
class Return(Node):
def __init__(self, expr):
self.expr = expr
def __repr__(self):
return "Return(%r)" % self.expr
def children(self):
nodelist = [("expr", self.expr)]
return tuple(nodelist)
attr_names = ()
# EXTRAS FOR OPTIMIZATION INFORMATION PURPOSES
class Transpose(Node):
def __init__(self, type, name, base_name, hst_name):
self.type = type
self.name = name
self.base_name = base_name
self.hst_name = hst_name
def __repr__(self):
return "Transpose(%r %r %r %r)" % (self.type, self.name, self.base_name, self.hst_name)
def children(self):
nodelist = [("name", self.name), ("base_name", self.base_name), ("hst_name", self.hst_name)]
return tuple(nodelist)
attr_names = ('type',)
class KernelArgDefine(Node):
def __init__(self, name):
self.name = name
def __repr__(self):
return "KernelArgDefine(%r)" % self.name
    def children(self):
        nodelist = [("name", self.name)]
        return tuple(nodelist)
    attr_names = ()
class Stencil(Node):
def __init__(self, name, local_name, size):
self.name = name
self.local_name = local_name
self.size = size
def __repr__(self):
return "Stencil(%r %r %r)" % (self.name, self.local_name, self.size)
def children(self):
nodelist = [("name", self.name), ("local_name", self.local_name)]
return tuple(nodelist)
attr_names = ('size',)
class Block(Node):
def __init__(self, name, size):
self.name = name
self.size = size
def __repr__(self):
return "Block(%r %r)" % (self.name, self.size)
def children(self):
nodelist = [("name", self.name)]
return tuple(nodelist)
attr_names = ('size',)
class ParDim(Node):
def __init__(self, par_dim):
self.par_dim = par_dim
def __repr__(self):
return "ParDim(%r)" % self.par_dim
def children(self):
return tuple([])
attr_names = ('par_dim',)
class ProgramName(Node):
def __init__(self, name):
self.name = name
def __repr__(self):
return "ProgramName(%r)" % self.name
def children(self):
return tuple([])
attr_names = ('name',)
class RawCpp(Node):
def __init__(self, code):
self.code = code
def __repr__(self):
return "RawCpp(%r)" % self.code
def children(self):
return tuple([])
attr_names = ('code',)
class Type(Node):
def __init__(self, type):
self.type = type
def __repr__(self):
return "Type(%r)" % self.type
def children(self):
return tuple([])
attr_names = ('type',)
class Ref(Node):
def __init__(self, expr):
if isinstance(expr, str):
expr = Id(expr)
self.expr = expr
def __repr__(self):
return "Ref(%r)" % self.expr
def children(self):
nodelist = [("var", self.expr)]
return tuple(nodelist)
attr_names = ()
class Cout(Node):
def __init__(self, print_args):
self.print_args = print_args
def __repr__(self):
return "Cout(%r)" % self.print_args
def children(self):
nodelist = [("print_args", self.print_args)]
return tuple(nodelist)
attr_names = ()
class RunOCLArg(Node):
def __init__(self, ocl_arg):
self.ocl_arg = ocl_arg
def __repr__(self):
return "RunOCLArg(%r)" % self.ocl_arg
def children(self):
nodelist = [("ocl_arg", self.ocl_arg)]
return tuple(nodelist)
attr_names = ()
class CppClass(Node):
def __init__(self, name, var_list, public_list, protected_list, private_list):
self.name = name
self.var_list = var_list
self.public_list = public_list
self.protected_list = protected_list
self.private_list = private_list
def __repr__(self):
return "CppClass(%r\n %r\n %r \n %r)" % self.name, self.var_list, \
self.public_list, self.protected_list, self.private_list
def children(self):
nodelist = [("name", self.name), ("var_list", self.var_list),
("public_list", self.public_list), ("protected_list", self.protected_list),
("private_list", self.private_list)]
return tuple(nodelist)
attr_names = ()
class ClassConstructor(Node):
def __init__(self, name, arglist, compound, coord=None):
self.name = name
self.arglist = arglist
self.compound = compound
self.coord = coord
def __repr__(self):
return "ClassConstructor(%r %r %r)" % (self.name,
self.arglist,
self.compound)
def children(self):
nodelist = []
nodelist.append(("name", self.name))
nodelist.append(("arglist", self.arglist))
nodelist.append(("compound", self.compound))
return tuple(nodelist)
attr_names = ()
|
dikujepsen/OpenTran
|
src/framework/lan/lan_ast.py
|
Python
|
mit
| 17,449
|
[
"VisIt"
] |
c3a6736c3bc6a23afe18341e50cdaf1a335a3e79f27c04e79a0aa4190ade979d
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import division, absolute_import
import numpy as np
import os
from six.moves import zip
from nose.plugins.attrib import attr
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, dec)
from unittest import TestCase
import MDAnalysis as mda
from MDAnalysisTests.datafiles import (PDB, PSF, CRD, DCD,
GRO, XTC, TRR, PDB_small, PDB_closed)
from MDAnalysisTests import parser_not_found, tempdir
class TestChainReader(TestCase):
@dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
def setUp(self):
self.universe = mda.Universe(PSF,
[DCD, CRD, DCD, CRD, DCD, CRD, CRD])
self.trajectory = self.universe.trajectory
self.prec = 3
# dummy output DCD file
self.tmpdir = tempdir.TempDir()
self.outfile = os.path.join(self.tmpdir.name, 'chain-reader.dcd')
def tearDown(self):
try:
os.unlink(self.outfile)
except OSError:
pass
del self.universe
del self.tmpdir
def test_next_trajectory(self):
self.trajectory.rewind()
self.trajectory.next()
assert_equal(self.trajectory.ts.frame, 1, "loading frame 2")
def test_n_atoms(self):
assert_equal(self.universe.trajectory.n_atoms, 3341,
"wrong number of atoms")
def test_n_frames(self):
assert_equal(self.universe.trajectory.n_frames, 3 * 98 + 4,
"wrong number of frames in chained dcd")
def test_iteration(self):
for ts in self.trajectory:
pass # just forward to last frame
assert_equal(
self.trajectory.n_frames - 1, ts.frame,
"iteration yielded wrong number of frames ({0:d}), "
"should be {1:d}".format(ts.frame, self.trajectory.n_frames))
def test_jump_lastframe_trajectory(self):
self.trajectory[-1]
assert_equal(self.trajectory.ts.frame + 1, self.trajectory.n_frames,
"indexing last frame with trajectory[-1]")
def test_slice_trajectory(self):
frames = [ts.frame for ts in self.trajectory[5:17:3]]
assert_equal(frames, [5, 8, 11, 14], "slicing dcd [5:17:3]")
def test_full_slice(self):
trj_iter = self.universe.trajectory[:]
frames = [ts.frame for ts in trj_iter]
assert_equal(frames, np.arange(self.universe.trajectory.n_frames))
def test_frame_numbering(self):
self.trajectory[98] # index is 0-based and frames are 0-based
assert_equal(self.universe.trajectory.frame, 98, "wrong frame number")
def test_frame(self):
self.trajectory[0]
coord0 = self.universe.atoms.positions.copy()
# forward to frame where we repeat original dcd again:
# dcd:0..97 crd:98 dcd:99..196
self.trajectory[99]
assert_array_equal(
self.universe.atoms.positions, coord0,
"coordinates at frame 1 and 100 should be the same!")
def test_time(self):
self.trajectory[30] # index and frames 0-based
assert_almost_equal(self.universe.trajectory.time,
30.0,
5,
err_msg="Wrong time of frame")
@dec.slow
def test_write_dcd(self):
"""test that ChainReader written dcd (containing crds) is correct
(Issue 81)"""
with mda.Writer(self.outfile, self.universe.atoms.n_atoms) as W:
for ts in self.universe.trajectory:
W.write(self.universe)
self.universe.trajectory.rewind()
u = mda.Universe(PSF, self.outfile)
for (ts_orig, ts_new) in zip(self.universe.trajectory,
u.trajectory):
assert_almost_equal(
ts_orig._pos,
ts_new._pos,
self.prec,
err_msg="Coordinates disagree at frame {0:d}".format(ts_orig.frame))
class TestChainReaderCommonDt(TestCase):
@dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
def setUp(self):
self.common_dt = 100.0
self.universe = mda.Universe(PSF,
[DCD, CRD, DCD, CRD, DCD, CRD, CRD],
dt=self.common_dt)
self.trajectory = self.universe.trajectory
self.prec = 3
def test_time(self):
# We test this for the beginning, middle and end of the trajectory.
for frame_n in (0, self.trajectory.n_frames // 2, -1):
self.trajectory[frame_n]
assert_almost_equal(self.trajectory.time,
self.trajectory.frame*self.common_dt,
5,
err_msg="Wrong time for frame {0:d}".format(frame_n) )
class TestChainReaderFormats(TestCase):
"""Test of ChainReader with explicit formats (Issue 76)."""
@staticmethod
@attr('issue')
def test_set_all_format_tuples():
universe = mda.Universe(GRO, [(PDB, 'pdb'), (XTC, 'xtc'),
(TRR, 'trr')])
assert_equal(universe.trajectory.n_frames, 21)
@staticmethod
@attr('issue')
@dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
def test_set_one_format_tuple():
universe = mda.Universe(PSF, [(PDB_small, 'pdb'), DCD])
assert_equal(universe.trajectory.n_frames, 99)
@staticmethod
@attr('issue')
def test_set_all_formats():
universe = mda.Universe(PSF, [PDB_small, PDB_closed], format='pdb')
assert_equal(universe.trajectory.n_frames, 2)
|
kain88-de/mdanalysis
|
testsuite/MDAnalysisTests/coordinates/test_chainreader.py
|
Python
|
gpl-2.0
| 6,872
|
[
"MDAnalysis"
] |
61b75731a66a12edca907d66e3819da093f04793972071924e448e10edc0d2e2
|
#
# tsne.py
#
# Implementation of t-SNE in Python. The implementation was tested on Python 2.7.10, and it requires a working
# installation of NumPy. The implementation comes with an example on the MNIST dataset. In order to plot the
# results of this example, a working installation of matplotlib is required.
#
# The example can be run by executing: `ipython tsne.py`
#
#
# Created by Laurens van der Maaten on 20-12-08.
# Copyright (c) 2008 Tilburg University. All rights reserved.
import numpy as Math
import pylab as Plot
def Hbeta(D = Math.array([]), beta = 1.0):
"""Compute the perplexity and the P-row for a specific value of the precision of a Gaussian distribution."""
# Compute P-row and corresponding perplexity
P = Math.exp(-D.copy() * beta);
sumP = sum(P);
H = Math.log(sumP) + beta * Math.sum(D * P) / sumP;
P = P / sumP;
return H, P;
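# Note: H above is the Shannon entropy (in nats) of the normalized row P, so
# the perplexity of P equals exp(H); x2p below tunes beta until H matches
# log(perplexity).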
def x2p(X = Math.array([]), tol = 1e-5, perplexity = 30.0):
"""Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity."""
# Initialize some variables
print("Computing pairwise distances...")
(n, d) = X.shape;
sum_X = Math.sum(Math.square(X), 1);
D = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X);
P = Math.zeros((n, n));
beta = Math.ones((n, 1));
logU = Math.log(perplexity);
# Loop over all datapoints
for i in range(n):
# Print progress
if i % 500 == 0:
print("Computing P-values for point %s of %s"%(i,n))
# Compute the Gaussian kernel and entropy for the current precision
betamin = -Math.inf;
betamax = Math.inf;
Di = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))];
(H, thisP) = Hbeta(Di, beta[i]);
# Evaluate whether the perplexity is within tolerance
Hdiff = H - logU;
tries = 0;
while Math.abs(Hdiff) > tol and tries < 50:
# If not, increase or decrease precision
if Hdiff > 0:
betamin = beta[i].copy();
if betamax == Math.inf or betamax == -Math.inf:
beta[i] = beta[i] * 2;
else:
beta[i] = (beta[i] + betamax) / 2;
else:
betamax = beta[i].copy();
if betamin == Math.inf or betamin == -Math.inf:
beta[i] = beta[i] / 2;
else:
beta[i] = (beta[i] + betamin) / 2;
# Recompute the values
(H, thisP) = Hbeta(Di, beta[i]);
Hdiff = H - logU;
tries = tries + 1;
# Set the final row of P
P[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))] = thisP;
# Return final P-matrix
print("Mean value of sigma: ", Math.mean(Math.sqrt(1 / beta)))
return P;
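# Reading of the bisection above: Hdiff > 0 means the entropy (and hence the
# perplexity) is still too high, i.e. the kernel is too wide, so beta is
# increased; Hdiff < 0 decreases beta. Once both bounds are known, the
# interval [betamin, betamax] is halved each step (at most 50 tries per point).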
def pca(X = Math.array([]), no_dims = 50):
"""Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions."""
print("Preprocessing the data using PCA...")
(n, d) = X.shape;
X = X - Math.tile(Math.mean(X, 0), (n, 1));
(l, M) = Math.linalg.eig(Math.dot(X.T, X));
Y = Math.dot(X, M[:,0:no_dims]);
return Y;
def tsne(X = Math.array([]), no_dims = 2, initial_dims = 50, perplexity = 30.0, iteration=1000):
"""Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions.
    The syntax of the function is Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array."""
# Check inputs
if isinstance(no_dims, float):
print("Error: array X should have type float.")
return -1;
if round(no_dims) != no_dims:
print("Error: number of dimensions should be an integer.")
return -1;
# Initialize variables
X = pca(X, initial_dims).real;
(n, d) = X.shape;
max_iter = iteration;
initial_momentum = 0.5;
final_momentum = 0.8;
eta = 500;
min_gain = 0.01;
Y = Math.random.randn(n, no_dims);
dY = Math.zeros((n, no_dims));
iY = Math.zeros((n, no_dims));
gains = Math.ones((n, no_dims));
# Compute P-values
P = x2p(X, 1e-5, perplexity);
P = P + Math.transpose(P);
P = P / Math.sum(P);
P = P * 4; # early exaggeration
P = Math.maximum(P, 1e-12);
# Run iterations
for iter in range(max_iter):
# Compute pairwise affinities
sum_Y = Math.sum(Math.square(Y), 1);
num = 1 / (1 + Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y));
num[range(n), range(n)] = 0;
Q = num / Math.sum(num);
Q = Math.maximum(Q, 1e-12);
# Compute gradient
PQ = P - Q;
for i in range(n):
dY[i,:] = Math.sum(Math.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (Y[i,:] - Y), 0);
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));
gains[gains < min_gain] = min_gain;
iY = momentum * iY - eta * (gains * dY);
Y = Y + iY;
Y = Y - Math.tile(Math.mean(Y, 0), (n, 1));
# Compute current value of cost function
if (iter + 1) % 10 == 0:
C = Math.sum(P * Math.log(P / Q));
print("Iteration %s, error is %s"%((iter + 1),C))
# Stop lying about P-values
if iter == 100:
P = P / 4;
# Return solution
return Y;
if __name__ == "__main__":
print("Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset.")
print("Running example on 2,500 MNIST digits...")
X = Math.loadtxt("mnist2500_X.txt");
labels = Math.loadtxt("mnist2500_labels.txt");
Y = tsne(X, 2, 50, 20.0);
Plot.scatter(Y[:,0], Y[:,1], 20, labels);
Plot.show();
|
dedert/Brand2Vec
|
tsne.py
|
Python
|
mit
| 5,216
|
[
"Gaussian"
] |
f8bf19e0bc13a86762d5513b57c04216baab594a4d9d43f4466b79d1ebf77cd3
|
# Author: Artem Pulkin
"""
This and other `proxy` modules implement the time-dependent mean-field procedure using the existing pyscf
implementations as a black box. The main purpose of these modules is to overcome the existing limitations in pyscf
(i.e. real-only orbitals, davidson diagonalizer, incomplete Bloch space, etc). The primary performance drawback is that,
unlike the original pyscf routines with an implicit construction of the eigenvalue problem, these modules construct TD
matrices explicitly by proxying to pyscf density response routines with an O(N^4) complexity scaling. As a result,
regular `numpy.linalg.eig` can be used to retrieve TD roots. Several variants of proxy-TD are available:
* `pyscf.tdscf.proxy`: the molecular implementation;
* `pyscf.pbc.tdscf.proxy`: PBC (periodic boundary condition) Gamma-point-only implementation;
* `pyscf.pbc.tdscf.kproxy_supercell`: PBC implementation constructing supercells. Works with an arbitrary number of
k-points but has an overhead due to ignoring the momentum conservation law. In addition, works only with
time reversal invariant (TRI) models: i.e. the k-point grid has to be aligned and contain at least one TRI momentum.
* (this module) `pyscf.pbc.tdscf.kproxy`: same as the above but respects the momentum conservation and, thus, diagonalizes smaller
matrices (the performance gain is the total number of k-points in the model).
"""
# Convention for these modules:
# * PhysERI is the proxying class constructing time-dependent matrices
# * vector_to_amplitudes reshapes and normalizes the solution
# * TDProxy provides a container
from functools import reduce
from pyscf.pbc.tdscf import kproxy_supercell, krhf_slow
import numpy
def kov2ov(nocc, nmo, k):
"""
Converts k point pairs into ov mask.
Args:
nocc (Iterable): occupation numbers per k-point;
nmo (Iterable): numbers of orbitals per k-point;
k (ndarray): k-point pairs;
Returns:
An ov-mask. Basis order: [k_o, o, k_v, v].
"""
nocc = numpy.asanyarray(nocc)
nmo = numpy.asanyarray(nmo)
nvirt = nmo - nocc
mask = numpy.zeros((sum(nocc), sum(nvirt)), dtype=bool)
o_e = numpy.cumsum(nocc)
o_s = o_e - o_e[0]
v_e = numpy.cumsum(nvirt)
v_s = v_e - v_e[0]
for k1, k2 in enumerate(k):
mask[o_s[k1]:o_e[k1], v_s[k2]:v_e[k2]] = True
return mask.reshape(-1)
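# Worked example (illustrative): for nocc = (1, 1), nmo = (2, 2) and k = [0, 1]
# (occupied k-point 0 paired with virtual k-point 0, and 1 with 1), the 2x2
# mask is True at (0, 0) and (1, 1), so the flattened result is
# [True, False, False, True].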
class PhysERI(kproxy_supercell.PhysERI):
def __init__(self, model, proxy, x, mf_constructor, frozen=None, **kwargs):
"""
A proxy class for calculating the TD matrix blocks (k-point version).
Args:
model: the base model with a time reversal-invariant k-point grid;
proxy: a pyscf proxy with TD response function, one of 'hf', 'dft';
x (Iterable): the original k-grid dimensions (numbers of k-points per each axis);
mf_constructor (Callable): a function constructing the mean-field object;
frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals;
**kwargs: arguments to `k2s` function constructing supercells;
"""
super(PhysERI, self).__init__(model, proxy, x, mf_constructor, frozen=frozen, **kwargs)
def get_ov_space_mask(self):
"""
Prepares the space mask in the ov form.
Returns:
The mask in the ov form.
"""
return kproxy_supercell.orb2ov(numpy.concatenate(self.space), self.nocc_full, self.nmo_full)
def kov2ov(self, k):
"""
Converts k-ov mask into ov mask.
Args:
k (ndarray): k-point pairs;
Returns:
An ov-mask. Basis order: [k_o, o, k_v, v].
"""
mask = self.get_ov_space_mask()
return numpy.logical_and(mask, kov2ov(self.nocc_full, self.nmo_full, k))
def proxy_response_ov_batch(self, k_row, k_col):
"""
A raw response submatrix corresponding to specific k-points.
Args:
k_row (ndarray): sets of k-point pairs (row index);
k_col (ndarray): sets of k-point pairs (column index);
Returns:
A raw response matrix.
"""
masks_row = tuple(self.kov2ov(i) for i in k_row)
masks_col = tuple(self.kov2ov(i) for i in k_col)
full_mask_row = reduce(numpy.logical_or, masks_row)
full_mask_col = reduce(numpy.logical_or, masks_col)
big = kproxy_supercell.supercell_response_ov(
self.proxy_vind,
(full_mask_row, full_mask_col),
self.nocc_full,
self.nmo_full,
self.proxy_is_double(),
self.model_super.supercell_inv_rotation,
self.model,
)
result = []
for m_row, m_col in zip(masks_row, masks_col):
m_row_red = m_row[full_mask_row]
m_col_red = m_col[full_mask_col]
result.append(tuple(i[m_row_red][:, m_col_red] for i in big))
return tuple(result)
# This is needed for krhf_slow.get_block_k_ix
get_k_ix = krhf_slow.PhysERI.get_k_ix.im_func
def tdhf_primary_form(self, k):
"""
A primary form of TD matrices (full).
Args:
k (tuple, int): momentum transfer: either a pair of k-point indexes specifying the momentum transfer
vector or a single integer with the second index assuming the first index being zero;
Returns:
Output type: "full", and the corresponding matrix.
"""
r1, r2, c1, c2 = krhf_slow.get_block_k_ix(self, k)
(a, _), (_, b), (_, b_star), (a_star, _) = self.proxy_response_ov_batch((r1, r1, r2, r2), (c1, c2, c1, c2))
return "full", numpy.block([[a, b], [-b_star.conj(), -a_star.conj()]])
vector_to_amplitudes = krhf_slow.vector_to_amplitudes
class TDProxy(kproxy_supercell.TDProxy):
v2a = staticmethod(vector_to_amplitudes)
proxy_eri = PhysERI
def __init__(self, mf, proxy, x, mf_constructor, frozen=None, **kwargs):
"""
Performs TD calculation. Roots and eigenvectors are stored in `self.e`, `self.xy`.
Args:
mf: the base model with a time-reversal invariant k-point grid;
proxy: a pyscf proxy with TD response function, one of 'hf', 'dft';
x (Iterable): the original k-grid dimensions (numbers of k-points per each axis);
mf_constructor (Callable): a function constructing the mean-field object;
frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals;
**kwargs: arguments to `k2s` function constructing supercells;
"""
super(TDProxy, self).__init__(mf, proxy, x, mf_constructor, frozen=frozen, **kwargs)
self.e = {}
self.xy = {}
def kernel(self, k=None):
"""
Calculates eigenstates and eigenvalues of the TDHF problem.
Args:
k (tuple, int): momentum transfer: either an index specifying the momentum transfer or a list of such
indexes;
Returns:
Positive eigenvalues and eigenvectors.
"""
if k is None:
k = numpy.arange(len(self._scf.kpts))
if isinstance(k, int):
k = [k]
for kk in k:
self.e[kk], self.xy[kk] = self.__kernel__(k=kk)
return self.e, self.xy
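# Minimal usage sketch (illustrative only, following the docstrings above; the
# construction of the periodic mean-field object `mf`, of the original k-grid
# dimensions `x` and of `mf_constructor` is assumed and not shown):
#
#   td = TDProxy(mf, "hf", x, mf_constructor)
#   e, xy = td.kernel()   # dicts of roots/vectors keyed by momentum transfer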
|
gkc1000/pyscf
|
pyscf/pbc/tdscf/kproxy.py
|
Python
|
apache-2.0
| 7,386
|
[
"PySCF"
] |
aaa19fc95b214cb7ca682445a7c3bdc67ec148dd4f7ad24e73fdd87ddbb45b60
|
import os
import pickle
import pylab as pl
from operator import itemgetter
import netcdf
import numpy as np
import sys
from operator import mul
from ncdftools import nccopydimension
from Scientific.IO import NetCDF
from array import array
import struct
def nctypecode(dtype):
    # purpose: netcdf typecode from array dtype
    if (dtype == np.dtype('float32')) or (dtype == 'float32'):
        return 'f'
    elif (dtype == np.dtype('float64')) or (dtype == 'float64'):
        return 'd'
    elif (dtype == np.dtype('int32')) or (dtype == 'int32'):
        return 'i'
    elif (dtype == np.dtype('int64')) or (dtype == 'int64'):
        return 'l'
class SomeError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def ncvartypeoffset(ncfile,var):
""" purpose: get binary data type and offset of a variable in netcdf file
    unfortunately, getting these properties is not explicitly implemented in scipy, but most of this code is stolen from scipy: /usr/lib/python2.7/dist-packages/scipy/io/netcdf.py
ncfile is a scipy.io.netcdf.netcdf_file
var variable we want to calculate the offset from
"""
oripos=ncfile.fp.tell()
ncfile.fp.seek(0)
magic = ncfile.fp.read(3)
ncfile.__dict__['version_byte'] = np.fromstring(ncfile.fp.read(1), '>b')[0]
# Read file headers and set data.
ncfile._read_numrecs()
ncfile._read_dim_array()
ncfile._read_gatt_array()
header = ncfile.fp.read(4)
count = ncfile._unpack_int()
vars = []
for ic in range(count):
vars.append(list(ncfile._read_var()))
ivar = np.where(np.array(vars) == var)[0][0]
ncfile.fp.seek(oripos)
return vars[ivar][6] , vars[ivar][7]
def rwicecube(filestream,shp,refiter,dimiter,dimpos,refnoiter,dimnoiter,icecube,vtype,vsize,voffset,rwchsize,mode):
"""
read or write data icecube from binary data and put it in an array
filestream: binary file reference
shp: shape of the filestream
refiter: reference to dimensions over which no slice is performed
    dimpos: current index position of the non-sliced dimensions
"""
# e.g. shp = (200,100,50,50,20)
# refiter = (1,3,4)
# dimpos = (5,10,9)
# extend so that structured arrays are read at once
lennoiter = long(1)
for irefnoiter,erefnoiter in enumerate(refnoiter):
lennoiter = lennoiter*dimnoiter[irefnoiter]
fpos = 0
# e.g. fpos = (9)+ 20*(10) + 50*50*20*(5)
for idimpos,edimpos in enumerate(dimpos):
curadd = np.mod(edimpos,dimiter[idimpos])
#e.g. if edimpos == (5): curadd = 50*50*20*(5)
# exclude trivial special case of only 1 iteration step
# --> in that case fpos is just zero.
if refiter != [-1]:
if ((refiter[idimpos] + 1) < len(shp)):
for i in range(refiter[idimpos] + 1,len(shp)) :
curadd = curadd * shp[i]
fpos = fpos + curadd
# Initialize (for reading) or prepare (for writing) icecube array
if mode == 'read':
icecube = np.zeros((lennoiter,),dtype=vtype)*np.nan
elif mode == 'write':
icecube = np.reshape(icecube,(lennoiter,))
dimnoiterpos = [0]*len(dimnoiter)
# print icecube,dimnoiterpos
j = 0
while j < lennoiter:
fposicecube = fpos
for idimpos,edimpos in enumerate(dimnoiterpos):
curadd = np.mod(edimpos,dimnoiter[idimpos])
# e.g. fposicecube = (1)*52
# e.g. fposicecube = (9)+ 20*(10) + 50*50*20*(5)
if ((refnoiter[idimpos] + 1) < len(shp)):
for i in range(refnoiter[idimpos] + 1,len(shp)) :
curadd = curadd * shp[i]
fposicecube = fposicecube + curadd
if mode == 'read':
filestream.seek(voffset+vsize*fposicecube)
temp = np.fromfile(filestream,dtype='='+vtype[1],count=rwchsize)
temp.byteswap(True)
icecube[j:(j+rwchsize)] = temp
elif mode == 'write':
filestream.seek(voffset+vsize*fposicecube)
fpointout.seek(voffset+vsize*fposicecube)
# filestream.seek(voffset+vsize*fposicecube)
testdata[fposicecube:(fposicecube+rwchsize)] = np.array(icecube[j:(j+rwchsize)],dtype=vtype[1])
# little = struct.pack('>'+'d'*len(icecube[j:(j+rwchsize)]), *icecube[j:(j+rwchsize)])
# # Seek to offset based on piece index
# #print little
# filestream.write(little)
# filestream.write(np.array(icecube[j:(j+rwchsize)],dtype=vtype))
# # np.array(icecube[j:(j+rwchsize)],dtype=vtype[1]).byteswap().tofile(filestream)
temp = np.array(icecube[j:(j+rwchsize)],dtype='>d')
filestream.write(temp)
fpointout.write(temp)
# # print temp
# # filestream.write(temp[:])
# # little = struct.pack('<'+'B'*len(temp), *temp)
# # print icecube.byteswap().dtype
# # print voffset, vsize, fposicecube, vtype, rwchsize, icecube.dtype# ,icecube[j:(j+rwchsize)]
# go to next data strip
if dimnoiterpos != []:
# rwchsize: allow reading of chunks for the inner dimensions
dimnoiterpos[-1] = dimnoiterpos[-1] + rwchsize
for idimidx,edimidx in enumerate(reversed(dimnoiterpos)):
if idimidx > 0:
while dimnoiterpos[idimidx] >= dimnoiter[idimidx]:
dimnoiterpos[idimidx-1] = dimnoiterpos[idimidx-1] + 1
dimnoiterpos[idimidx] -= dimnoiter[idimidx]
j = j+rwchsize
icecube.shape = dimnoiter
if mode == 'read':
return icecube
def writeicecubeps(fstream,shp,refiter,dimiter,dimiterpos,refnoiter,dimnoiter,data,vtype,vsize,voffset,rwchsize):
"""
write an icecube and perform an in-memory Post Swap of dimensions before (very fast)
hereby, we acquire the order of the icecube dimensions
"""
refnoitersort,trns,dimnoitersort = zip(*sorted(zip(refnoiter,range(len(refnoiter)),dimnoiter),key=itemgetter(0,1)))
rwicecube(fstream,shp,refiter,dimiter,dimiterpos,refnoitersort,dimnoitersort,np.transpose(data,trns),vtype,vsize,voffset,rwchsize,'write')
def readicecubeps(fstream,shp,refiter,dimiter,dimiterpos,refnoiter,dimnoiter,vtype,vsize,voffset,rwchsize):
"""
read an icecube by sorting the indices (highest at the back).
perform an in-memory Post Swap of dimensions (very fast) to compensate for the sorting.
we allow reading in chunks according to the inner dimensions. They will be mostly there because we allow an max-icecubesize
"""
refnoitersort,trns,dimnoitersort = zip(*sorted(zip(refnoiter,range(len(refnoiter)),dimnoiter),key=itemgetter(0,1)))
icecube =rwicecube(fstream,shp,refiter,dimiter,dimiterpos,refnoitersort,dimnoitersort,None,vtype,vsize,voffset,rwchsize,'read')
# build the 'inverse permutation' operator for tranposition before writeout
inv = range(len(trns))
for itrns, etrns in enumerate(trns):
inv[etrns] = itrns
return np.transpose(icecube,inv)
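# In short: rwicecube/readicecubeps stream an "icecube" (the hyperslab spanned
# by the non-iterated dimensions) directly from the binary section of the
# netcdf file. The dimensions are first sorted so that the innermost (fastest
# varying) axes can be read in contiguous chunks of length rwchsize, and the
# resulting array is transposed back with the inverse permutation afterwards.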
fnin = '/home/hendrik/data/belgium_aq/rcm/aq09/stage1/int2lm/laf2009010100_urb_ahf.nc'
print fnin
# fobjin = open(fnin,'rb')
fin = NetCDF.NetCDFFile(fnin,'r')
fnout = '/home/hendrik/data/belgium_aq/rcm/aq09/stage1/int2lm/laf2009010100_urb_ahf2.nc'
os.system('rm '+fnout)
print fnout
# fobjout = open(fnout,'wb+')
fout = NetCDF.NetCDFFile(fnout,'w')
fnpointout = '/home/hendrik/data/belgium_aq/rcm/aq09/stage1/int2lm/laf2009010100_urb_ahf4.nc'
os.system('rm '+fnpointout)
print fnpointout
# fobjout = open(fnpointout,'wb+')
fpointout = open(fnpointout,'w')
# let's try creating a variable with a pre-specified dimension!
datin = [[fin,'QV'],[fin,'rlat']]
datout = [[fout,'QV'],[fout,'TEST']]
# adtypeoutspec = [None,None] # to be obtained automatically from the data output stream (if it already exists)
# selection of function dimension input
func = lambda x, y: (np.array([[[np.mean(x)]],[[np.mean(x)]]],dtype=np.float) , np.array([[[np.mean(x)]],[[np.mean(x)]]],dtype=np.float)) # *(1.+np.zeros(x.shape))
dnamsel = ('rlon','time','t')
# obtain definitions of the variable stream input
vsdin = [] # input variable stream definitions
for idatin,edatin in enumerate(datin):
# read in scipy.netcdf mode to obtain varariable offsets
# obtain file name from open netcdf!! very nasty!!!
ncfn = str(datin[idatin][0])[19:(str(datin[idatin][0]).index("'",19))]
nctemp = netcdf.netcdf_file(ncfn,'r')
# nctemp = datin[idatin][0]
vsdin.append(dict())
vsdin[idatin]['dnams'] = []
for idim,edim in enumerate(nctemp.variables[datin[idatin][1]].dimensions):
vsdin[idatin]['dnams'].append(str(edim))
vsdin[idatin]['dims'] = list(nctemp.variables[datin[idatin][1]].shape)
vsdin[idatin]['itemsize'] = nctemp.variables[datin[idatin][1]].itemsize()
vsdin[idatin]['dtype'] = nctemp.variables[datin[idatin][1]]._dtype
vsdin[idatin]['voffset'] = nctemp.variables[datin[idatin][1]]._voffset
nctemp.close()
# obtain definitions of the variable stream output
vsdout = [] # input variable stream definitions
for idatout,edatout in enumerate(datout):
vsdout.append(dict())
if edatout[1] in edatout[0].variables:
vsdout[idatout]['dnams'] = []
for idim,edim in enumerate(datout[idatout][0].variables[datout[idatout][1]].dimensions):
vsdout[idatout]['dnams'].append(str(edim))
vsdout[idatout]['dims'] = list(datout[idatout][0].variables[datout[idatout][1]].shape)
vsdout[idatout]['itemsize'] = datout[idatout][0].variables[datout[idatout][1]].itemsize()
vsdout[idatout]['dtype']= datout[idatout][0].variables[datout[idatout][1]]._dtype
vsdout[idatout]['voffset'] = datout[idatout][0].variables[datout[idatout][1]]._voffset
else:
        # the variable doesn't exist yet (we will create it afterwards)
vsdout[idatout]['dnams'] = None
vsdout[idatout]['dims'] = None
vsdout[idatout]['itemsize'] = None
vsdout[idatout]['dtype'] = None
# collecting the involved dimensions (will be considered as the standard output dimensions)
dnamsstd = [] # standard output dimensions: list of all output dimensions: this is collected from the input dimensions, the output dimensions and the selected/processed dimensions
dimsstd = [] # maximum length of an output dimension
idimsstd = 0
for ivsdin,evsdin in enumerate(vsdin):
dnaminlast = None
index = 0
for idnam,ednam in reversed(list(enumerate(evsdin['dnams']))):
if ednam not in dnamsstd:
# In dnamsstd, ednam should be just after the dimensions preceding ednams in dnams
# # actually, we also want that, in dnamsstd, ednam should be just before the dimensions succeeding ednams in dnams. Sometimes, this is not possible at the same time. But it will be the case if that is possible when applying one of the criteria
index = 0
# print 'dnamsstd: ', evsdin,dnamsstd
for idnam2,ednam2 in enumerate(dnamsstd):
# print ednam,ednam2,idnam2,evsdin['dnams'][0:idnam2+1]
if ednam2 in evsdin['dnams'][0:(idnam+1)]:
# print index
index = max(index,dnamsstd.index(ednam2) + 1)
dnamsstd.insert(index,ednam)
if ednam not in dnamsel:
dimsstd.insert(index,int(vsdin[ivsdin]['dims'][idnam]))
else:
# In this case, wait for assigning the output dimensions. This actually depends on the specified function
dimsstd.insert(index,None)
else:
if ((vsdin[ivsdin]['dims'][idnam] != 1) & (dimsstd[dnamsstd.index(ednam)] != 1) & \
# we allow non-equal dimension lengths, as long as the dimension is covered/captured by the function
# maybe still allow non-equal dimension length not covered by the function????
(dimsstd[dnamsstd.index(ednam)] != None) & \
(vsdin[ivsdin]['dims'][idnam] != dimsstd[dnamsstd.index(ednam)])):
                raise SomeError("The corresponding output dimensions (index: "+str(dnamsstd.index(ednam))+") of the input variable "+str(ivsdin)+ " "+ str(idnam)+ " "+" have a different length and are not equal to 1.")
else:
# None means it's considered by the function
if (dimsstd[dnamsstd.index(ednam)] != None):
dimsstd[dnamsstd.index(ednam)] = max(dimsstd[dnamsstd.index(ednam)],vsdin[ivsdin]['dims'][idnam])
print 'Preliminary output dimensions: ', zip(dnamsstd,dimsstd)
idnam = 0
# add the missing dimensions selected for the function
for idnamsel,ednamsel in enumerate(dnamsel):
if ednamsel not in dnamsstd:
dnamsstd.insert(idnam,ednamsel)
dimsstd.insert(idnam,None) # to be defined from the function
        idnam = idnam+1 # shouldn't this also be implemented above?
else:
idnam = dnamsstd.index(ednam)+1
# adimsstd: list the specific output dimensions
# if function dimension: data output dimension should be the same as the function output dimension, but this should be checked afterwards.
# if not function dimension:
# # look what's the output dimension like. If the dimension is not in the output variable, we add a dummy 1-dimension
# we need to create/list adimsstd also before!! And then append them with the missing dimensions, as dummy 1-dimensions. If that is not sufficient, we will just get an error message.
# get references to the standard output dimensions on which the function is applied
refdfuncstd = []
for idnamsel,ednamsel in enumerate(dnamsel):
refdfuncstd.append(dnamsstd.index(ednamsel))
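# Hypothetical illustration (names not taken from the actual files): with
# dnamsstd == ['t', 'time', 'level', 'rlat', 'rlon'] and dnamsel == ('rlon', 'time', 't'),
# refdfuncstd would become [4, 1, 0].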
# all output dimensions are now collected...
# add the standard output dimensions that are missing in each separate input variable as a dummy 1-dimension
for ivsdin,evsdin in enumerate(vsdin):
idnam = 0
for idnamsstd,ednamsstd in enumerate(dnamsstd):
if ednamsstd not in vsdin[ivsdin]['dnams']:
vsdin[ivsdin]['dnams'].insert(idnam,ednamsstd)
vsdin[ivsdin]['dims'].insert(idnam,1)
idnam = idnam + 1
else:
idnam = vsdin[ivsdin]['dnams'].index(ednamsstd) + 1
# do the same for the data output variables
# # vsdin[ivsdin]['refdstd']: references of data stream dimensions (vsdin[..]['dnams'] to the standard dimensions (dnamsstd)
for ivsdin,evsdin in enumerate(vsdin):
vsdin[ivsdin]['refdstd']= list([])
for idim,edim in enumerate(vsdin[ivsdin]['dnams']):
vsdin[ivsdin]['refdstd'].append(dnamsstd.index(edim))
for ivsdout,evsdout in enumerate(vsdout):
if vsdout[ivsdout]['dnams'] == None:
vsdout[ivsdout]['dnams'] = dnamsstd
# adimfuncin: the input dimensions of the function based on the refdfuncstd
# adimfuncin: the dimensions of the function input
adimfuncin = np.zeros((len(vsdin),len(refdfuncstd)),dtype='int32') - 1
alenfuncin = []
for ivsdout in range(len(vsdout)):
if vsdout[ivsdout]['dnams'] == None:
        vsdout[ivsdout]['dnams'] = dnamsstd
# vsdout[..]['refdstd']: references of data stream dimensions (vsdout[..]['dnams'] to the standard dimensions (dnamsstd)
for ivsdout,evsdout in enumerate(vsdout):
vsdout[ivsdout]['refdstd'] = list([])
for idim,edim in enumerate(vsdout[ivsdout]['dnams']):
vsdout[ivsdout]['refdstd'].append(dnamsstd.index(edim))
# arefdfuncout: references of the function dimensions to the data output stream dimensions
arefdfuncout = []
for ivsdout,evsdout in enumerate(vsdout):
arefdfuncout.append([])
for idnamsel,ednamsel in enumerate(dnamsel):
arefdfuncout[ivsdout].append(vsdout[ivsdout]['dnams'].index(ednamsel))
# is arefdfuncout[ivsdout][irefdfuncout] == vsdout[ivsdout]['refdstd'].index(erefdfuncstd) ???
# arefdfuncin: references of the function dimensions to the data input stream dimensions
arefdfuncin = []
for ivsdin,evsdin in enumerate(vsdin):
arefdfuncin.append([])
for idnamsel,ednamsel in enumerate(dnamsel):
arefdfuncin[ivsdin].append(vsdin[ivsdin]['dnams'].index(ednamsel))
# to do next:::...
for ivsdin,evsdin in enumerate(vsdin):
for irefdfuncstd,erefdfuncstd in enumerate(refdfuncstd):
adimfuncin[ivsdin,irefdfuncstd] = evsdin['dims'][vsdin[ivsdin]['refdstd'].index(erefdfuncstd)]
alenfuncin.append(reduce(mul,adimfuncin[ivsdin]))
# 'probe' function output dimensions
dummydat = []
for ivsdin,evsdin in enumerate(vsdin):
dummydat.append(np.zeros(adimfuncin[ivsdin]))
ddout = func(*dummydat)
if (type(ddout).__name__ == 'tuple'):
ddout = list(ddout)
if (type(ddout).__name__ != 'list'):
ddout = list([ddout])
# obtain output data type. If not specified, we obtain it from the function output.
# meanwhile, check whether the number of input dimensions are the same as the number of output dimensions.
if len(ddout) != len(vsdout):
    raise SomeError('the number of output variables from '+ str(func) + ' ('+str(len(ddout))+') is not the same as specified ('+str(len(vsdout))+')')
for iddout in range(len(ddout)):
if type(ddout[iddout] ) != np.ndarray:
ddout[iddout] = np.array(ddout[iddout])
if (len(np.array(ddout[iddout]).shape) != len(adimfuncin[iddout])):
        raise SomeError('The number of input ('+str(len(adimfuncin[iddout]))+') and output dimensions ('+str(len(ddout[iddout].shape))+') of the function is not the same')
if vsdout[iddout]['dims'] == None:
vsdout[iddout]['dims'] = dimsstd
# overwrite dimensions with the function output dimensions
for irefdfuncout,erefdfuncout in enumerate(arefdfuncout[iddout]):
vsdout[iddout]['dims'][erefdfuncout] = ddout[iddout].shape[irefdfuncout]
if vsdout[iddout]['dtype'] == None:
# output netcdf variable does not exist... creating
        # NetCDF classic files store values big-endian, hence the '>' byte-order prefix below
vsdout[iddout]['dtype'] = '>'+nctypecode(ddout[iddout].dtype)
# try to copy dimension from data input
for idim,edim in enumerate(vsdout[iddout]['dnams']):
if edim not in datout[iddout][0].dimensions:
dimensionfound = False
idatin = 0
# try to copy the dimension from the input data
while ((not dimensionfound) & (idatin < (len(datin) ))):
if edim in datin[idatin][0].dimensions:
if (vsdout[iddout]['dims'][idim] == datin[idatin][0].dimensions[edim]):
print datin[idatin][0],datout[iddout][0], edim
nccopydimension(datin[idatin][0],datout[iddout][0], edim)
dimensionfound = True
idatin = idatin + 1
if dimensionfound == False:
datout[iddout][0].createDimension(edim,vsdout[iddout]['dims'][idim])
datout[iddout][0].createVariable(datout[iddout][1],vsdout[iddout]['dtype'][1],tuple(vsdout[iddout]['dnams']))
# we should check this at the time the dimensions are not created
if (vsdout[iddout]['dims'] != list(datout[iddout][0].variables[datout[iddout][1]].shape)):
raise SomeError("dimensions of output file ( "+str(vsdout[iddout]['dims'])+"; "+ str(vsdout[iddout]['dnams'])+") do not correspond with intended output dimension "+str(datout[iddout][0].variables[datout[iddout][1]].shape)+"; "+str(datout[iddout][0].variables[datout[iddout][1]].dimensions))
for idatin,edatin in enumerate(datin):
# obtain file pointer!! very nasty!!
ncfn = str(datin[idatin][0])[19:(str(datin[idatin][0]).index("'",19))]
vsdin[idatin]['fp'] = open(ncfn,'r')
for idatout,edatout in enumerate(datout):
# obtain file pointer!! very nasty!!
datout[idatout][0].flush()
ncfn = str(datout[idatout][0])[19:(str(datout[idatout][0]).index("'",19))]
vsdout[idatout]['fp'] = open(ncfn,'r+')
# in order to discover variable offsets
nctemp = netcdf.netcdf_file(ncfn,'r')
vsdout[idatout]['itemsize'] = nctemp.variables[datout[idatout][1]].itemsize()
vsdout[idatout]['voffset'] = nctemp.variables[datout[idatout][1]]._voffset
nctemp.close()
# # next: check whether the output variable dimensions (if already present) are not too large, otherwise raise error. + Construct final output dimension specs
# to do next:::...
# adimfuncout: the dimensions of the function output
adimfuncout = np.zeros((len(vsdout),len(refdfuncstd)),dtype='int32') - 1
alenfuncout = []
for ivsdout,evsdout in enumerate(vsdout):
for irefdfuncstd,erefdfuncstd in enumerate(refdfuncstd):
adimfuncout[ivsdout,irefdfuncstd] = evsdout['dims'][vsdout[ivsdout]['refdstd'].index(erefdfuncstd)]
# # or ...
# for irefdfuncout,erefdfuncout in enumerate(arefdfuncout[ivsdout]):
# adimfuncout[ivsdout,irefdfuncstd] = evsdout['dims'][erefdfuncout]
alenfuncout.append(reduce(mul,adimfuncout[ivsdout]))
# ???arefdfuncout[ivsdout][irefdfuncout] == vsdout[ivsdout]['refdstd'].index(erefdfuncstd)
# make copies of adimfunc*, alenfunc*, arefdfunc*
# lennoiterstd = list(lenfuncstd)
# dimnoiterstd = list(dimdfuncstd)
refdnoiterstd = list(refdfuncstd)
alendnoiterin = list(alenfuncin)
adimnoiterin = []
arefdnoiterin = []
for ivsdin,evsdin in enumerate(vsdin):
adimnoiterin.append(list(adimfuncin[ivsdin]))
arefdnoiterin.append(list(arefdfuncin[ivsdin]))
alendnoiterout = list(alenfuncout)
adimnoiterout = []
arefdnoiterout = []
for ivsdout,evsdout in enumerate(vsdout):
adimnoiterout.append(list(adimfuncout[ivsdout]))
arefdnoiterout.append(list(arefdfuncout[ivsdout]))
# arefsin: references of the standard dimensions to the data stream dimensions
arefsin = []
for ivsdin,evsdin in enumerate(vsdin):
arefsin.append([None]*len(vsdin[ivsdin]['refdstd']))
# loop over the data stream dimensions
for irefdstd,erefdstd in enumerate(vsdin[ivsdin]['refdstd']):
arefsin[ivsdin][erefdstd] = irefdstd
# arefsout: references of the standard dimensions to the data stream dimensions
arefsout = []
for ivsdout,evsdout in enumerate(vsdout):
arefsout.append([None]*len(vsdout[ivsdout]['refdstd']))
# loop over the data stream dimensions
for irefdstd,erefdstd in enumerate(vsdout[ivsdout]['refdstd']):
arefsout[ivsdout][erefdstd] = irefdstd
dnamselnoiter = list(dnamsel)
# membytes: minimum total memory that will be used. We will increase the usage when possible/allowed.
membytes = 0
for ivsdin,evsdin in enumerate(vsdin):
membytes = membytes + alenfuncin[ivsdin] * vsdin[ivsdin]['itemsize']
for ivsdout,evsdout in enumerate(vsdout):
membytes = membytes + alenfuncout[ivsdout] * vsdout[ivsdout]['itemsize']
maxmembytes = 1000000
if membytes > maxmembytes:
print 'Warning, used memory ('+str(membytes)+') exceeds maximum memory ('+str(maxmembytes)+').'
else:
# a temporary copy of alennoiter*
alendnoiterin_tmp = list(alendnoiterin)
alendnoiterout_tmp = list(alendnoiterout)
    # we will try to read the data in even larger icecubes to reduce disk access!
idnam = len(dnamsstd) - 1
cont = True
while ((idnam >= 0) & (membytes <= maxmembytes) & cont):
        # this while loop is quite extensive but does what it should -> could be reduced and simplified
        cont = False # only continue to the next loop if idnam+1 (in the previous loop) was (inserted) in refdnoiterstd
if idnam not in refdnoiterstd:
for ivsdin,evsdin in enumerate(vsdin):
alendnoiterin_tmp[ivsdin] = alendnoiterin_tmp[ivsdin] *vsdin[ivsdin]['dims'][arefsin[ivsdin][idnam]]
for ivsdout,evsdout in enumerate(vsdout):
alendnoiterout_tmp[ivsdout] = alendnoiterout_tmp[ivsdout] *vsdout[ivsdout]['dims'][arefsout[ivsdout][idnam]]
# recalculate the amount of bytes
tmpmembytes = 0
for ivsdin,evsdin in enumerate(vsdin):
tmpmembytes = tmpmembytes + alendnoiterin_tmp[ivsdin] * vsdin[ivsdin]['itemsize']
for ivsdout,evsdout in enumerate(vsdout):
tmpmembytes = tmpmembytes + alendnoiterout_tmp[ivsdout] * vsdout[ivsdout]['itemsize']
print 'tmpmembytes', tmpmembytes, membytes
            # if the used memory stays below the threshold, we add the current dimension to the icecubes
if tmpmembytes <= maxmembytes:
refdnoiterstd.insert(0,idnam)
for ivsdin,evsdin in enumerate(vsdin):
arefdnoiterin[ivsdin].insert(0, arefsin[ivsdin][idnam])
adimnoiterin[ivsdin].insert(0,vsdin[ivsdin]['dims'][arefsin[ivsdin][idnam]])
alendnoiterin[ivsdin] = alendnoiterin[ivsdin] *vsdin[ivsdin]['dims'][arefsin[ivsdin][idnam]]
for ivsdout,evsdout in enumerate(vsdout):
arefdnoiterout[ivsdout].insert(0, arefsout[ivsdout][idnam])
adimnoiterout[ivsdout].insert(0,vsdout[ivsdout]['dims'][arefsout[ivsdout][idnam]])
alendnoiterout[ivsdout] = alendnoiterout[ivsdout] *vsdout[ivsdout]['dims'][arefsout[ivsdout][idnam]]
dnamselnoiter.insert(0,dnamsstd[idnam])
# recalculate the amount of bytes
membytes = 0
for ivsdin,evsdin in enumerate(vsdin):
membytes = membytes + alendnoiterin[ivsdin] * vsdin[ivsdin]['itemsize']
for ivsdout,evsdout in enumerate(vsdout):
membytes = membytes + alendnoiterout[ivsdout] * vsdout[ivsdout]['itemsize']
print 'membytes',membytes
cont = True
            # otherwise, leave the icecube dimensions unchanged but still move on to the next (outer) dimension
else:
cont = True
idnam = idnam - 1
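    # Illustrative (hypothetical dimension names): starting from the function dimensions only,
    # outer dimensions such as 'lev' or 'time' are prepended to the icecube one by one, as long
    # as the total buffer size (summed over all input and output streams) stays below maxmembytes.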
# adimnoiterin[ivsdin,irefdnoiterstd] = evsdin['dims'][vsdin[ivsdin]['refdstd'].index(erefdnoiterstd)]
# arefdfuncin: references of the function dimensions to the data input stream dimensions
# arefdnoiterin: references of the icecube dimensions to the data input stream dimensions
# # vsdin[ivsdin]['refdstd']: references of data stream dimensions (vsdin[..]['dnams'] to the standard dimensions (dnamsstd)
# dnamselnoiter: names of the non-iterated (icecube) dimensions
# guess from residual dimensions that are not in refnoiterin
refditerstd = []
dimiterstd = []
for idim,edim in enumerate(dimsstd):
if idim not in refdnoiterstd:
refditerstd.append(idim)
dimiterstd.append(edim)
# guess from residual dimensions that are not in refnoiterin
arefditerin = []
adimiterin = []
for ivsdin,evsdin in enumerate(vsdin):
arefditerin.append([])
adimiterin.append([])
for idim,edim in enumerate(vsdin[ivsdin]['dims']):
if idim not in arefdnoiterin[ivsdin]:
arefditerin[ivsdin].append(idim)
adimiterin[ivsdin].append(edim)
# guess from residual dimensions that are not in refnoiterin
arefditerout = []
adimiterout = []
for ivsdout,evsdout in enumerate(vsdout):
arefditerout.append([])
adimiterout.append([])
for idim,edim in enumerate(vsdout[ivsdout]['dims']):
if idim not in arefdnoiterout[ivsdout]:
arefditerout[ivsdout].append(idim)
adimiterout[ivsdout].append(edim)
dimitermax = []
for iref,eref in enumerate(refditerstd):
dimitermax.append(1)
for ivsdin,evsdin in enumerate(vsdin):
dimitermax[iref] = max(dimitermax[iref],adimiterin[ivsdin][iref])
print dimitermax[iref], adimiterin[ivsdin][iref]
for ivsdout,evsdout in enumerate(vsdout):
dimitermax[iref] = max(dimitermax[iref],adimiterout[ivsdout][iref])
rwchunksizein = [1]*len(vsdin)
for ivsdin,evsdin in enumerate(vsdin):
idim = len(vsdin[ivsdin]['dims']) -1
while ((idim in arefdnoiterin[ivsdin]) & (idim >= 0)):
        # the inner dimensions only need to be referenced here, not necessarily in the correct order; we know that they will be read in the correct order in the end
rwchunksizein[ivsdin] = rwchunksizein[ivsdin]*vsdin[ivsdin]['dims'][idim]
idim = idim - 1
rwchunksizeout = [1]*len(vsdout)
for ivsdout,evsdout in enumerate(vsdout):
idim = len(vsdout[ivsdout]['dims']) -1
while ((idim in arefdnoiterout[ivsdout]) & (idim >= 0)):
        # the inner dimensions only need to be referenced here, not necessarily in the correct order; we know that they will be written in the correct order in the end
rwchunksizeout[ivsdout] = rwchunksizeout[ivsdout]*vsdout[ivsdout]['dims'][idim]
idim = idim - 1
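# Illustrative (hypothetical shapes): if an output variable has dimensions
# (time, lev, rlat, rlon) = (8, 40, 40, 340) and only rlat and rlon are icecube
# (non-iterated) dimensions, the contiguous read/write chunk becomes 40*340 = 13600 items.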
adimnoapplyout = []
alennoapplyout = []
for ivsdout,evsdout in enumerate(vsdout):
adimnoapplyout.append([])
alennoapplyout.append(1)
for irefdnoiterout in range(len(arefdnoiterout[ivsdout])-len(arefdfuncout[ivsdout])):
adimnoapplyout[ivsdout].append(adimnoiterout[ivsdout][irefdnoiterout])
alennoapplyout[ivsdout] =alennoapplyout[ivsdout]*adimnoapplyout[ivsdout][-1]
if adimnoapplyout[ivsdout] == []:
adimnoapplyout[ivsdout] = [1]
adimnoapplyin = []
alennoapplyin = []
for ivsdin,evsdin in enumerate(vsdin):
adimnoapplyin.append([])
alennoapplyin.append(1)
for irefdnoiterin in range(len(arefdnoiterin[ivsdin])-len(arefdfuncin[ivsdin])):
adimnoapplyin[ivsdin].append(adimnoiterin[ivsdin][irefdnoiterin])
alennoapplyin[ivsdin] =alennoapplyin[ivsdin]*adimnoapplyin[ivsdin][-1]
if adimnoapplyin[ivsdin] == []:
adimnoapplyin[ivsdin] = [1]
dimnoapplymax = []
for iref in range(len(arefdnoiterout[ivsdout])-len(arefdfuncout[ivsdout])):
dimnoapplymax.append(1)
for ivsdin,evsdin in enumerate(vsdin):
dimnoapplymax[iref] = max(dimnoapplymax[iref],adimnoapplyin[ivsdin][iref])
print dimnoapplymax[iref], adimnoapplyin[ivsdin][iref]
for ivsdout,evsdout in enumerate(vsdout):
dimnoapplymax[iref] = max(dimnoapplymax[iref],adimnoapplyout[ivsdout][iref])
lennoapplymax = reduce(mul,dimnoapplymax)
testdata = np.zeros(vsdout[0]['dims']).ravel()
lenitermax = reduce(mul,dimitermax)
dimiterpos = [0]*len(dimitermax)
print str(0)+'/'+str(lenitermax),
for j in range(lenitermax):
# reading icecube, rearranged in the order of dimensions specified by arefnoiterin
dataicecubein = []
for ivsdin,evsdin in enumerate(vsdin):
# dataicecubein.append(np.zeros((elendnoiterin,),dtype=vsdin[ilendnoiterin]['dtype']))
dataicecubein.append(np.array(readicecubeps(\
vsdin[ivsdin]['fp'],\
vsdin[ivsdin]['dims'],\
arefditerin[ivsdin],\
adimiterin[ivsdin],\
dimiterpos,\
arefdnoiterin[ivsdin],\
adimnoiterin[ivsdin],\
vsdin[ivsdin]['dtype'],\
vsdin[ivsdin]['itemsize'],\
vsdin[ivsdin]['voffset'],\
rwchunksizein[ivsdin],\
), dtype=vsdin[ivsdin]['dtype']).ravel())
dataicecubeout = []
for ilendnoiterout,elendnoiterout in enumerate(alendnoiterout):
dataicecubeout.append(np.zeros((elendnoiterout,),dtype=vsdout[ilendnoiterout]['dtype'][1]))
dimnoapplypos = [0]*len(dimnoapplymax)
for k in range(lennoapplymax):
# actually, this is just the end of the file output already written
ahunkin = []
for ivsdin, evsdin in enumerate(vsdin):
pos = 0
# e.g. pos = (9)+ 20*(10) + 50*50*20*(5)
for idimpos,edimpos in enumerate(dimnoapplypos):
curadd = np.mod(edimpos,adimnoapplyin[ivsdin][idimpos])
#e.g. if edimpos == (5): curadd = 50*50*20*(5)
if ((idimpos + 1) < len(arefdnoiterin[ivsdin])):
for i in range(idimpos + 1,len(arefdnoiterin[ivsdin])) :
                        # here we assume that the chunk dimensions are already in the order considered by adimnoiterin etc. (cfr. the preceding transposition in readicecubeps)
curadd = curadd * adimnoiterin[ivsdin][i]
# curaddout = curaddout * dimnoiteroutref[i]
pos = pos + curadd
ahunkin.append(dataicecubein[ivsdin][pos:(pos+alenfuncin[ivsdin])])
ahunkin[ivsdin].shape = adimfuncin[ivsdin]
# apply the function
ahunkout = func(*ahunkin)
if (type(ahunkout).__name__ == 'tuple'):
ahunkout = list(ahunkout)
if (type(ahunkout).__name__ != 'list'):
ahunkout = list([ahunkout])
for ihunkout in range(len(ahunkout)):
ahunkout[ihunkout] = np.array(ahunkout[ihunkout])
# e.g. posout = (9)+ 20*(10) + 50*50*20*(5)
posout = 0
for idimpos,edimpos in enumerate(dimnoapplypos):
curadd = np.mod(edimpos,adimnoapplyout[ihunkout][idimpos])
#e.g. if edimpos == (5): curadd = 50*50*20*(5)
if ((idimpos + 1) < len(arefdnoiterout[ihunkout])):
for i in range(idimpos + 1,len(arefdnoiterout[ihunkout])) :
# here, we assume that the idims are in the intended order (cfr. subsequent transposition in writeicecubeps)
curadd = curadd * adimnoiterout[ihunkout][i]
# curaddout = curaddout * dimnoiteroutref[i]
posout = posout + curadd
dataicecubeout[ihunkout][posout:(posout+alenfuncout[ihunkout])] = np.array(ahunkout[ihunkout].ravel(),dtype=vsdout[ihunkout]['dtype'][1])
# go to next data slice
dimnoapplypos[-1] = dimnoapplypos[-1] + 1
        # propagate the carry from the innermost position outwards (odometer-style)
        for idimidx in reversed(range(len(dimnoapplypos))):
            # # alternative (makes 'dimiter' redundant)
            # if dimiterpos[idimidx] == shp[refiter[idimidx]]:
            if dimnoapplypos[idimidx] == dimnoapplymax[idimidx]:
                if idimidx > 0:
                    dimnoapplypos[idimidx-1] = dimnoapplypos[idimidx-1] + 1
                    dimnoapplypos[idimidx] = 0
for idimsout in range(len(dataicecubeout)):
dataicecubeout[idimsout].shape = adimnoiterout[idimsout]
#print dataicecubeout[idimsout].shape
for ivsdout in range(len(vsdout)):
# print dataicecubeout[ivsdout].shape,vsdout[ivsdout]
# print 'ivsdout', ivsdout
writeicecubeps(\
vsdout[ivsdout]['fp'],
vsdout[ivsdout]['dims'],\
arefditerout[ivsdout],\
adimiterout[ivsdout],\
dimiterpos,\
arefdnoiterout[ivsdout],\
adimnoiterout[ivsdout],\
dataicecubeout[ivsdout],\
vsdout[ivsdout]['dtype'],\
vsdout[ivsdout]['itemsize'],\
vsdout[ivsdout]['voffset'],\
rwchunksizeout[ivsdout])
# go to next data slice
dimiterpos[-1] = dimiterpos[-1] + 1
    # propagate the carry from the innermost position outwards (odometer-style)
    for idimidx in reversed(range(len(dimiterpos))):
        # # alternative (makes 'dimiter' redundant)
        # if dimiterpos[idimidx] == shp[refiter[idimidx]]:
        if dimiterpos[idimidx] == dimitermax[idimidx]:
            if idimidx > 0:
                dimiterpos[idimidx-1] = dimiterpos[idimidx-1] + 1
                dimiterpos[idimidx] = 0
sys.stdout.write ('\b'*(len(str(j)+'/'+str(lenitermax))+1))
sys.stdout.write (str(j+1)+'/'+str(lenitermax))
for ivsdin,evsdin in enumerate(vsdin):
vsdin[ivsdin]['fp'].close()
for ivsdout,evsdout in enumerate(vsdout):
vsdout[ivsdout]['fp'].close()
import pylab as pl
fout.close()
# fin.close()
# fout = NetCDF.NetCDFFile(fnout,'r')
fout = netcdf.netcdf_file(fnout,'r')
fout.fp.seek(vsdout[0]['voffset'])
# fpointout.seek(vsdout[0]['voffset'])
test = np.fromfile(fout.fp,dtype=vsdout[0]['dtype'],count=reduce(mul,vsdout[0]['dims']))
test.shape = (40,340)
fig = pl.figure()
pl.imshow(test)
fig.show()
fig = pl.figure()
testdata.shape = vsdout[0]['dims']
pl.imshow(testdata[0,:,:,0,1])
fig.show()
fout.close()
fout = NetCDF.NetCDFFile(fnout,'r')
fig = pl.figure()
pl.imshow(fout.variables['QV'][0,:,:,0,0])
fig.show()
fout.close()
|
hendrikwout/pynacolada
|
trash/pynacolada-20131101-2.py
|
Python
|
gpl-3.0
| 36,260
|
[
"NetCDF"
] |
2fdc7774f88fd76856e728a9cde4037f230690b2fe03f819c8baecbb2b1cbe7f
|
#! /usr/bin/env python
import sys, os, re, subprocess
from . import anno_formatter
from . import vcf_formatter
from .fisher_info import FisherInfo
def read_hotspot_file(hotspot_file):
tmp_list = []
hIN = open(hotspot_file, 'r')
for line in hIN:
F = line.rstrip('\n').split('\t')
if len(F[3]) == 1 and len(F[4]) == 1 and F[3] in 'ACGT' and F[4] in 'ACGT':
key = F[0] +"\t"+ F[1] +"\t"+ F[2] +"\t"+ F[3] +"\t"+ F[4]
tmp_list.append(key)
hIN.close()
tmp_sorted_list = sorted(set(tmp_list), key=tmp_list.index)
tmp_dict = {}
for line in tmp_sorted_list:
F = line.split('\t')
key = F[0] +"\t"+ F[1] +"\t"+ F[2] +"\t"+ F[3]
if key in tmp_dict:
var = tmp_dict[key]
tmp_dict[key] = var +","+ F[4]
else:
tmp_dict[key] = F[4]
return tmp_dict
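# Illustrative example (hypothetical hotspot lines, not part of the module): two rows
#     chr1  12345  12345  A  C
#     chr1  12345  12345  A  T
# would be collapsed into {"chr1\t12345\t12345\tA": "C,T"}.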
def print_anno_header(is_rna, hOUT):
header_str = "Chr\tStart\tEnd\tRef\tAlt\tdepth_tumor\tvariantNum_tumor\tdepth_normal\tvariantNum_normal\tbases_tumor\tbases_normal\tA_C_G_T_tumor\tA_C_G_T_normal\tmisRate_tumor\tstrandRatio_tumor\tmisRate_normal\tstrandRatio_normal\tP-value(fisher)\tscore"
if is_rna:
header_str = header_str +"\tdepth_RNA\tvariant_RNA\tbases_RNA\ttmisRate_RNA"
print(header_str, file=hOUT)
def print_vcf_header(is_rna, sample1, sample2, sample_rna, ref_fa, ref_dict, hOUT):
print('##fileformat=VCFv4.2',file=hOUT)
# print info and format
print('##INFO=<ID=FP,Number=1,Type=Float,Description="Minus logarithm of the p-value by Fishers exact test">',file=hOUT)
print('##INFO=<ID=LS,Number=1,Type=Float,Description="LOD Score of Hotspot Call">',file=hOUT)
print('##FORMAT=<ID=DP,Number=1,Type=Integer,Description="Read Depth">',file=hOUT)
print('##FORMAT=<ID=DPF,Number=1,Type=Integer,Description="Read depth in the forward strand">',file=hOUT)
print('##FORMAT=<ID=DPR,Number=1,Type=Integer,Description="Read depth in the reverse strand">',file=hOUT)
print('##FORMAT=<ID=AD,Number=1,Type=Integer,Description="Allelic depth">',file=hOUT)
print('##FORMAT=<ID=ADF,Number=1,Type=Integer,Description="Allelic depth in the forward strand">',file=hOUT)
print('##FORMAT=<ID=ADR,Number=1,Type=Integer,Description="Allelic depth in the reverse strand">',file=hOUT)
print('##FORMAT=<ID=AF,Number=1,Type=Float,Description="Allele frequency">',file=hOUT)
print('##FORMAT=<ID=SB,Number=1,Type=Float,Description="Strand bias">',file=hOUT)
# print reference information
hIN = open(ref_dict)
for line in hIN:
F = line.rstrip('\n').split('\t')
if F[0] == '@SQ':
ID = F[1].replace('SN:','')
length = F[2].replace('LN:','')
print('##contig=<ID='+ID+',length='+length+'>',file=hOUT)
print('##reference='+ref_fa,file=hOUT)
# print_header
samples = sample1+"\t"+sample2
if is_rna:
samples = samples +"\t"+ sample_rna
print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t"+samples,file=hOUT)
def call(hotspot_file, output_file, bam_tumor, bam_control, mpileup_params, min_tumor_misrate, max_ctrl_misrate, bam_rna, min_lod_score, ratio_ctrl, is_anno, sample1, sample2, sample_rna, ref_fa):
is_rna = True if bam_rna else False
hOUT = open(output_file, 'w')
FNULL = open(os.devnull, 'w')
if is_anno:
print_anno_header(is_rna, hOUT)
else:
ref_name, ext = os.path.splitext(ref_fa)
print_vcf_header(is_rna, sample1, sample2, sample_rna, ref_fa, ref_name+".dict", hOUT)
m_params = mpileup_params.split(" ")
hotspot_dict = read_hotspot_file(hotspot_file)
for key in hotspot_dict:
F = key.split('\t')
mutReg = F[0] +":"+ F[1] +"-"+ F[2]
hotspot_ref = F[3]
hotspot_alts = hotspot_dict[key].split(',')
# TODO: error message
if F[1] != F[2]:
print("Invalid position in the hotspot database: "+ F[0] +"\t"+ F[1] +"\t"+ F[2], file=sys.stderr)
continue
# TODO: error message
for tmp_alt in hotspot_alts:
if tmp_alt not in "ACGTacgt":
                print("Invalid Alt in the mutations.bed: "+ F[0] +"\t"+ F[1] +"\t"+ F[2] +"\t"+ ",".join(hotspot_alts), file=sys.stderr)
mpileup_cmd = ["samtools", "mpileup", "-r", mutReg]
mpileup_cmd.extend(m_params)
seq_filename, seq_ext1 = os.path.splitext(bam_tumor)
seq_filename, seq_ext2 = os.path.splitext(bam_control)
if is_rna:
seq_filename, seq_ext3 = os.path.splitext(bam_rna)
if seq_ext1 == ".cram" or seq_ext2 == ".cram" or seq_ext3 == ".cram":
mpileup_cmd.extend(["-f",ref_fa])
mpileup_cmd.extend([bam_tumor, bam_control, bam_rna])
else:
if seq_ext1 == ".cram" or seq_ext2 == ".cram":
mpileup_cmd.extend(["-f",ref_fa])
mpileup_cmd.extend([bam_tumor, bam_control])
# print mpileup_cmd
pileup = subprocess.Popen(mpileup_cmd, stdout=subprocess.PIPE, stderr = FNULL)
end_of_pipe = pileup.stdout
for mpileup in end_of_pipe:
fi = FisherInfo()
mp_list = mpileup.decode().strip('\n').split( '\t' )
fi.set_ref(hotspot_ref)
fi.set_mpileup_data(mp_list)
for alt in hotspot_alts:
if fi.get_lod_score(alt) < min_lod_score: continue
if fi.get_tumor_misrate(alt) < min_tumor_misrate: continue
if fi.get_ctrl_misrate(alt) > max_ctrl_misrate: continue
if fi.get_ctrl_misrate(alt) > (fi.get_tumor_misrate(alt) * ratio_ctrl): continue
if is_anno:
record = anno_formatter.make_record(fi, alt, is_rna)
else:
record = vcf_formatter.make_record(fi, alt, is_rna)
print(record,file=hOUT)
FNULL.close()
hOUT.close()
|
ken0-1n/GenomonHotspotCall
|
hotspot_call/process_mutation.py
|
Python
|
gpl-3.0
| 5,933
|
[
"ADF"
] |
125cf3e1287515409cc7b1b65f1122ebf1af484d18d09ddd0bff5aad76113be3
|
# NeHe Tutorial Lesson: 41 - Volumetric Fog
#
# Ported to PyOpenGL 2.0 by Brian Leair 18 Jan 2004
#
# This code was created by Jeff Molofee 2000
#
# The port was based on the PyOpenGL tutorials and from
# PyOpenGLContext (tests/glprint.py)
#
# If you've found this code useful, feel free to let me know
# at (Brian Leair telcom_sage@yahoo.com).
#
# See original source and C based tutorial at http://nehe.gamedev.net
#
# Note:
# -----
# This code is not an ideal example of Pythonic coding or use of OO techniques.
# It is a simple and direct exposition of how to use the Open GL API in
# Python via the PyOpenGL package. It also uses GLUT, a high quality
# platform independent library. Due to using these APIs, this code is
# more like a C program using procedural based programming.
#
# To run this example you will need:
# Python - www.python.org (v 2.3 as of 1/2004)
# PyOpenGL - pyopengl.sourceforge.net (v 2.0.1.07 as of 1/2004)
# Numeric Python - (v.22 of "numpy" as of 1/2004) numpy.sourceforge.net
# Python Image Library - http://www.pythonware.com/products/pil/
#
# Make sure to get versions of Numeric, PyOpenGL, and PIL to match your
# version of python.
#
#
#
#
# Topics demonstrated in this tutorial:
# using PIL (Python Image Library) to load a texture from an image file
# (see doc - http://www.pythonware.com/library/pil/handbook/index.htm)
# accessing the extension FOG_COORDINATE_EXTENSION
# (see doc - http://pyopengl.sourceforge.net/documentation/opengl_diffs.html)
#
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import Image # PIL
import sys
from OpenGL.GL.EXT.fog_coord import *
# *********************** Globals ***********************
# Python 2.2 defines these directly
try:
True
except NameError:
True = 1==1
False = 1==0
# Some api in the chain is translating the keystrokes to this octal string
# so instead of saying: ESCAPE = 27, we use the following.
ESCAPE = '\033'
# Number of the glut window.
window = 0
fogColor = (0.6, 0.3, 0.0, 1.0); # // Fog Colour
camz = None # // Camera Z Depth
lastTickCount = 0.0
texture = None
def next_p2 (num):
""" If num isn't a power of 2, will return the next higher power of two """
rval = 1
while (rval<num):
rval <<= 1
return rval
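# Illustrative values (added note): next_p2 (1) == 1, next_p2 (3) == 4, next_p2 (640) == 1024.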
def BuildTexture (path):
""" // Load Image And Convert To A Texture
path can be a relative path, or a fully qualified path.
returns False if the requested image couldn't loaded as a texture
returns True and the texture ID if image was loaded
"""
# Catch exception here if image file couldn't be loaded
try:
        # Note, NYI, paths specified as URLs could be accessed using the Python urllib module
        # OleLoadPicturePath () supports url paths, but that capability isn't critical to this tutorial.
Picture = Image.open (path)
except:
print "Unable to open image file '%s'." % (path)
return False, 0
glMaxTexDim = glGetIntegerv (GL_MAX_TEXTURE_SIZE)
WidthPixels = Picture.size [0]
HeightPixels = Picture.size [1]
if ((WidthPixels > glMaxTexDim) or (HeightPixels > glMaxTexDim)):
# The image file is too large. Shrink it to fit within the texture dimensions
# support by our rendering context for a GL texture.
        # Note, Feel free to experiment and force a resize by placing a small val into
# glMaxTexDim (e.g. 32,64,128).
if (WidthPixels > HeightPixels):
            # Width is the dominant dimension.
resizeWidthPixels = glMaxTexDim
squash = float (resizeWidthPixels) / float (WidthPixels)
            resizeHeightPixels = int (HeightPixels * squash)
else:
resizeHeightPixels = glMaxTexDim
squash = float (resizeHeightPixels) / float (HeightPixels)
resizeWidthPixels = int (WidthPixels * squash)
else:
# // Resize Image To Closest Power Of Two
if (WidthPixels > HeightPixels):
            # Width is the dominant dimension.
resizeWidthPixels = next_p2 (WidthPixels)
squash = float (resizeWidthPixels) / float (WidthPixels)
            resizeHeightPixels = int (HeightPixels * squash)
else:
resizeHeightPixels = next_p2 (HeightPixels)
squash = float (resizeHeightPixels) / float (HeightPixels)
resizeWidthPixels = int (WidthPixels * squash)
#
# Resize the image to be used as a texture.
# The Python image library provides a handy method resize ().
# Several filtering options are available.
    # If you don't specify a filtering option, it will default to NEAREST
Picture = Picture.resize ((resizeWidthPixels, resizeHeightPixels), Image.BICUBIC)
lWidthPixels = next_p2 (resizeWidthPixels)
    lHeightPixels = next_p2 (resizeHeightPixels)
# Now we create an image that has the padding needed
newpicture = Image.new ("RGB", (lWidthPixels, lHeightPixels), (0, 0, 0))
newpicture.paste (Picture)
# Create a raw string from the image data - data will be unsigned bytes
# RGBpad, no stride (0), and first line is top of image (-1)
pBits = newpicture.tostring("raw", "RGBX", 0, -1)
# // Typical Texture Generation Using Data From The Bitmap
texid = glGenTextures(1); # // Create The Texture
glBindTexture(GL_TEXTURE_2D, texid); # // Bind To The Texture ID
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); # // (Modify This For The Type Of Filtering You Want)
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); # // (Modify This For The Type Of Filtering You Want)
# // (Modify This If You Want Mipmaps)
glTexImage2D(GL_TEXTURE_2D, 0, 3, lWidthPixels, lHeightPixels, 0, GL_RGBA, GL_UNSIGNED_BYTE, pBits);
# Cleanup (python actually handles all memory for you, so this isn't necessary)
# // Decrements IPicture Reference Count
Picture = None
newpicture = None
return True, texid # // Return True (All Good)
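# Hedged usage sketch (not part of the tutorial): callers unpack the returned pair, e.g.
#     ok, texid = BuildTexture ("Wall.bmp")
#     if not ok:
#         ... handle the missing texture ...
# The texture remains bound (glBindTexture) after the call, so it can be used right away.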
def Extension_Init ():
""" Determine if the fog coord extentsion is availble """
# After calling this, we will be able to invoke glFogCoordEXT ()
if (not glInitFogCoordEXT ()):
        print "Help! No GL_EXT_FogCoord"
sys.exit(1)
return False
return True
# // Any GL Init Code & User Initialiazation Goes Here
def InitGL(Width, Height): # We call this right after our OpenGL window is created.
global fogColor
global camz
if (not Extension_Init ()): # // Check And Enable Fog Extension If Available
return False; # // Return False If Extension Not Supported
    # BuildTexture returns a (status, texture_id) pair, so unpack it and test the status
    loaded, texid = BuildTexture("Wall.bmp") # // Load The Wall Texture
    if (not loaded): # // Return False If Loading Failed
        return False;
glEnable(GL_TEXTURE_2D); # // Enable Texture Mapping
glClearColor (0.0, 0.0, 0.0, 0.5); # // Black Background
glClearDepth (1.0); # // Depth Buffer Setup
glDepthFunc (GL_LEQUAL); # // The Type Of Depth Testing
glEnable (GL_DEPTH_TEST); # // Enable Depth Testing
glShadeModel (GL_SMOOTH); # // Select Smooth Shading
glHint (GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); # // Set Perspective Calculations To Most Accurate
# // Set Up Fog
glEnable(GL_FOG); # // Enable Fog
glFogi(GL_FOG_MODE, GL_LINEAR); # // Fog Fade Is Linear
glFogfv(GL_FOG_COLOR, fogColor); # // Set The Fog Color
glFogf(GL_FOG_START, 0.0); # // Set The Fog Start
glFogf(GL_FOG_END, 1.0); # // Set The Fog End
glHint(GL_FOG_HINT, GL_NICEST); # // Per-Pixel Fog Calculation
glFogi(GL_FOG_COORDINATE_SOURCE_EXT, GL_FOG_COORDINATE_EXT) # // Set Fog Based On Vertice Coordinates
camz = -19.0; # // Set Camera Z Position To -19.0f
return True; # // Return TRUE (Initialization Successful)
# The function called when our window is resized (which shouldn't happen if you enable fullscreen, below)
def ReSizeGLScene(Width, Height):
if Height == 0: # Prevent A Divide By Zero If The Window Is Too Small
Height = 1
glViewport(0, 0, Width, Height) # Reset The Current Viewport And Perspective Transformation
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
# // field of view, aspect ratio, near and far
# This will squash and stretch our objects as the window is resized.
gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
# The main drawing function.
def DrawGLScene():
global camz
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); # // Clear Screen And Depth Buffer
glLoadIdentity (); # // Reset The Modelview Matrix
glTranslatef(0.0, 0.0, camz); # // Move To Our Camera Z Position
glBegin(GL_QUADS); # // Back Wall
glFogCoordfEXT( 1.0); glTexCoord2f(0.0, 0.0); glVertex3f(-2.5,-2.5,-15.0);
glFogCoordfEXT( 1.0); glTexCoord2f(1.0, 0.0); glVertex3f( 2.5,-2.5,-15.0);
glFogCoordfEXT( 1.0); glTexCoord2f(1.0, 1.0); glVertex3f( 2.5, 2.5,-15.0);
glFogCoordfEXT( 1.0); glTexCoord2f(0.0, 1.0); glVertex3f(-2.5, 2.5,-15.0);
# PyOpenGL 2.0.1.07 has a bug. Swig generated code for FogCoordfExt ()
# uses wrong error check macro. Macro falsely sets exception due to
# call during glBegin (). Fixed in later versions.
try:
glEnd();
except:
pass
glBegin(GL_QUADS); # // Floor
glFogCoordfEXT( 1.0); glTexCoord2f(0.0, 0.0); glVertex3f(-2.5,-2.5,-15.0);
glFogCoordfEXT( 1.0); glTexCoord2f(1.0, 0.0); glVertex3f( 2.5,-2.5,-15.0);
glFogCoordfEXT( 0.0); glTexCoord2f(1.0, 1.0); glVertex3f( 2.5,-2.5, 15.0);
glFogCoordfEXT( 0.0); glTexCoord2f(0.0, 1.0); glVertex3f(-2.5,-2.5, 15.0);
try:
glEnd();
except:
pass
glBegin(GL_QUADS); # // Roof
glFogCoordfEXT( 1.0); glTexCoord2f(0.0, 0.0); glVertex3f(-2.5, 2.5,-15.0);
glFogCoordfEXT( 1.0); glTexCoord2f(1.0, 0.0); glVertex3f( 2.5, 2.5,-15.0);
glFogCoordfEXT( 0.0); glTexCoord2f(1.0, 1.0); glVertex3f( 2.5, 2.5, 15.0);
glFogCoordfEXT( 0.0); glTexCoord2f(0.0, 1.0); glVertex3f(-2.5, 2.5, 15.0);
try:
glEnd();
except:
pass
glBegin(GL_QUADS); # // Right Wall
glFogCoordfEXT( 0.0); glTexCoord2f(0.0, 0.0); glVertex3f( 2.5,-2.5, 15.0);
glFogCoordfEXT( 0.0); glTexCoord2f(0.0, 1.0); glVertex3f( 2.5, 2.5, 15.0);
glFogCoordfEXT( 1.0); glTexCoord2f(1.0, 1.0); glVertex3f( 2.5, 2.5,-15.0);
glFogCoordfEXT( 1.0); glTexCoord2f(1.0, 0.0); glVertex3f( 2.5,-2.5,-15.0);
try:
glEnd();
except:
pass
glBegin(GL_QUADS); # // Left Wall
glFogCoordfEXT( 0.0); glTexCoord2f(0.0, 0.0); glVertex3f(-2.5,-2.5, 15.0);
glFogCoordfEXT( 0.0); glTexCoord2f(0.0, 1.0); glVertex3f(-2.5, 2.5, 15.0);
glFogCoordfEXT( 1.0); glTexCoord2f(1.0, 1.0); glVertex3f(-2.5, 2.5,-15.0);
glFogCoordfEXT( 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(-2.5,-2.5,-15.0);
try:
glEnd();
except:
pass
glutSwapBuffers() # // Flush The GL Rendering Pipeline
return True
# The function called whenever a key is pressed. Note the use of Python tuples to pass in: (key, x, y)
def keyPressed(*args):
global window
global camz
global lastTickCount
tickCount = glutGet (GLUT_ELAPSED_TIME)
milliseconds = (tickCount - lastTickCount)
lastTickCount = tickCount
if (milliseconds > 200):
lastTickCount = tickCount
milliseconds = 20
# If escape is pressed, kill everything.
if args[0] == ESCAPE:
sys.exit ()
if ((args[0] == GLUT_KEY_UP) and (camz < 14.0)):
camz += milliseconds / 100.0 # // Move Object Closer (Move Forwards Through Hallway)
if (args[0] == GLUT_KEY_DOWN and camz > -19.0):
camz -= milliseconds / 100.0 # // Move Object Closer (Move Backwards Through Hallway)
return
def main():
global window, lastTickCount
# pass arguments to init
glutInit(sys.argv)
# Select type of Display mode:
# Double buffer
# RGBA color
# Alpha components supported
# Depth buffer
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
# get a 640 x 480 window
glutInitWindowSize(640, 480)
# the window starts at the upper left corner of the screen
glutInitWindowPosition(0, 0)
# Okay, like the C version we retain the window id to use when closing, but for those of you new
# to Python, remember this assignment would make the variable local and not global
# if it weren't for the global declaration at the start of main.
window = glutCreateWindow("NeHe's Volumetric Fog & IPicture Image Loading Tutorial")
# Register the drawing function with glut, BUT in Python land, at least using PyOpenGL, we need to
# set the function pointer and invoke a function to actually register the callback, otherwise it
# would be very much like the C version of the code.
glutDisplayFunc(DrawGLScene)
# Uncomment this line to get full screen.
#glutFullScreen()
# When we are doing nothing, redraw the scene.
glutIdleFunc(DrawGLScene)
# Register the function called when our window is resized.
glutReshapeFunc(ReSizeGLScene)
# Register the function called when the keyboard is pressed.
# The call setup glutSpecialFunc () is needed to receive
# "keyboard function or directional keys."
glutKeyboardFunc(keyPressed)
glutSpecialFunc(keyPressed)
# We've told Glut the type of window we want, and we've told glut about
# various functions that we want invoked (idle, resizing, keyboard events).
    # Glut has done the hard work of building up the windows DC context and
# tying in a rendering context, so we are ready to start making immediate mode
# GL calls.
    # Call to perform initial GL setup (the clear colors, enabling modes, and most relevant -
    # load the wall texture and set up the volumetric fog).
InitGL(640, 480)
# Start Event Processing Engine
glutMainLoop()
# Print message to console, and kick off the main to get it rolling.
if __name__ == "__main__":
print "Hit ESC key to quit."
main()
|
mgood7123/UPM
|
Sources/PyOpenGL-Demo-3.0.1b1/PyOpenGL-Demo/NeHe/lesson41.py
|
Python
|
gpl-3.0
| 13,490
|
[
"Brian"
] |
a8390ee9211f6d427c7fbce7c09083c33de1200db8a22a77e995716557205c2f
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Union
from typing import Callable
from kivy.uix.widget import Widget
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.popup import Popup
from kivy.uix.textinput import TextInput
from kivy.logger import Logger
from kivy.clock import Clock
from ORCA.ui.BasePopup import cBasePopup,SettingSpacer
from ORCA.vars.Replace import ReplaceVars
from ORCA.vars.Access import SetVar
from ORCA.vars.Access import GetVar
from ORCA.widgets.core.MultiLineButton import cMultiLineButton
import ORCA.Globals as Globals
__all__ = ['ShowKeyBoard','cInputKeyboard']
class cInputKeyboard(cBasePopup):
""" Shows an Input keyboard """
def __init__(self):
super(cInputKeyboard, self).__init__()
self.oTextInput:Union[TextInput,None] = None
self.oButtonCancel:Union[cMultiLineButton,None] = None
self.uDestVar:str = u''
self.oFktNotify:Union[Callable,None] = None
# noinspection PyUnusedLocal
def ScheduledSetFocus(self, *largs) -> None:
""" sets the focus to the input field, called by the clock, to avoid timing problems """
self.oTextInput.focus=True
def ShowKeyBoard(self,*,uDestVar:str,oFktNotify:Union[Callable,None], uTitle:str):
""" create popup layout """
uText:str
oBtn: cMultiLineButton
self.oFktNotify = oFktNotify
self.uDestVar = uDestVar
oContent:BoxLayout = BoxLayout(orientation='vertical', spacing='5dp')
self.oPopup = Popup(title=uTitle,content=oContent, size_hint=(None, None), size=(Globals.iAppWidth*0.9,Globals.iAppHeight*0.35),auto_dismiss=False, pos_hint={'x': .05, 'top': 1})
uText=GetVar(uVarName = self.uDestVar)
if uText is None:
uText=u''
Logger.debug("InputKeyboard: Preassigning Value [%s]" % uText)
self.oTextInput = TextInput(text=uText,multiline=False, size_hint_y=None, height='30dp')
self.oTextInput.bind(on_text_validate=self.On_Enter)
# construct the content, widget are used as a spacer
oContent.add_widget(Widget())
oContent.add_widget(self.oTextInput)
oContent.add_widget(Widget())
oContent.add_widget(SettingSpacer())
# 2 buttons are created for accept or cancel the current value
oBtnlayout:BoxLayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
oBtn = cMultiLineButton(text=ReplaceVars('$lvar(5008)'), halign='center', valign='middle')
oBtn.bind (on_release=self.On_Enter)
oBtnlayout.add_widget(oBtn)
self.oButtonCancel = cMultiLineButton(text=ReplaceVars('$lvar(5009)'), halign='center', valign='middle')
self.oButtonCancel.bind(on_release=self.On_Cancel)
oBtnlayout.add_widget(self.oButtonCancel)
oContent.add_widget(oBtnlayout)
#if we do setup the focus before the popup is shown, no vkeyboard is shown
self.oPopup.bind(on_open=self.ScheduledSetFocus)
self.oPopup.open()
# noinspection PyUnusedLocal
def On_Cancel(self,oMultiLineButton:cMultiLineButton) -> None:
""" Reacts to pressing the cancel button """
if self.oTextInput:
self.oTextInput.focus = False
cBasePopup.ClosePopup(self)
def ClosePopup(self) -> None:
"""will be called by keyhandler, if esc has been pressed"""
self.On_Cancel(self.oButtonCancel)
# noinspection PyUnusedLocal
def On_Enter(self,oMultiLineButton:cMultiLineButton) -> None:
"""
Will be Called by Input Popup, if the users press Enter
        Closes the Popup
Hides Keyboard
Passes Input string to destination variable
Call Notifier Function
"""
SetVar(uVarName = self.uDestVar, oVarValue = self.oTextInput.text)
self.ClosePopup()
# schedule action, after PopUp disappears
if self.oFktNotify:
Clock.schedule_once(self.fDoNotify, 0)
#self.fDoNotify(self.oTextInput.text)
# noinspection PyUnusedLocal
def fDoNotify(self, *largs) -> None:
""" helper """
self.oFktNotify(self.oTextInput.text)
def ShowKeyBoard(*,uDestVar:str,oFktNotify:Union[Callable,None], uTitle:str = "*") -> cInputKeyboard:
""" convenience abstraction to show the keyboard class """
if uTitle == "*":
uTitle = ReplaceVars("$lvar(1072)")
oInputKeyboard:cInputKeyboard = cInputKeyboard()
oInputKeyboard.ShowKeyBoard(uDestVar=uDestVar,oFktNotify=oFktNotify, uTitle=uTitle)
return oInputKeyboard
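# Hedged usage sketch (not part of the module, names are hypothetical): a caller would
# typically do
#     ShowKeyBoard(uDestVar=u'MYVAR', oFktNotify=MyCallback, uTitle=u'Enter value')
# and, once the user confirms, the entered text is stored via SetVar for 'MYVAR' and is
# also passed to MyCallback(text).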
|
thica/ORCA-Remote
|
src/ORCA/ui/InputKeyboard.py
|
Python
|
gpl-3.0
| 5,771
|
[
"ORCA"
] |
f3fc40867f88611625b07936ac1f1d5339d9682342ed24b33970ae2bc2dddafa
|
#
# Copyright (C) 2008, Brian Tanner
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Revision: 1011 $
# $Date: 2009-02-12 00:29:54 -0500 (Thu, 12 Feb 2009) $
# $Author: brian@tannerpages.com $
# $HeadURL: http://rl-library.googlecode.com/svn/trunk/projects/packages/examples/mines-sarsa-python/sample_sarsa_agent.py $
import random
import sys
import copy
import pickle
from rlglue.agent.Agent import Agent
from rlglue.agent import AgentLoader as AgentLoader
from rlglue.types import Action
from rlglue.types import Observation
from rlglue.utils import TaskSpecVRLGLUE3
from random import Random
# This is a very simple Sarsa agent for discrete-action, discrete-state
# environments. It uses epsilon-greedy exploration.
#
# We've made a decision to store the previous action and observation in
# their raw form, as structures. This code could be simplified and you
# could store them just as ints.
# TO USE THIS Agent [order doesn't matter]
# NOTE: I'm assuming the Python codec is installed and is in your Python path
# - Start the rl_glue executable socket server on your computer
# - Run the SampleMinesEnvironment and SampleExperiment from this or a
# different codec (Matlab, Python, Java, C, Lisp should all be fine)
# - Start this agent like:
# $> python sample_sarsa_agent.py
class sarsa_agent(Agent):
randGenerator=Random()
lastAction=Action()
lastObservation=Observation()
sarsa_stepsize = 0.1
sarsa_epsilon = 0.1
sarsa_gamma = 1.0
numStates = 0
numActions = 0
value_function = None
policyFrozen=False
exploringFrozen=False
def agent_init(self,taskSpecString):
TaskSpec = TaskSpecVRLGLUE3.TaskSpecParser(taskSpecString)
if TaskSpec.valid:
assert len(TaskSpec.getIntObservations())==1, "expecting 1-dimensional discrete observations"
assert len(TaskSpec.getDoubleObservations())==0, "expecting no continuous observations"
assert not TaskSpec.isSpecial(TaskSpec.getIntObservations()[0][0]), " expecting min observation to be a number not a special value"
assert not TaskSpec.isSpecial(TaskSpec.getIntObservations()[0][1]), " expecting max observation to be a number not a special value"
self.numStates=TaskSpec.getIntObservations()[0][1]+1;
assert len(TaskSpec.getIntActions())==1, "expecting 1-dimensional discrete actions"
assert len(TaskSpec.getDoubleActions())==0, "expecting no continuous actions"
assert not TaskSpec.isSpecial(TaskSpec.getIntActions()[0][0]), " expecting min action to be a number not a special value"
assert not TaskSpec.isSpecial(TaskSpec.getIntActions()[0][1]), " expecting max action to be a number not a special value"
self.numActions=TaskSpec.getIntActions()[0][1]+1;
self.value_function=[self.numActions*[0.0] for i in range(self.numStates)]
else:
print "Task Spec could not be parsed: "+taskSpecString;
self.lastAction=Action()
self.lastObservation=Observation()
def egreedy(self, state):
maxIndex=0
a=1
if not self.exploringFrozen and self.randGenerator.random()<self.sarsa_epsilon:
return self.randGenerator.randint(0,self.numActions-1)
return self.value_function[state].index(max(self.value_function[state]))
def agent_start(self,observation):
theState=observation.intArray[0]
thisIntAction=self.egreedy(theState)
returnAction=Action()
returnAction.intArray=[thisIntAction]
self.lastAction=copy.deepcopy(returnAction)
self.lastObservation=copy.deepcopy(observation)
return returnAction
def agent_step(self,reward, observation):
newState=observation.intArray[0]
lastState=self.lastObservation.intArray[0]
lastAction=self.lastAction.intArray[0]
newIntAction=self.egreedy(newState)
Q_sa=self.value_function[lastState][lastAction]
Q_sprime_aprime=self.value_function[newState][newIntAction]
new_Q_sa=Q_sa + self.sarsa_stepsize * (reward + self.sarsa_gamma * Q_sprime_aprime - Q_sa)
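        # Added note: this is the standard SARSA temporal-difference update,
        #   Q(s,a) <- Q(s,a) + alpha * (r + gamma * Q(s',a') - Q(s,a)),
        # with alpha = sarsa_stepsize and gamma = sarsa_gamma.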
if not self.policyFrozen:
self.value_function[lastState][lastAction]=new_Q_sa
returnAction=Action()
returnAction.intArray=[newIntAction]
self.lastAction=copy.deepcopy(returnAction)
self.lastObservation=copy.deepcopy(observation)
return returnAction
def agent_end(self,reward):
lastState=self.lastObservation.intArray[0]
lastAction=self.lastAction.intArray[0]
Q_sa=self.value_function[lastState][lastAction]
new_Q_sa=Q_sa + self.sarsa_stepsize * (reward - Q_sa)
if not self.policyFrozen:
self.value_function[lastState][lastAction]=new_Q_sa
def agent_cleanup(self):
pass
def save_value_function(self, fileName):
theFile = open(fileName, "w")
pickle.dump(self.value_function, theFile)
theFile.close()
def load_value_function(self, fileName):
theFile = open(fileName, "r")
self.value_function=pickle.load(theFile)
theFile.close()
def agent_message(self,inMessage):
# Message Description
# 'freeze learning'
# Action: Set flag to stop updating policy
#
if inMessage.startswith("freeze learning"):
self.policyFrozen=True
return "message understood, policy frozen"
# Message Description
# unfreeze learning
# Action: Set flag to resume updating policy
#
if inMessage.startswith("unfreeze learning"):
self.policyFrozen=False
return "message understood, policy unfrozen"
#Message Description
# freeze exploring
# Action: Set flag to stop exploring (greedy actions only)
#
if inMessage.startswith("freeze exploring"):
self.exploringFrozen=True
return "message understood, exploring frozen"
#Message Description
# unfreeze exploring
# Action: Set flag to resume exploring (e-greedy actions)
#
if inMessage.startswith("unfreeze exploring"):
self.exploringFrozen=False
            return "message understood, exploring unfrozen"
#Message Description
# save_policy FILENAME
# Action: Save current value function in binary format to
# file called FILENAME
#
if inMessage.startswith("save_policy"):
splitString=inMessage.split(" ");
self.save_value_function(splitString[1]);
print "Saved.";
return "message understood, saving policy"
#Message Description
# load_policy FILENAME
# Action: Load value function in binary format from
# file called FILENAME
#
if inMessage.startswith("load_policy"):
splitString=inMessage.split(" ")
self.load_value_function(splitString[1])
print "Loaded."
return "message understood, loading policy"
return "SampleSarsaAgent(Python) does not understand your message."
if __name__=="__main__":
AgentLoader.loadAgent(sarsa_agent())
|
shiwalimohan/RLInfiniteMario
|
system/codecs/Python/examples/mines-sarsa-example/sample_sarsa_agent.py
|
Python
|
gpl-2.0
| 7,076
|
[
"Brian"
] |
b93b7ce768c8e1632bb38f10cba7674c626a9882f9a4c5f57926c9a2bef59653
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import calendar
from datetime import timedelta ,datetime
from time import strptime
from glob import glob
import re
import string
import os
from ocw.dataset import Dataset
import ocw.utils as utils
import netCDF4
import numpy
import numpy.ma as ma
LAT_NAMES = ['x', 'rlat', 'rlats', 'lat', 'lats', 'latitude', 'latitudes']
LON_NAMES = ['y', 'rlon', 'rlons', 'lon', 'lons', 'longitude', 'longitudes']
TIME_NAMES = ['time', 'times', 'date', 'dates', 'julian']
def _get_netcdf_variable_name(valid_var_names, netcdf, netcdf_var):
''' Determine if one of a set of variable names are in a NetCDF Dataset.
Looks for an occurrence of a valid_var_name in the NetCDF variable data.
This is useful for automatically determining the names of the lat, lon,
and time variable names inside of a dataset object.
:param valid_var_names: The possible variable names to search for in
the netCDF object.
:type valid_var_names: List of Strings
:param netcdf: The netCDF Dataset object in which to check for
valid_var_names.
:type netcdf: netcdf4.Dataset
:param netcdf_var: The relevant variable name to search over in the
netcdf object. This is used to narrow down the search for valid
variable names by first checking the desired variable's dimension
values for one or more of the valid variable names.
:returns: The variable from valid_var_names that it locates in
the netCDF object.
:raises ValueError: When unable to locate a single matching variable
name in the NetCDF Dataset from the supplied list of valid variable
names.
'''
# Check for valid variable names in netCDF variable dimensions
dimensions = netcdf.variables[netcdf_var].dimensions
dims_lower = [dim.encode().lower() for dim in dimensions]
intersect = set(valid_var_names).intersection(dims_lower)
if len(intersect) == 1:
# Retrieve the name of the dimension where we found the matching
# variable name
index = dims_lower.index(intersect.pop())
dimension_name = dimensions[index].encode()
# Locate all of the variables that share the dimension that we matched
# earlier. If the dimension's name matches then that variable is
# potentially what we want to return to the user.
possible_vars = []
for var in netcdf.variables.keys():
var_dimensions = netcdf.variables[var].dimensions
            # Skip any dimensions that are > 1D
if len(var_dimensions) != 1:
continue
if var_dimensions[0].encode() == dimension_name:
possible_vars.append(var)
# If there are multiple variables with matching dimension names then we
        # aren't able to determine the correct variable name using the
# variable dimensions. We need to try a different approach. Otherwise,
# we're done!
if len(possible_vars) == 1:
return possible_vars[0]
# Check for valid variable names in netCDF variable names
variables = netcdf.variables.keys()
vars_lower = [var.encode().lower() for var in variables]
intersect = set(valid_var_names).intersection(vars_lower)
if len(intersect) == 1:
index = vars_lower.index(intersect.pop())
return variables[index]
# If we couldn't locate a single matching valid variable then we're unable
# to automatically determine the variable names for the user.
error = (
"Unable to locate a single matching variable name from the "
"supplied list of valid variable names. "
)
raise ValueError(error)
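# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of how _get_netcdf_variable_name() might be used to
# resolve the latitude/longitude/time variable names for a data variable. The
# file name 'example.nc' and the variable name 'tas' are hypothetical.
def _example_resolve_coordinate_names(path='example.nc', data_var='tas'):
    nc = netCDF4.Dataset(path, mode='r')
    lat = _get_netcdf_variable_name(LAT_NAMES, nc, data_var)
    lon = _get_netcdf_variable_name(LON_NAMES, nc, data_var)
    time = _get_netcdf_variable_name(TIME_NAMES, nc, data_var)
    nc.close()
    return lat, lon, time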
def load_WRF_2d_files(file_path,
filename_pattern,
variable_name,
name=''):
''' Load multiple WRF (or nuWRF) original output files containing 2D fields such as precipitation and surface variables into a Dataset.
The dataset can be spatially subset.
:param file_path: Directory to the NetCDF file to load.
:type file_path: :mod:`string`
    :param filename_pattern: List of filename patterns matching the NetCDF files to load.
:type filename_pattern: :list:`string`
:param variable_name: The variable name to load from the NetCDF file.
:type variable_name: :mod:`string`
:param name: (Optional) A name for the loaded dataset.
:type name: :mod:`string`
:returns: An OCW Dataset object with the requested variable's data from
the NetCDF file.
:rtype: :class:`dataset.Dataset`
:raises ValueError:
'''
WRF_files = []
for pattern in filename_pattern:
WRF_files.extend(glob(file_path + pattern))
WRF_files.sort()
file_object_first = netCDF4.Dataset(WRF_files[0])
lats = file_object_first.variables['XLAT'][0,:]
lons = file_object_first.variables['XLONG'][0,:]
times = []
for ifile, file in enumerate(WRF_files):
file_object = netCDF4.Dataset(file)
time_struct_parsed = strptime(file[-19:],"%Y-%m-%d_%H:%M:%S")
for ihour in numpy.arange(24):
times.append(datetime(*time_struct_parsed[:6]) + timedelta(hours=ihour))
        # Read the variable once per file and accumulate it along the time axis
        values_current = file_object.variables[variable_name][:]
        if ifile == 0:
            values = values_current
        else:
            values = numpy.concatenate((values, values_current))
file_object.close()
times = numpy.array(times)
return Dataset(lats, lons, times, values, variable_name, name=name)
def load_file(file_path,
variable_name,
variable_unit = None,
elevation_index=0,
name='',
lat_name=None,
lon_name=None,
time_name=None):
''' Load a NetCDF file into a Dataset.
:param file_path: Path to the NetCDF file to load.
:type file_path: :mod:`string`
:param variable_name: The variable name to load from the NetCDF file.
:type variable_name: :mod:`string`
:param variable_unit: (Optional) The variable unit to load from the NetCDF file.
:type variable_unit: :mod:`string`
:param elevation_index: (Optional) The elevation index for which data should
        be returned. Climate data is often 4-dimensional. Some
        datasets will have readings at different height/elevation levels. OCW
expects 3D data so a single layer needs to be stripped out when loading.
By default, the first elevation layer is used. If desired you may
specify the elevation value to use.
:type elevation_index: :class:`int`
:param name: (Optional) A name for the loaded dataset.
:type name: :mod:`string`
:param lat_name: (Optional) The latitude variable name to extract from the
dataset.
:type lat_name: :mod:`string`
:param lon_name: (Optional) The longitude variable name to extract from the
dataset.
:type lon_name: :mod:`string`
:param time_name: (Optional) The time variable name to extract from the
dataset.
:type time_name: :mod:`string`
:returns: An OCW Dataset object with the requested variable's data from
the NetCDF file.
:rtype: :class:`dataset.Dataset`
    :raises ValueError: When the specified file path cannot be loaded by netCDF4
or when the lat/lon/time variable name cannot be determined
automatically.
'''
try:
netcdf = netCDF4.Dataset(file_path, mode='r')
except RuntimeError:
err = "Dataset filepath is invalid. Please ensure it is correct."
raise ValueError(err)
except:
err = (
"The given file cannot be loaded. Please ensure that it is a valid "
"NetCDF file. If problems persist, report them to the project's "
"mailing list."
)
raise ValueError(err)
if not lat_name:
lat_name = _get_netcdf_variable_name(LAT_NAMES, netcdf, variable_name)
if not lon_name:
lon_name = _get_netcdf_variable_name(LON_NAMES, netcdf, variable_name)
if not time_name:
time_name = _get_netcdf_variable_name(TIME_NAMES, netcdf, variable_name)
lats = netcdf.variables[lat_name][:]
lons = netcdf.variables[lon_name][:]
time_raw_values = netcdf.variables[time_name][:]
times = utils.decode_time_values(netcdf, time_name)
times = numpy.array(times)
values = ma.array(netcdf.variables[variable_name][:])
    if variable_unit is None:
        variable_unit = netcdf.variables[variable_name].units
# If the values are 4D then we need to strip out the elevation index
if len(values.shape) == 4:
# Determine the set of possible elevation dimension names excluding
# the list of names that are used for the lat, lon, and time values.
dims = netcdf.variables[variable_name].dimensions
dimension_names = [dim_name.encode() for dim_name in dims]
lat_lon_time_var_names = [lat_name, lon_name, time_name]
elev_names = set(dimension_names) - set(lat_lon_time_var_names)
# Grab the index value for the elevation values
level_index = dimension_names.index(elev_names.pop())
# Strip out the elevation values so we're left with a 3D array.
        if level_index == 0:
            values = values[elevation_index, :, :, :]
        elif level_index == 1:
            values = values[:, elevation_index, :, :]
        elif level_index == 2:
            values = values[:, :, elevation_index, :]
        else:
            values = values[:, :, :, elevation_index]
origin = {
'source': 'local',
'path': file_path,
'lat_name': lat_name,
'lon_name': lon_name,
'time_name': time_name
}
if elevation_index != 0: origin['elevation_index'] = elevation_index
return Dataset(lats, lons, times, values, variable=variable_name,
units=variable_unit, name=name, origin=origin)
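# --- Illustrative sketch (editor's addition) ---
# A hedged example of calling load_file(); the path 'data/tas_example.nc' and
# the variable name 'tas' are assumptions, not files shipped with OCW.
def _example_load_single_file():
    ds = load_file('data/tas_example.nc', 'tas', name='example run')
    # ds.values is typically a masked array shaped (time, lat, lon) once the
    # optional elevation layer has been stripped out.
    return ds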
def load_multiple_files(file_path,
filename_pattern,
variable_name,
dataset_name='ref',
variable_unit=None,
lat_name=None,
lon_name=None,
time_name=None):
    ''' Load multiple NetCDF files matching a common filename pattern and return a list of OCW datasets.
:param file_path: directory name where the NetCDF files to load are stored.
:type file_path: :mod:`string`
:param filename_pattern: common file name patterns
:type filename_pattern: :list:`string`
    :param dataset_name: (Optional) Name of the dataset when reading a single file.
    :type dataset_name: :mod:`string`
:param variable_name: The variable name to load from the NetCDF file.
:type variable_name: :mod:`string`
:param variable_unit: (Optional) The variable unit to load from the NetCDF file.
:type variable_unit: :mod:`string`
:param lat_name: (Optional) The latitude variable name to extract from the
dataset.
:type lat_name: :mod:`string`
:param lon_name: (Optional) The longitude variable name to extract from the
dataset.
:type lon_name: :mod:`string`
:param time_name: (Optional) The time variable name to extract from the
dataset.
:type time_name: :mod:`string`
    :returns: A list of OCW Dataset objects.
    :rtype: :class:`list`
'''
data_filenames = []
for pattern in filename_pattern:
data_filenames.extend(glob(file_path + pattern))
data_filenames.sort()
# number of files
ndata = len(data_filenames)
if ndata == 1:
data_name = [dataset_name]
else:
data_name = []
data_filenames_reversed = []
for element in data_filenames:
data_filenames_reversed.append(element[::-1])
prefix = os.path.commonprefix(data_filenames)
postfix = os.path.commonprefix(data_filenames_reversed)[::-1]
for element in data_filenames:
data_name.append(element.replace(prefix,'').replace(postfix,''))
datasets = []
for ifile,filename in enumerate(data_filenames):
datasets.append(load_file(filename, variable_name, variable_unit, name=data_name[ifile],
lat_name=lat_name, lon_name=lon_name, time_name=time_name))
return datasets
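# --- Illustrative sketch (editor's addition) ---
# Loading several files that share a common prefix; the directory and pattern
# below are hypothetical. Dataset names are derived from the part of each
# filename that is not shared by all matched files.
def _example_load_many_files():
    datasets = load_multiple_files('data/', ['tas_*.nc'], 'tas')
    return [d.name for d in datasets]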
|
riverma/climate
|
ocw/data_source/local.py
|
Python
|
apache-2.0
| 13,561
|
[
"NetCDF"
] |
abbb8da465a2fe18ce9323b392cc314706dde340af8a53e422943832e57a59de
|
# (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
DOCUMENTATION:
cache: jsonfile
short_description: JSON formatted files.
description:
- This cache uses JSON formatted, per host, files saved to the filesystem.
version_added: "1.9"
author: Brian Coca (@bcoca)
'''
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import codecs
try:
import simplejson as json
except ImportError:
import json
from ansible.parsing.utils.jsonify import jsonify
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
"""
A caching module backed by json files.
"""
def _load(self, filepath):
# Valid JSON is always UTF-8 encoded.
with codecs.open(filepath, 'r', encoding='utf-8') as f:
return json.load(f)
def _dump(self, value, filepath):
with codecs.open(filepath, 'w', encoding='utf-8') as f:
f.write(jsonify(value, format=True))
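# --- Illustrative sketch (editor's addition) ---
# The cache simply round-trips Python structures through UTF-8 JSON files on
# disk. This standalone helper mirrors that behaviour using the stdlib json
# module as a stand-in for Ansible's jsonify; the path below is hypothetical.
def _example_round_trip(path='/tmp/example_host_facts.json'):
    data = {'ansible_hostname': 'example'}
    with codecs.open(path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(data, indent=4))
    with codecs.open(path, 'r', encoding='utf-8') as f:
        return json.load(f)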
|
Tatsh-ansible/ansible
|
lib/ansible/plugins/cache/jsonfile.py
|
Python
|
gpl-3.0
| 1,679
|
[
"Brian"
] |
0cc021b587a4c3e9a4bd52bd2399c3bd152bcc225cfceb63e764a20e512dd7e6
|
# Version: 0.16+dev
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.3, 3.4, 3.5, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601-like format.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
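As a rough illustration (the values below are hypothetical, not taken from
any real project), the dictionary returned by `get_versions()` could look
like:

    {"version": "0.11+2.g1076c97.dirty",
     "full-revisionid": "1076c978a8d3cfc70f408fe5974aa6c092c949ac",
     "dirty": True,
     "error": None,
     "date": "2016-05-01 12:00:00 -0400"}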
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See details.md in the Versioneer source tree for
descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This section
details the most significant ones. More can be found on the Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
  provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when they
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
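# --- Illustrative sketch (editor's addition) ---
# get_config_from_root() expects a setup.cfg containing a [versioneer]
# section. The section below is a hypothetical minimal example: 'myproject'
# and the src/ layout are placeholders, not requirements of Versioneer.
EXAMPLE_SETUP_CFG = """\
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
"""


def _example_read_config():
    # Write the sample config to a scratch directory and parse it back.
    import tempfile
    tmp = tempfile.mkdtemp()
    with open(os.path.join(tmp, "setup.cfg"), "w") as f:
        f.write(EXAMPLE_SETUP_CFG)
    return get_config_from_root(tmp)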
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
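# --- Illustrative sketch (editor's addition) ---
# run_command() tries each candidate executable in turn and returns
# (stdout, returncode), or (None, None) if no candidate could be started.
# Asking git for its version string is a harmless hypothetical example.
def _example_git_version():
    stdout, rc = run_command(["git"], ["--version"], verbose=False)
    return stdout if rc == 0 else None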
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.16+dev (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": keywords.get("date", "").strip()}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date
pieces["date"] = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": pieces.get("date", "")}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date", "")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": keywords.get("date", "").strip()}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date
pieces["date"] = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.16+dev) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
import sys
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
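# --- Illustrative sketch (editor's addition) ---
# Rendering a hand-built 'pieces' dict (hypothetical values) in the default
# pep440 style: two commits past tag 1.2 with local modifications renders as
# '1.2+2.g1076c97.dirty'.
def _example_render_pep440():
    pieces = {"closest-tag": "1.2", "distance": 2, "short": "1076c97",
              "dirty": True, "long": "1076c97" * 6, "error": None}
    return render_pep440(pieces)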
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": pieces.get("date", "")}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date", "")}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
geometalab/geoconverter
|
versioneer.py
|
Python
|
mit
| 65,737
|
[
"Brian"
] |
b3862395fff704ef5428fb454ecbbc8b02bfa24468d6ce4260712d20e85cea4e
|
# Demonstrates pseudo-likelihood estimation
# for a large system of continuous variables
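# (Background note, added for context: pseudo-likelihood replaces the joint
#  likelihood of the MRF with the product of each variable's conditional
#  density given its neighbours, so parameter estimation sidesteps the
#  intractable partition function of the full joint distribution.)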
from plmrf import *
import numpy as np
import scipy
import time
# Generating a big ring by sampling variables independently,
# then sampling based on each configuration's 'true' potential
nvars = 1000
nsamp = 1000
print("Generating data ...")
indep_data = dict()
for vindex in range(nvars):
samples = np.random.normal(size=nsamp*10)
varname = "x{0}".format(vindex)
indep_data[varname] = samples
# potential functions are Gaussian kernels
def potential(pindex):
    return (1.0/nvars) * np.exp(-np.abs(indep_data["x{0}".format(pindex)] - indep_data["x{0}".format((pindex+1) % nvars)]))
unnormalized_density = np.exp(np.sum([potential(p) for p in range(nvars)], axis=0))
relative_density = unnormalized_density / unnormalized_density.sum()
samp_indices = np.random.choice(range(nsamp*10), size=nsamp, p=relative_density)
print("Setting up potentials and variable definitions ...")
data = dict()
var_defs = []
for vindex in range(nvars):
varname = "x{0}".format(vindex)
next_var = "x{0}".format((vindex+1) % nvars)
samples = indep_data[varname][samp_indices]
data[varname] = samples
var_defs.append(VariableDef(varname, samples=samples, num_int_points=10))
potentials = []
tied_params = [[], []]
for vindex in range(nvars):
varname = "x{0}".format(vindex)
next_var = "x{0}".format((vindex+1) % nvars)
potentials.append(GaussianPotential([varname], samples=data, location=0))
potentials.append(GaussianPotential([varname, next_var], samples=data))
tied_params[0].append(len(potentials)-2)
tied_params[1].append(len(potentials)-1)
for p in potentials:
if p.bandwidth < 1e-16:
print(p)
network = LogLinearMarkovNetwork(potentials, var_defs, tied_weights=tied_params)
print("Fitting parameters ...")
start = time.time()
mple_result = network.fit(data, log=True)
end = time.time()
print("Parameter estimation completed in {0} seconds".format(end - start))
print("MPLE optimization result:")
print(mple_result)
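# Illustrative follow-up (an assumption, not part of the original example): if
# plmrf hands back a scipy.optimize-style result object here, the fitted
# weights would typically be exposed as an attribute such as mple_result.x:
# print(mple_result.x)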
|
dgarant/pl-markov-network
|
examples/large_non_gaussian.py
|
Python
|
mit
| 2,076
|
[
"Gaussian"
] |
d423aa2c9ce9279e2589bcea713e68da7daf0b5048833642a233f20e12eab264
|
#===============================================================================
# LICENSE Bartsidee Framework - CC BY-NC-ND
#===============================================================================
# This work is licenced under the Creative Commons
# Attribution-Non-Commercial-No Derivative Works 3.0 Unported License. To view a
# copy of this licence, visit http://creativecommons.org/licenses/by-nc-nd/3.0/
# or send a letter to Creative Commons, 171 Second Street, Suite 300,
# San Francisco, California 94105, USA.
#===============================================================================
import mc, bz2, binascii, os
from time import time
import cPickle as pickle
from beautifulsoup.BeautifulSoup import BeautifulSoup
#===============================================================================
# Global Variables
#===============================================================================
http = mc.Http()
http.SetUserAgent("Mozilla/5.0 (Windows; U; Windows NT 6.1; nl; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13")
http.SetHttpHeader('Accept', 'text/javascript, text/html, application/xml, text/xml, */*')
config = mc.GetApp().GetLocalConfig()
#===============================================================================
# Function to retrieve and cache an http request
# Input:
#      url - request url
#      cacheTime - amount of time to keep the http result in the cache (seconds)
#      xhr - make an xhr ajax request (boolean)
#      params - parameters to POST; if empty, a GET request is executed
#      cookie - optional cookie header string to send with the request
#===============================================================================
def FetchUrl(url, cacheTime=0, xhr=False, params="", cookie=""):
dbid = "cache_" + url
if Cache(dbid+'{0}', cacheTime): return bz2.decompress(binascii.unhexlify(config.GetValue(dbid+'{1}')))
if xhr: http.SetHttpHeader('X-Requested-With', 'XMLHttpRequest')
if cookie: http.SetHttpHeader('Cookie', cookie)
if params: data = http.Post(url, params)
else: data = http.Get(url)
if cacheTime != 0:
config.Reset(dbid)
config.Reset(dbid)
config.PushBackValue(dbid, str(time()).split('.')[0])
config.PushBackValue(dbid, binascii.hexlify(bz2.compress(data)))
return data
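# Illustrative usage sketch (hypothetical URL, not part of the original module):
# page = FetchUrl("http://example.com/feed.xml", cacheTime=3600)
# soup = BeautifulSoup(page)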
#===============================================================================
# Function to set the http user agent used for subsequent requests
# Input:
# var - user agent variable as string
#===============================================================================
def UserAgent(var):
http.SetUserAgent(var)
#===============================================================================
# Function to determine if a cached value is still valid, returns a boolean (True = still cached)
# Input:
#      dbid - variable name id in the database to check
# cacheTime - amount of time to keep the variable in the cache (seconds)
#===============================================================================
def Cache(dbid, cacheTime):
if cacheTime == 0: return False
urltime = config.GetValue(dbid)
if urltime == "": urltime = 0
expiresAt = int(urltime) + int(cacheTime)
if time() < expiresAt:
return True
else:
return False
#===============================================================================
# Function to clean the app database: speeds it up and saves storage space
# Input:
# interval - amount of time before a new cleanup (seconds)
# save - array containing the variable names to exclude from deleting
#===============================================================================
def CleanDb(interval, save=[]):
latest = config.GetValue('clean')
if latest == "": latest = 0
expiresAt = int(latest) + int(interval)
if time() > expiresAt:
var_save = []
for i in range(len(save)):
var_save.append(config.GetValue(save[i]))
config.ResetAll()
config.SetValue('clean', str(time()).split('.')[0])
for i in range(len(save)):
config.SetValue(str(save[i]), str(var_save[i]))
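# Illustrative usage sketch (assumed values, not part of the original module):
# clean the local config at most once a day, preserving a stored cookie value.
# CleanDb(86400, save=["cookie"])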
|
bartsidee/bartsidee-boxee
|
sources/podcast/libs/ba.py
|
Python
|
gpl-3.0
| 4,003
|
[
"VisIt"
] |
558d4fe6a41aa5d4b4edd5119d4dafef93d4ed89b6cb151f360a1bee646e791c
|
'''module to provide git integration with jasp.
The intent is to provide a hook function that puts a vasp directory
under version control and commits changes to the repository
things to do:
1. figure out how to specify the git repository you want, or have the
CWD as the default directory.
2. figure out how to get more meaningful commit messages, maybe
containing diff information or which files were committed.
'''
import os, time
from git import *
CWD = os.getcwd()
def under_vc(self):
'''test if current directory is under version control or not'''
try:
repo = Repo('.')
return True
except InvalidGitRepositoryError:
return False
def commit(self):
'''add VASP files to a repository and commit them'''
if not under_vc(self):
repo = Repo.init(CWD)
else:
repo = Repo('.')
index = repo.index
vasp_files = ['POSCAR', 'INCAR', 'KPOINTS',
'OUTCAR', 'vasprun.xml']
for f in vasp_files:
if os.path.exists(f):
index.add([f])
message = 'jasp.vc.vgit commit - {0}'.format(time.asctime())
print index.commit(message)
if __name__ == '__main__':
print under_vc(1)
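# Illustrative usage sketch (assumes the current working directory is a VASP
# calculation directory; the unused argument mirrors the dummy value above):
# commit(None)  # initialises a repo if needed, then adds and commits the VASP files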
|
jkitchin/jasp
|
jasp/vc/vgit.py
|
Python
|
gpl-2.0
| 1,195
|
[
"VASP"
] |
226a3902e460f030a9df67c70199934d892b28286bd03d702f36086ffa936aa1
|
import enum
import inspect
import pydoc
import unittest
from collections import OrderedDict
from enum import Enum, IntEnum, EnumMeta, unique
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
Stooges = exc
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
IntStooges = exc
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception as exc:
FloatStooges = exc
# for pickle test and subclass tests
try:
class StrEnum(str, Enum):
'accepts only string values'
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
except Exception as exc:
Name = exc
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
Question = exc
try:
Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
Answer = exc
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
Theory = exc
# for doctests
try:
class Fruit(Enum):
tomato = 1
banana = 2
cherry = 3
except Exception:
pass
def test_pickle_dump_load(assertion, source, target=None,
*, protocol=(0, HIGHEST_PROTOCOL)):
start, stop = protocol
if target is None:
target = source
for protocol in range(start, stop+1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj,
*, protocol=(0, HIGHEST_PROTOCOL)):
start, stop = protocol
for protocol in range(start, stop+1):
with assertion(exception):
dumps(obj, protocol=protocol)
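# The two helpers above round-trip an object through dumps()/loads() for every
# pickle protocol in the inclusive (start, stop) range, asserting either that
# the unpickled result matches the target (test_pickle_dump_load) or that
# pickling raises the given exception (test_pickle_exception).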
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_is_sunder(self):
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_is_dunder(self):
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_with_added_behavior(self):
class Test(Enum):
this = 'that'
these = 'those'
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertEqual(
set(dir(Test)),
set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
)
self.assertEqual(
set(dir(Test.this)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
)
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
class SuperEnum(Enum):
def invisible(self):
return "did you see me?"
class SubEnum(SuperEnum):
sample = 5
self.assertEqual(
set(dir(SubEnum.sample)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
)
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.{0}: {1}>'.format(season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_create_= 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_contains(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
self.assertNotIn(3, Season)
val = Season(3)
self.assertIn(val, Season)
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_format_enum_custom(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited(self):
class StrEnum(str, Enum):
pass
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_exception(
self.assertRaises, PicklingError, self.NestedEnum.twigs,
protocol=(0, 3))
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs,
protocol=(4, HIGHEST_PROTOCOL))
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
        self.assertIs(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5, protocol=(4, 4))
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y, protocol=(4, 4))
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_exception(self.assertRaises, TypeError, NEI.x)
test_pickle_exception(self.assertRaises, PicklingError, NEI)
def test_subclasses_without_direct_pickle_support_using_name(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple' # needed for pickle protocol 4
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertIs(type(SomeTuple.first), SomeTuple)
self.assertIsInstance(SomeTuple.second, tuple)
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertIs, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class AlwaysEqual:
def __eq__(self, other):
return True
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(AlwaysEqual(), OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, AlwaysEqual())
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
def test_extending2(self):
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_no_duplicates(self):
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m3 kg-1 s-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for enum, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(enum.value, [value])
self.assertIs(ColorInAList([value]), enum)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
class TestUnique(unittest.TestCase):
def test_unique_clean(self):
@unique
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
@unique
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@unique
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@unique
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
expected_help_output = """
Help on class Color in module %s:
class Color(enum.Enum)
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping.
""".strip()
class TestStdLib(unittest.TestCase):
class Color(Enum):
red = 1
green = 2
blue = 3
def test_pydoc(self):
# indirectly test __objclass__
expected_text = expected_help_output % __name__
output = StringIO()
helper = pydoc.Helper(output=output)
helper(self.Color)
result = output.getvalue().strip()
self.assertEqual(result, expected_text)
def test_inspect_getmembers(self):
values = dict((
('__class__', EnumMeta),
('__doc__', None),
('__members__', self.Color.__members__),
('__module__', __name__),
('blue', self.Color.blue),
('green', self.Color.green),
('name', Enum.__dict__['name']),
('red', self.Color.red),
('value', Enum.__dict__['value']),
))
result = dict(inspect.getmembers(self.Color))
self.assertEqual(values.keys(), result.keys())
failed = False
for k in values.keys():
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=EnumMeta),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object=None),
Attribute(name='__members__', kind='property',
defining_class=EnumMeta, object=EnumMeta.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=__name__),
Attribute(name='blue', kind='data',
defining_class=self.Color, object=self.Color.blue),
Attribute(name='green', kind='data',
defining_class=self.Color, object=self.Color.green),
Attribute(name='red', kind='data',
defining_class=self.Color, object=self.Color.red),
Attribute(name='name', kind='data',
defining_class=Enum, object=Enum.__dict__['name']),
Attribute(name='value', kind='data',
defining_class=Enum, object=Enum.__dict__['value']),
]
values.sort(key=lambda item: item.name)
result = list(inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
failed = False
for v, r in zip(values, result):
if r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
if __name__ == '__main__':
unittest.main()
|
michalliu/OpenWrt-Firefly-Libraries
|
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/test_enum.py
|
Python
|
gpl-2.0
| 57,132
|
[
"MOE"
] |
70731b0fc8928da4cebe93461f8a8984a68290846c669d58dcd4057e647454d5
|