repository_name
stringlengths 7
55
| func_path_in_repository
stringlengths 4
223
| func_name
stringlengths 1
134
| whole_func_string
stringlengths 75
104k
| language
stringclasses 1
value | func_code_string
stringlengths 75
104k
| func_code_tokens
listlengths 19
28.4k
| func_documentation_string
stringlengths 1
46.9k
| func_documentation_tokens
listlengths 1
1.97k
| split_name
stringclasses 1
value | func_code_url
stringlengths 87
315
|
|---|---|---|---|---|---|---|---|---|---|---|
scour-project/scour
|
scour/scour.py
|
removeUnreferencedIDs
|
def removeUnreferencedIDs(referencedIDs, identifiedElements):
    """
    Removes the unreferenced ID attributes.
    Returns the number of ID attributes removed
    """
    global _num_ids_removed
    protectedTags = ('font',)
    removedCount = 0
    for elementID in identifiedElements:
        element = identifiedElements[elementID]
        # an ID survives when something references it or its element is protected
        if elementID in referencedIDs or element.nodeName in protectedTags:
            continue
        element.removeAttribute('id')
        _num_ids_removed += 1
        removedCount += 1
    return removedCount
|
python
|
def removeUnreferencedIDs(referencedIDs, identifiedElements):
    """
    Removes the unreferenced ID attributes.
    Returns the number of ID attributes removed
    """
    global _num_ids_removed
    protectedTags = ('font',)
    removedCount = 0
    for elementID in identifiedElements:
        element = identifiedElements[elementID]
        # an ID survives when something references it or its element is protected
        if elementID in referencedIDs or element.nodeName in protectedTags:
            continue
        element.removeAttribute('id')
        _num_ids_removed += 1
        removedCount += 1
    return removedCount
|
[
"def",
"removeUnreferencedIDs",
"(",
"referencedIDs",
",",
"identifiedElements",
")",
":",
"global",
"_num_ids_removed",
"keepTags",
"=",
"[",
"'font'",
"]",
"num",
"=",
"0",
"for",
"id",
"in",
"identifiedElements",
":",
"node",
"=",
"identifiedElements",
"[",
"id",
"]",
"if",
"id",
"not",
"in",
"referencedIDs",
"and",
"node",
".",
"nodeName",
"not",
"in",
"keepTags",
":",
"node",
".",
"removeAttribute",
"(",
"'id'",
")",
"_num_ids_removed",
"+=",
"1",
"num",
"+=",
"1",
"return",
"num"
] |
Removes the unreferenced ID attributes.
Returns the number of ID attributes removed
|
[
"Removes",
"the",
"unreferenced",
"ID",
"attributes",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L857-L872
|
scour-project/scour
|
scour/scour.py
|
removeNestedGroups
|
def removeNestedGroups(node):
    """
    This walks further and further down the tree, removing groups
    which do not have any attributes or a title/desc child and
    promoting their children up one level
    """
    global _num_elements_removed
    num = 0
    collapsible = []
    # Only consider <g> elements for promotion if this element isn't a <switch>.
    # (partial fix for bug 594930, required by the SVG spec however)
    nodeIsSwitch = node.nodeType == Node.ELEMENT_NODE and node.nodeName == 'switch'
    if not nodeIsSwitch:
        for candidate in node.childNodes:
            if (candidate.nodeName == 'g' and candidate.namespaceURI == NS['SVG']
                    and len(candidate.attributes) == 0):
                # only collapse the group when no direct child is a title or desc
                hasTitleOrDesc = any(
                    gc.nodeType == Node.ELEMENT_NODE and gc.namespaceURI == NS['SVG'] and
                    gc.nodeName in ('title', 'desc')
                    for gc in candidate.childNodes)
                if not hasTitleOrDesc:
                    collapsible.append(candidate)
    for group in collapsible:
        # hoist every child in front of the group, then delete the empty shell
        while group.childNodes.length > 0:
            group.parentNode.insertBefore(group.firstChild, group)
        group.parentNode.removeChild(group)
        _num_elements_removed += 1
        num += 1
    # now recurse for children
    for child in node.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            num += removeNestedGroups(child)
    return num
|
python
|
def removeNestedGroups(node):
    """
    This walks further and further down the tree, removing groups
    which do not have any attributes or a title/desc child and
    promoting their children up one level
    """
    global _num_elements_removed
    num = 0
    collapsible = []
    # Only consider <g> elements for promotion if this element isn't a <switch>.
    # (partial fix for bug 594930, required by the SVG spec however)
    nodeIsSwitch = node.nodeType == Node.ELEMENT_NODE and node.nodeName == 'switch'
    if not nodeIsSwitch:
        for candidate in node.childNodes:
            if (candidate.nodeName == 'g' and candidate.namespaceURI == NS['SVG']
                    and len(candidate.attributes) == 0):
                # only collapse the group when no direct child is a title or desc
                hasTitleOrDesc = any(
                    gc.nodeType == Node.ELEMENT_NODE and gc.namespaceURI == NS['SVG'] and
                    gc.nodeName in ('title', 'desc')
                    for gc in candidate.childNodes)
                if not hasTitleOrDesc:
                    collapsible.append(candidate)
    for group in collapsible:
        # hoist every child in front of the group, then delete the empty shell
        while group.childNodes.length > 0:
            group.parentNode.insertBefore(group.firstChild, group)
        group.parentNode.removeChild(group)
        _num_elements_removed += 1
        num += 1
    # now recurse for children
    for child in node.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            num += removeNestedGroups(child)
    return num
|
[
"def",
"removeNestedGroups",
"(",
"node",
")",
":",
"global",
"_num_elements_removed",
"num",
"=",
"0",
"groupsToRemove",
"=",
"[",
"]",
"# Only consider <g> elements for promotion if this element isn't a <switch>.",
"# (partial fix for bug 594930, required by the SVG spec however)",
"if",
"not",
"(",
"node",
".",
"nodeType",
"==",
"Node",
".",
"ELEMENT_NODE",
"and",
"node",
".",
"nodeName",
"==",
"'switch'",
")",
":",
"for",
"child",
"in",
"node",
".",
"childNodes",
":",
"if",
"child",
".",
"nodeName",
"==",
"'g'",
"and",
"child",
".",
"namespaceURI",
"==",
"NS",
"[",
"'SVG'",
"]",
"and",
"len",
"(",
"child",
".",
"attributes",
")",
"==",
"0",
":",
"# only collapse group if it does not have a title or desc as a direct descendant,",
"for",
"grandchild",
"in",
"child",
".",
"childNodes",
":",
"if",
"grandchild",
".",
"nodeType",
"==",
"Node",
".",
"ELEMENT_NODE",
"and",
"grandchild",
".",
"namespaceURI",
"==",
"NS",
"[",
"'SVG'",
"]",
"and",
"grandchild",
".",
"nodeName",
"in",
"[",
"'title'",
",",
"'desc'",
"]",
":",
"break",
"else",
":",
"groupsToRemove",
".",
"append",
"(",
"child",
")",
"for",
"g",
"in",
"groupsToRemove",
":",
"while",
"g",
".",
"childNodes",
".",
"length",
">",
"0",
":",
"g",
".",
"parentNode",
".",
"insertBefore",
"(",
"g",
".",
"firstChild",
",",
"g",
")",
"g",
".",
"parentNode",
".",
"removeChild",
"(",
"g",
")",
"_num_elements_removed",
"+=",
"1",
"num",
"+=",
"1",
"# now recurse for children",
"for",
"child",
"in",
"node",
".",
"childNodes",
":",
"if",
"child",
".",
"nodeType",
"==",
"Node",
".",
"ELEMENT_NODE",
":",
"num",
"+=",
"removeNestedGroups",
"(",
"child",
")",
"return",
"num"
] |
This walks further and further down the tree, removing groups
which do not have any attributes or a title/desc child and
promoting their children up one level
|
[
"This",
"walks",
"further",
"and",
"further",
"down",
"the",
"tree",
"removing",
"groups",
"which",
"do",
"not",
"have",
"any",
"attributes",
"or",
"a",
"title",
"/",
"desc",
"child",
"and",
"promoting",
"their",
"children",
"up",
"one",
"level"
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L946-L980
|
scour-project/scour
|
scour/scour.py
|
moveCommonAttributesToParentGroup
|
def moveCommonAttributesToParentGroup(elem, referencedElements):
    """
    This recursively calls this function on all children of the passed in element
    and then iterates over all child elements and removes common inheritable attributes
    from the children and places them in the parent group. But only if the parent contains
    nothing but element children and whitespace. The attributes are only removed from the
    children if the children are not referenced by other elements in the document.
    """
    num = 0
    childElements = []
    # recurse depth-first, gathering element children that nothing else references
    for child in elem.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            if child.getAttribute('id') not in referencedElements:
                childElements.append(child)
                num += moveCommonAttributesToParentGroup(child, referencedElements)
        elif child.nodeType == Node.TEXT_NODE and child.nodeValue.strip():
            # non-whitespace text sits directly in this parent: moving
            # presentation attributes up could restyle that text, so stop here
            return num
    # with fewer than two candidate children there is nothing to factor out
    if len(childElements) < 2:
        return num
    # this is most of the inheritable properties from http://www.w3.org/TR/SVG11/propidx.html
    # and http://www.w3.org/TR/SVGTiny12/attributeTable.html
    inheritableProps = ('clip-rule',
                        'display-align',
                        'fill', 'fill-opacity', 'fill-rule',
                        'font', 'font-family', 'font-size', 'font-size-adjust', 'font-stretch',
                        'font-style', 'font-variant', 'font-weight',
                        'letter-spacing',
                        'pointer-events', 'shape-rendering',
                        'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
                        'stroke-miterlimit', 'stroke-opacity', 'stroke-width',
                        'text-anchor', 'text-decoration', 'text-rendering', 'visibility',
                        'word-spacing', 'writing-mode')
    # seed the candidate set with every inheritable attribute of the first child
    # FIXME: Note there is a chance that the first child is a set/animate in which case
    # its fill attribute is not what we want to look at, we should look for the first
    # non-animate/set element
    commonAttrs = {}
    firstChildAttrs = childElements[0].attributes
    for index in range(firstChildAttrs.length):
        candidate = firstChildAttrs.item(index)
        if candidate.nodeName in inheritableProps:
            commonAttrs[candidate.nodeName] = candidate.nodeValue
    # every later sibling may veto candidates it does not share
    for sibling in childElements[1:]:
        # animateXXX/set elements use 'fill' with a different meaning, so skip them
        if sibling.localName in ('set', 'animate', 'animateColor', 'animateTransform', 'animateMotion'):
            continue
        mismatched = [name for name in commonAttrs
                      if sibling.getAttribute(name) != commonAttrs[name]]
        for name in mismatched:
            del commonAttrs[name]
    # commonAttrs now holds only values shared by every (non-animation) child:
    # hoist each one onto the parent and strip it from the children
    for name in commonAttrs:
        for child in childElements:
            child.removeAttribute(name)
        elem.setAttribute(name, commonAttrs[name])
    # we removed N*M attributes from children and added back M on the parent
    num += (len(childElements) - 1) * len(commonAttrs)
    return num
|
python
|
def moveCommonAttributesToParentGroup(elem, referencedElements):
    """
    This recursively calls this function on all children of the passed in element
    and then iterates over all child elements and removes common inheritable attributes
    from the children and places them in the parent group. But only if the parent contains
    nothing but element children and whitespace. The attributes are only removed from the
    children if the children are not referenced by other elements in the document.
    """
    num = 0
    childElements = []
    # recurse depth-first, gathering element children that nothing else references
    for child in elem.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            if child.getAttribute('id') not in referencedElements:
                childElements.append(child)
                num += moveCommonAttributesToParentGroup(child, referencedElements)
        elif child.nodeType == Node.TEXT_NODE and child.nodeValue.strip():
            # non-whitespace text sits directly in this parent: moving
            # presentation attributes up could restyle that text, so stop here
            return num
    # with fewer than two candidate children there is nothing to factor out
    if len(childElements) < 2:
        return num
    # this is most of the inheritable properties from http://www.w3.org/TR/SVG11/propidx.html
    # and http://www.w3.org/TR/SVGTiny12/attributeTable.html
    inheritableProps = ('clip-rule',
                        'display-align',
                        'fill', 'fill-opacity', 'fill-rule',
                        'font', 'font-family', 'font-size', 'font-size-adjust', 'font-stretch',
                        'font-style', 'font-variant', 'font-weight',
                        'letter-spacing',
                        'pointer-events', 'shape-rendering',
                        'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
                        'stroke-miterlimit', 'stroke-opacity', 'stroke-width',
                        'text-anchor', 'text-decoration', 'text-rendering', 'visibility',
                        'word-spacing', 'writing-mode')
    # seed the candidate set with every inheritable attribute of the first child
    # FIXME: Note there is a chance that the first child is a set/animate in which case
    # its fill attribute is not what we want to look at, we should look for the first
    # non-animate/set element
    commonAttrs = {}
    firstChildAttrs = childElements[0].attributes
    for index in range(firstChildAttrs.length):
        candidate = firstChildAttrs.item(index)
        if candidate.nodeName in inheritableProps:
            commonAttrs[candidate.nodeName] = candidate.nodeValue
    # every later sibling may veto candidates it does not share
    for sibling in childElements[1:]:
        # animateXXX/set elements use 'fill' with a different meaning, so skip them
        if sibling.localName in ('set', 'animate', 'animateColor', 'animateTransform', 'animateMotion'):
            continue
        mismatched = [name for name in commonAttrs
                      if sibling.getAttribute(name) != commonAttrs[name]]
        for name in mismatched:
            del commonAttrs[name]
    # commonAttrs now holds only values shared by every (non-animation) child:
    # hoist each one onto the parent and strip it from the children
    for name in commonAttrs:
        for child in childElements:
            child.removeAttribute(name)
        elem.setAttribute(name, commonAttrs[name])
    # we removed N*M attributes from children and added back M on the parent
    num += (len(childElements) - 1) * len(commonAttrs)
    return num
|
[
"def",
"moveCommonAttributesToParentGroup",
"(",
"elem",
",",
"referencedElements",
")",
":",
"num",
"=",
"0",
"childElements",
"=",
"[",
"]",
"# recurse first into the children (depth-first)",
"for",
"child",
"in",
"elem",
".",
"childNodes",
":",
"if",
"child",
".",
"nodeType",
"==",
"Node",
".",
"ELEMENT_NODE",
":",
"# only add and recurse if the child is not referenced elsewhere",
"if",
"not",
"child",
".",
"getAttribute",
"(",
"'id'",
")",
"in",
"referencedElements",
":",
"childElements",
".",
"append",
"(",
"child",
")",
"num",
"+=",
"moveCommonAttributesToParentGroup",
"(",
"child",
",",
"referencedElements",
")",
"# else if the parent has non-whitespace text children, do not",
"# try to move common attributes",
"elif",
"child",
".",
"nodeType",
"==",
"Node",
".",
"TEXT_NODE",
"and",
"child",
".",
"nodeValue",
".",
"strip",
"(",
")",
":",
"return",
"num",
"# only process the children if there are more than one element",
"if",
"len",
"(",
"childElements",
")",
"<=",
"1",
":",
"return",
"num",
"commonAttrs",
"=",
"{",
"}",
"# add all inheritable properties of the first child element",
"# FIXME: Note there is a chance that the first child is a set/animate in which case",
"# its fill attribute is not what we want to look at, we should look for the first",
"# non-animate/set element",
"attrList",
"=",
"childElements",
"[",
"0",
"]",
".",
"attributes",
"for",
"index",
"in",
"range",
"(",
"attrList",
".",
"length",
")",
":",
"attr",
"=",
"attrList",
".",
"item",
"(",
"index",
")",
"# this is most of the inheritable properties from http://www.w3.org/TR/SVG11/propidx.html",
"# and http://www.w3.org/TR/SVGTiny12/attributeTable.html",
"if",
"attr",
".",
"nodeName",
"in",
"[",
"'clip-rule'",
",",
"'display-align'",
",",
"'fill'",
",",
"'fill-opacity'",
",",
"'fill-rule'",
",",
"'font'",
",",
"'font-family'",
",",
"'font-size'",
",",
"'font-size-adjust'",
",",
"'font-stretch'",
",",
"'font-style'",
",",
"'font-variant'",
",",
"'font-weight'",
",",
"'letter-spacing'",
",",
"'pointer-events'",
",",
"'shape-rendering'",
",",
"'stroke'",
",",
"'stroke-dasharray'",
",",
"'stroke-dashoffset'",
",",
"'stroke-linecap'",
",",
"'stroke-linejoin'",
",",
"'stroke-miterlimit'",
",",
"'stroke-opacity'",
",",
"'stroke-width'",
",",
"'text-anchor'",
",",
"'text-decoration'",
",",
"'text-rendering'",
",",
"'visibility'",
",",
"'word-spacing'",
",",
"'writing-mode'",
"]",
":",
"# we just add all the attributes from the first child",
"commonAttrs",
"[",
"attr",
".",
"nodeName",
"]",
"=",
"attr",
".",
"nodeValue",
"# for each subsequent child element",
"for",
"childNum",
"in",
"range",
"(",
"len",
"(",
"childElements",
")",
")",
":",
"# skip first child",
"if",
"childNum",
"==",
"0",
":",
"continue",
"child",
"=",
"childElements",
"[",
"childNum",
"]",
"# if we are on an animateXXX/set element, ignore it (due to the 'fill' attribute)",
"if",
"child",
".",
"localName",
"in",
"[",
"'set'",
",",
"'animate'",
",",
"'animateColor'",
",",
"'animateTransform'",
",",
"'animateMotion'",
"]",
":",
"continue",
"distinctAttrs",
"=",
"[",
"]",
"# loop through all current 'common' attributes",
"for",
"name",
"in",
"commonAttrs",
":",
"# if this child doesn't match that attribute, schedule it for removal",
"if",
"child",
".",
"getAttribute",
"(",
"name",
")",
"!=",
"commonAttrs",
"[",
"name",
"]",
":",
"distinctAttrs",
".",
"append",
"(",
"name",
")",
"# remove those attributes which are not common",
"for",
"name",
"in",
"distinctAttrs",
":",
"del",
"commonAttrs",
"[",
"name",
"]",
"# commonAttrs now has all the inheritable attributes which are common among all child elements",
"for",
"name",
"in",
"commonAttrs",
":",
"for",
"child",
"in",
"childElements",
":",
"child",
".",
"removeAttribute",
"(",
"name",
")",
"elem",
".",
"setAttribute",
"(",
"name",
",",
"commonAttrs",
"[",
"name",
"]",
")",
"# update our statistic (we remove N*M attributes and add back in M attributes)",
"num",
"+=",
"(",
"len",
"(",
"childElements",
")",
"-",
"1",
")",
"*",
"len",
"(",
"commonAttrs",
")",
"return",
"num"
] |
This recursively calls this function on all children of the passed in element
and then iterates over all child elements and removes common inheritable attributes
from the children and places them in the parent group. But only if the parent contains
nothing but element children and whitespace. The attributes are only removed from the
children if the children are not referenced by other elements in the document.
|
[
"This",
"recursively",
"calls",
"this",
"function",
"on",
"all",
"children",
"of",
"the",
"passed",
"in",
"element",
"and",
"then",
"iterates",
"over",
"all",
"child",
"elements",
"and",
"removes",
"common",
"inheritable",
"attributes",
"from",
"the",
"children",
"and",
"places",
"them",
"in",
"the",
"parent",
"group",
".",
"But",
"only",
"if",
"the",
"parent",
"contains",
"nothing",
"but",
"element",
"children",
"and",
"whitespace",
".",
"The",
"attributes",
"are",
"only",
"removed",
"from",
"the",
"children",
"if",
"the",
"children",
"are",
"not",
"referenced",
"by",
"other",
"elements",
"in",
"the",
"document",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L983-L1063
|
scour-project/scour
|
scour/scour.py
|
createGroupsForCommonAttributes
|
def createGroupsForCommonAttributes(elem):
    """
    Creates <g> elements to contain runs of 3 or more
    consecutive child elements having at least one common attribute.
    Common attributes are not promoted to the <g> by this function.
    This is handled by moveCommonAttributesToParentGroup.
    If all children have a common attribute, an extra <g> is not created.
    This function acts recursively on the given element.

    Returns the number of <g> elements created.
    """
    num = 0
    global _num_elements_removed
    # TODO perhaps all of the Presentation attributes in http://www.w3.org/TR/SVG/struct.html#GElement
    # could be added here
    # Cyn: These attributes are the same as in moveAttributesToParentGroup, and must always be
    for curAttr in ['clip-rule',
                    'display-align',
                    'fill', 'fill-opacity', 'fill-rule',
                    'font', 'font-family', 'font-size', 'font-size-adjust', 'font-stretch',
                    'font-style', 'font-variant', 'font-weight',
                    'letter-spacing',
                    'pointer-events', 'shape-rendering',
                    'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
                    'stroke-miterlimit', 'stroke-opacity', 'stroke-width',
                    'text-anchor', 'text-decoration', 'text-rendering', 'visibility',
                    'word-spacing', 'writing-mode']:
        # Iterate through the children in reverse order, so item(i) for
        # items we have yet to visit still returns the correct nodes.
        curChild = elem.childNodes.length - 1
        while curChild >= 0:
            childNode = elem.childNodes.item(curChild)
            if (
                childNode.nodeType == Node.ELEMENT_NODE and
                childNode.getAttribute(curAttr) != '' and
                childNode.nodeName in [
                    # only attempt to group elements that the content model allows to be children of a <g>
                    # SVG 1.1 (see https://www.w3.org/TR/SVG/struct.html#GElement)
                    'animate', 'animateColor', 'animateMotion', 'animateTransform', 'set', # animation elements
                    'desc', 'metadata', 'title', # descriptive elements
                    'circle', 'ellipse', 'line', 'path', 'polygon', 'polyline', 'rect', # shape elements
                    'defs', 'g', 'svg', 'symbol', 'use', # structural elements
                    'linearGradient', 'radialGradient', # gradient elements
                    'a', 'altGlyphDef', 'clipPath', 'color-profile', 'cursor', 'filter',
                    'font', 'font-face', 'foreignObject', 'image', 'marker', 'mask',
                    'pattern', 'script', 'style', 'switch', 'text', 'view',
                    # SVG 1.2 (see https://www.w3.org/TR/SVGTiny12/elementTable.html)
                    'animation', 'audio', 'discard', 'handler', 'listener',
                    'prefetch', 'solidColor', 'textArea', 'video'
                ]
            ):
                # We're in a possible run! Track the value and run length.
                value = childNode.getAttribute(curAttr)
                runStart, runEnd = curChild, curChild
                # Run elements includes only element tags, no whitespace/comments/etc.
                # Later, we calculate a run length which includes these.
                runElements = 1
                # Backtrack to get all the nodes having the same
                # attribute value, preserving any nodes in-between.
                while runStart > 0:
                    nextNode = elem.childNodes.item(runStart - 1)
                    if nextNode.nodeType == Node.ELEMENT_NODE:
                        if nextNode.getAttribute(curAttr) != value:
                            break
                        else:
                            runElements += 1
                            runStart -= 1
                    else:
                        # non-element nodes (whitespace/comments) never terminate a run
                        runStart -= 1
                # runs shorter than 3 elements do not justify the extra <g> wrapper
                if runElements >= 3:
                    # Include whitespace/comment/etc. nodes in the run.
                    while runEnd < elem.childNodes.length - 1:
                        if elem.childNodes.item(runEnd + 1).nodeType == Node.ELEMENT_NODE:
                            break
                        else:
                            runEnd += 1
                    runLength = runEnd - runStart + 1
                    if runLength == elem.childNodes.length: # Every child has this
                        # If the current parent is a <g> already,
                        if elem.nodeName == 'g' and elem.namespaceURI == NS['SVG']:
                            # do not act altogether on this attribute; all the
                            # children have it in common.
                            # Let moveCommonAttributesToParentGroup do it.
                            curChild = -1
                            continue
                        # otherwise, it might be an <svg> element, and
                        # even if all children have the same attribute value,
                        # it's going to be worth making the <g> since
                        # <svg> doesn't support attributes like 'stroke'.
                        # Fall through.
                    # Create a <g> element from scratch.
                    # We need the Document for this.
                    document = elem.ownerDocument
                    group = document.createElementNS(NS['SVG'], 'g')
                    # Move the run of elements to the group.
                    # NOTE(review): the list surgery below manipulates .childNodes and
                    # .parentNode directly, which appears to rely on xml.dom.minidom
                    # storing childNodes as a plain mutable list — confirm the tree
                    # is always a minidom document.
                    # a) ADD the nodes to the new group.
                    group.childNodes[:] = elem.childNodes[runStart:runEnd + 1]
                    for child in group.childNodes:
                        child.parentNode = group
                    # b) REMOVE the nodes from the element.
                    elem.childNodes[runStart:runEnd + 1] = []
                    # Include the group in elem's children.
                    elem.childNodes.insert(runStart, group)
                    group.parentNode = elem
                    num += 1
                    # resume the reverse scan just before the run we wrapped
                    curChild = runStart - 1
                    # a net element was *added*, so offset the removal statistic by one
                    _num_elements_removed -= 1
                else:
                    curChild -= 1
            else:
                curChild -= 1
    # each child gets the same treatment, recursively
    for childNode in elem.childNodes:
        if childNode.nodeType == Node.ELEMENT_NODE:
            num += createGroupsForCommonAttributes(childNode)
    return num
|
python
|
def createGroupsForCommonAttributes(elem):
    """
    Creates <g> elements to contain runs of 3 or more
    consecutive child elements having at least one common attribute.
    Common attributes are not promoted to the <g> by this function.
    This is handled by moveCommonAttributesToParentGroup.
    If all children have a common attribute, an extra <g> is not created.
    This function acts recursively on the given element.

    Returns the number of <g> elements created.
    """
    num = 0
    global _num_elements_removed
    # TODO perhaps all of the Presentation attributes in http://www.w3.org/TR/SVG/struct.html#GElement
    # could be added here
    # Cyn: These attributes are the same as in moveAttributesToParentGroup, and must always be
    for curAttr in ['clip-rule',
                    'display-align',
                    'fill', 'fill-opacity', 'fill-rule',
                    'font', 'font-family', 'font-size', 'font-size-adjust', 'font-stretch',
                    'font-style', 'font-variant', 'font-weight',
                    'letter-spacing',
                    'pointer-events', 'shape-rendering',
                    'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
                    'stroke-miterlimit', 'stroke-opacity', 'stroke-width',
                    'text-anchor', 'text-decoration', 'text-rendering', 'visibility',
                    'word-spacing', 'writing-mode']:
        # Iterate through the children in reverse order, so item(i) for
        # items we have yet to visit still returns the correct nodes.
        curChild = elem.childNodes.length - 1
        while curChild >= 0:
            childNode = elem.childNodes.item(curChild)
            if (
                childNode.nodeType == Node.ELEMENT_NODE and
                childNode.getAttribute(curAttr) != '' and
                childNode.nodeName in [
                    # only attempt to group elements that the content model allows to be children of a <g>
                    # SVG 1.1 (see https://www.w3.org/TR/SVG/struct.html#GElement)
                    'animate', 'animateColor', 'animateMotion', 'animateTransform', 'set', # animation elements
                    'desc', 'metadata', 'title', # descriptive elements
                    'circle', 'ellipse', 'line', 'path', 'polygon', 'polyline', 'rect', # shape elements
                    'defs', 'g', 'svg', 'symbol', 'use', # structural elements
                    'linearGradient', 'radialGradient', # gradient elements
                    'a', 'altGlyphDef', 'clipPath', 'color-profile', 'cursor', 'filter',
                    'font', 'font-face', 'foreignObject', 'image', 'marker', 'mask',
                    'pattern', 'script', 'style', 'switch', 'text', 'view',
                    # SVG 1.2 (see https://www.w3.org/TR/SVGTiny12/elementTable.html)
                    'animation', 'audio', 'discard', 'handler', 'listener',
                    'prefetch', 'solidColor', 'textArea', 'video'
                ]
            ):
                # We're in a possible run! Track the value and run length.
                value = childNode.getAttribute(curAttr)
                runStart, runEnd = curChild, curChild
                # Run elements includes only element tags, no whitespace/comments/etc.
                # Later, we calculate a run length which includes these.
                runElements = 1
                # Backtrack to get all the nodes having the same
                # attribute value, preserving any nodes in-between.
                while runStart > 0:
                    nextNode = elem.childNodes.item(runStart - 1)
                    if nextNode.nodeType == Node.ELEMENT_NODE:
                        if nextNode.getAttribute(curAttr) != value:
                            break
                        else:
                            runElements += 1
                            runStart -= 1
                    else:
                        # non-element nodes (whitespace/comments) never terminate a run
                        runStart -= 1
                # runs shorter than 3 elements do not justify the extra <g> wrapper
                if runElements >= 3:
                    # Include whitespace/comment/etc. nodes in the run.
                    while runEnd < elem.childNodes.length - 1:
                        if elem.childNodes.item(runEnd + 1).nodeType == Node.ELEMENT_NODE:
                            break
                        else:
                            runEnd += 1
                    runLength = runEnd - runStart + 1
                    if runLength == elem.childNodes.length: # Every child has this
                        # If the current parent is a <g> already,
                        if elem.nodeName == 'g' and elem.namespaceURI == NS['SVG']:
                            # do not act altogether on this attribute; all the
                            # children have it in common.
                            # Let moveCommonAttributesToParentGroup do it.
                            curChild = -1
                            continue
                        # otherwise, it might be an <svg> element, and
                        # even if all children have the same attribute value,
                        # it's going to be worth making the <g> since
                        # <svg> doesn't support attributes like 'stroke'.
                        # Fall through.
                    # Create a <g> element from scratch.
                    # We need the Document for this.
                    document = elem.ownerDocument
                    group = document.createElementNS(NS['SVG'], 'g')
                    # Move the run of elements to the group.
                    # NOTE(review): the list surgery below manipulates .childNodes and
                    # .parentNode directly, which appears to rely on xml.dom.minidom
                    # storing childNodes as a plain mutable list — confirm the tree
                    # is always a minidom document.
                    # a) ADD the nodes to the new group.
                    group.childNodes[:] = elem.childNodes[runStart:runEnd + 1]
                    for child in group.childNodes:
                        child.parentNode = group
                    # b) REMOVE the nodes from the element.
                    elem.childNodes[runStart:runEnd + 1] = []
                    # Include the group in elem's children.
                    elem.childNodes.insert(runStart, group)
                    group.parentNode = elem
                    num += 1
                    # resume the reverse scan just before the run we wrapped
                    curChild = runStart - 1
                    # a net element was *added*, so offset the removal statistic by one
                    _num_elements_removed -= 1
                else:
                    curChild -= 1
            else:
                curChild -= 1
    # each child gets the same treatment, recursively
    for childNode in elem.childNodes:
        if childNode.nodeType == Node.ELEMENT_NODE:
            num += createGroupsForCommonAttributes(childNode)
    return num
|
[
"def",
"createGroupsForCommonAttributes",
"(",
"elem",
")",
":",
"num",
"=",
"0",
"global",
"_num_elements_removed",
"# TODO perhaps all of the Presentation attributes in http://www.w3.org/TR/SVG/struct.html#GElement",
"# could be added here",
"# Cyn: These attributes are the same as in moveAttributesToParentGroup, and must always be",
"for",
"curAttr",
"in",
"[",
"'clip-rule'",
",",
"'display-align'",
",",
"'fill'",
",",
"'fill-opacity'",
",",
"'fill-rule'",
",",
"'font'",
",",
"'font-family'",
",",
"'font-size'",
",",
"'font-size-adjust'",
",",
"'font-stretch'",
",",
"'font-style'",
",",
"'font-variant'",
",",
"'font-weight'",
",",
"'letter-spacing'",
",",
"'pointer-events'",
",",
"'shape-rendering'",
",",
"'stroke'",
",",
"'stroke-dasharray'",
",",
"'stroke-dashoffset'",
",",
"'stroke-linecap'",
",",
"'stroke-linejoin'",
",",
"'stroke-miterlimit'",
",",
"'stroke-opacity'",
",",
"'stroke-width'",
",",
"'text-anchor'",
",",
"'text-decoration'",
",",
"'text-rendering'",
",",
"'visibility'",
",",
"'word-spacing'",
",",
"'writing-mode'",
"]",
":",
"# Iterate through the children in reverse order, so item(i) for",
"# items we have yet to visit still returns the correct nodes.",
"curChild",
"=",
"elem",
".",
"childNodes",
".",
"length",
"-",
"1",
"while",
"curChild",
">=",
"0",
":",
"childNode",
"=",
"elem",
".",
"childNodes",
".",
"item",
"(",
"curChild",
")",
"if",
"(",
"childNode",
".",
"nodeType",
"==",
"Node",
".",
"ELEMENT_NODE",
"and",
"childNode",
".",
"getAttribute",
"(",
"curAttr",
")",
"!=",
"''",
"and",
"childNode",
".",
"nodeName",
"in",
"[",
"# only attempt to group elements that the content model allows to be children of a <g>",
"# SVG 1.1 (see https://www.w3.org/TR/SVG/struct.html#GElement)",
"'animate'",
",",
"'animateColor'",
",",
"'animateMotion'",
",",
"'animateTransform'",
",",
"'set'",
",",
"# animation elements",
"'desc'",
",",
"'metadata'",
",",
"'title'",
",",
"# descriptive elements",
"'circle'",
",",
"'ellipse'",
",",
"'line'",
",",
"'path'",
",",
"'polygon'",
",",
"'polyline'",
",",
"'rect'",
",",
"# shape elements",
"'defs'",
",",
"'g'",
",",
"'svg'",
",",
"'symbol'",
",",
"'use'",
",",
"# structural elements",
"'linearGradient'",
",",
"'radialGradient'",
",",
"# gradient elements",
"'a'",
",",
"'altGlyphDef'",
",",
"'clipPath'",
",",
"'color-profile'",
",",
"'cursor'",
",",
"'filter'",
",",
"'font'",
",",
"'font-face'",
",",
"'foreignObject'",
",",
"'image'",
",",
"'marker'",
",",
"'mask'",
",",
"'pattern'",
",",
"'script'",
",",
"'style'",
",",
"'switch'",
",",
"'text'",
",",
"'view'",
",",
"# SVG 1.2 (see https://www.w3.org/TR/SVGTiny12/elementTable.html)",
"'animation'",
",",
"'audio'",
",",
"'discard'",
",",
"'handler'",
",",
"'listener'",
",",
"'prefetch'",
",",
"'solidColor'",
",",
"'textArea'",
",",
"'video'",
"]",
")",
":",
"# We're in a possible run! Track the value and run length.",
"value",
"=",
"childNode",
".",
"getAttribute",
"(",
"curAttr",
")",
"runStart",
",",
"runEnd",
"=",
"curChild",
",",
"curChild",
"# Run elements includes only element tags, no whitespace/comments/etc.",
"# Later, we calculate a run length which includes these.",
"runElements",
"=",
"1",
"# Backtrack to get all the nodes having the same",
"# attribute value, preserving any nodes in-between.",
"while",
"runStart",
">",
"0",
":",
"nextNode",
"=",
"elem",
".",
"childNodes",
".",
"item",
"(",
"runStart",
"-",
"1",
")",
"if",
"nextNode",
".",
"nodeType",
"==",
"Node",
".",
"ELEMENT_NODE",
":",
"if",
"nextNode",
".",
"getAttribute",
"(",
"curAttr",
")",
"!=",
"value",
":",
"break",
"else",
":",
"runElements",
"+=",
"1",
"runStart",
"-=",
"1",
"else",
":",
"runStart",
"-=",
"1",
"if",
"runElements",
">=",
"3",
":",
"# Include whitespace/comment/etc. nodes in the run.",
"while",
"runEnd",
"<",
"elem",
".",
"childNodes",
".",
"length",
"-",
"1",
":",
"if",
"elem",
".",
"childNodes",
".",
"item",
"(",
"runEnd",
"+",
"1",
")",
".",
"nodeType",
"==",
"Node",
".",
"ELEMENT_NODE",
":",
"break",
"else",
":",
"runEnd",
"+=",
"1",
"runLength",
"=",
"runEnd",
"-",
"runStart",
"+",
"1",
"if",
"runLength",
"==",
"elem",
".",
"childNodes",
".",
"length",
":",
"# Every child has this",
"# If the current parent is a <g> already,",
"if",
"elem",
".",
"nodeName",
"==",
"'g'",
"and",
"elem",
".",
"namespaceURI",
"==",
"NS",
"[",
"'SVG'",
"]",
":",
"# do not act altogether on this attribute; all the",
"# children have it in common.",
"# Let moveCommonAttributesToParentGroup do it.",
"curChild",
"=",
"-",
"1",
"continue",
"# otherwise, it might be an <svg> element, and",
"# even if all children have the same attribute value,",
"# it's going to be worth making the <g> since",
"# <svg> doesn't support attributes like 'stroke'.",
"# Fall through.",
"# Create a <g> element from scratch.",
"# We need the Document for this.",
"document",
"=",
"elem",
".",
"ownerDocument",
"group",
"=",
"document",
".",
"createElementNS",
"(",
"NS",
"[",
"'SVG'",
"]",
",",
"'g'",
")",
"# Move the run of elements to the group.",
"# a) ADD the nodes to the new group.",
"group",
".",
"childNodes",
"[",
":",
"]",
"=",
"elem",
".",
"childNodes",
"[",
"runStart",
":",
"runEnd",
"+",
"1",
"]",
"for",
"child",
"in",
"group",
".",
"childNodes",
":",
"child",
".",
"parentNode",
"=",
"group",
"# b) REMOVE the nodes from the element.",
"elem",
".",
"childNodes",
"[",
"runStart",
":",
"runEnd",
"+",
"1",
"]",
"=",
"[",
"]",
"# Include the group in elem's children.",
"elem",
".",
"childNodes",
".",
"insert",
"(",
"runStart",
",",
"group",
")",
"group",
".",
"parentNode",
"=",
"elem",
"num",
"+=",
"1",
"curChild",
"=",
"runStart",
"-",
"1",
"_num_elements_removed",
"-=",
"1",
"else",
":",
"curChild",
"-=",
"1",
"else",
":",
"curChild",
"-=",
"1",
"# each child gets the same treatment, recursively",
"for",
"childNode",
"in",
"elem",
".",
"childNodes",
":",
"if",
"childNode",
".",
"nodeType",
"==",
"Node",
".",
"ELEMENT_NODE",
":",
"num",
"+=",
"createGroupsForCommonAttributes",
"(",
"childNode",
")",
"return",
"num"
] |
Creates <g> elements to contain runs of 3 or more
consecutive child elements having at least one common attribute.
Common attributes are not promoted to the <g> by this function.
This is handled by moveCommonAttributesToParentGroup.
If all children have a common attribute, an extra <g> is not created.
This function acts recursively on the given element.
|
[
"Creates",
"<g",
">",
"elements",
"to",
"contain",
"runs",
"of",
"3",
"or",
"more",
"consecutive",
"child",
"elements",
"having",
"at",
"least",
"one",
"common",
"attribute",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L1066-L1192
|
scour-project/scour
|
scour/scour.py
|
removeUnusedAttributesOnParent
|
def removeUnusedAttributesOnParent(elem):
"""
This recursively calls this function on all children of the element passed in,
then removes any unused attributes on this elem if none of the children inherit it
"""
num = 0
childElements = []
# recurse first into the children (depth-first)
for child in elem.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
childElements.append(child)
num += removeUnusedAttributesOnParent(child)
# only process the children if there are more than one element
if len(childElements) <= 1:
return num
# get all attribute values on this parent
attrList = elem.attributes
unusedAttrs = {}
for index in range(attrList.length):
attr = attrList.item(index)
if attr.nodeName in ['clip-rule',
'display-align',
'fill', 'fill-opacity', 'fill-rule',
'font', 'font-family', 'font-size', 'font-size-adjust', 'font-stretch',
'font-style', 'font-variant', 'font-weight',
'letter-spacing',
'pointer-events', 'shape-rendering',
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
'stroke-miterlimit', 'stroke-opacity', 'stroke-width',
'text-anchor', 'text-decoration', 'text-rendering', 'visibility',
'word-spacing', 'writing-mode']:
unusedAttrs[attr.nodeName] = attr.nodeValue
# for each child, if at least one child inherits the parent's attribute, then remove
for childNum in range(len(childElements)):
child = childElements[childNum]
inheritedAttrs = []
for name in unusedAttrs:
val = child.getAttribute(name)
if val == '' or val is None or val == 'inherit':
inheritedAttrs.append(name)
for a in inheritedAttrs:
del unusedAttrs[a]
# unusedAttrs now has all the parent attributes that are unused
for name in unusedAttrs:
elem.removeAttribute(name)
num += 1
return num
|
python
|
def removeUnusedAttributesOnParent(elem):
"""
This recursively calls this function on all children of the element passed in,
then removes any unused attributes on this elem if none of the children inherit it
"""
num = 0
childElements = []
# recurse first into the children (depth-first)
for child in elem.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
childElements.append(child)
num += removeUnusedAttributesOnParent(child)
# only process the children if there are more than one element
if len(childElements) <= 1:
return num
# get all attribute values on this parent
attrList = elem.attributes
unusedAttrs = {}
for index in range(attrList.length):
attr = attrList.item(index)
if attr.nodeName in ['clip-rule',
'display-align',
'fill', 'fill-opacity', 'fill-rule',
'font', 'font-family', 'font-size', 'font-size-adjust', 'font-stretch',
'font-style', 'font-variant', 'font-weight',
'letter-spacing',
'pointer-events', 'shape-rendering',
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
'stroke-miterlimit', 'stroke-opacity', 'stroke-width',
'text-anchor', 'text-decoration', 'text-rendering', 'visibility',
'word-spacing', 'writing-mode']:
unusedAttrs[attr.nodeName] = attr.nodeValue
# for each child, if at least one child inherits the parent's attribute, then remove
for childNum in range(len(childElements)):
child = childElements[childNum]
inheritedAttrs = []
for name in unusedAttrs:
val = child.getAttribute(name)
if val == '' or val is None or val == 'inherit':
inheritedAttrs.append(name)
for a in inheritedAttrs:
del unusedAttrs[a]
# unusedAttrs now has all the parent attributes that are unused
for name in unusedAttrs:
elem.removeAttribute(name)
num += 1
return num
|
[
"def",
"removeUnusedAttributesOnParent",
"(",
"elem",
")",
":",
"num",
"=",
"0",
"childElements",
"=",
"[",
"]",
"# recurse first into the children (depth-first)",
"for",
"child",
"in",
"elem",
".",
"childNodes",
":",
"if",
"child",
".",
"nodeType",
"==",
"Node",
".",
"ELEMENT_NODE",
":",
"childElements",
".",
"append",
"(",
"child",
")",
"num",
"+=",
"removeUnusedAttributesOnParent",
"(",
"child",
")",
"# only process the children if there are more than one element",
"if",
"len",
"(",
"childElements",
")",
"<=",
"1",
":",
"return",
"num",
"# get all attribute values on this parent",
"attrList",
"=",
"elem",
".",
"attributes",
"unusedAttrs",
"=",
"{",
"}",
"for",
"index",
"in",
"range",
"(",
"attrList",
".",
"length",
")",
":",
"attr",
"=",
"attrList",
".",
"item",
"(",
"index",
")",
"if",
"attr",
".",
"nodeName",
"in",
"[",
"'clip-rule'",
",",
"'display-align'",
",",
"'fill'",
",",
"'fill-opacity'",
",",
"'fill-rule'",
",",
"'font'",
",",
"'font-family'",
",",
"'font-size'",
",",
"'font-size-adjust'",
",",
"'font-stretch'",
",",
"'font-style'",
",",
"'font-variant'",
",",
"'font-weight'",
",",
"'letter-spacing'",
",",
"'pointer-events'",
",",
"'shape-rendering'",
",",
"'stroke'",
",",
"'stroke-dasharray'",
",",
"'stroke-dashoffset'",
",",
"'stroke-linecap'",
",",
"'stroke-linejoin'",
",",
"'stroke-miterlimit'",
",",
"'stroke-opacity'",
",",
"'stroke-width'",
",",
"'text-anchor'",
",",
"'text-decoration'",
",",
"'text-rendering'",
",",
"'visibility'",
",",
"'word-spacing'",
",",
"'writing-mode'",
"]",
":",
"unusedAttrs",
"[",
"attr",
".",
"nodeName",
"]",
"=",
"attr",
".",
"nodeValue",
"# for each child, if at least one child inherits the parent's attribute, then remove",
"for",
"childNum",
"in",
"range",
"(",
"len",
"(",
"childElements",
")",
")",
":",
"child",
"=",
"childElements",
"[",
"childNum",
"]",
"inheritedAttrs",
"=",
"[",
"]",
"for",
"name",
"in",
"unusedAttrs",
":",
"val",
"=",
"child",
".",
"getAttribute",
"(",
"name",
")",
"if",
"val",
"==",
"''",
"or",
"val",
"is",
"None",
"or",
"val",
"==",
"'inherit'",
":",
"inheritedAttrs",
".",
"append",
"(",
"name",
")",
"for",
"a",
"in",
"inheritedAttrs",
":",
"del",
"unusedAttrs",
"[",
"a",
"]",
"# unusedAttrs now has all the parent attributes that are unused",
"for",
"name",
"in",
"unusedAttrs",
":",
"elem",
".",
"removeAttribute",
"(",
"name",
")",
"num",
"+=",
"1",
"return",
"num"
] |
This recursively calls this function on all children of the element passed in,
then removes any unused attributes on this elem if none of the children inherit it
|
[
"This",
"recursively",
"calls",
"this",
"function",
"on",
"all",
"children",
"of",
"the",
"element",
"passed",
"in",
"then",
"removes",
"any",
"unused",
"attributes",
"on",
"this",
"elem",
"if",
"none",
"of",
"the",
"children",
"inherit",
"it"
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L1195-L1247
|
scour-project/scour
|
scour/scour.py
|
_getStyle
|
def _getStyle(node):
u"""Returns the style attribute of a node as a dictionary."""
if node.nodeType == Node.ELEMENT_NODE and len(node.getAttribute('style')) > 0:
styleMap = {}
rawStyles = node.getAttribute('style').split(';')
for style in rawStyles:
propval = style.split(':')
if len(propval) == 2:
styleMap[propval[0].strip()] = propval[1].strip()
return styleMap
else:
return {}
|
python
|
def _getStyle(node):
u"""Returns the style attribute of a node as a dictionary."""
if node.nodeType == Node.ELEMENT_NODE and len(node.getAttribute('style')) > 0:
styleMap = {}
rawStyles = node.getAttribute('style').split(';')
for style in rawStyles:
propval = style.split(':')
if len(propval) == 2:
styleMap[propval[0].strip()] = propval[1].strip()
return styleMap
else:
return {}
|
[
"def",
"_getStyle",
"(",
"node",
")",
":",
"if",
"node",
".",
"nodeType",
"==",
"Node",
".",
"ELEMENT_NODE",
"and",
"len",
"(",
"node",
".",
"getAttribute",
"(",
"'style'",
")",
")",
">",
"0",
":",
"styleMap",
"=",
"{",
"}",
"rawStyles",
"=",
"node",
".",
"getAttribute",
"(",
"'style'",
")",
".",
"split",
"(",
"';'",
")",
"for",
"style",
"in",
"rawStyles",
":",
"propval",
"=",
"style",
".",
"split",
"(",
"':'",
")",
"if",
"len",
"(",
"propval",
")",
"==",
"2",
":",
"styleMap",
"[",
"propval",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"]",
"=",
"propval",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"return",
"styleMap",
"else",
":",
"return",
"{",
"}"
] |
u"""Returns the style attribute of a node as a dictionary.
|
[
"u",
"Returns",
"the",
"style",
"attribute",
"of",
"a",
"node",
"as",
"a",
"dictionary",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L1443-L1454
|
scour-project/scour
|
scour/scour.py
|
_setStyle
|
def _setStyle(node, styleMap):
u"""Sets the style attribute of a node to the dictionary ``styleMap``."""
fixedStyle = ';'.join([prop + ':' + styleMap[prop] for prop in styleMap])
if fixedStyle != '':
node.setAttribute('style', fixedStyle)
elif node.getAttribute('style'):
node.removeAttribute('style')
return node
|
python
|
def _setStyle(node, styleMap):
u"""Sets the style attribute of a node to the dictionary ``styleMap``."""
fixedStyle = ';'.join([prop + ':' + styleMap[prop] for prop in styleMap])
if fixedStyle != '':
node.setAttribute('style', fixedStyle)
elif node.getAttribute('style'):
node.removeAttribute('style')
return node
|
[
"def",
"_setStyle",
"(",
"node",
",",
"styleMap",
")",
":",
"fixedStyle",
"=",
"';'",
".",
"join",
"(",
"[",
"prop",
"+",
"':'",
"+",
"styleMap",
"[",
"prop",
"]",
"for",
"prop",
"in",
"styleMap",
"]",
")",
"if",
"fixedStyle",
"!=",
"''",
":",
"node",
".",
"setAttribute",
"(",
"'style'",
",",
"fixedStyle",
")",
"elif",
"node",
".",
"getAttribute",
"(",
"'style'",
")",
":",
"node",
".",
"removeAttribute",
"(",
"'style'",
")",
"return",
"node"
] |
u"""Sets the style attribute of a node to the dictionary ``styleMap``.
|
[
"u",
"Sets",
"the",
"style",
"attribute",
"of",
"a",
"node",
"to",
"the",
"dictionary",
"styleMap",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L1457-L1464
|
scour-project/scour
|
scour/scour.py
|
styleInheritedFromParent
|
def styleInheritedFromParent(node, style):
"""
Returns the value of 'style' that is inherited from the parents of the passed-in node
Warning: This method only considers presentation attributes and inline styles,
any style sheets are ignored!
"""
parentNode = node.parentNode
# return None if we reached the Document element
if parentNode.nodeType == Node.DOCUMENT_NODE:
return None
# check styles first (they take precedence over presentation attributes)
styles = _getStyle(parentNode)
if style in styles:
value = styles[style]
if not value == 'inherit':
return value
# check attributes
value = parentNode.getAttribute(style)
if value not in ['', 'inherit']:
return parentNode.getAttribute(style)
# check the next parent recursively if we did not find a value yet
return styleInheritedFromParent(parentNode, style)
|
python
|
def styleInheritedFromParent(node, style):
"""
Returns the value of 'style' that is inherited from the parents of the passed-in node
Warning: This method only considers presentation attributes and inline styles,
any style sheets are ignored!
"""
parentNode = node.parentNode
# return None if we reached the Document element
if parentNode.nodeType == Node.DOCUMENT_NODE:
return None
# check styles first (they take precedence over presentation attributes)
styles = _getStyle(parentNode)
if style in styles:
value = styles[style]
if not value == 'inherit':
return value
# check attributes
value = parentNode.getAttribute(style)
if value not in ['', 'inherit']:
return parentNode.getAttribute(style)
# check the next parent recursively if we did not find a value yet
return styleInheritedFromParent(parentNode, style)
|
[
"def",
"styleInheritedFromParent",
"(",
"node",
",",
"style",
")",
":",
"parentNode",
"=",
"node",
".",
"parentNode",
"# return None if we reached the Document element",
"if",
"parentNode",
".",
"nodeType",
"==",
"Node",
".",
"DOCUMENT_NODE",
":",
"return",
"None",
"# check styles first (they take precedence over presentation attributes)",
"styles",
"=",
"_getStyle",
"(",
"parentNode",
")",
"if",
"style",
"in",
"styles",
":",
"value",
"=",
"styles",
"[",
"style",
"]",
"if",
"not",
"value",
"==",
"'inherit'",
":",
"return",
"value",
"# check attributes",
"value",
"=",
"parentNode",
".",
"getAttribute",
"(",
"style",
")",
"if",
"value",
"not",
"in",
"[",
"''",
",",
"'inherit'",
"]",
":",
"return",
"parentNode",
".",
"getAttribute",
"(",
"style",
")",
"# check the next parent recursively if we did not find a value yet",
"return",
"styleInheritedFromParent",
"(",
"parentNode",
",",
"style",
")"
] |
Returns the value of 'style' that is inherited from the parents of the passed-in node
Warning: This method only considers presentation attributes and inline styles,
any style sheets are ignored!
|
[
"Returns",
"the",
"value",
"of",
"style",
"that",
"is",
"inherited",
"from",
"the",
"parents",
"of",
"the",
"passed",
"-",
"in",
"node"
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L1601-L1627
|
scour-project/scour
|
scour/scour.py
|
styleInheritedByChild
|
def styleInheritedByChild(node, style, nodeIsChild=False):
"""
Returns whether 'style' is inherited by any children of the passed-in node
If False is returned, it is guaranteed that 'style' can safely be removed
from the passed-in node without influencing visual output of it's children
If True is returned, the passed-in node should not have its text-based
attributes removed.
Warning: This method only considers presentation attributes and inline styles,
any style sheets are ignored!
"""
# Comment, text and CDATA nodes don't have attributes and aren't containers so they can't inherit attributes
if node.nodeType != Node.ELEMENT_NODE:
return False
if nodeIsChild:
# if the current child node sets a new value for 'style'
# we can stop the search in the current branch of the DOM tree
# check attributes
if node.getAttribute(style) not in ['', 'inherit']:
return False
# check styles
styles = _getStyle(node)
if (style in styles) and not (styles[style] == 'inherit'):
return False
else:
# if the passed-in node does not have any children 'style' can obviously not be inherited
if not node.childNodes:
return False
# If we have child nodes recursively check those
if node.childNodes:
for child in node.childNodes:
if styleInheritedByChild(child, style, True):
return True
# If the current element is a container element the inherited style is meaningless
# (since we made sure it's not inherited by any of its children)
if node.nodeName in ['a', 'defs', 'glyph', 'g', 'marker', 'mask',
'missing-glyph', 'pattern', 'svg', 'switch', 'symbol']:
return False
# in all other cases we have to assume the inherited value of 'style' is meaningfull and has to be kept
# (e.g nodes without children at the end of the DOM tree, text nodes, ...)
return True
|
python
|
def styleInheritedByChild(node, style, nodeIsChild=False):
"""
Returns whether 'style' is inherited by any children of the passed-in node
If False is returned, it is guaranteed that 'style' can safely be removed
from the passed-in node without influencing visual output of it's children
If True is returned, the passed-in node should not have its text-based
attributes removed.
Warning: This method only considers presentation attributes and inline styles,
any style sheets are ignored!
"""
# Comment, text and CDATA nodes don't have attributes and aren't containers so they can't inherit attributes
if node.nodeType != Node.ELEMENT_NODE:
return False
if nodeIsChild:
# if the current child node sets a new value for 'style'
# we can stop the search in the current branch of the DOM tree
# check attributes
if node.getAttribute(style) not in ['', 'inherit']:
return False
# check styles
styles = _getStyle(node)
if (style in styles) and not (styles[style] == 'inherit'):
return False
else:
# if the passed-in node does not have any children 'style' can obviously not be inherited
if not node.childNodes:
return False
# If we have child nodes recursively check those
if node.childNodes:
for child in node.childNodes:
if styleInheritedByChild(child, style, True):
return True
# If the current element is a container element the inherited style is meaningless
# (since we made sure it's not inherited by any of its children)
if node.nodeName in ['a', 'defs', 'glyph', 'g', 'marker', 'mask',
'missing-glyph', 'pattern', 'svg', 'switch', 'symbol']:
return False
# in all other cases we have to assume the inherited value of 'style' is meaningfull and has to be kept
# (e.g nodes without children at the end of the DOM tree, text nodes, ...)
return True
|
[
"def",
"styleInheritedByChild",
"(",
"node",
",",
"style",
",",
"nodeIsChild",
"=",
"False",
")",
":",
"# Comment, text and CDATA nodes don't have attributes and aren't containers so they can't inherit attributes",
"if",
"node",
".",
"nodeType",
"!=",
"Node",
".",
"ELEMENT_NODE",
":",
"return",
"False",
"if",
"nodeIsChild",
":",
"# if the current child node sets a new value for 'style'",
"# we can stop the search in the current branch of the DOM tree",
"# check attributes",
"if",
"node",
".",
"getAttribute",
"(",
"style",
")",
"not",
"in",
"[",
"''",
",",
"'inherit'",
"]",
":",
"return",
"False",
"# check styles",
"styles",
"=",
"_getStyle",
"(",
"node",
")",
"if",
"(",
"style",
"in",
"styles",
")",
"and",
"not",
"(",
"styles",
"[",
"style",
"]",
"==",
"'inherit'",
")",
":",
"return",
"False",
"else",
":",
"# if the passed-in node does not have any children 'style' can obviously not be inherited",
"if",
"not",
"node",
".",
"childNodes",
":",
"return",
"False",
"# If we have child nodes recursively check those",
"if",
"node",
".",
"childNodes",
":",
"for",
"child",
"in",
"node",
".",
"childNodes",
":",
"if",
"styleInheritedByChild",
"(",
"child",
",",
"style",
",",
"True",
")",
":",
"return",
"True",
"# If the current element is a container element the inherited style is meaningless",
"# (since we made sure it's not inherited by any of its children)",
"if",
"node",
".",
"nodeName",
"in",
"[",
"'a'",
",",
"'defs'",
",",
"'glyph'",
",",
"'g'",
",",
"'marker'",
",",
"'mask'",
",",
"'missing-glyph'",
",",
"'pattern'",
",",
"'svg'",
",",
"'switch'",
",",
"'symbol'",
"]",
":",
"return",
"False",
"# in all other cases we have to assume the inherited value of 'style' is meaningfull and has to be kept",
"# (e.g nodes without children at the end of the DOM tree, text nodes, ...)",
"return",
"True"
] |
Returns whether 'style' is inherited by any children of the passed-in node
If False is returned, it is guaranteed that 'style' can safely be removed
from the passed-in node without influencing visual output of it's children
If True is returned, the passed-in node should not have its text-based
attributes removed.
Warning: This method only considers presentation attributes and inline styles,
any style sheets are ignored!
|
[
"Returns",
"whether",
"style",
"is",
"inherited",
"by",
"any",
"children",
"of",
"the",
"passed",
"-",
"in",
"node"
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L1630-L1677
|
scour-project/scour
|
scour/scour.py
|
mayContainTextNodes
|
def mayContainTextNodes(node):
"""
Returns True if the passed-in node is probably a text element, or at least
one of its descendants is probably a text element.
If False is returned, it is guaranteed that the passed-in node has no
business having text-based attributes.
If True is returned, the passed-in node should not have its text-based
attributes removed.
"""
# Cached result of a prior call?
try:
return node.mayContainTextNodes
except AttributeError:
pass
result = True # Default value
# Comment, text and CDATA nodes don't have attributes and aren't containers
if node.nodeType != Node.ELEMENT_NODE:
result = False
# Non-SVG elements? Unknown elements!
elif node.namespaceURI != NS['SVG']:
result = True
# Blacklisted elements. Those are guaranteed not to be text elements.
elif node.nodeName in ['rect', 'circle', 'ellipse', 'line', 'polygon',
'polyline', 'path', 'image', 'stop']:
result = False
# Group elements. If we're missing any here, the default of True is used.
elif node.nodeName in ['g', 'clipPath', 'marker', 'mask', 'pattern',
'linearGradient', 'radialGradient', 'symbol']:
result = False
for child in node.childNodes:
if mayContainTextNodes(child):
result = True
# Everything else should be considered a future SVG-version text element
# at best, or an unknown element at worst. result will stay True.
# Cache this result before returning it.
node.mayContainTextNodes = result
return result
|
python
|
def mayContainTextNodes(node):
"""
Returns True if the passed-in node is probably a text element, or at least
one of its descendants is probably a text element.
If False is returned, it is guaranteed that the passed-in node has no
business having text-based attributes.
If True is returned, the passed-in node should not have its text-based
attributes removed.
"""
# Cached result of a prior call?
try:
return node.mayContainTextNodes
except AttributeError:
pass
result = True # Default value
# Comment, text and CDATA nodes don't have attributes and aren't containers
if node.nodeType != Node.ELEMENT_NODE:
result = False
# Non-SVG elements? Unknown elements!
elif node.namespaceURI != NS['SVG']:
result = True
# Blacklisted elements. Those are guaranteed not to be text elements.
elif node.nodeName in ['rect', 'circle', 'ellipse', 'line', 'polygon',
'polyline', 'path', 'image', 'stop']:
result = False
# Group elements. If we're missing any here, the default of True is used.
elif node.nodeName in ['g', 'clipPath', 'marker', 'mask', 'pattern',
'linearGradient', 'radialGradient', 'symbol']:
result = False
for child in node.childNodes:
if mayContainTextNodes(child):
result = True
# Everything else should be considered a future SVG-version text element
# at best, or an unknown element at worst. result will stay True.
# Cache this result before returning it.
node.mayContainTextNodes = result
return result
|
[
"def",
"mayContainTextNodes",
"(",
"node",
")",
":",
"# Cached result of a prior call?",
"try",
":",
"return",
"node",
".",
"mayContainTextNodes",
"except",
"AttributeError",
":",
"pass",
"result",
"=",
"True",
"# Default value",
"# Comment, text and CDATA nodes don't have attributes and aren't containers",
"if",
"node",
".",
"nodeType",
"!=",
"Node",
".",
"ELEMENT_NODE",
":",
"result",
"=",
"False",
"# Non-SVG elements? Unknown elements!",
"elif",
"node",
".",
"namespaceURI",
"!=",
"NS",
"[",
"'SVG'",
"]",
":",
"result",
"=",
"True",
"# Blacklisted elements. Those are guaranteed not to be text elements.",
"elif",
"node",
".",
"nodeName",
"in",
"[",
"'rect'",
",",
"'circle'",
",",
"'ellipse'",
",",
"'line'",
",",
"'polygon'",
",",
"'polyline'",
",",
"'path'",
",",
"'image'",
",",
"'stop'",
"]",
":",
"result",
"=",
"False",
"# Group elements. If we're missing any here, the default of True is used.",
"elif",
"node",
".",
"nodeName",
"in",
"[",
"'g'",
",",
"'clipPath'",
",",
"'marker'",
",",
"'mask'",
",",
"'pattern'",
",",
"'linearGradient'",
",",
"'radialGradient'",
",",
"'symbol'",
"]",
":",
"result",
"=",
"False",
"for",
"child",
"in",
"node",
".",
"childNodes",
":",
"if",
"mayContainTextNodes",
"(",
"child",
")",
":",
"result",
"=",
"True",
"# Everything else should be considered a future SVG-version text element",
"# at best, or an unknown element at worst. result will stay True.",
"# Cache this result before returning it.",
"node",
".",
"mayContainTextNodes",
"=",
"result",
"return",
"result"
] |
Returns True if the passed-in node is probably a text element, or at least
one of its descendants is probably a text element.
If False is returned, it is guaranteed that the passed-in node has no
business having text-based attributes.
If True is returned, the passed-in node should not have its text-based
attributes removed.
|
[
"Returns",
"True",
"if",
"the",
"passed",
"-",
"in",
"node",
"is",
"probably",
"a",
"text",
"element",
"or",
"at",
"least",
"one",
"of",
"its",
"descendants",
"is",
"probably",
"a",
"text",
"element",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L1680-L1720
|
scour-project/scour
|
scour/scour.py
|
taint
|
def taint(taintedSet, taintedAttribute):
u"""Adds an attribute to a set of attributes.
Related attributes are also included."""
taintedSet.add(taintedAttribute)
if taintedAttribute == 'marker':
taintedSet |= set(['marker-start', 'marker-mid', 'marker-end'])
if taintedAttribute in ['marker-start', 'marker-mid', 'marker-end']:
taintedSet.add('marker')
return taintedSet
|
python
|
def taint(taintedSet, taintedAttribute):
u"""Adds an attribute to a set of attributes.
Related attributes are also included."""
taintedSet.add(taintedAttribute)
if taintedAttribute == 'marker':
taintedSet |= set(['marker-start', 'marker-mid', 'marker-end'])
if taintedAttribute in ['marker-start', 'marker-mid', 'marker-end']:
taintedSet.add('marker')
return taintedSet
|
[
"def",
"taint",
"(",
"taintedSet",
",",
"taintedAttribute",
")",
":",
"taintedSet",
".",
"add",
"(",
"taintedAttribute",
")",
"if",
"taintedAttribute",
"==",
"'marker'",
":",
"taintedSet",
"|=",
"set",
"(",
"[",
"'marker-start'",
",",
"'marker-mid'",
",",
"'marker-end'",
"]",
")",
"if",
"taintedAttribute",
"in",
"[",
"'marker-start'",
",",
"'marker-mid'",
",",
"'marker-end'",
"]",
":",
"taintedSet",
".",
"add",
"(",
"'marker'",
")",
"return",
"taintedSet"
] |
u"""Adds an attribute to a set of attributes.
Related attributes are also included.
|
[
"u",
"Adds",
"an",
"attribute",
"to",
"a",
"set",
"of",
"attributes",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L1885-L1894
|
scour-project/scour
|
scour/scour.py
|
removeDefaultAttributeValue
|
def removeDefaultAttributeValue(node, attribute):
"""
Removes the DefaultAttribute 'attribute' from 'node' if specified conditions are fulfilled
Warning: Does NOT check if the attribute is actually valid for the passed element type for increased preformance!
"""
if not node.hasAttribute(attribute.name):
return 0
# differentiate between text and numeric values
if isinstance(attribute.value, str):
if node.getAttribute(attribute.name) == attribute.value:
if (attribute.conditions is None) or attribute.conditions(node):
node.removeAttribute(attribute.name)
return 1
else:
nodeValue = SVGLength(node.getAttribute(attribute.name))
if ((attribute.value is None)
or ((nodeValue.value == attribute.value) and not (nodeValue.units == Unit.INVALID))):
if ((attribute.units is None)
or (nodeValue.units == attribute.units)
or (isinstance(attribute.units, list) and nodeValue.units in attribute.units)):
if (attribute.conditions is None) or attribute.conditions(node):
node.removeAttribute(attribute.name)
return 1
return 0
|
python
|
def removeDefaultAttributeValue(node, attribute):
"""
Removes the DefaultAttribute 'attribute' from 'node' if specified conditions are fulfilled
Warning: Does NOT check if the attribute is actually valid for the passed element type for increased preformance!
"""
if not node.hasAttribute(attribute.name):
return 0
# differentiate between text and numeric values
if isinstance(attribute.value, str):
if node.getAttribute(attribute.name) == attribute.value:
if (attribute.conditions is None) or attribute.conditions(node):
node.removeAttribute(attribute.name)
return 1
else:
nodeValue = SVGLength(node.getAttribute(attribute.name))
if ((attribute.value is None)
or ((nodeValue.value == attribute.value) and not (nodeValue.units == Unit.INVALID))):
if ((attribute.units is None)
or (nodeValue.units == attribute.units)
or (isinstance(attribute.units, list) and nodeValue.units in attribute.units)):
if (attribute.conditions is None) or attribute.conditions(node):
node.removeAttribute(attribute.name)
return 1
return 0
|
[
"def",
"removeDefaultAttributeValue",
"(",
"node",
",",
"attribute",
")",
":",
"if",
"not",
"node",
".",
"hasAttribute",
"(",
"attribute",
".",
"name",
")",
":",
"return",
"0",
"# differentiate between text and numeric values",
"if",
"isinstance",
"(",
"attribute",
".",
"value",
",",
"str",
")",
":",
"if",
"node",
".",
"getAttribute",
"(",
"attribute",
".",
"name",
")",
"==",
"attribute",
".",
"value",
":",
"if",
"(",
"attribute",
".",
"conditions",
"is",
"None",
")",
"or",
"attribute",
".",
"conditions",
"(",
"node",
")",
":",
"node",
".",
"removeAttribute",
"(",
"attribute",
".",
"name",
")",
"return",
"1",
"else",
":",
"nodeValue",
"=",
"SVGLength",
"(",
"node",
".",
"getAttribute",
"(",
"attribute",
".",
"name",
")",
")",
"if",
"(",
"(",
"attribute",
".",
"value",
"is",
"None",
")",
"or",
"(",
"(",
"nodeValue",
".",
"value",
"==",
"attribute",
".",
"value",
")",
"and",
"not",
"(",
"nodeValue",
".",
"units",
"==",
"Unit",
".",
"INVALID",
")",
")",
")",
":",
"if",
"(",
"(",
"attribute",
".",
"units",
"is",
"None",
")",
"or",
"(",
"nodeValue",
".",
"units",
"==",
"attribute",
".",
"units",
")",
"or",
"(",
"isinstance",
"(",
"attribute",
".",
"units",
",",
"list",
")",
"and",
"nodeValue",
".",
"units",
"in",
"attribute",
".",
"units",
")",
")",
":",
"if",
"(",
"attribute",
".",
"conditions",
"is",
"None",
")",
"or",
"attribute",
".",
"conditions",
"(",
"node",
")",
":",
"node",
".",
"removeAttribute",
"(",
"attribute",
".",
"name",
")",
"return",
"1",
"return",
"0"
] |
Removes the DefaultAttribute 'attribute' from 'node' if specified conditions are fulfilled
Warning: Does NOT check if the attribute is actually valid for the passed element type for increased preformance!
|
[
"Removes",
"the",
"DefaultAttribute",
"attribute",
"from",
"node",
"if",
"specified",
"conditions",
"are",
"fulfilled"
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L1897-L1923
|
scour-project/scour
|
scour/scour.py
|
removeDefaultAttributeValues
|
def removeDefaultAttributeValues(node, options, tainted=set()):
u"""'tainted' keeps a set of attributes defined in parent nodes.
For such attributes, we don't delete attributes with default values."""
num = 0
if node.nodeType != Node.ELEMENT_NODE:
return 0
# Conditionally remove all default attributes defined in 'default_attributes' (a list of 'DefaultAttribute's)
#
# For increased performance do not iterate the whole list for each element but run only on valid subsets
# - 'default_attributes_universal' (attributes valid for all elements)
# - 'default_attributes_per_element' (attributes specific to one specific element type)
for attribute in default_attributes_universal:
num += removeDefaultAttributeValue(node, attribute)
if node.nodeName in default_attributes_per_element:
for attribute in default_attributes_per_element[node.nodeName]:
num += removeDefaultAttributeValue(node, attribute)
# Summarily get rid of default properties
attributes = [node.attributes.item(i).nodeName for i in range(node.attributes.length)]
for attribute in attributes:
if attribute not in tainted:
if attribute in default_properties:
if node.getAttribute(attribute) == default_properties[attribute]:
node.removeAttribute(attribute)
num += 1
else:
tainted = taint(tainted, attribute)
# Properties might also occur as styles, remove them too
styles = _getStyle(node)
for attribute in list(styles):
if attribute not in tainted:
if attribute in default_properties:
if styles[attribute] == default_properties[attribute]:
del styles[attribute]
num += 1
else:
tainted = taint(tainted, attribute)
_setStyle(node, styles)
# recurse for our child elements
for child in node.childNodes:
num += removeDefaultAttributeValues(child, options, tainted.copy())
return num
|
python
|
def removeDefaultAttributeValues(node, options, tainted=set()):
u"""'tainted' keeps a set of attributes defined in parent nodes.
For such attributes, we don't delete attributes with default values."""
num = 0
if node.nodeType != Node.ELEMENT_NODE:
return 0
# Conditionally remove all default attributes defined in 'default_attributes' (a list of 'DefaultAttribute's)
#
# For increased performance do not iterate the whole list for each element but run only on valid subsets
# - 'default_attributes_universal' (attributes valid for all elements)
# - 'default_attributes_per_element' (attributes specific to one specific element type)
for attribute in default_attributes_universal:
num += removeDefaultAttributeValue(node, attribute)
if node.nodeName in default_attributes_per_element:
for attribute in default_attributes_per_element[node.nodeName]:
num += removeDefaultAttributeValue(node, attribute)
# Summarily get rid of default properties
attributes = [node.attributes.item(i).nodeName for i in range(node.attributes.length)]
for attribute in attributes:
if attribute not in tainted:
if attribute in default_properties:
if node.getAttribute(attribute) == default_properties[attribute]:
node.removeAttribute(attribute)
num += 1
else:
tainted = taint(tainted, attribute)
# Properties might also occur as styles, remove them too
styles = _getStyle(node)
for attribute in list(styles):
if attribute not in tainted:
if attribute in default_properties:
if styles[attribute] == default_properties[attribute]:
del styles[attribute]
num += 1
else:
tainted = taint(tainted, attribute)
_setStyle(node, styles)
# recurse for our child elements
for child in node.childNodes:
num += removeDefaultAttributeValues(child, options, tainted.copy())
return num
|
[
"def",
"removeDefaultAttributeValues",
"(",
"node",
",",
"options",
",",
"tainted",
"=",
"set",
"(",
")",
")",
":",
"num",
"=",
"0",
"if",
"node",
".",
"nodeType",
"!=",
"Node",
".",
"ELEMENT_NODE",
":",
"return",
"0",
"# Conditionally remove all default attributes defined in 'default_attributes' (a list of 'DefaultAttribute's)",
"#",
"# For increased performance do not iterate the whole list for each element but run only on valid subsets",
"# - 'default_attributes_universal' (attributes valid for all elements)",
"# - 'default_attributes_per_element' (attributes specific to one specific element type)",
"for",
"attribute",
"in",
"default_attributes_universal",
":",
"num",
"+=",
"removeDefaultAttributeValue",
"(",
"node",
",",
"attribute",
")",
"if",
"node",
".",
"nodeName",
"in",
"default_attributes_per_element",
":",
"for",
"attribute",
"in",
"default_attributes_per_element",
"[",
"node",
".",
"nodeName",
"]",
":",
"num",
"+=",
"removeDefaultAttributeValue",
"(",
"node",
",",
"attribute",
")",
"# Summarily get rid of default properties",
"attributes",
"=",
"[",
"node",
".",
"attributes",
".",
"item",
"(",
"i",
")",
".",
"nodeName",
"for",
"i",
"in",
"range",
"(",
"node",
".",
"attributes",
".",
"length",
")",
"]",
"for",
"attribute",
"in",
"attributes",
":",
"if",
"attribute",
"not",
"in",
"tainted",
":",
"if",
"attribute",
"in",
"default_properties",
":",
"if",
"node",
".",
"getAttribute",
"(",
"attribute",
")",
"==",
"default_properties",
"[",
"attribute",
"]",
":",
"node",
".",
"removeAttribute",
"(",
"attribute",
")",
"num",
"+=",
"1",
"else",
":",
"tainted",
"=",
"taint",
"(",
"tainted",
",",
"attribute",
")",
"# Properties might also occur as styles, remove them too",
"styles",
"=",
"_getStyle",
"(",
"node",
")",
"for",
"attribute",
"in",
"list",
"(",
"styles",
")",
":",
"if",
"attribute",
"not",
"in",
"tainted",
":",
"if",
"attribute",
"in",
"default_properties",
":",
"if",
"styles",
"[",
"attribute",
"]",
"==",
"default_properties",
"[",
"attribute",
"]",
":",
"del",
"styles",
"[",
"attribute",
"]",
"num",
"+=",
"1",
"else",
":",
"tainted",
"=",
"taint",
"(",
"tainted",
",",
"attribute",
")",
"_setStyle",
"(",
"node",
",",
"styles",
")",
"# recurse for our child elements",
"for",
"child",
"in",
"node",
".",
"childNodes",
":",
"num",
"+=",
"removeDefaultAttributeValues",
"(",
"child",
",",
"options",
",",
"tainted",
".",
"copy",
"(",
")",
")",
"return",
"num"
] |
u"""'tainted' keeps a set of attributes defined in parent nodes.
For such attributes, we don't delete attributes with default values.
|
[
"u",
"tainted",
"keeps",
"a",
"set",
"of",
"attributes",
"defined",
"in",
"parent",
"nodes",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L1926-L1971
|
scour-project/scour
|
scour/scour.py
|
convertColor
|
def convertColor(value):
"""
Converts the input color string and returns a #RRGGBB (or #RGB if possible) string
"""
s = value
if s in colors:
s = colors[s]
rgbpMatch = rgbp.match(s)
if rgbpMatch is not None:
r = int(float(rgbpMatch.group(1)) * 255.0 / 100.0)
g = int(float(rgbpMatch.group(2)) * 255.0 / 100.0)
b = int(float(rgbpMatch.group(3)) * 255.0 / 100.0)
s = '#%02x%02x%02x' % (r, g, b)
else:
rgbMatch = rgb.match(s)
if rgbMatch is not None:
r = int(rgbMatch.group(1))
g = int(rgbMatch.group(2))
b = int(rgbMatch.group(3))
s = '#%02x%02x%02x' % (r, g, b)
if s[0] == '#':
s = s.lower()
if len(s) == 7 and s[1] == s[2] and s[3] == s[4] and s[5] == s[6]:
s = '#' + s[1] + s[3] + s[5]
return s
|
python
|
def convertColor(value):
"""
Converts the input color string and returns a #RRGGBB (or #RGB if possible) string
"""
s = value
if s in colors:
s = colors[s]
rgbpMatch = rgbp.match(s)
if rgbpMatch is not None:
r = int(float(rgbpMatch.group(1)) * 255.0 / 100.0)
g = int(float(rgbpMatch.group(2)) * 255.0 / 100.0)
b = int(float(rgbpMatch.group(3)) * 255.0 / 100.0)
s = '#%02x%02x%02x' % (r, g, b)
else:
rgbMatch = rgb.match(s)
if rgbMatch is not None:
r = int(rgbMatch.group(1))
g = int(rgbMatch.group(2))
b = int(rgbMatch.group(3))
s = '#%02x%02x%02x' % (r, g, b)
if s[0] == '#':
s = s.lower()
if len(s) == 7 and s[1] == s[2] and s[3] == s[4] and s[5] == s[6]:
s = '#' + s[1] + s[3] + s[5]
return s
|
[
"def",
"convertColor",
"(",
"value",
")",
":",
"s",
"=",
"value",
"if",
"s",
"in",
"colors",
":",
"s",
"=",
"colors",
"[",
"s",
"]",
"rgbpMatch",
"=",
"rgbp",
".",
"match",
"(",
"s",
")",
"if",
"rgbpMatch",
"is",
"not",
"None",
":",
"r",
"=",
"int",
"(",
"float",
"(",
"rgbpMatch",
".",
"group",
"(",
"1",
")",
")",
"*",
"255.0",
"/",
"100.0",
")",
"g",
"=",
"int",
"(",
"float",
"(",
"rgbpMatch",
".",
"group",
"(",
"2",
")",
")",
"*",
"255.0",
"/",
"100.0",
")",
"b",
"=",
"int",
"(",
"float",
"(",
"rgbpMatch",
".",
"group",
"(",
"3",
")",
")",
"*",
"255.0",
"/",
"100.0",
")",
"s",
"=",
"'#%02x%02x%02x'",
"%",
"(",
"r",
",",
"g",
",",
"b",
")",
"else",
":",
"rgbMatch",
"=",
"rgb",
".",
"match",
"(",
"s",
")",
"if",
"rgbMatch",
"is",
"not",
"None",
":",
"r",
"=",
"int",
"(",
"rgbMatch",
".",
"group",
"(",
"1",
")",
")",
"g",
"=",
"int",
"(",
"rgbMatch",
".",
"group",
"(",
"2",
")",
")",
"b",
"=",
"int",
"(",
"rgbMatch",
".",
"group",
"(",
"3",
")",
")",
"s",
"=",
"'#%02x%02x%02x'",
"%",
"(",
"r",
",",
"g",
",",
"b",
")",
"if",
"s",
"[",
"0",
"]",
"==",
"'#'",
":",
"s",
"=",
"s",
".",
"lower",
"(",
")",
"if",
"len",
"(",
"s",
")",
"==",
"7",
"and",
"s",
"[",
"1",
"]",
"==",
"s",
"[",
"2",
"]",
"and",
"s",
"[",
"3",
"]",
"==",
"s",
"[",
"4",
"]",
"and",
"s",
"[",
"5",
"]",
"==",
"s",
"[",
"6",
"]",
":",
"s",
"=",
"'#'",
"+",
"s",
"[",
"1",
"]",
"+",
"s",
"[",
"3",
"]",
"+",
"s",
"[",
"5",
"]",
"return",
"s"
] |
Converts the input color string and returns a #RRGGBB (or #RGB if possible) string
|
[
"Converts",
"the",
"input",
"color",
"string",
"and",
"returns",
"a",
"#RRGGBB",
"(",
"or",
"#RGB",
"if",
"possible",
")",
"string"
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L1978-L2006
|
scour-project/scour
|
scour/scour.py
|
convertColors
|
def convertColors(element):
"""
Recursively converts all color properties into #RRGGBB format if shorter
"""
numBytes = 0
if element.nodeType != Node.ELEMENT_NODE:
return 0
# set up list of color attributes for each element type
attrsToConvert = []
if element.nodeName in ['rect', 'circle', 'ellipse', 'polygon',
'line', 'polyline', 'path', 'g', 'a']:
attrsToConvert = ['fill', 'stroke']
elif element.nodeName in ['stop']:
attrsToConvert = ['stop-color']
elif element.nodeName in ['solidColor']:
attrsToConvert = ['solid-color']
# now convert all the color formats
styles = _getStyle(element)
for attr in attrsToConvert:
oldColorValue = element.getAttribute(attr)
if oldColorValue != '':
newColorValue = convertColor(oldColorValue)
oldBytes = len(oldColorValue)
newBytes = len(newColorValue)
if oldBytes > newBytes:
element.setAttribute(attr, newColorValue)
numBytes += (oldBytes - len(element.getAttribute(attr)))
# colors might also hide in styles
if attr in styles:
oldColorValue = styles[attr]
newColorValue = convertColor(oldColorValue)
oldBytes = len(oldColorValue)
newBytes = len(newColorValue)
if oldBytes > newBytes:
styles[attr] = newColorValue
numBytes += (oldBytes - len(element.getAttribute(attr)))
_setStyle(element, styles)
# now recurse for our child elements
for child in element.childNodes:
numBytes += convertColors(child)
return numBytes
|
python
|
def convertColors(element):
"""
Recursively converts all color properties into #RRGGBB format if shorter
"""
numBytes = 0
if element.nodeType != Node.ELEMENT_NODE:
return 0
# set up list of color attributes for each element type
attrsToConvert = []
if element.nodeName in ['rect', 'circle', 'ellipse', 'polygon',
'line', 'polyline', 'path', 'g', 'a']:
attrsToConvert = ['fill', 'stroke']
elif element.nodeName in ['stop']:
attrsToConvert = ['stop-color']
elif element.nodeName in ['solidColor']:
attrsToConvert = ['solid-color']
# now convert all the color formats
styles = _getStyle(element)
for attr in attrsToConvert:
oldColorValue = element.getAttribute(attr)
if oldColorValue != '':
newColorValue = convertColor(oldColorValue)
oldBytes = len(oldColorValue)
newBytes = len(newColorValue)
if oldBytes > newBytes:
element.setAttribute(attr, newColorValue)
numBytes += (oldBytes - len(element.getAttribute(attr)))
# colors might also hide in styles
if attr in styles:
oldColorValue = styles[attr]
newColorValue = convertColor(oldColorValue)
oldBytes = len(oldColorValue)
newBytes = len(newColorValue)
if oldBytes > newBytes:
styles[attr] = newColorValue
numBytes += (oldBytes - len(element.getAttribute(attr)))
_setStyle(element, styles)
# now recurse for our child elements
for child in element.childNodes:
numBytes += convertColors(child)
return numBytes
|
[
"def",
"convertColors",
"(",
"element",
")",
":",
"numBytes",
"=",
"0",
"if",
"element",
".",
"nodeType",
"!=",
"Node",
".",
"ELEMENT_NODE",
":",
"return",
"0",
"# set up list of color attributes for each element type",
"attrsToConvert",
"=",
"[",
"]",
"if",
"element",
".",
"nodeName",
"in",
"[",
"'rect'",
",",
"'circle'",
",",
"'ellipse'",
",",
"'polygon'",
",",
"'line'",
",",
"'polyline'",
",",
"'path'",
",",
"'g'",
",",
"'a'",
"]",
":",
"attrsToConvert",
"=",
"[",
"'fill'",
",",
"'stroke'",
"]",
"elif",
"element",
".",
"nodeName",
"in",
"[",
"'stop'",
"]",
":",
"attrsToConvert",
"=",
"[",
"'stop-color'",
"]",
"elif",
"element",
".",
"nodeName",
"in",
"[",
"'solidColor'",
"]",
":",
"attrsToConvert",
"=",
"[",
"'solid-color'",
"]",
"# now convert all the color formats",
"styles",
"=",
"_getStyle",
"(",
"element",
")",
"for",
"attr",
"in",
"attrsToConvert",
":",
"oldColorValue",
"=",
"element",
".",
"getAttribute",
"(",
"attr",
")",
"if",
"oldColorValue",
"!=",
"''",
":",
"newColorValue",
"=",
"convertColor",
"(",
"oldColorValue",
")",
"oldBytes",
"=",
"len",
"(",
"oldColorValue",
")",
"newBytes",
"=",
"len",
"(",
"newColorValue",
")",
"if",
"oldBytes",
">",
"newBytes",
":",
"element",
".",
"setAttribute",
"(",
"attr",
",",
"newColorValue",
")",
"numBytes",
"+=",
"(",
"oldBytes",
"-",
"len",
"(",
"element",
".",
"getAttribute",
"(",
"attr",
")",
")",
")",
"# colors might also hide in styles",
"if",
"attr",
"in",
"styles",
":",
"oldColorValue",
"=",
"styles",
"[",
"attr",
"]",
"newColorValue",
"=",
"convertColor",
"(",
"oldColorValue",
")",
"oldBytes",
"=",
"len",
"(",
"oldColorValue",
")",
"newBytes",
"=",
"len",
"(",
"newColorValue",
")",
"if",
"oldBytes",
">",
"newBytes",
":",
"styles",
"[",
"attr",
"]",
"=",
"newColorValue",
"numBytes",
"+=",
"(",
"oldBytes",
"-",
"len",
"(",
"element",
".",
"getAttribute",
"(",
"attr",
")",
")",
")",
"_setStyle",
"(",
"element",
",",
"styles",
")",
"# now recurse for our child elements",
"for",
"child",
"in",
"element",
".",
"childNodes",
":",
"numBytes",
"+=",
"convertColors",
"(",
"child",
")",
"return",
"numBytes"
] |
Recursively converts all color properties into #RRGGBB format if shorter
|
[
"Recursively",
"converts",
"all",
"color",
"properties",
"into",
"#RRGGBB",
"format",
"if",
"shorter"
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2009-L2054
|
scour-project/scour
|
scour/scour.py
|
cleanPath
|
def cleanPath(element, options):
"""
Cleans the path string (d attribute) of the element
"""
global _num_bytes_saved_in_path_data
global _num_path_segments_removed
# this gets the parser object from svg_regex.py
oldPathStr = element.getAttribute('d')
path = svg_parser.parse(oldPathStr)
style = _getStyle(element)
# This determines whether the stroke has round or square linecaps. If it does, we do not want to collapse empty
# segments, as they are actually rendered (as circles or squares with diameter/dimension matching the path-width).
has_round_or_square_linecaps = (
element.getAttribute('stroke-linecap') in ['round', 'square']
or 'stroke-linecap' in style and style['stroke-linecap'] in ['round', 'square']
)
# This determines whether the stroke has intermediate markers. If it does, we do not want to collapse
# straight segments running in the same direction, as markers are rendered on the intermediate nodes.
has_intermediate_markers = (
element.hasAttribute('marker')
or element.hasAttribute('marker-mid')
or 'marker' in style
or 'marker-mid' in style
)
# The first command must be a moveto, and whether it's relative (m)
# or absolute (M), the first set of coordinates *is* absolute. So
# the first iteration of the loop below will get x,y and startx,starty.
# convert absolute coordinates into relative ones.
# Reuse the data structure 'path', since we're not adding or removing subcommands.
# Also reuse the coordinate lists since we're not adding or removing any.
x = y = 0
for pathIndex in range(len(path)):
cmd, data = path[pathIndex] # Changes to cmd don't get through to the data structure
i = 0
# adjust abs to rel
# only the A command has some values that we don't want to adjust (radii, rotation, flags)
if cmd == 'A':
for i in range(i, len(data), 7):
data[i + 5] -= x
data[i + 6] -= y
x += data[i + 5]
y += data[i + 6]
path[pathIndex] = ('a', data)
elif cmd == 'a':
x += sum(data[5::7])
y += sum(data[6::7])
elif cmd == 'H':
for i in range(i, len(data)):
data[i] -= x
x += data[i]
path[pathIndex] = ('h', data)
elif cmd == 'h':
x += sum(data)
elif cmd == 'V':
for i in range(i, len(data)):
data[i] -= y
y += data[i]
path[pathIndex] = ('v', data)
elif cmd == 'v':
y += sum(data)
elif cmd == 'M':
startx, starty = data[0], data[1]
# If this is a path starter, don't convert its first
# coordinate to relative; that would just make it (0, 0)
if pathIndex != 0:
data[0] -= x
data[1] -= y
x, y = startx, starty
i = 2
for i in range(i, len(data), 2):
data[i] -= x
data[i + 1] -= y
x += data[i]
y += data[i + 1]
path[pathIndex] = ('m', data)
elif cmd in ['L', 'T']:
for i in range(i, len(data), 2):
data[i] -= x
data[i + 1] -= y
x += data[i]
y += data[i + 1]
path[pathIndex] = (cmd.lower(), data)
elif cmd in ['m']:
if pathIndex == 0:
# START OF PATH - this is an absolute moveto
# followed by relative linetos
startx, starty = data[0], data[1]
x, y = startx, starty
i = 2
else:
startx = x + data[0]
starty = y + data[1]
for i in range(i, len(data), 2):
x += data[i]
y += data[i + 1]
elif cmd in ['l', 't']:
x += sum(data[0::2])
y += sum(data[1::2])
elif cmd in ['S', 'Q']:
for i in range(i, len(data), 4):
data[i] -= x
data[i + 1] -= y
data[i + 2] -= x
data[i + 3] -= y
x += data[i + 2]
y += data[i + 3]
path[pathIndex] = (cmd.lower(), data)
elif cmd in ['s', 'q']:
x += sum(data[2::4])
y += sum(data[3::4])
elif cmd == 'C':
for i in range(i, len(data), 6):
data[i] -= x
data[i + 1] -= y
data[i + 2] -= x
data[i + 3] -= y
data[i + 4] -= x
data[i + 5] -= y
x += data[i + 4]
y += data[i + 5]
path[pathIndex] = ('c', data)
elif cmd == 'c':
x += sum(data[4::6])
y += sum(data[5::6])
elif cmd in ['z', 'Z']:
x, y = startx, starty
path[pathIndex] = ('z', data)
# remove empty segments and redundant commands
# Reuse the data structure 'path' and the coordinate lists, even if we're
# deleting items, because these deletions are relatively cheap.
if not has_round_or_square_linecaps:
# remove empty path segments
for pathIndex in range(len(path)):
cmd, data = path[pathIndex]
i = 0
if cmd in ['m', 'l', 't']:
if cmd == 'm':
# It might be tempting to rewrite "m0 0 ..." into
# "l..." here. However, this is an unsound
# optimization in general as "m0 0 ... z" is
# different from "l...z".
#
# To do such a rewrite, we need to understand the
# full subpath. This logic happens after this
# loop.
i = 2
while i < len(data):
if data[i] == data[i + 1] == 0:
del data[i:i + 2]
_num_path_segments_removed += 1
else:
i += 2
elif cmd == 'c':
while i < len(data):
if data[i] == data[i + 1] == data[i + 2] == data[i + 3] == data[i + 4] == data[i + 5] == 0:
del data[i:i + 6]
_num_path_segments_removed += 1
else:
i += 6
elif cmd == 'a':
while i < len(data):
if data[i + 5] == data[i + 6] == 0:
del data[i:i + 7]
_num_path_segments_removed += 1
else:
i += 7
elif cmd == 'q':
while i < len(data):
if data[i] == data[i + 1] == data[i + 2] == data[i + 3] == 0:
del data[i:i + 4]
_num_path_segments_removed += 1
else:
i += 4
elif cmd in ['h', 'v']:
oldLen = len(data)
path[pathIndex] = (cmd, [coord for coord in data if coord != 0])
_num_path_segments_removed += len(path[pathIndex][1]) - oldLen
# remove no-op commands
pathIndex = len(path)
subpath_needs_anchor = False
# NB: We can never rewrite the first m/M command (expect if it
# is the only command)
while pathIndex > 1:
pathIndex -= 1
cmd, data = path[pathIndex]
if cmd == 'z':
next_cmd, next_data = path[pathIndex - 1]
if next_cmd == 'm' and len(next_data) == 2:
# mX Yz -> mX Y
# note the len check on next_data as it is not
# safe to rewrite "m0 0 1 1z" in general (it is a
# question of where the "pen" ends - you can
# continue a draw on the same subpath after a
# "z").
del path[pathIndex]
_num_path_segments_removed += 1
else:
# it is not safe to rewrite "m0 0 ..." to "l..."
# because of this "z" command.
subpath_needs_anchor = True
elif cmd == 'm':
if len(path) - 1 == pathIndex and len(data) == 2:
# Ends with an empty move (but no line/draw
# following it)
del path[pathIndex]
_num_path_segments_removed += 1
continue
if subpath_needs_anchor:
subpath_needs_anchor = False
elif data[0] == data[1] == 0:
# unanchored, i.e. we can replace "m0 0 ..." with
# "l..." as there is no "z" after it.
path[pathIndex] = ('l', data[2:])
_num_path_segments_removed += 1
# fixup: Delete subcommands having no coordinates.
path = [elem for elem in path if len(elem[1]) > 0 or elem[0] == 'z']
# convert straight curves into lines
newPath = [path[0]]
for (cmd, data) in path[1:]:
i = 0
newData = data
if cmd == 'c':
newData = []
while i < len(data):
# since all commands are now relative, we can think of previous point as (0,0)
# and new point (dx,dy) is (data[i+4],data[i+5])
# eqn of line will be y = (dy/dx)*x or if dx=0 then eqn of line is x=0
(p1x, p1y) = (data[i], data[i + 1])
(p2x, p2y) = (data[i + 2], data[i + 3])
dx = data[i + 4]
dy = data[i + 5]
foundStraightCurve = False
if dx == 0:
if p1x == 0 and p2x == 0:
foundStraightCurve = True
else:
m = dy / dx
if p1y == m * p1x and p2y == m * p2x:
foundStraightCurve = True
if foundStraightCurve:
# flush any existing curve coords first
if newData:
newPath.append((cmd, newData))
newData = []
# now create a straight line segment
newPath.append(('l', [dx, dy]))
else:
newData.extend(data[i:i + 6])
i += 6
if newData or cmd == 'z' or cmd == 'Z':
newPath.append((cmd, newData))
path = newPath
# collapse all consecutive commands of the same type into one command
prevCmd = ''
prevData = []
newPath = []
for (cmd, data) in path:
if prevCmd == '':
# initialize with current path cmd and data
prevCmd = cmd
prevData = data
else:
# collapse if
# - cmd is not moveto (explicit moveto commands are not drawn)
# - the previous and current commands are the same type,
# - the previous command is moveto and the current is lineto
# (subsequent moveto pairs are treated as implicit lineto commands)
if cmd != 'm' and (cmd == prevCmd or (cmd == 'l' and prevCmd == 'm')):
prevData.extend(data)
# else flush the previous command if it is not the same type as the current command
else:
newPath.append((prevCmd, prevData))
prevCmd = cmd
prevData = data
# flush last command and data
newPath.append((prevCmd, prevData))
path = newPath
# convert to shorthand path segments where possible
newPath = []
for (cmd, data) in path:
# convert line segments into h,v where possible
if cmd == 'l':
i = 0
lineTuples = []
while i < len(data):
if data[i] == 0:
# vertical
if lineTuples:
# flush the existing line command
newPath.append(('l', lineTuples))
lineTuples = []
# append the v and then the remaining line coords
newPath.append(('v', [data[i + 1]]))
_num_path_segments_removed += 1
elif data[i + 1] == 0:
if lineTuples:
# flush the line command, then append the h and then the remaining line coords
newPath.append(('l', lineTuples))
lineTuples = []
newPath.append(('h', [data[i]]))
_num_path_segments_removed += 1
else:
lineTuples.extend(data[i:i + 2])
i += 2
if lineTuples:
newPath.append(('l', lineTuples))
# also handle implied relative linetos
elif cmd == 'm':
i = 2
lineTuples = [data[0], data[1]]
while i < len(data):
if data[i] == 0:
# vertical
if lineTuples:
# flush the existing m/l command
newPath.append((cmd, lineTuples))
lineTuples = []
cmd = 'l' # dealing with linetos now
# append the v and then the remaining line coords
newPath.append(('v', [data[i + 1]]))
_num_path_segments_removed += 1
elif data[i + 1] == 0:
if lineTuples:
# flush the m/l command, then append the h and then the remaining line coords
newPath.append((cmd, lineTuples))
lineTuples = []
cmd = 'l' # dealing with linetos now
newPath.append(('h', [data[i]]))
_num_path_segments_removed += 1
else:
lineTuples.extend(data[i:i + 2])
i += 2
if lineTuples:
newPath.append((cmd, lineTuples))
# convert Bézier curve segments into s where possible
elif cmd == 'c':
# set up the assumed bezier control point as the current point,
# i.e. (0,0) since we're using relative coords
bez_ctl_pt = (0, 0)
# however if the previous command was 's'
# the assumed control point is a reflection of the previous control point at the current point
if len(newPath):
(prevCmd, prevData) = newPath[-1]
if prevCmd == 's':
bez_ctl_pt = (prevData[-2] - prevData[-4], prevData[-1] - prevData[-3])
i = 0
curveTuples = []
while i < len(data):
# rotate by 180deg means negate both coordinates
# if the previous control point is equal then we can substitute a
# shorthand bezier command
if bez_ctl_pt[0] == data[i] and bez_ctl_pt[1] == data[i + 1]:
if curveTuples:
newPath.append(('c', curveTuples))
curveTuples = []
# append the s command
newPath.append(('s', [data[i + 2], data[i + 3], data[i + 4], data[i + 5]]))
_num_path_segments_removed += 1
else:
j = 0
while j <= 5:
curveTuples.append(data[i + j])
j += 1
# set up control point for next curve segment
bez_ctl_pt = (data[i + 4] - data[i + 2], data[i + 5] - data[i + 3])
i += 6
if curveTuples:
newPath.append(('c', curveTuples))
# convert quadratic curve segments into t where possible
elif cmd == 'q':
quad_ctl_pt = (0, 0)
i = 0
curveTuples = []
while i < len(data):
if quad_ctl_pt[0] == data[i] and quad_ctl_pt[1] == data[i + 1]:
if curveTuples:
newPath.append(('q', curveTuples))
curveTuples = []
# append the t command
newPath.append(('t', [data[i + 2], data[i + 3]]))
_num_path_segments_removed += 1
else:
j = 0
while j <= 3:
curveTuples.append(data[i + j])
j += 1
quad_ctl_pt = (data[i + 2] - data[i], data[i + 3] - data[i + 1])
i += 4
if curveTuples:
newPath.append(('q', curveTuples))
else:
newPath.append((cmd, data))
path = newPath
# For each m, l, h or v, collapse unnecessary coordinates that run in the same direction
# i.e. "h-100-100" becomes "h-200" but "h300-100" does not change.
# If the path has intermediate markers we have to preserve intermediate nodes, though.
# Reuse the data structure 'path', since we're not adding or removing subcommands.
# Also reuse the coordinate lists, even if we're deleting items, because these
# deletions are relatively cheap.
if not has_intermediate_markers:
for pathIndex in range(len(path)):
cmd, data = path[pathIndex]
# h / v expects only one parameter and we start drawing with the first (so we need at least 2)
if cmd in ['h', 'v'] and len(data) >= 2:
coordIndex = 0
while coordIndex+1 < len(data):
if is_same_sign(data[coordIndex], data[coordIndex+1]):
data[coordIndex] += data[coordIndex+1]
del data[coordIndex+1]
_num_path_segments_removed += 1
else:
coordIndex += 1
# l expects two parameters and we start drawing with the first (so we need at least 4)
elif cmd == 'l' and len(data) >= 4:
coordIndex = 0
while coordIndex+2 < len(data):
if is_same_direction(*data[coordIndex:coordIndex+4]):
data[coordIndex] += data[coordIndex+2]
data[coordIndex+1] += data[coordIndex+3]
del data[coordIndex+2] # delete the next two elements
del data[coordIndex+2]
_num_path_segments_removed += 1
else:
coordIndex += 2
# m expects two parameters but we have to skip the first pair as it's not drawn (so we need at least 6)
elif cmd == 'm' and len(data) >= 6:
coordIndex = 2
while coordIndex+2 < len(data):
if is_same_direction(*data[coordIndex:coordIndex+4]):
data[coordIndex] += data[coordIndex+2]
data[coordIndex+1] += data[coordIndex+3]
del data[coordIndex+2] # delete the next two elements
del data[coordIndex+2]
_num_path_segments_removed += 1
else:
coordIndex += 2
# it is possible that we have consecutive h, v, c, t commands now
# so again collapse all consecutive commands of the same type into one command
prevCmd = ''
prevData = []
newPath = [path[0]]
for (cmd, data) in path[1:]:
# flush the previous command if it is not the same type as the current command
if prevCmd != '':
if cmd != prevCmd or cmd == 'm':
newPath.append((prevCmd, prevData))
prevCmd = ''
prevData = []
# if the previous and current commands are the same type, collapse
if cmd == prevCmd and cmd != 'm':
prevData.extend(data)
# save last command and data
else:
prevCmd = cmd
prevData = data
# flush last command and data
if prevCmd != '':
newPath.append((prevCmd, prevData))
path = newPath
newPathStr = serializePath(path, options)
# if for whatever reason we actually made the path longer don't use it
# TODO: maybe we could compare path lengths after each optimization step and use the shortest
if len(newPathStr) <= len(oldPathStr):
_num_bytes_saved_in_path_data += (len(oldPathStr) - len(newPathStr))
element.setAttribute('d', newPathStr)
|
python
|
def cleanPath(element, options):
    """
    Clean and minify the path data ('d' attribute) of `element` in place.

    The path is run through a pipeline of size optimizations:
      1. parse the 'd' string into (command, coordinate-list) tuples
      2. convert all absolute commands into relative ones
      3. remove empty/no-op segments and redundant moveto/closepath commands
         (skipped when round/square linecaps would actually render them)
      4. rewrite straight cubic Bezier segments as line segments
      5. collapse consecutive commands of the same type into one command
      6. substitute shorthand commands where possible (h/v for l, s for c, t for q)
      7. merge consecutive collinear line segments
         (skipped when intermediate markers are rendered on the nodes)

    The optimized path is serialized with serializePath() and written back to
    the element only if the result is not longer than the original string.

    Arguments:
        element: a DOM element carrying a 'd' path attribute
        options: scour options object, forwarded to serializePath()

    Side effects: updates the module-level statistics counters
    _num_bytes_saved_in_path_data and _num_path_segments_removed.
    """
    global _num_bytes_saved_in_path_data
    global _num_path_segments_removed
    # this gets the parser object from svg_regex.py
    oldPathStr = element.getAttribute('d')
    path = svg_parser.parse(oldPathStr)
    style = _getStyle(element)
    # This determines whether the stroke has round or square linecaps. If it does, we do not want to collapse empty
    # segments, as they are actually rendered (as circles or squares with diameter/dimension matching the path-width).
    has_round_or_square_linecaps = (
        element.getAttribute('stroke-linecap') in ['round', 'square']
        or 'stroke-linecap' in style and style['stroke-linecap'] in ['round', 'square']
    )
    # This determines whether the stroke has intermediate markers. If it does, we do not want to collapse
    # straight segments running in the same direction, as markers are rendered on the intermediate nodes.
    has_intermediate_markers = (
        element.hasAttribute('marker')
        or element.hasAttribute('marker-mid')
        or 'marker' in style
        or 'marker-mid' in style
    )
    # The first command must be a moveto, and whether it's relative (m)
    # or absolute (M), the first set of coordinates *is* absolute. So
    # the first iteration of the loop below will get x,y and startx,starty.
    # convert absolute coordinates into relative ones.
    # Reuse the data structure 'path', since we're not adding or removing subcommands.
    # Also reuse the coordinate lists since we're not adding or removing any.
    # (x, y) tracks the current point; (startx, starty) the start of the
    # current subpath, needed to resolve 'z' (closepath).
    x = y = 0
    for pathIndex in range(len(path)):
        cmd, data = path[pathIndex]  # Changes to cmd don't get through to the data structure
        i = 0
        # adjust abs to rel
        # only the A command has some values that we don't want to adjust (radii, rotation, flags)
        if cmd == 'A':
            # A takes 7 values per segment; only the endpoint (indices 5, 6) is positional
            for i in range(i, len(data), 7):
                data[i + 5] -= x
                data[i + 6] -= y
                x += data[i + 5]
                y += data[i + 6]
            path[pathIndex] = ('a', data)
        elif cmd == 'a':
            x += sum(data[5::7])
            y += sum(data[6::7])
        elif cmd == 'H':
            for i in range(i, len(data)):
                data[i] -= x
                x += data[i]
            path[pathIndex] = ('h', data)
        elif cmd == 'h':
            x += sum(data)
        elif cmd == 'V':
            for i in range(i, len(data)):
                data[i] -= y
                y += data[i]
            path[pathIndex] = ('v', data)
        elif cmd == 'v':
            y += sum(data)
        elif cmd == 'M':
            startx, starty = data[0], data[1]
            # If this is a path starter, don't convert its first
            # coordinate to relative; that would just make it (0, 0)
            if pathIndex != 0:
                data[0] -= x
                data[1] -= y
            x, y = startx, starty
            i = 2
            # remaining coordinate pairs are implicit linetos
            for i in range(i, len(data), 2):
                data[i] -= x
                data[i + 1] -= y
                x += data[i]
                y += data[i + 1]
            path[pathIndex] = ('m', data)
        elif cmd in ['L', 'T']:
            for i in range(i, len(data), 2):
                data[i] -= x
                data[i + 1] -= y
                x += data[i]
                y += data[i + 1]
            path[pathIndex] = (cmd.lower(), data)
        elif cmd in ['m']:
            if pathIndex == 0:
                # START OF PATH - this is an absolute moveto
                # followed by relative linetos
                startx, starty = data[0], data[1]
                x, y = startx, starty
                i = 2
            else:
                startx = x + data[0]
                starty = y + data[1]
            # already relative; just advance the current point over the implicit linetos
            for i in range(i, len(data), 2):
                x += data[i]
                y += data[i + 1]
        elif cmd in ['l', 't']:
            x += sum(data[0::2])
            y += sum(data[1::2])
        elif cmd in ['S', 'Q']:
            # 4 values per segment: one control point followed by the endpoint
            for i in range(i, len(data), 4):
                data[i] -= x
                data[i + 1] -= y
                data[i + 2] -= x
                data[i + 3] -= y
                x += data[i + 2]
                y += data[i + 3]
            path[pathIndex] = (cmd.lower(), data)
        elif cmd in ['s', 'q']:
            x += sum(data[2::4])
            y += sum(data[3::4])
        elif cmd == 'C':
            # 6 values per segment: two control points followed by the endpoint
            for i in range(i, len(data), 6):
                data[i] -= x
                data[i + 1] -= y
                data[i + 2] -= x
                data[i + 3] -= y
                data[i + 4] -= x
                data[i + 5] -= y
                x += data[i + 4]
                y += data[i + 5]
            path[pathIndex] = ('c', data)
        elif cmd == 'c':
            x += sum(data[4::6])
            y += sum(data[5::6])
        elif cmd in ['z', 'Z']:
            # closepath returns the pen to the start of the current subpath
            x, y = startx, starty
            path[pathIndex] = ('z', data)
    # remove empty segments and redundant commands
    # Reuse the data structure 'path' and the coordinate lists, even if we're
    # deleting items, because these deletions are relatively cheap.
    if not has_round_or_square_linecaps:
        # remove empty path segments
        for pathIndex in range(len(path)):
            cmd, data = path[pathIndex]
            i = 0
            if cmd in ['m', 'l', 't']:
                if cmd == 'm':
                    # It might be tempting to rewrite "m0 0 ..." into
                    # "l..." here. However, this is an unsound
                    # optimization in general as "m0 0 ... z" is
                    # different from "l...z".
                    #
                    # To do such a rewrite, we need to understand the
                    # full subpath. This logic happens after this
                    # loop.
                    i = 2
                while i < len(data):
                    if data[i] == data[i + 1] == 0:
                        del data[i:i + 2]
                        _num_path_segments_removed += 1
                    else:
                        i += 2
            elif cmd == 'c':
                # a cubic segment with all-zero control and end offsets draws nothing
                while i < len(data):
                    if data[i] == data[i + 1] == data[i + 2] == data[i + 3] == data[i + 4] == data[i + 5] == 0:
                        del data[i:i + 6]
                        _num_path_segments_removed += 1
                    else:
                        i += 6
            elif cmd == 'a':
                # an arc with a zero endpoint offset draws nothing regardless of radii
                while i < len(data):
                    if data[i + 5] == data[i + 6] == 0:
                        del data[i:i + 7]
                        _num_path_segments_removed += 1
                    else:
                        i += 7
            elif cmd == 'q':
                while i < len(data):
                    if data[i] == data[i + 1] == data[i + 2] == data[i + 3] == 0:
                        del data[i:i + 4]
                        _num_path_segments_removed += 1
                    else:
                        i += 4
            elif cmd in ['h', 'v']:
                # zero-length horizontal/vertical moves can simply be filtered out
                oldLen = len(data)
                path[pathIndex] = (cmd, [coord for coord in data if coord != 0])
                _num_path_segments_removed += len(path[pathIndex][1]) - oldLen
        # remove no-op commands
        # Walk backwards so deletions do not disturb the indices still to visit,
        # and so a 'z' is seen before the 'm' that opened its subpath.
        pathIndex = len(path)
        subpath_needs_anchor = False
        # NB: We can never rewrite the first m/M command (except if it
        # is the only command)
        while pathIndex > 1:
            pathIndex -= 1
            cmd, data = path[pathIndex]
            if cmd == 'z':
                next_cmd, next_data = path[pathIndex - 1]
                if next_cmd == 'm' and len(next_data) == 2:
                    # mX Yz -> mX Y
                    # note the len check on next_data as it is not
                    # safe to rewrite "m0 0 1 1z" in general (it is a
                    # question of where the "pen" ends - you can
                    # continue a draw on the same subpath after a
                    # "z").
                    del path[pathIndex]
                    _num_path_segments_removed += 1
                else:
                    # it is not safe to rewrite "m0 0 ..." to "l..."
                    # because of this "z" command.
                    subpath_needs_anchor = True
            elif cmd == 'm':
                if len(path) - 1 == pathIndex and len(data) == 2:
                    # Ends with an empty move (but no line/draw
                    # following it)
                    del path[pathIndex]
                    _num_path_segments_removed += 1
                    continue
                if subpath_needs_anchor:
                    subpath_needs_anchor = False
                elif data[0] == data[1] == 0:
                    # unanchored, i.e. we can replace "m0 0 ..." with
                    # "l..." as there is no "z" after it.
                    path[pathIndex] = ('l', data[2:])
                    _num_path_segments_removed += 1
    # fixup: Delete subcommands having no coordinates.
    path = [elem for elem in path if len(elem[1]) > 0 or elem[0] == 'z']
    # convert straight curves into lines
    newPath = [path[0]]
    for (cmd, data) in path[1:]:
        i = 0
        newData = data
        if cmd == 'c':
            newData = []
            while i < len(data):
                # since all commands are now relative, we can think of previous point as (0,0)
                # and new point (dx,dy) is (data[i+4],data[i+5])
                # eqn of line will be y = (dy/dx)*x or if dx=0 then eqn of line is x=0
                (p1x, p1y) = (data[i], data[i + 1])
                (p2x, p2y) = (data[i + 2], data[i + 3])
                dx = data[i + 4]
                dy = data[i + 5]
                foundStraightCurve = False
                if dx == 0:
                    if p1x == 0 and p2x == 0:
                        foundStraightCurve = True
                else:
                    # both control points must lie exactly on the chord
                    m = dy / dx
                    if p1y == m * p1x and p2y == m * p2x:
                        foundStraightCurve = True
                if foundStraightCurve:
                    # flush any existing curve coords first
                    if newData:
                        newPath.append((cmd, newData))
                        newData = []
                    # now create a straight line segment
                    newPath.append(('l', [dx, dy]))
                else:
                    newData.extend(data[i:i + 6])
                i += 6
        if newData or cmd == 'z' or cmd == 'Z':
            newPath.append((cmd, newData))
    path = newPath
    # collapse all consecutive commands of the same type into one command
    prevCmd = ''
    prevData = []
    newPath = []
    for (cmd, data) in path:
        if prevCmd == '':
            # initialize with current path cmd and data
            prevCmd = cmd
            prevData = data
        else:
            # collapse if
            # - cmd is not moveto (explicit moveto commands are not drawn)
            # - the previous and current commands are the same type,
            # - the previous command is moveto and the current is lineto
            #   (subsequent moveto pairs are treated as implicit lineto commands)
            if cmd != 'm' and (cmd == prevCmd or (cmd == 'l' and prevCmd == 'm')):
                prevData.extend(data)
            # else flush the previous command if it is not the same type as the current command
            else:
                newPath.append((prevCmd, prevData))
                prevCmd = cmd
                prevData = data
    # flush last command and data
    newPath.append((prevCmd, prevData))
    path = newPath
    # convert to shorthand path segments where possible
    newPath = []
    for (cmd, data) in path:
        # convert line segments into h,v where possible
        if cmd == 'l':
            i = 0
            lineTuples = []
            while i < len(data):
                if data[i] == 0:
                    # vertical
                    if lineTuples:
                        # flush the existing line command
                        newPath.append(('l', lineTuples))
                        lineTuples = []
                    # append the v and then the remaining line coords
                    newPath.append(('v', [data[i + 1]]))
                    _num_path_segments_removed += 1
                elif data[i + 1] == 0:
                    # horizontal
                    if lineTuples:
                        # flush the line command, then append the h and then the remaining line coords
                        newPath.append(('l', lineTuples))
                        lineTuples = []
                    newPath.append(('h', [data[i]]))
                    _num_path_segments_removed += 1
                else:
                    lineTuples.extend(data[i:i + 2])
                i += 2
            if lineTuples:
                newPath.append(('l', lineTuples))
        # also handle implied relative linetos
        elif cmd == 'm':
            # the first pair is the move itself and must be kept with the 'm'
            i = 2
            lineTuples = [data[0], data[1]]
            while i < len(data):
                if data[i] == 0:
                    # vertical
                    if lineTuples:
                        # flush the existing m/l command
                        newPath.append((cmd, lineTuples))
                        lineTuples = []
                        cmd = 'l'  # dealing with linetos now
                    # append the v and then the remaining line coords
                    newPath.append(('v', [data[i + 1]]))
                    _num_path_segments_removed += 1
                elif data[i + 1] == 0:
                    # horizontal
                    if lineTuples:
                        # flush the m/l command, then append the h and then the remaining line coords
                        newPath.append((cmd, lineTuples))
                        lineTuples = []
                        cmd = 'l'  # dealing with linetos now
                    newPath.append(('h', [data[i]]))
                    _num_path_segments_removed += 1
                else:
                    lineTuples.extend(data[i:i + 2])
                i += 2
            if lineTuples:
                newPath.append((cmd, lineTuples))
        # convert Bézier curve segments into s where possible
        elif cmd == 'c':
            # set up the assumed bezier control point as the current point,
            # i.e. (0,0) since we're using relative coords
            bez_ctl_pt = (0, 0)
            # however if the previous command was 's'
            # the assumed control point is a reflection of the previous control point at the current point
            if len(newPath):
                (prevCmd, prevData) = newPath[-1]
                if prevCmd == 's':
                    bez_ctl_pt = (prevData[-2] - prevData[-4], prevData[-1] - prevData[-3])
            i = 0
            curveTuples = []
            while i < len(data):
                # rotate by 180deg means negate both coordinates
                # if the previous control point is equal then we can substitute a
                # shorthand bezier command
                if bez_ctl_pt[0] == data[i] and bez_ctl_pt[1] == data[i + 1]:
                    if curveTuples:
                        newPath.append(('c', curveTuples))
                        curveTuples = []
                    # append the s command
                    newPath.append(('s', [data[i + 2], data[i + 3], data[i + 4], data[i + 5]]))
                    _num_path_segments_removed += 1
                else:
                    j = 0
                    while j <= 5:
                        curveTuples.append(data[i + j])
                        j += 1
                # set up control point for next curve segment
                # (reflection of the second control point about the new current point)
                bez_ctl_pt = (data[i + 4] - data[i + 2], data[i + 5] - data[i + 3])
                i += 6
            if curveTuples:
                newPath.append(('c', curveTuples))
        # convert quadratic curve segments into t where possible
        elif cmd == 'q':
            quad_ctl_pt = (0, 0)
            i = 0
            curveTuples = []
            while i < len(data):
                if quad_ctl_pt[0] == data[i] and quad_ctl_pt[1] == data[i + 1]:
                    if curveTuples:
                        newPath.append(('q', curveTuples))
                        curveTuples = []
                    # append the t command
                    newPath.append(('t', [data[i + 2], data[i + 3]]))
                    _num_path_segments_removed += 1
                else:
                    j = 0
                    while j <= 3:
                        curveTuples.append(data[i + j])
                        j += 1
                quad_ctl_pt = (data[i + 2] - data[i], data[i + 3] - data[i + 1])
                i += 4
            if curveTuples:
                newPath.append(('q', curveTuples))
        else:
            newPath.append((cmd, data))
    path = newPath
    # For each m, l, h or v, collapse unnecessary coordinates that run in the same direction
    # i.e. "h-100-100" becomes "h-200" but "h300-100" does not change.
    # If the path has intermediate markers we have to preserve intermediate nodes, though.
    # Reuse the data structure 'path', since we're not adding or removing subcommands.
    # Also reuse the coordinate lists, even if we're deleting items, because these
    # deletions are relatively cheap.
    if not has_intermediate_markers:
        for pathIndex in range(len(path)):
            cmd, data = path[pathIndex]
            # h / v expects only one parameter and we start drawing with the first (so we need at least 2)
            if cmd in ['h', 'v'] and len(data) >= 2:
                coordIndex = 0
                while coordIndex+1 < len(data):
                    if is_same_sign(data[coordIndex], data[coordIndex+1]):
                        data[coordIndex] += data[coordIndex+1]
                        del data[coordIndex+1]
                        _num_path_segments_removed += 1
                    else:
                        coordIndex += 1
            # l expects two parameters and we start drawing with the first (so we need at least 4)
            elif cmd == 'l' and len(data) >= 4:
                coordIndex = 0
                while coordIndex+2 < len(data):
                    if is_same_direction(*data[coordIndex:coordIndex+4]):
                        data[coordIndex] += data[coordIndex+2]
                        data[coordIndex+1] += data[coordIndex+3]
                        del data[coordIndex+2]  # delete the next two elements
                        del data[coordIndex+2]
                        _num_path_segments_removed += 1
                    else:
                        coordIndex += 2
            # m expects two parameters but we have to skip the first pair as it's not drawn (so we need at least 6)
            elif cmd == 'm' and len(data) >= 6:
                coordIndex = 2
                while coordIndex+2 < len(data):
                    if is_same_direction(*data[coordIndex:coordIndex+4]):
                        data[coordIndex] += data[coordIndex+2]
                        data[coordIndex+1] += data[coordIndex+3]
                        del data[coordIndex+2]  # delete the next two elements
                        del data[coordIndex+2]
                        _num_path_segments_removed += 1
                    else:
                        coordIndex += 2
    # it is possible that we have consecutive h, v, c, t commands now
    # so again collapse all consecutive commands of the same type into one command
    prevCmd = ''
    prevData = []
    newPath = [path[0]]
    for (cmd, data) in path[1:]:
        # flush the previous command if it is not the same type as the current command
        if prevCmd != '':
            if cmd != prevCmd or cmd == 'm':
                newPath.append((prevCmd, prevData))
                prevCmd = ''
                prevData = []
        # if the previous and current commands are the same type, collapse
        if cmd == prevCmd and cmd != 'm':
            prevData.extend(data)
        # save last command and data
        else:
            prevCmd = cmd
            prevData = data
    # flush last command and data
    if prevCmd != '':
        newPath.append((prevCmd, prevData))
    path = newPath
    newPathStr = serializePath(path, options)
    # if for whatever reason we actually made the path longer don't use it
    # TODO: maybe we could compare path lengths after each optimization step and use the shortest
    if len(newPathStr) <= len(oldPathStr):
        _num_bytes_saved_in_path_data += (len(oldPathStr) - len(newPathStr))
        element.setAttribute('d', newPathStr)
|
[
"def",
"cleanPath",
"(",
"element",
",",
"options",
")",
":",
"global",
"_num_bytes_saved_in_path_data",
"global",
"_num_path_segments_removed",
"# this gets the parser object from svg_regex.py",
"oldPathStr",
"=",
"element",
".",
"getAttribute",
"(",
"'d'",
")",
"path",
"=",
"svg_parser",
".",
"parse",
"(",
"oldPathStr",
")",
"style",
"=",
"_getStyle",
"(",
"element",
")",
"# This determines whether the stroke has round or square linecaps. If it does, we do not want to collapse empty",
"# segments, as they are actually rendered (as circles or squares with diameter/dimension matching the path-width).",
"has_round_or_square_linecaps",
"=",
"(",
"element",
".",
"getAttribute",
"(",
"'stroke-linecap'",
")",
"in",
"[",
"'round'",
",",
"'square'",
"]",
"or",
"'stroke-linecap'",
"in",
"style",
"and",
"style",
"[",
"'stroke-linecap'",
"]",
"in",
"[",
"'round'",
",",
"'square'",
"]",
")",
"# This determines whether the stroke has intermediate markers. If it does, we do not want to collapse",
"# straight segments running in the same direction, as markers are rendered on the intermediate nodes.",
"has_intermediate_markers",
"=",
"(",
"element",
".",
"hasAttribute",
"(",
"'marker'",
")",
"or",
"element",
".",
"hasAttribute",
"(",
"'marker-mid'",
")",
"or",
"'marker'",
"in",
"style",
"or",
"'marker-mid'",
"in",
"style",
")",
"# The first command must be a moveto, and whether it's relative (m)",
"# or absolute (M), the first set of coordinates *is* absolute. So",
"# the first iteration of the loop below will get x,y and startx,starty.",
"# convert absolute coordinates into relative ones.",
"# Reuse the data structure 'path', since we're not adding or removing subcommands.",
"# Also reuse the coordinate lists since we're not adding or removing any.",
"x",
"=",
"y",
"=",
"0",
"for",
"pathIndex",
"in",
"range",
"(",
"len",
"(",
"path",
")",
")",
":",
"cmd",
",",
"data",
"=",
"path",
"[",
"pathIndex",
"]",
"# Changes to cmd don't get through to the data structure",
"i",
"=",
"0",
"# adjust abs to rel",
"# only the A command has some values that we don't want to adjust (radii, rotation, flags)",
"if",
"cmd",
"==",
"'A'",
":",
"for",
"i",
"in",
"range",
"(",
"i",
",",
"len",
"(",
"data",
")",
",",
"7",
")",
":",
"data",
"[",
"i",
"+",
"5",
"]",
"-=",
"x",
"data",
"[",
"i",
"+",
"6",
"]",
"-=",
"y",
"x",
"+=",
"data",
"[",
"i",
"+",
"5",
"]",
"y",
"+=",
"data",
"[",
"i",
"+",
"6",
"]",
"path",
"[",
"pathIndex",
"]",
"=",
"(",
"'a'",
",",
"data",
")",
"elif",
"cmd",
"==",
"'a'",
":",
"x",
"+=",
"sum",
"(",
"data",
"[",
"5",
":",
":",
"7",
"]",
")",
"y",
"+=",
"sum",
"(",
"data",
"[",
"6",
":",
":",
"7",
"]",
")",
"elif",
"cmd",
"==",
"'H'",
":",
"for",
"i",
"in",
"range",
"(",
"i",
",",
"len",
"(",
"data",
")",
")",
":",
"data",
"[",
"i",
"]",
"-=",
"x",
"x",
"+=",
"data",
"[",
"i",
"]",
"path",
"[",
"pathIndex",
"]",
"=",
"(",
"'h'",
",",
"data",
")",
"elif",
"cmd",
"==",
"'h'",
":",
"x",
"+=",
"sum",
"(",
"data",
")",
"elif",
"cmd",
"==",
"'V'",
":",
"for",
"i",
"in",
"range",
"(",
"i",
",",
"len",
"(",
"data",
")",
")",
":",
"data",
"[",
"i",
"]",
"-=",
"y",
"y",
"+=",
"data",
"[",
"i",
"]",
"path",
"[",
"pathIndex",
"]",
"=",
"(",
"'v'",
",",
"data",
")",
"elif",
"cmd",
"==",
"'v'",
":",
"y",
"+=",
"sum",
"(",
"data",
")",
"elif",
"cmd",
"==",
"'M'",
":",
"startx",
",",
"starty",
"=",
"data",
"[",
"0",
"]",
",",
"data",
"[",
"1",
"]",
"# If this is a path starter, don't convert its first",
"# coordinate to relative; that would just make it (0, 0)",
"if",
"pathIndex",
"!=",
"0",
":",
"data",
"[",
"0",
"]",
"-=",
"x",
"data",
"[",
"1",
"]",
"-=",
"y",
"x",
",",
"y",
"=",
"startx",
",",
"starty",
"i",
"=",
"2",
"for",
"i",
"in",
"range",
"(",
"i",
",",
"len",
"(",
"data",
")",
",",
"2",
")",
":",
"data",
"[",
"i",
"]",
"-=",
"x",
"data",
"[",
"i",
"+",
"1",
"]",
"-=",
"y",
"x",
"+=",
"data",
"[",
"i",
"]",
"y",
"+=",
"data",
"[",
"i",
"+",
"1",
"]",
"path",
"[",
"pathIndex",
"]",
"=",
"(",
"'m'",
",",
"data",
")",
"elif",
"cmd",
"in",
"[",
"'L'",
",",
"'T'",
"]",
":",
"for",
"i",
"in",
"range",
"(",
"i",
",",
"len",
"(",
"data",
")",
",",
"2",
")",
":",
"data",
"[",
"i",
"]",
"-=",
"x",
"data",
"[",
"i",
"+",
"1",
"]",
"-=",
"y",
"x",
"+=",
"data",
"[",
"i",
"]",
"y",
"+=",
"data",
"[",
"i",
"+",
"1",
"]",
"path",
"[",
"pathIndex",
"]",
"=",
"(",
"cmd",
".",
"lower",
"(",
")",
",",
"data",
")",
"elif",
"cmd",
"in",
"[",
"'m'",
"]",
":",
"if",
"pathIndex",
"==",
"0",
":",
"# START OF PATH - this is an absolute moveto",
"# followed by relative linetos",
"startx",
",",
"starty",
"=",
"data",
"[",
"0",
"]",
",",
"data",
"[",
"1",
"]",
"x",
",",
"y",
"=",
"startx",
",",
"starty",
"i",
"=",
"2",
"else",
":",
"startx",
"=",
"x",
"+",
"data",
"[",
"0",
"]",
"starty",
"=",
"y",
"+",
"data",
"[",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"i",
",",
"len",
"(",
"data",
")",
",",
"2",
")",
":",
"x",
"+=",
"data",
"[",
"i",
"]",
"y",
"+=",
"data",
"[",
"i",
"+",
"1",
"]",
"elif",
"cmd",
"in",
"[",
"'l'",
",",
"'t'",
"]",
":",
"x",
"+=",
"sum",
"(",
"data",
"[",
"0",
":",
":",
"2",
"]",
")",
"y",
"+=",
"sum",
"(",
"data",
"[",
"1",
":",
":",
"2",
"]",
")",
"elif",
"cmd",
"in",
"[",
"'S'",
",",
"'Q'",
"]",
":",
"for",
"i",
"in",
"range",
"(",
"i",
",",
"len",
"(",
"data",
")",
",",
"4",
")",
":",
"data",
"[",
"i",
"]",
"-=",
"x",
"data",
"[",
"i",
"+",
"1",
"]",
"-=",
"y",
"data",
"[",
"i",
"+",
"2",
"]",
"-=",
"x",
"data",
"[",
"i",
"+",
"3",
"]",
"-=",
"y",
"x",
"+=",
"data",
"[",
"i",
"+",
"2",
"]",
"y",
"+=",
"data",
"[",
"i",
"+",
"3",
"]",
"path",
"[",
"pathIndex",
"]",
"=",
"(",
"cmd",
".",
"lower",
"(",
")",
",",
"data",
")",
"elif",
"cmd",
"in",
"[",
"'s'",
",",
"'q'",
"]",
":",
"x",
"+=",
"sum",
"(",
"data",
"[",
"2",
":",
":",
"4",
"]",
")",
"y",
"+=",
"sum",
"(",
"data",
"[",
"3",
":",
":",
"4",
"]",
")",
"elif",
"cmd",
"==",
"'C'",
":",
"for",
"i",
"in",
"range",
"(",
"i",
",",
"len",
"(",
"data",
")",
",",
"6",
")",
":",
"data",
"[",
"i",
"]",
"-=",
"x",
"data",
"[",
"i",
"+",
"1",
"]",
"-=",
"y",
"data",
"[",
"i",
"+",
"2",
"]",
"-=",
"x",
"data",
"[",
"i",
"+",
"3",
"]",
"-=",
"y",
"data",
"[",
"i",
"+",
"4",
"]",
"-=",
"x",
"data",
"[",
"i",
"+",
"5",
"]",
"-=",
"y",
"x",
"+=",
"data",
"[",
"i",
"+",
"4",
"]",
"y",
"+=",
"data",
"[",
"i",
"+",
"5",
"]",
"path",
"[",
"pathIndex",
"]",
"=",
"(",
"'c'",
",",
"data",
")",
"elif",
"cmd",
"==",
"'c'",
":",
"x",
"+=",
"sum",
"(",
"data",
"[",
"4",
":",
":",
"6",
"]",
")",
"y",
"+=",
"sum",
"(",
"data",
"[",
"5",
":",
":",
"6",
"]",
")",
"elif",
"cmd",
"in",
"[",
"'z'",
",",
"'Z'",
"]",
":",
"x",
",",
"y",
"=",
"startx",
",",
"starty",
"path",
"[",
"pathIndex",
"]",
"=",
"(",
"'z'",
",",
"data",
")",
"# remove empty segments and redundant commands",
"# Reuse the data structure 'path' and the coordinate lists, even if we're",
"# deleting items, because these deletions are relatively cheap.",
"if",
"not",
"has_round_or_square_linecaps",
":",
"# remove empty path segments",
"for",
"pathIndex",
"in",
"range",
"(",
"len",
"(",
"path",
")",
")",
":",
"cmd",
",",
"data",
"=",
"path",
"[",
"pathIndex",
"]",
"i",
"=",
"0",
"if",
"cmd",
"in",
"[",
"'m'",
",",
"'l'",
",",
"'t'",
"]",
":",
"if",
"cmd",
"==",
"'m'",
":",
"# It might be tempting to rewrite \"m0 0 ...\" into",
"# \"l...\" here. However, this is an unsound",
"# optimization in general as \"m0 0 ... z\" is",
"# different from \"l...z\".",
"#",
"# To do such a rewrite, we need to understand the",
"# full subpath. This logic happens after this",
"# loop.",
"i",
"=",
"2",
"while",
"i",
"<",
"len",
"(",
"data",
")",
":",
"if",
"data",
"[",
"i",
"]",
"==",
"data",
"[",
"i",
"+",
"1",
"]",
"==",
"0",
":",
"del",
"data",
"[",
"i",
":",
"i",
"+",
"2",
"]",
"_num_path_segments_removed",
"+=",
"1",
"else",
":",
"i",
"+=",
"2",
"elif",
"cmd",
"==",
"'c'",
":",
"while",
"i",
"<",
"len",
"(",
"data",
")",
":",
"if",
"data",
"[",
"i",
"]",
"==",
"data",
"[",
"i",
"+",
"1",
"]",
"==",
"data",
"[",
"i",
"+",
"2",
"]",
"==",
"data",
"[",
"i",
"+",
"3",
"]",
"==",
"data",
"[",
"i",
"+",
"4",
"]",
"==",
"data",
"[",
"i",
"+",
"5",
"]",
"==",
"0",
":",
"del",
"data",
"[",
"i",
":",
"i",
"+",
"6",
"]",
"_num_path_segments_removed",
"+=",
"1",
"else",
":",
"i",
"+=",
"6",
"elif",
"cmd",
"==",
"'a'",
":",
"while",
"i",
"<",
"len",
"(",
"data",
")",
":",
"if",
"data",
"[",
"i",
"+",
"5",
"]",
"==",
"data",
"[",
"i",
"+",
"6",
"]",
"==",
"0",
":",
"del",
"data",
"[",
"i",
":",
"i",
"+",
"7",
"]",
"_num_path_segments_removed",
"+=",
"1",
"else",
":",
"i",
"+=",
"7",
"elif",
"cmd",
"==",
"'q'",
":",
"while",
"i",
"<",
"len",
"(",
"data",
")",
":",
"if",
"data",
"[",
"i",
"]",
"==",
"data",
"[",
"i",
"+",
"1",
"]",
"==",
"data",
"[",
"i",
"+",
"2",
"]",
"==",
"data",
"[",
"i",
"+",
"3",
"]",
"==",
"0",
":",
"del",
"data",
"[",
"i",
":",
"i",
"+",
"4",
"]",
"_num_path_segments_removed",
"+=",
"1",
"else",
":",
"i",
"+=",
"4",
"elif",
"cmd",
"in",
"[",
"'h'",
",",
"'v'",
"]",
":",
"oldLen",
"=",
"len",
"(",
"data",
")",
"path",
"[",
"pathIndex",
"]",
"=",
"(",
"cmd",
",",
"[",
"coord",
"for",
"coord",
"in",
"data",
"if",
"coord",
"!=",
"0",
"]",
")",
"_num_path_segments_removed",
"+=",
"len",
"(",
"path",
"[",
"pathIndex",
"]",
"[",
"1",
"]",
")",
"-",
"oldLen",
"# remove no-op commands",
"pathIndex",
"=",
"len",
"(",
"path",
")",
"subpath_needs_anchor",
"=",
"False",
"# NB: We can never rewrite the first m/M command (expect if it",
"# is the only command)",
"while",
"pathIndex",
">",
"1",
":",
"pathIndex",
"-=",
"1",
"cmd",
",",
"data",
"=",
"path",
"[",
"pathIndex",
"]",
"if",
"cmd",
"==",
"'z'",
":",
"next_cmd",
",",
"next_data",
"=",
"path",
"[",
"pathIndex",
"-",
"1",
"]",
"if",
"next_cmd",
"==",
"'m'",
"and",
"len",
"(",
"next_data",
")",
"==",
"2",
":",
"# mX Yz -> mX Y",
"# note the len check on next_data as it is not",
"# safe to rewrite \"m0 0 1 1z\" in general (it is a",
"# question of where the \"pen\" ends - you can",
"# continue a draw on the same subpath after a",
"# \"z\").",
"del",
"path",
"[",
"pathIndex",
"]",
"_num_path_segments_removed",
"+=",
"1",
"else",
":",
"# it is not safe to rewrite \"m0 0 ...\" to \"l...\"",
"# because of this \"z\" command.",
"subpath_needs_anchor",
"=",
"True",
"elif",
"cmd",
"==",
"'m'",
":",
"if",
"len",
"(",
"path",
")",
"-",
"1",
"==",
"pathIndex",
"and",
"len",
"(",
"data",
")",
"==",
"2",
":",
"# Ends with an empty move (but no line/draw",
"# following it)",
"del",
"path",
"[",
"pathIndex",
"]",
"_num_path_segments_removed",
"+=",
"1",
"continue",
"if",
"subpath_needs_anchor",
":",
"subpath_needs_anchor",
"=",
"False",
"elif",
"data",
"[",
"0",
"]",
"==",
"data",
"[",
"1",
"]",
"==",
"0",
":",
"# unanchored, i.e. we can replace \"m0 0 ...\" with",
"# \"l...\" as there is no \"z\" after it.",
"path",
"[",
"pathIndex",
"]",
"=",
"(",
"'l'",
",",
"data",
"[",
"2",
":",
"]",
")",
"_num_path_segments_removed",
"+=",
"1",
"# fixup: Delete subcommands having no coordinates.",
"path",
"=",
"[",
"elem",
"for",
"elem",
"in",
"path",
"if",
"len",
"(",
"elem",
"[",
"1",
"]",
")",
">",
"0",
"or",
"elem",
"[",
"0",
"]",
"==",
"'z'",
"]",
"# convert straight curves into lines",
"newPath",
"=",
"[",
"path",
"[",
"0",
"]",
"]",
"for",
"(",
"cmd",
",",
"data",
")",
"in",
"path",
"[",
"1",
":",
"]",
":",
"i",
"=",
"0",
"newData",
"=",
"data",
"if",
"cmd",
"==",
"'c'",
":",
"newData",
"=",
"[",
"]",
"while",
"i",
"<",
"len",
"(",
"data",
")",
":",
"# since all commands are now relative, we can think of previous point as (0,0)",
"# and new point (dx,dy) is (data[i+4],data[i+5])",
"# eqn of line will be y = (dy/dx)*x or if dx=0 then eqn of line is x=0",
"(",
"p1x",
",",
"p1y",
")",
"=",
"(",
"data",
"[",
"i",
"]",
",",
"data",
"[",
"i",
"+",
"1",
"]",
")",
"(",
"p2x",
",",
"p2y",
")",
"=",
"(",
"data",
"[",
"i",
"+",
"2",
"]",
",",
"data",
"[",
"i",
"+",
"3",
"]",
")",
"dx",
"=",
"data",
"[",
"i",
"+",
"4",
"]",
"dy",
"=",
"data",
"[",
"i",
"+",
"5",
"]",
"foundStraightCurve",
"=",
"False",
"if",
"dx",
"==",
"0",
":",
"if",
"p1x",
"==",
"0",
"and",
"p2x",
"==",
"0",
":",
"foundStraightCurve",
"=",
"True",
"else",
":",
"m",
"=",
"dy",
"/",
"dx",
"if",
"p1y",
"==",
"m",
"*",
"p1x",
"and",
"p2y",
"==",
"m",
"*",
"p2x",
":",
"foundStraightCurve",
"=",
"True",
"if",
"foundStraightCurve",
":",
"# flush any existing curve coords first",
"if",
"newData",
":",
"newPath",
".",
"append",
"(",
"(",
"cmd",
",",
"newData",
")",
")",
"newData",
"=",
"[",
"]",
"# now create a straight line segment",
"newPath",
".",
"append",
"(",
"(",
"'l'",
",",
"[",
"dx",
",",
"dy",
"]",
")",
")",
"else",
":",
"newData",
".",
"extend",
"(",
"data",
"[",
"i",
":",
"i",
"+",
"6",
"]",
")",
"i",
"+=",
"6",
"if",
"newData",
"or",
"cmd",
"==",
"'z'",
"or",
"cmd",
"==",
"'Z'",
":",
"newPath",
".",
"append",
"(",
"(",
"cmd",
",",
"newData",
")",
")",
"path",
"=",
"newPath",
"# collapse all consecutive commands of the same type into one command",
"prevCmd",
"=",
"''",
"prevData",
"=",
"[",
"]",
"newPath",
"=",
"[",
"]",
"for",
"(",
"cmd",
",",
"data",
")",
"in",
"path",
":",
"if",
"prevCmd",
"==",
"''",
":",
"# initialize with current path cmd and data",
"prevCmd",
"=",
"cmd",
"prevData",
"=",
"data",
"else",
":",
"# collapse if",
"# - cmd is not moveto (explicit moveto commands are not drawn)",
"# - the previous and current commands are the same type,",
"# - the previous command is moveto and the current is lineto",
"# (subsequent moveto pairs are treated as implicit lineto commands)",
"if",
"cmd",
"!=",
"'m'",
"and",
"(",
"cmd",
"==",
"prevCmd",
"or",
"(",
"cmd",
"==",
"'l'",
"and",
"prevCmd",
"==",
"'m'",
")",
")",
":",
"prevData",
".",
"extend",
"(",
"data",
")",
"# else flush the previous command if it is not the same type as the current command",
"else",
":",
"newPath",
".",
"append",
"(",
"(",
"prevCmd",
",",
"prevData",
")",
")",
"prevCmd",
"=",
"cmd",
"prevData",
"=",
"data",
"# flush last command and data",
"newPath",
".",
"append",
"(",
"(",
"prevCmd",
",",
"prevData",
")",
")",
"path",
"=",
"newPath",
"# convert to shorthand path segments where possible",
"newPath",
"=",
"[",
"]",
"for",
"(",
"cmd",
",",
"data",
")",
"in",
"path",
":",
"# convert line segments into h,v where possible",
"if",
"cmd",
"==",
"'l'",
":",
"i",
"=",
"0",
"lineTuples",
"=",
"[",
"]",
"while",
"i",
"<",
"len",
"(",
"data",
")",
":",
"if",
"data",
"[",
"i",
"]",
"==",
"0",
":",
"# vertical",
"if",
"lineTuples",
":",
"# flush the existing line command",
"newPath",
".",
"append",
"(",
"(",
"'l'",
",",
"lineTuples",
")",
")",
"lineTuples",
"=",
"[",
"]",
"# append the v and then the remaining line coords",
"newPath",
".",
"append",
"(",
"(",
"'v'",
",",
"[",
"data",
"[",
"i",
"+",
"1",
"]",
"]",
")",
")",
"_num_path_segments_removed",
"+=",
"1",
"elif",
"data",
"[",
"i",
"+",
"1",
"]",
"==",
"0",
":",
"if",
"lineTuples",
":",
"# flush the line command, then append the h and then the remaining line coords",
"newPath",
".",
"append",
"(",
"(",
"'l'",
",",
"lineTuples",
")",
")",
"lineTuples",
"=",
"[",
"]",
"newPath",
".",
"append",
"(",
"(",
"'h'",
",",
"[",
"data",
"[",
"i",
"]",
"]",
")",
")",
"_num_path_segments_removed",
"+=",
"1",
"else",
":",
"lineTuples",
".",
"extend",
"(",
"data",
"[",
"i",
":",
"i",
"+",
"2",
"]",
")",
"i",
"+=",
"2",
"if",
"lineTuples",
":",
"newPath",
".",
"append",
"(",
"(",
"'l'",
",",
"lineTuples",
")",
")",
"# also handle implied relative linetos",
"elif",
"cmd",
"==",
"'m'",
":",
"i",
"=",
"2",
"lineTuples",
"=",
"[",
"data",
"[",
"0",
"]",
",",
"data",
"[",
"1",
"]",
"]",
"while",
"i",
"<",
"len",
"(",
"data",
")",
":",
"if",
"data",
"[",
"i",
"]",
"==",
"0",
":",
"# vertical",
"if",
"lineTuples",
":",
"# flush the existing m/l command",
"newPath",
".",
"append",
"(",
"(",
"cmd",
",",
"lineTuples",
")",
")",
"lineTuples",
"=",
"[",
"]",
"cmd",
"=",
"'l'",
"# dealing with linetos now",
"# append the v and then the remaining line coords",
"newPath",
".",
"append",
"(",
"(",
"'v'",
",",
"[",
"data",
"[",
"i",
"+",
"1",
"]",
"]",
")",
")",
"_num_path_segments_removed",
"+=",
"1",
"elif",
"data",
"[",
"i",
"+",
"1",
"]",
"==",
"0",
":",
"if",
"lineTuples",
":",
"# flush the m/l command, then append the h and then the remaining line coords",
"newPath",
".",
"append",
"(",
"(",
"cmd",
",",
"lineTuples",
")",
")",
"lineTuples",
"=",
"[",
"]",
"cmd",
"=",
"'l'",
"# dealing with linetos now",
"newPath",
".",
"append",
"(",
"(",
"'h'",
",",
"[",
"data",
"[",
"i",
"]",
"]",
")",
")",
"_num_path_segments_removed",
"+=",
"1",
"else",
":",
"lineTuples",
".",
"extend",
"(",
"data",
"[",
"i",
":",
"i",
"+",
"2",
"]",
")",
"i",
"+=",
"2",
"if",
"lineTuples",
":",
"newPath",
".",
"append",
"(",
"(",
"cmd",
",",
"lineTuples",
")",
")",
"# convert Bézier curve segments into s where possible",
"elif",
"cmd",
"==",
"'c'",
":",
"# set up the assumed bezier control point as the current point,",
"# i.e. (0,0) since we're using relative coords",
"bez_ctl_pt",
"=",
"(",
"0",
",",
"0",
")",
"# however if the previous command was 's'",
"# the assumed control point is a reflection of the previous control point at the current point",
"if",
"len",
"(",
"newPath",
")",
":",
"(",
"prevCmd",
",",
"prevData",
")",
"=",
"newPath",
"[",
"-",
"1",
"]",
"if",
"prevCmd",
"==",
"'s'",
":",
"bez_ctl_pt",
"=",
"(",
"prevData",
"[",
"-",
"2",
"]",
"-",
"prevData",
"[",
"-",
"4",
"]",
",",
"prevData",
"[",
"-",
"1",
"]",
"-",
"prevData",
"[",
"-",
"3",
"]",
")",
"i",
"=",
"0",
"curveTuples",
"=",
"[",
"]",
"while",
"i",
"<",
"len",
"(",
"data",
")",
":",
"# rotate by 180deg means negate both coordinates",
"# if the previous control point is equal then we can substitute a",
"# shorthand bezier command",
"if",
"bez_ctl_pt",
"[",
"0",
"]",
"==",
"data",
"[",
"i",
"]",
"and",
"bez_ctl_pt",
"[",
"1",
"]",
"==",
"data",
"[",
"i",
"+",
"1",
"]",
":",
"if",
"curveTuples",
":",
"newPath",
".",
"append",
"(",
"(",
"'c'",
",",
"curveTuples",
")",
")",
"curveTuples",
"=",
"[",
"]",
"# append the s command",
"newPath",
".",
"append",
"(",
"(",
"'s'",
",",
"[",
"data",
"[",
"i",
"+",
"2",
"]",
",",
"data",
"[",
"i",
"+",
"3",
"]",
",",
"data",
"[",
"i",
"+",
"4",
"]",
",",
"data",
"[",
"i",
"+",
"5",
"]",
"]",
")",
")",
"_num_path_segments_removed",
"+=",
"1",
"else",
":",
"j",
"=",
"0",
"while",
"j",
"<=",
"5",
":",
"curveTuples",
".",
"append",
"(",
"data",
"[",
"i",
"+",
"j",
"]",
")",
"j",
"+=",
"1",
"# set up control point for next curve segment",
"bez_ctl_pt",
"=",
"(",
"data",
"[",
"i",
"+",
"4",
"]",
"-",
"data",
"[",
"i",
"+",
"2",
"]",
",",
"data",
"[",
"i",
"+",
"5",
"]",
"-",
"data",
"[",
"i",
"+",
"3",
"]",
")",
"i",
"+=",
"6",
"if",
"curveTuples",
":",
"newPath",
".",
"append",
"(",
"(",
"'c'",
",",
"curveTuples",
")",
")",
"# convert quadratic curve segments into t where possible",
"elif",
"cmd",
"==",
"'q'",
":",
"quad_ctl_pt",
"=",
"(",
"0",
",",
"0",
")",
"i",
"=",
"0",
"curveTuples",
"=",
"[",
"]",
"while",
"i",
"<",
"len",
"(",
"data",
")",
":",
"if",
"quad_ctl_pt",
"[",
"0",
"]",
"==",
"data",
"[",
"i",
"]",
"and",
"quad_ctl_pt",
"[",
"1",
"]",
"==",
"data",
"[",
"i",
"+",
"1",
"]",
":",
"if",
"curveTuples",
":",
"newPath",
".",
"append",
"(",
"(",
"'q'",
",",
"curveTuples",
")",
")",
"curveTuples",
"=",
"[",
"]",
"# append the t command",
"newPath",
".",
"append",
"(",
"(",
"'t'",
",",
"[",
"data",
"[",
"i",
"+",
"2",
"]",
",",
"data",
"[",
"i",
"+",
"3",
"]",
"]",
")",
")",
"_num_path_segments_removed",
"+=",
"1",
"else",
":",
"j",
"=",
"0",
"while",
"j",
"<=",
"3",
":",
"curveTuples",
".",
"append",
"(",
"data",
"[",
"i",
"+",
"j",
"]",
")",
"j",
"+=",
"1",
"quad_ctl_pt",
"=",
"(",
"data",
"[",
"i",
"+",
"2",
"]",
"-",
"data",
"[",
"i",
"]",
",",
"data",
"[",
"i",
"+",
"3",
"]",
"-",
"data",
"[",
"i",
"+",
"1",
"]",
")",
"i",
"+=",
"4",
"if",
"curveTuples",
":",
"newPath",
".",
"append",
"(",
"(",
"'q'",
",",
"curveTuples",
")",
")",
"else",
":",
"newPath",
".",
"append",
"(",
"(",
"cmd",
",",
"data",
")",
")",
"path",
"=",
"newPath",
"# For each m, l, h or v, collapse unnecessary coordinates that run in the same direction",
"# i.e. \"h-100-100\" becomes \"h-200\" but \"h300-100\" does not change.",
"# If the path has intermediate markers we have to preserve intermediate nodes, though.",
"# Reuse the data structure 'path', since we're not adding or removing subcommands.",
"# Also reuse the coordinate lists, even if we're deleting items, because these",
"# deletions are relatively cheap.",
"if",
"not",
"has_intermediate_markers",
":",
"for",
"pathIndex",
"in",
"range",
"(",
"len",
"(",
"path",
")",
")",
":",
"cmd",
",",
"data",
"=",
"path",
"[",
"pathIndex",
"]",
"# h / v expects only one parameter and we start drawing with the first (so we need at least 2)",
"if",
"cmd",
"in",
"[",
"'h'",
",",
"'v'",
"]",
"and",
"len",
"(",
"data",
")",
">=",
"2",
":",
"coordIndex",
"=",
"0",
"while",
"coordIndex",
"+",
"1",
"<",
"len",
"(",
"data",
")",
":",
"if",
"is_same_sign",
"(",
"data",
"[",
"coordIndex",
"]",
",",
"data",
"[",
"coordIndex",
"+",
"1",
"]",
")",
":",
"data",
"[",
"coordIndex",
"]",
"+=",
"data",
"[",
"coordIndex",
"+",
"1",
"]",
"del",
"data",
"[",
"coordIndex",
"+",
"1",
"]",
"_num_path_segments_removed",
"+=",
"1",
"else",
":",
"coordIndex",
"+=",
"1",
"# l expects two parameters and we start drawing with the first (so we need at least 4)",
"elif",
"cmd",
"==",
"'l'",
"and",
"len",
"(",
"data",
")",
">=",
"4",
":",
"coordIndex",
"=",
"0",
"while",
"coordIndex",
"+",
"2",
"<",
"len",
"(",
"data",
")",
":",
"if",
"is_same_direction",
"(",
"*",
"data",
"[",
"coordIndex",
":",
"coordIndex",
"+",
"4",
"]",
")",
":",
"data",
"[",
"coordIndex",
"]",
"+=",
"data",
"[",
"coordIndex",
"+",
"2",
"]",
"data",
"[",
"coordIndex",
"+",
"1",
"]",
"+=",
"data",
"[",
"coordIndex",
"+",
"3",
"]",
"del",
"data",
"[",
"coordIndex",
"+",
"2",
"]",
"# delete the next two elements",
"del",
"data",
"[",
"coordIndex",
"+",
"2",
"]",
"_num_path_segments_removed",
"+=",
"1",
"else",
":",
"coordIndex",
"+=",
"2",
"# m expects two parameters but we have to skip the first pair as it's not drawn (so we need at least 6)",
"elif",
"cmd",
"==",
"'m'",
"and",
"len",
"(",
"data",
")",
">=",
"6",
":",
"coordIndex",
"=",
"2",
"while",
"coordIndex",
"+",
"2",
"<",
"len",
"(",
"data",
")",
":",
"if",
"is_same_direction",
"(",
"*",
"data",
"[",
"coordIndex",
":",
"coordIndex",
"+",
"4",
"]",
")",
":",
"data",
"[",
"coordIndex",
"]",
"+=",
"data",
"[",
"coordIndex",
"+",
"2",
"]",
"data",
"[",
"coordIndex",
"+",
"1",
"]",
"+=",
"data",
"[",
"coordIndex",
"+",
"3",
"]",
"del",
"data",
"[",
"coordIndex",
"+",
"2",
"]",
"# delete the next two elements",
"del",
"data",
"[",
"coordIndex",
"+",
"2",
"]",
"_num_path_segments_removed",
"+=",
"1",
"else",
":",
"coordIndex",
"+=",
"2",
"# it is possible that we have consecutive h, v, c, t commands now",
"# so again collapse all consecutive commands of the same type into one command",
"prevCmd",
"=",
"''",
"prevData",
"=",
"[",
"]",
"newPath",
"=",
"[",
"path",
"[",
"0",
"]",
"]",
"for",
"(",
"cmd",
",",
"data",
")",
"in",
"path",
"[",
"1",
":",
"]",
":",
"# flush the previous command if it is not the same type as the current command",
"if",
"prevCmd",
"!=",
"''",
":",
"if",
"cmd",
"!=",
"prevCmd",
"or",
"cmd",
"==",
"'m'",
":",
"newPath",
".",
"append",
"(",
"(",
"prevCmd",
",",
"prevData",
")",
")",
"prevCmd",
"=",
"''",
"prevData",
"=",
"[",
"]",
"# if the previous and current commands are the same type, collapse",
"if",
"cmd",
"==",
"prevCmd",
"and",
"cmd",
"!=",
"'m'",
":",
"prevData",
".",
"extend",
"(",
"data",
")",
"# save last command and data",
"else",
":",
"prevCmd",
"=",
"cmd",
"prevData",
"=",
"data",
"# flush last command and data",
"if",
"prevCmd",
"!=",
"''",
":",
"newPath",
".",
"append",
"(",
"(",
"prevCmd",
",",
"prevData",
")",
")",
"path",
"=",
"newPath",
"newPathStr",
"=",
"serializePath",
"(",
"path",
",",
"options",
")",
"# if for whatever reason we actually made the path longer don't use it",
"# TODO: maybe we could compare path lengths after each optimization step and use the shortest",
"if",
"len",
"(",
"newPathStr",
")",
"<=",
"len",
"(",
"oldPathStr",
")",
":",
"_num_bytes_saved_in_path_data",
"+=",
"(",
"len",
"(",
"oldPathStr",
")",
"-",
"len",
"(",
"newPathStr",
")",
")",
"element",
".",
"setAttribute",
"(",
"'d'",
",",
"newPathStr",
")"
] |
Cleans the path string (d attribute) of the element
|
[
"Cleans",
"the",
"path",
"string",
"(",
"d",
"attribute",
")",
"of",
"the",
"element"
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2061-L2555
|
scour-project/scour
|
scour/scour.py
|
parseListOfPoints
|
def parseListOfPoints(s):
"""
Parse string into a list of points.
Returns a list containing an even number of coordinate strings
"""
i = 0
# (wsp)? comma-or-wsp-separated coordinate pairs (wsp)?
# coordinate-pair = coordinate comma-or-wsp coordinate
# coordinate = sign? integer
# comma-wsp: (wsp+ comma? wsp*) | (comma wsp*)
ws_nums = re.split(r"\s*[\s,]\s*", s.strip())
nums = []
# also, if 100-100 is found, split it into two also
# <polygon points="100,-100,100-100,100-100-100,-100-100" />
for i in range(len(ws_nums)):
negcoords = ws_nums[i].split("-")
# this string didn't have any negative coordinates
if len(negcoords) == 1:
nums.append(negcoords[0])
# we got negative coords
else:
for j in range(len(negcoords)):
# first number could be positive
if j == 0:
if negcoords[0] != '':
nums.append(negcoords[0])
# otherwise all other strings will be negative
else:
# unless we accidentally split a number that was in scientific notation
# and had a negative exponent (500.00e-1)
prev = ""
if len(nums):
prev = nums[len(nums) - 1]
if prev and prev[len(prev) - 1] in ['e', 'E']:
nums[len(nums) - 1] = prev + '-' + negcoords[j]
else:
nums.append('-' + negcoords[j])
# if we have an odd number of points, return empty
if len(nums) % 2 != 0:
return []
# now resolve into Decimal values
i = 0
while i < len(nums):
try:
nums[i] = getcontext().create_decimal(nums[i])
nums[i + 1] = getcontext().create_decimal(nums[i + 1])
except InvalidOperation: # one of the lengths had a unit or is an invalid number
return []
i += 2
return nums
|
python
|
def parseListOfPoints(s):
"""
Parse string into a list of points.
Returns a list containing an even number of coordinate strings
"""
i = 0
# (wsp)? comma-or-wsp-separated coordinate pairs (wsp)?
# coordinate-pair = coordinate comma-or-wsp coordinate
# coordinate = sign? integer
# comma-wsp: (wsp+ comma? wsp*) | (comma wsp*)
ws_nums = re.split(r"\s*[\s,]\s*", s.strip())
nums = []
# also, if 100-100 is found, split it into two also
# <polygon points="100,-100,100-100,100-100-100,-100-100" />
for i in range(len(ws_nums)):
negcoords = ws_nums[i].split("-")
# this string didn't have any negative coordinates
if len(negcoords) == 1:
nums.append(negcoords[0])
# we got negative coords
else:
for j in range(len(negcoords)):
# first number could be positive
if j == 0:
if negcoords[0] != '':
nums.append(negcoords[0])
# otherwise all other strings will be negative
else:
# unless we accidentally split a number that was in scientific notation
# and had a negative exponent (500.00e-1)
prev = ""
if len(nums):
prev = nums[len(nums) - 1]
if prev and prev[len(prev) - 1] in ['e', 'E']:
nums[len(nums) - 1] = prev + '-' + negcoords[j]
else:
nums.append('-' + negcoords[j])
# if we have an odd number of points, return empty
if len(nums) % 2 != 0:
return []
# now resolve into Decimal values
i = 0
while i < len(nums):
try:
nums[i] = getcontext().create_decimal(nums[i])
nums[i + 1] = getcontext().create_decimal(nums[i + 1])
except InvalidOperation: # one of the lengths had a unit or is an invalid number
return []
i += 2
return nums
|
[
"def",
"parseListOfPoints",
"(",
"s",
")",
":",
"i",
"=",
"0",
"# (wsp)? comma-or-wsp-separated coordinate pairs (wsp)?",
"# coordinate-pair = coordinate comma-or-wsp coordinate",
"# coordinate = sign? integer",
"# comma-wsp: (wsp+ comma? wsp*) | (comma wsp*)",
"ws_nums",
"=",
"re",
".",
"split",
"(",
"r\"\\s*[\\s,]\\s*\"",
",",
"s",
".",
"strip",
"(",
")",
")",
"nums",
"=",
"[",
"]",
"# also, if 100-100 is found, split it into two also",
"# <polygon points=\"100,-100,100-100,100-100-100,-100-100\" />",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"ws_nums",
")",
")",
":",
"negcoords",
"=",
"ws_nums",
"[",
"i",
"]",
".",
"split",
"(",
"\"-\"",
")",
"# this string didn't have any negative coordinates",
"if",
"len",
"(",
"negcoords",
")",
"==",
"1",
":",
"nums",
".",
"append",
"(",
"negcoords",
"[",
"0",
"]",
")",
"# we got negative coords",
"else",
":",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"negcoords",
")",
")",
":",
"# first number could be positive",
"if",
"j",
"==",
"0",
":",
"if",
"negcoords",
"[",
"0",
"]",
"!=",
"''",
":",
"nums",
".",
"append",
"(",
"negcoords",
"[",
"0",
"]",
")",
"# otherwise all other strings will be negative",
"else",
":",
"# unless we accidentally split a number that was in scientific notation",
"# and had a negative exponent (500.00e-1)",
"prev",
"=",
"\"\"",
"if",
"len",
"(",
"nums",
")",
":",
"prev",
"=",
"nums",
"[",
"len",
"(",
"nums",
")",
"-",
"1",
"]",
"if",
"prev",
"and",
"prev",
"[",
"len",
"(",
"prev",
")",
"-",
"1",
"]",
"in",
"[",
"'e'",
",",
"'E'",
"]",
":",
"nums",
"[",
"len",
"(",
"nums",
")",
"-",
"1",
"]",
"=",
"prev",
"+",
"'-'",
"+",
"negcoords",
"[",
"j",
"]",
"else",
":",
"nums",
".",
"append",
"(",
"'-'",
"+",
"negcoords",
"[",
"j",
"]",
")",
"# if we have an odd number of points, return empty",
"if",
"len",
"(",
"nums",
")",
"%",
"2",
"!=",
"0",
":",
"return",
"[",
"]",
"# now resolve into Decimal values",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"nums",
")",
":",
"try",
":",
"nums",
"[",
"i",
"]",
"=",
"getcontext",
"(",
")",
".",
"create_decimal",
"(",
"nums",
"[",
"i",
"]",
")",
"nums",
"[",
"i",
"+",
"1",
"]",
"=",
"getcontext",
"(",
")",
".",
"create_decimal",
"(",
"nums",
"[",
"i",
"+",
"1",
"]",
")",
"except",
"InvalidOperation",
":",
"# one of the lengths had a unit or is an invalid number",
"return",
"[",
"]",
"i",
"+=",
"2",
"return",
"nums"
] |
Parse string into a list of points.
Returns a list containing an even number of coordinate strings
|
[
"Parse",
"string",
"into",
"a",
"list",
"of",
"points",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2558-L2615
|
scour-project/scour
|
scour/scour.py
|
cleanPolygon
|
def cleanPolygon(elem, options):
"""
Remove unnecessary closing point of polygon points attribute
"""
global _num_points_removed_from_polygon
pts = parseListOfPoints(elem.getAttribute('points'))
N = len(pts) / 2
if N >= 2:
(startx, starty) = pts[:2]
(endx, endy) = pts[-2:]
if startx == endx and starty == endy:
del pts[-2:]
_num_points_removed_from_polygon += 1
elem.setAttribute('points', scourCoordinates(pts, options, True))
|
python
|
def cleanPolygon(elem, options):
"""
Remove unnecessary closing point of polygon points attribute
"""
global _num_points_removed_from_polygon
pts = parseListOfPoints(elem.getAttribute('points'))
N = len(pts) / 2
if N >= 2:
(startx, starty) = pts[:2]
(endx, endy) = pts[-2:]
if startx == endx and starty == endy:
del pts[-2:]
_num_points_removed_from_polygon += 1
elem.setAttribute('points', scourCoordinates(pts, options, True))
|
[
"def",
"cleanPolygon",
"(",
"elem",
",",
"options",
")",
":",
"global",
"_num_points_removed_from_polygon",
"pts",
"=",
"parseListOfPoints",
"(",
"elem",
".",
"getAttribute",
"(",
"'points'",
")",
")",
"N",
"=",
"len",
"(",
"pts",
")",
"/",
"2",
"if",
"N",
">=",
"2",
":",
"(",
"startx",
",",
"starty",
")",
"=",
"pts",
"[",
":",
"2",
"]",
"(",
"endx",
",",
"endy",
")",
"=",
"pts",
"[",
"-",
"2",
":",
"]",
"if",
"startx",
"==",
"endx",
"and",
"starty",
"==",
"endy",
":",
"del",
"pts",
"[",
"-",
"2",
":",
"]",
"_num_points_removed_from_polygon",
"+=",
"1",
"elem",
".",
"setAttribute",
"(",
"'points'",
",",
"scourCoordinates",
"(",
"pts",
",",
"options",
",",
"True",
")",
")"
] |
Remove unnecessary closing point of polygon points attribute
|
[
"Remove",
"unnecessary",
"closing",
"point",
"of",
"polygon",
"points",
"attribute"
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2618-L2632
|
scour-project/scour
|
scour/scour.py
|
cleanPolyline
|
def cleanPolyline(elem, options):
"""
Scour the polyline points attribute
"""
pts = parseListOfPoints(elem.getAttribute('points'))
elem.setAttribute('points', scourCoordinates(pts, options, True))
|
python
|
def cleanPolyline(elem, options):
"""
Scour the polyline points attribute
"""
pts = parseListOfPoints(elem.getAttribute('points'))
elem.setAttribute('points', scourCoordinates(pts, options, True))
|
[
"def",
"cleanPolyline",
"(",
"elem",
",",
"options",
")",
":",
"pts",
"=",
"parseListOfPoints",
"(",
"elem",
".",
"getAttribute",
"(",
"'points'",
")",
")",
"elem",
".",
"setAttribute",
"(",
"'points'",
",",
"scourCoordinates",
"(",
"pts",
",",
"options",
",",
"True",
")",
")"
] |
Scour the polyline points attribute
|
[
"Scour",
"the",
"polyline",
"points",
"attribute"
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2635-L2640
|
scour-project/scour
|
scour/scour.py
|
controlPoints
|
def controlPoints(cmd, data):
"""
Checks if there are control points in the path data
Returns the indices of all values in the path data which are control points
"""
cmd = cmd.lower()
if cmd in ['c', 's', 'q']:
indices = range(len(data))
if cmd == 'c': # c: (x1 y1 x2 y2 x y)+
return [index for index in indices if (index % 6) < 4]
elif cmd in ['s', 'q']: # s: (x2 y2 x y)+ q: (x1 y1 x y)+
return [index for index in indices if (index % 4) < 2]
return []
|
python
|
def controlPoints(cmd, data):
"""
Checks if there are control points in the path data
Returns the indices of all values in the path data which are control points
"""
cmd = cmd.lower()
if cmd in ['c', 's', 'q']:
indices = range(len(data))
if cmd == 'c': # c: (x1 y1 x2 y2 x y)+
return [index for index in indices if (index % 6) < 4]
elif cmd in ['s', 'q']: # s: (x2 y2 x y)+ q: (x1 y1 x y)+
return [index for index in indices if (index % 4) < 2]
return []
|
[
"def",
"controlPoints",
"(",
"cmd",
",",
"data",
")",
":",
"cmd",
"=",
"cmd",
".",
"lower",
"(",
")",
"if",
"cmd",
"in",
"[",
"'c'",
",",
"'s'",
",",
"'q'",
"]",
":",
"indices",
"=",
"range",
"(",
"len",
"(",
"data",
")",
")",
"if",
"cmd",
"==",
"'c'",
":",
"# c: (x1 y1 x2 y2 x y)+",
"return",
"[",
"index",
"for",
"index",
"in",
"indices",
"if",
"(",
"index",
"%",
"6",
")",
"<",
"4",
"]",
"elif",
"cmd",
"in",
"[",
"'s'",
",",
"'q'",
"]",
":",
"# s: (x2 y2 x y)+ q: (x1 y1 x y)+",
"return",
"[",
"index",
"for",
"index",
"in",
"indices",
"if",
"(",
"index",
"%",
"4",
")",
"<",
"2",
"]",
"return",
"[",
"]"
] |
Checks if there are control points in the path data
Returns the indices of all values in the path data which are control points
|
[
"Checks",
"if",
"there",
"are",
"control",
"points",
"in",
"the",
"path",
"data"
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2643-L2657
|
scour-project/scour
|
scour/scour.py
|
flags
|
def flags(cmd, data):
"""
Checks if there are flags in the path data
Returns the indices of all values in the path data which are flags
"""
if cmd.lower() == 'a': # a: (rx ry x-axis-rotation large-arc-flag sweep-flag x y)+
indices = range(len(data))
return [index for index in indices if (index % 7) in [3, 4]]
return []
|
python
|
def flags(cmd, data):
"""
Checks if there are flags in the path data
Returns the indices of all values in the path data which are flags
"""
if cmd.lower() == 'a': # a: (rx ry x-axis-rotation large-arc-flag sweep-flag x y)+
indices = range(len(data))
return [index for index in indices if (index % 7) in [3, 4]]
return []
|
[
"def",
"flags",
"(",
"cmd",
",",
"data",
")",
":",
"if",
"cmd",
".",
"lower",
"(",
")",
"==",
"'a'",
":",
"# a: (rx ry x-axis-rotation large-arc-flag sweep-flag x y)+",
"indices",
"=",
"range",
"(",
"len",
"(",
"data",
")",
")",
"return",
"[",
"index",
"for",
"index",
"in",
"indices",
"if",
"(",
"index",
"%",
"7",
")",
"in",
"[",
"3",
",",
"4",
"]",
"]",
"return",
"[",
"]"
] |
Checks if there are flags in the path data
Returns the indices of all values in the path data which are flags
|
[
"Checks",
"if",
"there",
"are",
"flags",
"in",
"the",
"path",
"data"
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2660-L2670
|
scour-project/scour
|
scour/scour.py
|
serializePath
|
def serializePath(pathObj, options):
"""
Reserializes the path data with some cleanups.
"""
# elliptical arc commands must have comma/wsp separating the coordinates
# this fixes an issue outlined in Fix https://bugs.launchpad.net/scour/+bug/412754
return ''.join([cmd + scourCoordinates(data, options,
control_points=controlPoints(cmd, data),
flags=flags(cmd, data))
for cmd, data in pathObj])
|
python
|
def serializePath(pathObj, options):
"""
Reserializes the path data with some cleanups.
"""
# elliptical arc commands must have comma/wsp separating the coordinates
# this fixes an issue outlined in Fix https://bugs.launchpad.net/scour/+bug/412754
return ''.join([cmd + scourCoordinates(data, options,
control_points=controlPoints(cmd, data),
flags=flags(cmd, data))
for cmd, data in pathObj])
|
[
"def",
"serializePath",
"(",
"pathObj",
",",
"options",
")",
":",
"# elliptical arc commands must have comma/wsp separating the coordinates",
"# this fixes an issue outlined in Fix https://bugs.launchpad.net/scour/+bug/412754",
"return",
"''",
".",
"join",
"(",
"[",
"cmd",
"+",
"scourCoordinates",
"(",
"data",
",",
"options",
",",
"control_points",
"=",
"controlPoints",
"(",
"cmd",
",",
"data",
")",
",",
"flags",
"=",
"flags",
"(",
"cmd",
",",
"data",
")",
")",
"for",
"cmd",
",",
"data",
"in",
"pathObj",
"]",
")"
] |
Reserializes the path data with some cleanups.
|
[
"Reserializes",
"the",
"path",
"data",
"with",
"some",
"cleanups",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2673-L2682
|
scour-project/scour
|
scour/scour.py
|
serializeTransform
|
def serializeTransform(transformObj):
"""
Reserializes the transform data with some cleanups.
"""
return ' '.join([command + '(' + ' '.join([scourUnitlessLength(number) for number in numbers]) + ')'
for command, numbers in transformObj])
|
python
|
def serializeTransform(transformObj):
"""
Reserializes the transform data with some cleanups.
"""
return ' '.join([command + '(' + ' '.join([scourUnitlessLength(number) for number in numbers]) + ')'
for command, numbers in transformObj])
|
[
"def",
"serializeTransform",
"(",
"transformObj",
")",
":",
"return",
"' '",
".",
"join",
"(",
"[",
"command",
"+",
"'('",
"+",
"' '",
".",
"join",
"(",
"[",
"scourUnitlessLength",
"(",
"number",
")",
"for",
"number",
"in",
"numbers",
"]",
")",
"+",
"')'",
"for",
"command",
",",
"numbers",
"in",
"transformObj",
"]",
")"
] |
Reserializes the transform data with some cleanups.
|
[
"Reserializes",
"the",
"transform",
"data",
"with",
"some",
"cleanups",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2685-L2690
|
scour-project/scour
|
scour/scour.py
|
scourCoordinates
|
def scourCoordinates(data, options, force_whitespace=False, control_points=[], flags=[]):
"""
Serializes coordinate data with some cleanups:
- removes all trailing zeros after the decimal
- integerize coordinates if possible
- removes extraneous whitespace
- adds spaces between values in a subcommand if required (or if force_whitespace is True)
"""
if data is not None:
newData = []
c = 0
previousCoord = ''
for coord in data:
is_control_point = c in control_points
scouredCoord = scourUnitlessLength(coord,
renderer_workaround=options.renderer_workaround,
is_control_point=is_control_point)
# don't output a space if this number starts with a dot (.) or minus sign (-); we only need a space if
# - this number starts with a digit
# - this number starts with a dot but the previous number had *no* dot or exponent
# i.e. '1.3 0.5' -> '1.3.5' or '1e3 0.5' -> '1e3.5' is fine but '123 0.5' -> '123.5' is obviously not
# - 'force_whitespace' is explicitly set to 'True'
# we never need a space after flags (occuring in elliptical arcs), but librsvg struggles without it
if (c > 0
and (force_whitespace
or scouredCoord[0].isdigit()
or (scouredCoord[0] == '.' and not ('.' in previousCoord or 'e' in previousCoord)))
and ((c-1 not in flags) or options.renderer_workaround)):
newData.append(' ')
# add the scoured coordinate to the path string
newData.append(scouredCoord)
previousCoord = scouredCoord
c += 1
# What we need to do to work around GNOME bugs 548494, 563933 and 620565, is to make sure that a dot doesn't
# immediately follow a command (so 'h50' and 'h0.5' are allowed, but not 'h.5').
# Then, we need to add a space character after any coordinates having an 'e' (scientific notation),
# so as to have the exponent separate from the next number.
# TODO: Check whether this is still required (bugs all marked as fixed, might be time to phase it out)
if options.renderer_workaround:
if len(newData) > 0:
for i in range(1, len(newData)):
if newData[i][0] == '-' and 'e' in newData[i - 1]:
newData[i - 1] += ' '
return ''.join(newData)
else:
return ''.join(newData)
return ''
|
python
|
def scourCoordinates(data, options, force_whitespace=False, control_points=[], flags=[]):
"""
Serializes coordinate data with some cleanups:
- removes all trailing zeros after the decimal
- integerize coordinates if possible
- removes extraneous whitespace
- adds spaces between values in a subcommand if required (or if force_whitespace is True)
"""
if data is not None:
newData = []
c = 0
previousCoord = ''
for coord in data:
is_control_point = c in control_points
scouredCoord = scourUnitlessLength(coord,
renderer_workaround=options.renderer_workaround,
is_control_point=is_control_point)
# don't output a space if this number starts with a dot (.) or minus sign (-); we only need a space if
# - this number starts with a digit
# - this number starts with a dot but the previous number had *no* dot or exponent
# i.e. '1.3 0.5' -> '1.3.5' or '1e3 0.5' -> '1e3.5' is fine but '123 0.5' -> '123.5' is obviously not
# - 'force_whitespace' is explicitly set to 'True'
# we never need a space after flags (occuring in elliptical arcs), but librsvg struggles without it
if (c > 0
and (force_whitespace
or scouredCoord[0].isdigit()
or (scouredCoord[0] == '.' and not ('.' in previousCoord or 'e' in previousCoord)))
and ((c-1 not in flags) or options.renderer_workaround)):
newData.append(' ')
# add the scoured coordinate to the path string
newData.append(scouredCoord)
previousCoord = scouredCoord
c += 1
# What we need to do to work around GNOME bugs 548494, 563933 and 620565, is to make sure that a dot doesn't
# immediately follow a command (so 'h50' and 'h0.5' are allowed, but not 'h.5').
# Then, we need to add a space character after any coordinates having an 'e' (scientific notation),
# so as to have the exponent separate from the next number.
# TODO: Check whether this is still required (bugs all marked as fixed, might be time to phase it out)
if options.renderer_workaround:
if len(newData) > 0:
for i in range(1, len(newData)):
if newData[i][0] == '-' and 'e' in newData[i - 1]:
newData[i - 1] += ' '
return ''.join(newData)
else:
return ''.join(newData)
return ''
|
[
"def",
"scourCoordinates",
"(",
"data",
",",
"options",
",",
"force_whitespace",
"=",
"False",
",",
"control_points",
"=",
"[",
"]",
",",
"flags",
"=",
"[",
"]",
")",
":",
"if",
"data",
"is",
"not",
"None",
":",
"newData",
"=",
"[",
"]",
"c",
"=",
"0",
"previousCoord",
"=",
"''",
"for",
"coord",
"in",
"data",
":",
"is_control_point",
"=",
"c",
"in",
"control_points",
"scouredCoord",
"=",
"scourUnitlessLength",
"(",
"coord",
",",
"renderer_workaround",
"=",
"options",
".",
"renderer_workaround",
",",
"is_control_point",
"=",
"is_control_point",
")",
"# don't output a space if this number starts with a dot (.) or minus sign (-); we only need a space if",
"# - this number starts with a digit",
"# - this number starts with a dot but the previous number had *no* dot or exponent",
"# i.e. '1.3 0.5' -> '1.3.5' or '1e3 0.5' -> '1e3.5' is fine but '123 0.5' -> '123.5' is obviously not",
"# - 'force_whitespace' is explicitly set to 'True'",
"# we never need a space after flags (occuring in elliptical arcs), but librsvg struggles without it",
"if",
"(",
"c",
">",
"0",
"and",
"(",
"force_whitespace",
"or",
"scouredCoord",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
"or",
"(",
"scouredCoord",
"[",
"0",
"]",
"==",
"'.'",
"and",
"not",
"(",
"'.'",
"in",
"previousCoord",
"or",
"'e'",
"in",
"previousCoord",
")",
")",
")",
"and",
"(",
"(",
"c",
"-",
"1",
"not",
"in",
"flags",
")",
"or",
"options",
".",
"renderer_workaround",
")",
")",
":",
"newData",
".",
"append",
"(",
"' '",
")",
"# add the scoured coordinate to the path string",
"newData",
".",
"append",
"(",
"scouredCoord",
")",
"previousCoord",
"=",
"scouredCoord",
"c",
"+=",
"1",
"# What we need to do to work around GNOME bugs 548494, 563933 and 620565, is to make sure that a dot doesn't",
"# immediately follow a command (so 'h50' and 'h0.5' are allowed, but not 'h.5').",
"# Then, we need to add a space character after any coordinates having an 'e' (scientific notation),",
"# so as to have the exponent separate from the next number.",
"# TODO: Check whether this is still required (bugs all marked as fixed, might be time to phase it out)",
"if",
"options",
".",
"renderer_workaround",
":",
"if",
"len",
"(",
"newData",
")",
">",
"0",
":",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"newData",
")",
")",
":",
"if",
"newData",
"[",
"i",
"]",
"[",
"0",
"]",
"==",
"'-'",
"and",
"'e'",
"in",
"newData",
"[",
"i",
"-",
"1",
"]",
":",
"newData",
"[",
"i",
"-",
"1",
"]",
"+=",
"' '",
"return",
"''",
".",
"join",
"(",
"newData",
")",
"else",
":",
"return",
"''",
".",
"join",
"(",
"newData",
")",
"return",
"''"
] |
Serializes coordinate data with some cleanups:
- removes all trailing zeros after the decimal
- integerize coordinates if possible
- removes extraneous whitespace
- adds spaces between values in a subcommand if required (or if force_whitespace is True)
|
[
"Serializes",
"coordinate",
"data",
"with",
"some",
"cleanups",
":",
"-",
"removes",
"all",
"trailing",
"zeros",
"after",
"the",
"decimal",
"-",
"integerize",
"coordinates",
"if",
"possible",
"-",
"removes",
"extraneous",
"whitespace",
"-",
"adds",
"spaces",
"between",
"values",
"in",
"a",
"subcommand",
"if",
"required",
"(",
"or",
"if",
"force_whitespace",
"is",
"True",
")"
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2693-L2742
|
scour-project/scour
|
scour/scour.py
|
scourLength
|
def scourLength(length):
"""
Scours a length. Accepts units.
"""
length = SVGLength(length)
return scourUnitlessLength(length.value) + Unit.str(length.units)
|
python
|
def scourLength(length):
"""
Scours a length. Accepts units.
"""
length = SVGLength(length)
return scourUnitlessLength(length.value) + Unit.str(length.units)
|
[
"def",
"scourLength",
"(",
"length",
")",
":",
"length",
"=",
"SVGLength",
"(",
"length",
")",
"return",
"scourUnitlessLength",
"(",
"length",
".",
"value",
")",
"+",
"Unit",
".",
"str",
"(",
"length",
".",
"units",
")"
] |
Scours a length. Accepts units.
|
[
"Scours",
"a",
"length",
".",
"Accepts",
"units",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2745-L2751
|
scour-project/scour
|
scour/scour.py
|
scourUnitlessLength
|
def scourUnitlessLength(length, renderer_workaround=False, is_control_point=False): # length is of a numeric type
"""
Scours the numeric part of a length only. Does not accept units.
This is faster than scourLength on elements guaranteed not to
contain units.
"""
if not isinstance(length, Decimal):
length = getcontext().create_decimal(str(length))
initial_length = length
# reduce numeric precision
# plus() corresponds to the unary prefix plus operator and applies context precision and rounding
if is_control_point:
length = scouringContextC.plus(length)
else:
length = scouringContext.plus(length)
# remove trailing zeroes as we do not care for significance
intLength = length.to_integral_value()
if length == intLength:
length = Decimal(intLength)
else:
length = length.normalize()
# Gather the non-scientific notation version of the coordinate.
# Re-quantize from the initial value to prevent unnecessary loss of precision
# (e.g. 123.4 should become 123, not 120 or even 100)
nonsci = '{0:f}'.format(length)
nonsci = '{0:f}'.format(initial_length.quantize(Decimal(nonsci)))
if not renderer_workaround:
if len(nonsci) > 2 and nonsci[:2] == '0.':
nonsci = nonsci[1:] # remove the 0, leave the dot
elif len(nonsci) > 3 and nonsci[:3] == '-0.':
nonsci = '-' + nonsci[2:] # remove the 0, leave the minus and dot
return_value = nonsci
# Gather the scientific notation version of the coordinate which
# can only be shorter if the length of the number is at least 4 characters (e.g. 1000 = 1e3).
if len(nonsci) > 3:
# We have to implement this ourselves since both 'normalize()' and 'to_sci_string()'
# don't handle negative exponents in a reasonable way (e.g. 0.000001 remains unchanged)
exponent = length.adjusted() # how far do we have to shift the dot?
length = length.scaleb(-exponent).normalize() # shift the dot and remove potential trailing zeroes
sci = six.text_type(length) + 'e' + six.text_type(exponent)
if len(sci) < len(nonsci):
return_value = sci
return return_value
|
python
|
def scourUnitlessLength(length, renderer_workaround=False, is_control_point=False): # length is of a numeric type
"""
Scours the numeric part of a length only. Does not accept units.
This is faster than scourLength on elements guaranteed not to
contain units.
"""
if not isinstance(length, Decimal):
length = getcontext().create_decimal(str(length))
initial_length = length
# reduce numeric precision
# plus() corresponds to the unary prefix plus operator and applies context precision and rounding
if is_control_point:
length = scouringContextC.plus(length)
else:
length = scouringContext.plus(length)
# remove trailing zeroes as we do not care for significance
intLength = length.to_integral_value()
if length == intLength:
length = Decimal(intLength)
else:
length = length.normalize()
# Gather the non-scientific notation version of the coordinate.
# Re-quantize from the initial value to prevent unnecessary loss of precision
# (e.g. 123.4 should become 123, not 120 or even 100)
nonsci = '{0:f}'.format(length)
nonsci = '{0:f}'.format(initial_length.quantize(Decimal(nonsci)))
if not renderer_workaround:
if len(nonsci) > 2 and nonsci[:2] == '0.':
nonsci = nonsci[1:] # remove the 0, leave the dot
elif len(nonsci) > 3 and nonsci[:3] == '-0.':
nonsci = '-' + nonsci[2:] # remove the 0, leave the minus and dot
return_value = nonsci
# Gather the scientific notation version of the coordinate which
# can only be shorter if the length of the number is at least 4 characters (e.g. 1000 = 1e3).
if len(nonsci) > 3:
# We have to implement this ourselves since both 'normalize()' and 'to_sci_string()'
# don't handle negative exponents in a reasonable way (e.g. 0.000001 remains unchanged)
exponent = length.adjusted() # how far do we have to shift the dot?
length = length.scaleb(-exponent).normalize() # shift the dot and remove potential trailing zeroes
sci = six.text_type(length) + 'e' + six.text_type(exponent)
if len(sci) < len(nonsci):
return_value = sci
return return_value
|
[
"def",
"scourUnitlessLength",
"(",
"length",
",",
"renderer_workaround",
"=",
"False",
",",
"is_control_point",
"=",
"False",
")",
":",
"# length is of a numeric type",
"if",
"not",
"isinstance",
"(",
"length",
",",
"Decimal",
")",
":",
"length",
"=",
"getcontext",
"(",
")",
".",
"create_decimal",
"(",
"str",
"(",
"length",
")",
")",
"initial_length",
"=",
"length",
"# reduce numeric precision",
"# plus() corresponds to the unary prefix plus operator and applies context precision and rounding",
"if",
"is_control_point",
":",
"length",
"=",
"scouringContextC",
".",
"plus",
"(",
"length",
")",
"else",
":",
"length",
"=",
"scouringContext",
".",
"plus",
"(",
"length",
")",
"# remove trailing zeroes as we do not care for significance",
"intLength",
"=",
"length",
".",
"to_integral_value",
"(",
")",
"if",
"length",
"==",
"intLength",
":",
"length",
"=",
"Decimal",
"(",
"intLength",
")",
"else",
":",
"length",
"=",
"length",
".",
"normalize",
"(",
")",
"# Gather the non-scientific notation version of the coordinate.",
"# Re-quantize from the initial value to prevent unnecessary loss of precision",
"# (e.g. 123.4 should become 123, not 120 or even 100)",
"nonsci",
"=",
"'{0:f}'",
".",
"format",
"(",
"length",
")",
"nonsci",
"=",
"'{0:f}'",
".",
"format",
"(",
"initial_length",
".",
"quantize",
"(",
"Decimal",
"(",
"nonsci",
")",
")",
")",
"if",
"not",
"renderer_workaround",
":",
"if",
"len",
"(",
"nonsci",
")",
">",
"2",
"and",
"nonsci",
"[",
":",
"2",
"]",
"==",
"'0.'",
":",
"nonsci",
"=",
"nonsci",
"[",
"1",
":",
"]",
"# remove the 0, leave the dot",
"elif",
"len",
"(",
"nonsci",
")",
">",
"3",
"and",
"nonsci",
"[",
":",
"3",
"]",
"==",
"'-0.'",
":",
"nonsci",
"=",
"'-'",
"+",
"nonsci",
"[",
"2",
":",
"]",
"# remove the 0, leave the minus and dot",
"return_value",
"=",
"nonsci",
"# Gather the scientific notation version of the coordinate which",
"# can only be shorter if the length of the number is at least 4 characters (e.g. 1000 = 1e3).",
"if",
"len",
"(",
"nonsci",
")",
">",
"3",
":",
"# We have to implement this ourselves since both 'normalize()' and 'to_sci_string()'",
"# don't handle negative exponents in a reasonable way (e.g. 0.000001 remains unchanged)",
"exponent",
"=",
"length",
".",
"adjusted",
"(",
")",
"# how far do we have to shift the dot?",
"length",
"=",
"length",
".",
"scaleb",
"(",
"-",
"exponent",
")",
".",
"normalize",
"(",
")",
"# shift the dot and remove potential trailing zeroes",
"sci",
"=",
"six",
".",
"text_type",
"(",
"length",
")",
"+",
"'e'",
"+",
"six",
".",
"text_type",
"(",
"exponent",
")",
"if",
"len",
"(",
"sci",
")",
"<",
"len",
"(",
"nonsci",
")",
":",
"return_value",
"=",
"sci",
"return",
"return_value"
] |
Scours the numeric part of a length only. Does not accept units.
This is faster than scourLength on elements guaranteed not to
contain units.
|
[
"Scours",
"the",
"numeric",
"part",
"of",
"a",
"length",
"only",
".",
"Does",
"not",
"accept",
"units",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2754-L2804
|
scour-project/scour
|
scour/scour.py
|
reducePrecision
|
def reducePrecision(element):
"""
Because opacities, letter spacings, stroke widths and all that don't need
to be preserved in SVG files with 9 digits of precision.
Takes all of these attributes, in the given element node and its children,
and reduces their precision to the current Decimal context's precision.
Also checks for the attributes actually being lengths, not 'inherit', 'none'
or anything that isn't an SVGLength.
Returns the number of bytes saved after performing these reductions.
"""
num = 0
styles = _getStyle(element)
for lengthAttr in ['opacity', 'flood-opacity', 'fill-opacity',
'stroke-opacity', 'stop-opacity', 'stroke-miterlimit',
'stroke-dashoffset', 'letter-spacing', 'word-spacing',
'kerning', 'font-size-adjust', 'font-size',
'stroke-width']:
val = element.getAttribute(lengthAttr)
if val != '':
valLen = SVGLength(val)
if valLen.units != Unit.INVALID: # not an absolute/relative size or inherit, can be % though
newVal = scourLength(val)
if len(newVal) < len(val):
num += len(val) - len(newVal)
element.setAttribute(lengthAttr, newVal)
# repeat for attributes hidden in styles
if lengthAttr in styles:
val = styles[lengthAttr]
valLen = SVGLength(val)
if valLen.units != Unit.INVALID:
newVal = scourLength(val)
if len(newVal) < len(val):
num += len(val) - len(newVal)
styles[lengthAttr] = newVal
_setStyle(element, styles)
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
num += reducePrecision(child)
return num
|
python
|
def reducePrecision(element):
"""
Because opacities, letter spacings, stroke widths and all that don't need
to be preserved in SVG files with 9 digits of precision.
Takes all of these attributes, in the given element node and its children,
and reduces their precision to the current Decimal context's precision.
Also checks for the attributes actually being lengths, not 'inherit', 'none'
or anything that isn't an SVGLength.
Returns the number of bytes saved after performing these reductions.
"""
num = 0
styles = _getStyle(element)
for lengthAttr in ['opacity', 'flood-opacity', 'fill-opacity',
'stroke-opacity', 'stop-opacity', 'stroke-miterlimit',
'stroke-dashoffset', 'letter-spacing', 'word-spacing',
'kerning', 'font-size-adjust', 'font-size',
'stroke-width']:
val = element.getAttribute(lengthAttr)
if val != '':
valLen = SVGLength(val)
if valLen.units != Unit.INVALID: # not an absolute/relative size or inherit, can be % though
newVal = scourLength(val)
if len(newVal) < len(val):
num += len(val) - len(newVal)
element.setAttribute(lengthAttr, newVal)
# repeat for attributes hidden in styles
if lengthAttr in styles:
val = styles[lengthAttr]
valLen = SVGLength(val)
if valLen.units != Unit.INVALID:
newVal = scourLength(val)
if len(newVal) < len(val):
num += len(val) - len(newVal)
styles[lengthAttr] = newVal
_setStyle(element, styles)
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
num += reducePrecision(child)
return num
|
[
"def",
"reducePrecision",
"(",
"element",
")",
":",
"num",
"=",
"0",
"styles",
"=",
"_getStyle",
"(",
"element",
")",
"for",
"lengthAttr",
"in",
"[",
"'opacity'",
",",
"'flood-opacity'",
",",
"'fill-opacity'",
",",
"'stroke-opacity'",
",",
"'stop-opacity'",
",",
"'stroke-miterlimit'",
",",
"'stroke-dashoffset'",
",",
"'letter-spacing'",
",",
"'word-spacing'",
",",
"'kerning'",
",",
"'font-size-adjust'",
",",
"'font-size'",
",",
"'stroke-width'",
"]",
":",
"val",
"=",
"element",
".",
"getAttribute",
"(",
"lengthAttr",
")",
"if",
"val",
"!=",
"''",
":",
"valLen",
"=",
"SVGLength",
"(",
"val",
")",
"if",
"valLen",
".",
"units",
"!=",
"Unit",
".",
"INVALID",
":",
"# not an absolute/relative size or inherit, can be % though",
"newVal",
"=",
"scourLength",
"(",
"val",
")",
"if",
"len",
"(",
"newVal",
")",
"<",
"len",
"(",
"val",
")",
":",
"num",
"+=",
"len",
"(",
"val",
")",
"-",
"len",
"(",
"newVal",
")",
"element",
".",
"setAttribute",
"(",
"lengthAttr",
",",
"newVal",
")",
"# repeat for attributes hidden in styles",
"if",
"lengthAttr",
"in",
"styles",
":",
"val",
"=",
"styles",
"[",
"lengthAttr",
"]",
"valLen",
"=",
"SVGLength",
"(",
"val",
")",
"if",
"valLen",
".",
"units",
"!=",
"Unit",
".",
"INVALID",
":",
"newVal",
"=",
"scourLength",
"(",
"val",
")",
"if",
"len",
"(",
"newVal",
")",
"<",
"len",
"(",
"val",
")",
":",
"num",
"+=",
"len",
"(",
"val",
")",
"-",
"len",
"(",
"newVal",
")",
"styles",
"[",
"lengthAttr",
"]",
"=",
"newVal",
"_setStyle",
"(",
"element",
",",
"styles",
")",
"for",
"child",
"in",
"element",
".",
"childNodes",
":",
"if",
"child",
".",
"nodeType",
"==",
"Node",
".",
"ELEMENT_NODE",
":",
"num",
"+=",
"reducePrecision",
"(",
"child",
")",
"return",
"num"
] |
Because opacities, letter spacings, stroke widths and all that don't need
to be preserved in SVG files with 9 digits of precision.
Takes all of these attributes, in the given element node and its children,
and reduces their precision to the current Decimal context's precision.
Also checks for the attributes actually being lengths, not 'inherit', 'none'
or anything that isn't an SVGLength.
Returns the number of bytes saved after performing these reductions.
|
[
"Because",
"opacities",
"letter",
"spacings",
"stroke",
"widths",
"and",
"all",
"that",
"don",
"t",
"need",
"to",
"be",
"preserved",
"in",
"SVG",
"files",
"with",
"9",
"digits",
"of",
"precision",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2807-L2850
|
scour-project/scour
|
scour/scour.py
|
optimizeAngle
|
def optimizeAngle(angle):
"""
Because any rotation can be expressed within 360 degrees
of any given number, and since negative angles sometimes
are one character longer than corresponding positive angle,
we shorten the number to one in the range to [-90, 270[.
"""
# First, we put the new angle in the range ]-360, 360[.
# The modulo operator yields results with the sign of the
# divisor, so for negative dividends, we preserve the sign
# of the angle.
if angle < 0:
angle %= -360
else:
angle %= 360
# 720 degrees is unnecessary, as 360 covers all angles.
# As "-x" is shorter than "35x" and "-xxx" one character
# longer than positive angles <= 260, we constrain angle
# range to [-90, 270[ (or, equally valid: ]-100, 260]).
if angle >= 270:
angle -= 360
elif angle < -90:
angle += 360
return angle
|
python
|
def optimizeAngle(angle):
"""
Because any rotation can be expressed within 360 degrees
of any given number, and since negative angles sometimes
are one character longer than corresponding positive angle,
we shorten the number to one in the range to [-90, 270[.
"""
# First, we put the new angle in the range ]-360, 360[.
# The modulo operator yields results with the sign of the
# divisor, so for negative dividends, we preserve the sign
# of the angle.
if angle < 0:
angle %= -360
else:
angle %= 360
# 720 degrees is unnecessary, as 360 covers all angles.
# As "-x" is shorter than "35x" and "-xxx" one character
# longer than positive angles <= 260, we constrain angle
# range to [-90, 270[ (or, equally valid: ]-100, 260]).
if angle >= 270:
angle -= 360
elif angle < -90:
angle += 360
return angle
|
[
"def",
"optimizeAngle",
"(",
"angle",
")",
":",
"# First, we put the new angle in the range ]-360, 360[.",
"# The modulo operator yields results with the sign of the",
"# divisor, so for negative dividends, we preserve the sign",
"# of the angle.",
"if",
"angle",
"<",
"0",
":",
"angle",
"%=",
"-",
"360",
"else",
":",
"angle",
"%=",
"360",
"# 720 degrees is unnecessary, as 360 covers all angles.",
"# As \"-x\" is shorter than \"35x\" and \"-xxx\" one character",
"# longer than positive angles <= 260, we constrain angle",
"# range to [-90, 270[ (or, equally valid: ]-100, 260]).",
"if",
"angle",
">=",
"270",
":",
"angle",
"-=",
"360",
"elif",
"angle",
"<",
"-",
"90",
":",
"angle",
"+=",
"360",
"return",
"angle"
] |
Because any rotation can be expressed within 360 degrees
of any given number, and since negative angles sometimes
are one character longer than corresponding positive angle,
we shorten the number to one in the range to [-90, 270[.
|
[
"Because",
"any",
"rotation",
"can",
"be",
"expressed",
"within",
"360",
"degrees",
"of",
"any",
"given",
"number",
"and",
"since",
"negative",
"angles",
"sometimes",
"are",
"one",
"character",
"longer",
"than",
"corresponding",
"positive",
"angle",
"we",
"shorten",
"the",
"number",
"to",
"one",
"in",
"the",
"range",
"to",
"[",
"-",
"90",
"270",
"[",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2853-L2876
|
scour-project/scour
|
scour/scour.py
|
optimizeTransform
|
def optimizeTransform(transform):
"""
Optimises a series of transformations parsed from a single
transform="" attribute.
The transformation list is modified in-place.
"""
# FIXME: reordering these would optimize even more cases:
# first: Fold consecutive runs of the same transformation
# extra: Attempt to cast between types to create sameness:
# "matrix(0 1 -1 0 0 0) rotate(180) scale(-1)" all
# are rotations (90, 180, 180) -- thus "rotate(90)"
# second: Simplify transforms where numbers are optional.
# third: Attempt to simplify any single remaining matrix()
#
# if there's only one transformation and it's a matrix,
# try to make it a shorter non-matrix transformation
# NOTE: as matrix(a b c d e f) in SVG means the matrix:
# |¯ a c e ¯| make constants |¯ A1 A2 A3 ¯|
# | b d f | translating them | B1 B2 B3 |
# |_ 0 0 1 _| to more readable |_ 0 0 1 _|
if len(transform) == 1 and transform[0][0] == 'matrix':
matrix = A1, B1, A2, B2, A3, B3 = transform[0][1]
# |¯ 1 0 0 ¯|
# | 0 1 0 | Identity matrix (no transformation)
# |_ 0 0 1 _|
if matrix == [1, 0, 0, 1, 0, 0]:
del transform[0]
# |¯ 1 0 X ¯|
# | 0 1 Y | Translation by (X, Y).
# |_ 0 0 1 _|
elif (A1 == 1 and A2 == 0
and B1 == 0 and B2 == 1):
transform[0] = ('translate', [A3, B3])
# |¯ X 0 0 ¯|
# | 0 Y 0 | Scaling by (X, Y).
# |_ 0 0 1 _|
elif (A2 == 0 and A3 == 0
and B1 == 0 and B3 == 0):
transform[0] = ('scale', [A1, B2])
# |¯ cos(A) -sin(A) 0 ¯| Rotation by angle A,
# | sin(A) cos(A) 0 | clockwise, about the origin.
# |_ 0 0 1 _| A is in degrees, [-180...180].
elif (A1 == B2 and -1 <= A1 <= 1 and A3 == 0
and -B1 == A2 and -1 <= B1 <= 1 and B3 == 0
# as cos² A + sin² A == 1 and as decimal trig is approximate:
# FIXME: the "epsilon" term here should really be some function
# of the precision of the (sin|cos)_A terms, not 1e-15:
and abs((B1 ** 2) + (A1 ** 2) - 1) < Decimal("1e-15")):
sin_A, cos_A = B1, A1
# while asin(A) and acos(A) both only have an 180° range
# the sign of sin(A) and cos(A) varies across quadrants,
# letting us hone in on the angle the matrix represents:
# -- => < -90 | -+ => -90..0 | ++ => 0..90 | +- => >= 90
#
# http://en.wikipedia.org/wiki/File:Sine_cosine_plot.svg
# shows asin has the correct angle the middle quadrants:
A = Decimal(str(math.degrees(math.asin(float(sin_A)))))
if cos_A < 0: # otherwise needs adjusting from the edges
if sin_A < 0:
A = -180 - A
else:
A = 180 - A
transform[0] = ('rotate', [A])
# Simplify transformations where numbers are optional.
for type, args in transform:
if type == 'translate':
# Only the X coordinate is required for translations.
# If the Y coordinate is unspecified, it's 0.
if len(args) == 2 and args[1] == 0:
del args[1]
elif type == 'rotate':
args[0] = optimizeAngle(args[0]) # angle
# Only the angle is required for rotations.
# If the coordinates are unspecified, it's the origin (0, 0).
if len(args) == 3 and args[1] == args[2] == 0:
del args[1:]
elif type == 'scale':
# Only the X scaling factor is required.
# If the Y factor is unspecified, it's the same as X.
if len(args) == 2 and args[0] == args[1]:
del args[1]
# Attempt to coalesce runs of the same transformation.
# Translations followed immediately by other translations,
# rotations followed immediately by other rotations,
# scaling followed immediately by other scaling,
# are safe to add.
# Identity skewX/skewY are safe to remove, but how do they accrete?
# |¯ 1 0 0 ¯|
# | tan(A) 1 0 | skews X coordinates by angle A
# |_ 0 0 1 _|
#
# |¯ 1 tan(A) 0 ¯|
# | 0 1 0 | skews Y coordinates by angle A
# |_ 0 0 1 _|
#
# FIXME: A matrix followed immediately by another matrix
# would be safe to multiply together, too.
i = 1
while i < len(transform):
currType, currArgs = transform[i]
prevType, prevArgs = transform[i - 1]
if currType == prevType == 'translate':
prevArgs[0] += currArgs[0] # x
# for y, only add if the second translation has an explicit y
if len(currArgs) == 2:
if len(prevArgs) == 2:
prevArgs[1] += currArgs[1] # y
elif len(prevArgs) == 1:
prevArgs.append(currArgs[1]) # y
del transform[i]
if prevArgs[0] == prevArgs[1] == 0:
# Identity translation!
i -= 1
del transform[i]
elif (currType == prevType == 'rotate'
and len(prevArgs) == len(currArgs) == 1):
# Only coalesce if both rotations are from the origin.
prevArgs[0] = optimizeAngle(prevArgs[0] + currArgs[0])
del transform[i]
elif currType == prevType == 'scale':
prevArgs[0] *= currArgs[0] # x
# handle an implicit y
if len(prevArgs) == 2 and len(currArgs) == 2:
# y1 * y2
prevArgs[1] *= currArgs[1]
elif len(prevArgs) == 1 and len(currArgs) == 2:
# create y2 = uniformscalefactor1 * y2
prevArgs.append(prevArgs[0] * currArgs[1])
elif len(prevArgs) == 2 and len(currArgs) == 1:
# y1 * uniformscalefactor2
prevArgs[1] *= currArgs[0]
del transform[i]
# if prevArgs is [1] or [1, 1], then it is effectively an
# identity matrix and can be removed.
if prevArgs[0] == 1 and (len(prevArgs) == 1 or prevArgs[1] == 1):
# Identity scale!
i -= 1
del transform[i]
else:
i += 1
# Some fixups are needed for single-element transformation lists, since
# the loop above was to coalesce elements with their predecessors in the
# list, and thus it required 2 elements.
i = 0
while i < len(transform):
currType, currArgs = transform[i]
if ((currType == 'skewX' or currType == 'skewY')
and len(currArgs) == 1 and currArgs[0] == 0):
# Identity skew!
del transform[i]
elif ((currType == 'rotate')
and len(currArgs) == 1 and currArgs[0] == 0):
# Identity rotation!
del transform[i]
else:
i += 1
|
python
|
def optimizeTransform(transform):
"""
Optimises a series of transformations parsed from a single
transform="" attribute.
The transformation list is modified in-place.
"""
# FIXME: reordering these would optimize even more cases:
# first: Fold consecutive runs of the same transformation
# extra: Attempt to cast between types to create sameness:
# "matrix(0 1 -1 0 0 0) rotate(180) scale(-1)" all
# are rotations (90, 180, 180) -- thus "rotate(90)"
# second: Simplify transforms where numbers are optional.
# third: Attempt to simplify any single remaining matrix()
#
# if there's only one transformation and it's a matrix,
# try to make it a shorter non-matrix transformation
# NOTE: as matrix(a b c d e f) in SVG means the matrix:
# |¯ a c e ¯| make constants |¯ A1 A2 A3 ¯|
# | b d f | translating them | B1 B2 B3 |
# |_ 0 0 1 _| to more readable |_ 0 0 1 _|
if len(transform) == 1 and transform[0][0] == 'matrix':
matrix = A1, B1, A2, B2, A3, B3 = transform[0][1]
# |¯ 1 0 0 ¯|
# | 0 1 0 | Identity matrix (no transformation)
# |_ 0 0 1 _|
if matrix == [1, 0, 0, 1, 0, 0]:
del transform[0]
# |¯ 1 0 X ¯|
# | 0 1 Y | Translation by (X, Y).
# |_ 0 0 1 _|
elif (A1 == 1 and A2 == 0
and B1 == 0 and B2 == 1):
transform[0] = ('translate', [A3, B3])
# |¯ X 0 0 ¯|
# | 0 Y 0 | Scaling by (X, Y).
# |_ 0 0 1 _|
elif (A2 == 0 and A3 == 0
and B1 == 0 and B3 == 0):
transform[0] = ('scale', [A1, B2])
# |¯ cos(A) -sin(A) 0 ¯| Rotation by angle A,
# | sin(A) cos(A) 0 | clockwise, about the origin.
# |_ 0 0 1 _| A is in degrees, [-180...180].
elif (A1 == B2 and -1 <= A1 <= 1 and A3 == 0
and -B1 == A2 and -1 <= B1 <= 1 and B3 == 0
# as cos² A + sin² A == 1 and as decimal trig is approximate:
# FIXME: the "epsilon" term here should really be some function
# of the precision of the (sin|cos)_A terms, not 1e-15:
and abs((B1 ** 2) + (A1 ** 2) - 1) < Decimal("1e-15")):
sin_A, cos_A = B1, A1
# while asin(A) and acos(A) both only have an 180° range
# the sign of sin(A) and cos(A) varies across quadrants,
# letting us hone in on the angle the matrix represents:
# -- => < -90 | -+ => -90..0 | ++ => 0..90 | +- => >= 90
#
# http://en.wikipedia.org/wiki/File:Sine_cosine_plot.svg
# shows asin has the correct angle the middle quadrants:
A = Decimal(str(math.degrees(math.asin(float(sin_A)))))
if cos_A < 0: # otherwise needs adjusting from the edges
if sin_A < 0:
A = -180 - A
else:
A = 180 - A
transform[0] = ('rotate', [A])
# Simplify transformations where numbers are optional.
for type, args in transform:
if type == 'translate':
# Only the X coordinate is required for translations.
# If the Y coordinate is unspecified, it's 0.
if len(args) == 2 and args[1] == 0:
del args[1]
elif type == 'rotate':
args[0] = optimizeAngle(args[0]) # angle
# Only the angle is required for rotations.
# If the coordinates are unspecified, it's the origin (0, 0).
if len(args) == 3 and args[1] == args[2] == 0:
del args[1:]
elif type == 'scale':
# Only the X scaling factor is required.
# If the Y factor is unspecified, it's the same as X.
if len(args) == 2 and args[0] == args[1]:
del args[1]
# Attempt to coalesce runs of the same transformation.
# Translations followed immediately by other translations,
# rotations followed immediately by other rotations,
# scaling followed immediately by other scaling,
# are safe to add.
# Identity skewX/skewY are safe to remove, but how do they accrete?
# |¯ 1 0 0 ¯|
# | tan(A) 1 0 | skews X coordinates by angle A
# |_ 0 0 1 _|
#
# |¯ 1 tan(A) 0 ¯|
# | 0 1 0 | skews Y coordinates by angle A
# |_ 0 0 1 _|
#
# FIXME: A matrix followed immediately by another matrix
# would be safe to multiply together, too.
i = 1
while i < len(transform):
currType, currArgs = transform[i]
prevType, prevArgs = transform[i - 1]
if currType == prevType == 'translate':
prevArgs[0] += currArgs[0] # x
# for y, only add if the second translation has an explicit y
if len(currArgs) == 2:
if len(prevArgs) == 2:
prevArgs[1] += currArgs[1] # y
elif len(prevArgs) == 1:
prevArgs.append(currArgs[1]) # y
del transform[i]
if prevArgs[0] == prevArgs[1] == 0:
# Identity translation!
i -= 1
del transform[i]
elif (currType == prevType == 'rotate'
and len(prevArgs) == len(currArgs) == 1):
# Only coalesce if both rotations are from the origin.
prevArgs[0] = optimizeAngle(prevArgs[0] + currArgs[0])
del transform[i]
elif currType == prevType == 'scale':
prevArgs[0] *= currArgs[0] # x
# handle an implicit y
if len(prevArgs) == 2 and len(currArgs) == 2:
# y1 * y2
prevArgs[1] *= currArgs[1]
elif len(prevArgs) == 1 and len(currArgs) == 2:
# create y2 = uniformscalefactor1 * y2
prevArgs.append(prevArgs[0] * currArgs[1])
elif len(prevArgs) == 2 and len(currArgs) == 1:
# y1 * uniformscalefactor2
prevArgs[1] *= currArgs[0]
del transform[i]
# if prevArgs is [1] or [1, 1], then it is effectively an
# identity matrix and can be removed.
if prevArgs[0] == 1 and (len(prevArgs) == 1 or prevArgs[1] == 1):
# Identity scale!
i -= 1
del transform[i]
else:
i += 1
# Some fixups are needed for single-element transformation lists, since
# the loop above was to coalesce elements with their predecessors in the
# list, and thus it required 2 elements.
i = 0
while i < len(transform):
currType, currArgs = transform[i]
if ((currType == 'skewX' or currType == 'skewY')
and len(currArgs) == 1 and currArgs[0] == 0):
# Identity skew!
del transform[i]
elif ((currType == 'rotate')
and len(currArgs) == 1 and currArgs[0] == 0):
# Identity rotation!
del transform[i]
else:
i += 1
|
[
"def",
"optimizeTransform",
"(",
"transform",
")",
":",
"# FIXME: reordering these would optimize even more cases:",
"# first: Fold consecutive runs of the same transformation",
"# extra: Attempt to cast between types to create sameness:",
"# \"matrix(0 1 -1 0 0 0) rotate(180) scale(-1)\" all",
"# are rotations (90, 180, 180) -- thus \"rotate(90)\"",
"# second: Simplify transforms where numbers are optional.",
"# third: Attempt to simplify any single remaining matrix()",
"#",
"# if there's only one transformation and it's a matrix,",
"# try to make it a shorter non-matrix transformation",
"# NOTE: as matrix(a b c d e f) in SVG means the matrix:",
"# |¯ a c e ¯| make constants |¯ A1 A2 A3 ¯|",
"# | b d f | translating them | B1 B2 B3 |",
"# |_ 0 0 1 _| to more readable |_ 0 0 1 _|",
"if",
"len",
"(",
"transform",
")",
"==",
"1",
"and",
"transform",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'matrix'",
":",
"matrix",
"=",
"A1",
",",
"B1",
",",
"A2",
",",
"B2",
",",
"A3",
",",
"B3",
"=",
"transform",
"[",
"0",
"]",
"[",
"1",
"]",
"# |¯ 1 0 0 ¯|",
"# | 0 1 0 | Identity matrix (no transformation)",
"# |_ 0 0 1 _|",
"if",
"matrix",
"==",
"[",
"1",
",",
"0",
",",
"0",
",",
"1",
",",
"0",
",",
"0",
"]",
":",
"del",
"transform",
"[",
"0",
"]",
"# |¯ 1 0 X ¯|",
"# | 0 1 Y | Translation by (X, Y).",
"# |_ 0 0 1 _|",
"elif",
"(",
"A1",
"==",
"1",
"and",
"A2",
"==",
"0",
"and",
"B1",
"==",
"0",
"and",
"B2",
"==",
"1",
")",
":",
"transform",
"[",
"0",
"]",
"=",
"(",
"'translate'",
",",
"[",
"A3",
",",
"B3",
"]",
")",
"# |¯ X 0 0 ¯|",
"# | 0 Y 0 | Scaling by (X, Y).",
"# |_ 0 0 1 _|",
"elif",
"(",
"A2",
"==",
"0",
"and",
"A3",
"==",
"0",
"and",
"B1",
"==",
"0",
"and",
"B3",
"==",
"0",
")",
":",
"transform",
"[",
"0",
"]",
"=",
"(",
"'scale'",
",",
"[",
"A1",
",",
"B2",
"]",
")",
"# |¯ cos(A) -sin(A) 0 ¯| Rotation by angle A,",
"# | sin(A) cos(A) 0 | clockwise, about the origin.",
"# |_ 0 0 1 _| A is in degrees, [-180...180].",
"elif",
"(",
"A1",
"==",
"B2",
"and",
"-",
"1",
"<=",
"A1",
"<=",
"1",
"and",
"A3",
"==",
"0",
"and",
"-",
"B1",
"==",
"A2",
"and",
"-",
"1",
"<=",
"B1",
"<=",
"1",
"and",
"B3",
"==",
"0",
"# as cos² A + sin² A == 1 and as decimal trig is approximate:",
"# FIXME: the \"epsilon\" term here should really be some function",
"# of the precision of the (sin|cos)_A terms, not 1e-15:",
"and",
"abs",
"(",
"(",
"B1",
"**",
"2",
")",
"+",
"(",
"A1",
"**",
"2",
")",
"-",
"1",
")",
"<",
"Decimal",
"(",
"\"1e-15\"",
")",
")",
":",
"sin_A",
",",
"cos_A",
"=",
"B1",
",",
"A1",
"# while asin(A) and acos(A) both only have an 180° range",
"# the sign of sin(A) and cos(A) varies across quadrants,",
"# letting us hone in on the angle the matrix represents:",
"# -- => < -90 | -+ => -90..0 | ++ => 0..90 | +- => >= 90",
"#",
"# http://en.wikipedia.org/wiki/File:Sine_cosine_plot.svg",
"# shows asin has the correct angle the middle quadrants:",
"A",
"=",
"Decimal",
"(",
"str",
"(",
"math",
".",
"degrees",
"(",
"math",
".",
"asin",
"(",
"float",
"(",
"sin_A",
")",
")",
")",
")",
")",
"if",
"cos_A",
"<",
"0",
":",
"# otherwise needs adjusting from the edges",
"if",
"sin_A",
"<",
"0",
":",
"A",
"=",
"-",
"180",
"-",
"A",
"else",
":",
"A",
"=",
"180",
"-",
"A",
"transform",
"[",
"0",
"]",
"=",
"(",
"'rotate'",
",",
"[",
"A",
"]",
")",
"# Simplify transformations where numbers are optional.",
"for",
"type",
",",
"args",
"in",
"transform",
":",
"if",
"type",
"==",
"'translate'",
":",
"# Only the X coordinate is required for translations.",
"# If the Y coordinate is unspecified, it's 0.",
"if",
"len",
"(",
"args",
")",
"==",
"2",
"and",
"args",
"[",
"1",
"]",
"==",
"0",
":",
"del",
"args",
"[",
"1",
"]",
"elif",
"type",
"==",
"'rotate'",
":",
"args",
"[",
"0",
"]",
"=",
"optimizeAngle",
"(",
"args",
"[",
"0",
"]",
")",
"# angle",
"# Only the angle is required for rotations.",
"# If the coordinates are unspecified, it's the origin (0, 0).",
"if",
"len",
"(",
"args",
")",
"==",
"3",
"and",
"args",
"[",
"1",
"]",
"==",
"args",
"[",
"2",
"]",
"==",
"0",
":",
"del",
"args",
"[",
"1",
":",
"]",
"elif",
"type",
"==",
"'scale'",
":",
"# Only the X scaling factor is required.",
"# If the Y factor is unspecified, it's the same as X.",
"if",
"len",
"(",
"args",
")",
"==",
"2",
"and",
"args",
"[",
"0",
"]",
"==",
"args",
"[",
"1",
"]",
":",
"del",
"args",
"[",
"1",
"]",
"# Attempt to coalesce runs of the same transformation.",
"# Translations followed immediately by other translations,",
"# rotations followed immediately by other rotations,",
"# scaling followed immediately by other scaling,",
"# are safe to add.",
"# Identity skewX/skewY are safe to remove, but how do they accrete?",
"# |¯ 1 0 0 ¯|",
"# | tan(A) 1 0 | skews X coordinates by angle A",
"# |_ 0 0 1 _|",
"#",
"# |¯ 1 tan(A) 0 ¯|",
"# | 0 1 0 | skews Y coordinates by angle A",
"# |_ 0 0 1 _|",
"#",
"# FIXME: A matrix followed immediately by another matrix",
"# would be safe to multiply together, too.",
"i",
"=",
"1",
"while",
"i",
"<",
"len",
"(",
"transform",
")",
":",
"currType",
",",
"currArgs",
"=",
"transform",
"[",
"i",
"]",
"prevType",
",",
"prevArgs",
"=",
"transform",
"[",
"i",
"-",
"1",
"]",
"if",
"currType",
"==",
"prevType",
"==",
"'translate'",
":",
"prevArgs",
"[",
"0",
"]",
"+=",
"currArgs",
"[",
"0",
"]",
"# x",
"# for y, only add if the second translation has an explicit y",
"if",
"len",
"(",
"currArgs",
")",
"==",
"2",
":",
"if",
"len",
"(",
"prevArgs",
")",
"==",
"2",
":",
"prevArgs",
"[",
"1",
"]",
"+=",
"currArgs",
"[",
"1",
"]",
"# y",
"elif",
"len",
"(",
"prevArgs",
")",
"==",
"1",
":",
"prevArgs",
".",
"append",
"(",
"currArgs",
"[",
"1",
"]",
")",
"# y",
"del",
"transform",
"[",
"i",
"]",
"if",
"prevArgs",
"[",
"0",
"]",
"==",
"prevArgs",
"[",
"1",
"]",
"==",
"0",
":",
"# Identity translation!",
"i",
"-=",
"1",
"del",
"transform",
"[",
"i",
"]",
"elif",
"(",
"currType",
"==",
"prevType",
"==",
"'rotate'",
"and",
"len",
"(",
"prevArgs",
")",
"==",
"len",
"(",
"currArgs",
")",
"==",
"1",
")",
":",
"# Only coalesce if both rotations are from the origin.",
"prevArgs",
"[",
"0",
"]",
"=",
"optimizeAngle",
"(",
"prevArgs",
"[",
"0",
"]",
"+",
"currArgs",
"[",
"0",
"]",
")",
"del",
"transform",
"[",
"i",
"]",
"elif",
"currType",
"==",
"prevType",
"==",
"'scale'",
":",
"prevArgs",
"[",
"0",
"]",
"*=",
"currArgs",
"[",
"0",
"]",
"# x",
"# handle an implicit y",
"if",
"len",
"(",
"prevArgs",
")",
"==",
"2",
"and",
"len",
"(",
"currArgs",
")",
"==",
"2",
":",
"# y1 * y2",
"prevArgs",
"[",
"1",
"]",
"*=",
"currArgs",
"[",
"1",
"]",
"elif",
"len",
"(",
"prevArgs",
")",
"==",
"1",
"and",
"len",
"(",
"currArgs",
")",
"==",
"2",
":",
"# create y2 = uniformscalefactor1 * y2",
"prevArgs",
".",
"append",
"(",
"prevArgs",
"[",
"0",
"]",
"*",
"currArgs",
"[",
"1",
"]",
")",
"elif",
"len",
"(",
"prevArgs",
")",
"==",
"2",
"and",
"len",
"(",
"currArgs",
")",
"==",
"1",
":",
"# y1 * uniformscalefactor2",
"prevArgs",
"[",
"1",
"]",
"*=",
"currArgs",
"[",
"0",
"]",
"del",
"transform",
"[",
"i",
"]",
"# if prevArgs is [1] or [1, 1], then it is effectively an",
"# identity matrix and can be removed.",
"if",
"prevArgs",
"[",
"0",
"]",
"==",
"1",
"and",
"(",
"len",
"(",
"prevArgs",
")",
"==",
"1",
"or",
"prevArgs",
"[",
"1",
"]",
"==",
"1",
")",
":",
"# Identity scale!",
"i",
"-=",
"1",
"del",
"transform",
"[",
"i",
"]",
"else",
":",
"i",
"+=",
"1",
"# Some fixups are needed for single-element transformation lists, since",
"# the loop above was to coalesce elements with their predecessors in the",
"# list, and thus it required 2 elements.",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"transform",
")",
":",
"currType",
",",
"currArgs",
"=",
"transform",
"[",
"i",
"]",
"if",
"(",
"(",
"currType",
"==",
"'skewX'",
"or",
"currType",
"==",
"'skewY'",
")",
"and",
"len",
"(",
"currArgs",
")",
"==",
"1",
"and",
"currArgs",
"[",
"0",
"]",
"==",
"0",
")",
":",
"# Identity skew!",
"del",
"transform",
"[",
"i",
"]",
"elif",
"(",
"(",
"currType",
"==",
"'rotate'",
")",
"and",
"len",
"(",
"currArgs",
")",
"==",
"1",
"and",
"currArgs",
"[",
"0",
"]",
"==",
"0",
")",
":",
"# Identity rotation!",
"del",
"transform",
"[",
"i",
"]",
"else",
":",
"i",
"+=",
"1"
] |
Optimises a series of transformations parsed from a single
transform="" attribute.
The transformation list is modified in-place.
|
[
"Optimises",
"a",
"series",
"of",
"transformations",
"parsed",
"from",
"a",
"single",
"transform",
"=",
"attribute",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2879-L3038
|
scour-project/scour
|
scour/scour.py
|
optimizeTransforms
|
def optimizeTransforms(element, options):
"""
Attempts to optimise transform specifications on the given node and its children.
Returns the number of bytes saved after performing these reductions.
"""
num = 0
for transformAttr in ['transform', 'patternTransform', 'gradientTransform']:
val = element.getAttribute(transformAttr)
if val != '':
transform = svg_transform_parser.parse(val)
optimizeTransform(transform)
newVal = serializeTransform(transform)
if len(newVal) < len(val):
if len(newVal):
element.setAttribute(transformAttr, newVal)
else:
element.removeAttribute(transformAttr)
num += len(val) - len(newVal)
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
num += optimizeTransforms(child, options)
return num
|
python
|
def optimizeTransforms(element, options):
"""
Attempts to optimise transform specifications on the given node and its children.
Returns the number of bytes saved after performing these reductions.
"""
num = 0
for transformAttr in ['transform', 'patternTransform', 'gradientTransform']:
val = element.getAttribute(transformAttr)
if val != '':
transform = svg_transform_parser.parse(val)
optimizeTransform(transform)
newVal = serializeTransform(transform)
if len(newVal) < len(val):
if len(newVal):
element.setAttribute(transformAttr, newVal)
else:
element.removeAttribute(transformAttr)
num += len(val) - len(newVal)
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
num += optimizeTransforms(child, options)
return num
|
[
"def",
"optimizeTransforms",
"(",
"element",
",",
"options",
")",
":",
"num",
"=",
"0",
"for",
"transformAttr",
"in",
"[",
"'transform'",
",",
"'patternTransform'",
",",
"'gradientTransform'",
"]",
":",
"val",
"=",
"element",
".",
"getAttribute",
"(",
"transformAttr",
")",
"if",
"val",
"!=",
"''",
":",
"transform",
"=",
"svg_transform_parser",
".",
"parse",
"(",
"val",
")",
"optimizeTransform",
"(",
"transform",
")",
"newVal",
"=",
"serializeTransform",
"(",
"transform",
")",
"if",
"len",
"(",
"newVal",
")",
"<",
"len",
"(",
"val",
")",
":",
"if",
"len",
"(",
"newVal",
")",
":",
"element",
".",
"setAttribute",
"(",
"transformAttr",
",",
"newVal",
")",
"else",
":",
"element",
".",
"removeAttribute",
"(",
"transformAttr",
")",
"num",
"+=",
"len",
"(",
"val",
")",
"-",
"len",
"(",
"newVal",
")",
"for",
"child",
"in",
"element",
".",
"childNodes",
":",
"if",
"child",
".",
"nodeType",
"==",
"Node",
".",
"ELEMENT_NODE",
":",
"num",
"+=",
"optimizeTransforms",
"(",
"child",
",",
"options",
")",
"return",
"num"
] |
Attempts to optimise transform specifications on the given node and its children.
Returns the number of bytes saved after performing these reductions.
|
[
"Attempts",
"to",
"optimise",
"transform",
"specifications",
"on",
"the",
"given",
"node",
"and",
"its",
"children",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L3041-L3069
|
scour-project/scour
|
scour/scour.py
|
removeComments
|
def removeComments(element):
"""
Removes comments from the element and its children.
"""
global _num_bytes_saved_in_comments
num = 0
if isinstance(element, xml.dom.minidom.Comment):
_num_bytes_saved_in_comments += len(element.data)
element.parentNode.removeChild(element)
num += 1
else:
for subelement in element.childNodes[:]:
num += removeComments(subelement)
return num
|
python
|
def removeComments(element):
"""
Removes comments from the element and its children.
"""
global _num_bytes_saved_in_comments
num = 0
if isinstance(element, xml.dom.minidom.Comment):
_num_bytes_saved_in_comments += len(element.data)
element.parentNode.removeChild(element)
num += 1
else:
for subelement in element.childNodes[:]:
num += removeComments(subelement)
return num
|
[
"def",
"removeComments",
"(",
"element",
")",
":",
"global",
"_num_bytes_saved_in_comments",
"num",
"=",
"0",
"if",
"isinstance",
"(",
"element",
",",
"xml",
".",
"dom",
".",
"minidom",
".",
"Comment",
")",
":",
"_num_bytes_saved_in_comments",
"+=",
"len",
"(",
"element",
".",
"data",
")",
"element",
".",
"parentNode",
".",
"removeChild",
"(",
"element",
")",
"num",
"+=",
"1",
"else",
":",
"for",
"subelement",
"in",
"element",
".",
"childNodes",
"[",
":",
"]",
":",
"num",
"+=",
"removeComments",
"(",
"subelement",
")",
"return",
"num"
] |
Removes comments from the element and its children.
|
[
"Removes",
"comments",
"from",
"the",
"element",
"and",
"its",
"children",
"."
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L3072-L3087
|
scour-project/scour
|
scour/scour.py
|
embedRasters
|
def embedRasters(element, options):
import base64
"""
Converts raster references to inline images.
NOTE: there are size limits to base64-encoding handling in browsers
"""
global _num_rasters_embedded
href = element.getAttributeNS(NS['XLINK'], 'href')
# if xlink:href is set, then grab the id
if href != '' and len(href) > 1:
ext = os.path.splitext(os.path.basename(href))[1].lower()[1:]
# only operate on files with 'png', 'jpg', and 'gif' file extensions
if ext in ['png', 'jpg', 'gif']:
# fix common issues with file paths
# TODO: should we warn the user instead of trying to correct those invalid URIs?
# convert backslashes to slashes
href_fixed = href.replace('\\', '/')
# absolute 'file:' URIs have to use three slashes (unless specifying a host which I've never seen)
href_fixed = re.sub('file:/+', 'file:///', href_fixed)
# parse the URI to get scheme and path
# in principle it would make sense to work only with this ParseResult and call 'urlunparse()' in the end
# however 'urlunparse(urlparse(file:raster.png))' -> 'file:///raster.png' which is nonsense
parsed_href = urllib.parse.urlparse(href_fixed)
# assume locations without protocol point to local files (and should use the 'file:' protocol)
if parsed_href.scheme == '':
parsed_href = parsed_href._replace(scheme='file')
if href_fixed[0] == '/':
href_fixed = 'file://' + href_fixed
else:
href_fixed = 'file:' + href_fixed
# relative local paths are relative to the input file, therefore temporarily change the working dir
working_dir_old = None
if parsed_href.scheme == 'file' and parsed_href.path[0] != '/':
if options.infilename:
working_dir_old = os.getcwd()
working_dir_new = os.path.abspath(os.path.dirname(options.infilename))
os.chdir(working_dir_new)
# open/download the file
try:
file = urllib.request.urlopen(href_fixed)
rasterdata = file.read()
file.close()
except Exception as e:
print("WARNING: Could not open file '" + href + "' for embedding. "
"The raster image will be kept as a reference but might be invalid. "
"(Exception details: " + str(e) + ")", file=options.ensure_value("stdout", sys.stdout))
rasterdata = ''
finally:
# always restore initial working directory if we changed it above
if working_dir_old is not None:
os.chdir(working_dir_old)
# TODO: should we remove all images which don't resolve?
# then we also have to consider unreachable remote locations (i.e. if there is no internet connection)
if rasterdata != '':
# base64-encode raster
b64eRaster = base64.b64encode(rasterdata)
# set href attribute to base64-encoded equivalent
if b64eRaster != '':
# PNG and GIF both have MIME Type 'image/[ext]', but
# JPEG has MIME Type 'image/jpeg'
if ext == 'jpg':
ext = 'jpeg'
element.setAttributeNS(NS['XLINK'], 'href',
'data:image/' + ext + ';base64,' + b64eRaster.decode())
_num_rasters_embedded += 1
del b64eRaster
|
python
|
def embedRasters(element, options):
import base64
"""
Converts raster references to inline images.
NOTE: there are size limits to base64-encoding handling in browsers
"""
global _num_rasters_embedded
href = element.getAttributeNS(NS['XLINK'], 'href')
# if xlink:href is set, then grab the id
if href != '' and len(href) > 1:
ext = os.path.splitext(os.path.basename(href))[1].lower()[1:]
# only operate on files with 'png', 'jpg', and 'gif' file extensions
if ext in ['png', 'jpg', 'gif']:
# fix common issues with file paths
# TODO: should we warn the user instead of trying to correct those invalid URIs?
# convert backslashes to slashes
href_fixed = href.replace('\\', '/')
# absolute 'file:' URIs have to use three slashes (unless specifying a host which I've never seen)
href_fixed = re.sub('file:/+', 'file:///', href_fixed)
# parse the URI to get scheme and path
# in principle it would make sense to work only with this ParseResult and call 'urlunparse()' in the end
# however 'urlunparse(urlparse(file:raster.png))' -> 'file:///raster.png' which is nonsense
parsed_href = urllib.parse.urlparse(href_fixed)
# assume locations without protocol point to local files (and should use the 'file:' protocol)
if parsed_href.scheme == '':
parsed_href = parsed_href._replace(scheme='file')
if href_fixed[0] == '/':
href_fixed = 'file://' + href_fixed
else:
href_fixed = 'file:' + href_fixed
# relative local paths are relative to the input file, therefore temporarily change the working dir
working_dir_old = None
if parsed_href.scheme == 'file' and parsed_href.path[0] != '/':
if options.infilename:
working_dir_old = os.getcwd()
working_dir_new = os.path.abspath(os.path.dirname(options.infilename))
os.chdir(working_dir_new)
# open/download the file
try:
file = urllib.request.urlopen(href_fixed)
rasterdata = file.read()
file.close()
except Exception as e:
print("WARNING: Could not open file '" + href + "' for embedding. "
"The raster image will be kept as a reference but might be invalid. "
"(Exception details: " + str(e) + ")", file=options.ensure_value("stdout", sys.stdout))
rasterdata = ''
finally:
# always restore initial working directory if we changed it above
if working_dir_old is not None:
os.chdir(working_dir_old)
# TODO: should we remove all images which don't resolve?
# then we also have to consider unreachable remote locations (i.e. if there is no internet connection)
if rasterdata != '':
# base64-encode raster
b64eRaster = base64.b64encode(rasterdata)
# set href attribute to base64-encoded equivalent
if b64eRaster != '':
# PNG and GIF both have MIME Type 'image/[ext]', but
# JPEG has MIME Type 'image/jpeg'
if ext == 'jpg':
ext = 'jpeg'
element.setAttributeNS(NS['XLINK'], 'href',
'data:image/' + ext + ';base64,' + b64eRaster.decode())
_num_rasters_embedded += 1
del b64eRaster
|
[
"def",
"embedRasters",
"(",
"element",
",",
"options",
")",
":",
"import",
"base64",
"global",
"_num_rasters_embedded",
"href",
"=",
"element",
".",
"getAttributeNS",
"(",
"NS",
"[",
"'XLINK'",
"]",
",",
"'href'",
")",
"# if xlink:href is set, then grab the id",
"if",
"href",
"!=",
"''",
"and",
"len",
"(",
"href",
")",
">",
"1",
":",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"href",
")",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"[",
"1",
":",
"]",
"# only operate on files with 'png', 'jpg', and 'gif' file extensions",
"if",
"ext",
"in",
"[",
"'png'",
",",
"'jpg'",
",",
"'gif'",
"]",
":",
"# fix common issues with file paths",
"# TODO: should we warn the user instead of trying to correct those invalid URIs?",
"# convert backslashes to slashes",
"href_fixed",
"=",
"href",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"# absolute 'file:' URIs have to use three slashes (unless specifying a host which I've never seen)",
"href_fixed",
"=",
"re",
".",
"sub",
"(",
"'file:/+'",
",",
"'file:///'",
",",
"href_fixed",
")",
"# parse the URI to get scheme and path",
"# in principle it would make sense to work only with this ParseResult and call 'urlunparse()' in the end",
"# however 'urlunparse(urlparse(file:raster.png))' -> 'file:///raster.png' which is nonsense",
"parsed_href",
"=",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"href_fixed",
")",
"# assume locations without protocol point to local files (and should use the 'file:' protocol)",
"if",
"parsed_href",
".",
"scheme",
"==",
"''",
":",
"parsed_href",
"=",
"parsed_href",
".",
"_replace",
"(",
"scheme",
"=",
"'file'",
")",
"if",
"href_fixed",
"[",
"0",
"]",
"==",
"'/'",
":",
"href_fixed",
"=",
"'file://'",
"+",
"href_fixed",
"else",
":",
"href_fixed",
"=",
"'file:'",
"+",
"href_fixed",
"# relative local paths are relative to the input file, therefore temporarily change the working dir",
"working_dir_old",
"=",
"None",
"if",
"parsed_href",
".",
"scheme",
"==",
"'file'",
"and",
"parsed_href",
".",
"path",
"[",
"0",
"]",
"!=",
"'/'",
":",
"if",
"options",
".",
"infilename",
":",
"working_dir_old",
"=",
"os",
".",
"getcwd",
"(",
")",
"working_dir_new",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"options",
".",
"infilename",
")",
")",
"os",
".",
"chdir",
"(",
"working_dir_new",
")",
"# open/download the file",
"try",
":",
"file",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"href_fixed",
")",
"rasterdata",
"=",
"file",
".",
"read",
"(",
")",
"file",
".",
"close",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\"WARNING: Could not open file '\"",
"+",
"href",
"+",
"\"' for embedding. \"",
"\"The raster image will be kept as a reference but might be invalid. \"",
"\"(Exception details: \"",
"+",
"str",
"(",
"e",
")",
"+",
"\")\"",
",",
"file",
"=",
"options",
".",
"ensure_value",
"(",
"\"stdout\"",
",",
"sys",
".",
"stdout",
")",
")",
"rasterdata",
"=",
"''",
"finally",
":",
"# always restore initial working directory if we changed it above",
"if",
"working_dir_old",
"is",
"not",
"None",
":",
"os",
".",
"chdir",
"(",
"working_dir_old",
")",
"# TODO: should we remove all images which don't resolve?",
"# then we also have to consider unreachable remote locations (i.e. if there is no internet connection)",
"if",
"rasterdata",
"!=",
"''",
":",
"# base64-encode raster",
"b64eRaster",
"=",
"base64",
".",
"b64encode",
"(",
"rasterdata",
")",
"# set href attribute to base64-encoded equivalent",
"if",
"b64eRaster",
"!=",
"''",
":",
"# PNG and GIF both have MIME Type 'image/[ext]', but",
"# JPEG has MIME Type 'image/jpeg'",
"if",
"ext",
"==",
"'jpg'",
":",
"ext",
"=",
"'jpeg'",
"element",
".",
"setAttributeNS",
"(",
"NS",
"[",
"'XLINK'",
"]",
",",
"'href'",
",",
"'data:image/'",
"+",
"ext",
"+",
"';base64,'",
"+",
"b64eRaster",
".",
"decode",
"(",
")",
")",
"_num_rasters_embedded",
"+=",
"1",
"del",
"b64eRaster"
] |
Converts raster references to inline images.
NOTE: there are size limits to base64-encoding handling in browsers
|
[
"Converts",
"raster",
"references",
"to",
"inline",
"images",
".",
"NOTE",
":",
"there",
"are",
"size",
"limits",
"to",
"base64",
"-",
"encoding",
"handling",
"in",
"browsers"
] |
train
|
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L3090-L3165
|
tym-xqo/nerium
|
nerium/formatter.py
|
get_format
|
def get_format(format_):
""" Find format schema in $FORMAT_PATH or nerium/schema
"""
format_path = os.getenv('FORMAT_PATH', 'format_files')
try:
spec = importlib.util.spec_from_file_location(
"format_mod", f"{format_path}/{format_}.py")
format_mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(format_mod)
except FileNotFoundError:
try:
format_mod = import_module(f'nerium.schema.{format_}')
except ModuleNotFoundError:
format_mod = import_module('nerium.schema.default')
schema = format_mod.ResultSchema()
return schema
|
python
|
def get_format(format_):
""" Find format schema in $FORMAT_PATH or nerium/schema
"""
format_path = os.getenv('FORMAT_PATH', 'format_files')
try:
spec = importlib.util.spec_from_file_location(
"format_mod", f"{format_path}/{format_}.py")
format_mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(format_mod)
except FileNotFoundError:
try:
format_mod = import_module(f'nerium.schema.{format_}')
except ModuleNotFoundError:
format_mod = import_module('nerium.schema.default')
schema = format_mod.ResultSchema()
return schema
|
[
"def",
"get_format",
"(",
"format_",
")",
":",
"format_path",
"=",
"os",
".",
"getenv",
"(",
"'FORMAT_PATH'",
",",
"'format_files'",
")",
"try",
":",
"spec",
"=",
"importlib",
".",
"util",
".",
"spec_from_file_location",
"(",
"\"format_mod\"",
",",
"f\"{format_path}/{format_}.py\"",
")",
"format_mod",
"=",
"importlib",
".",
"util",
".",
"module_from_spec",
"(",
"spec",
")",
"spec",
".",
"loader",
".",
"exec_module",
"(",
"format_mod",
")",
"except",
"FileNotFoundError",
":",
"try",
":",
"format_mod",
"=",
"import_module",
"(",
"f'nerium.schema.{format_}'",
")",
"except",
"ModuleNotFoundError",
":",
"format_mod",
"=",
"import_module",
"(",
"'nerium.schema.default'",
")",
"schema",
"=",
"format_mod",
".",
"ResultSchema",
"(",
")",
"return",
"schema"
] |
Find format schema in $FORMAT_PATH or nerium/schema
|
[
"Find",
"format",
"schema",
"in",
"$FORMAT_PATH",
"or",
"nerium",
"/",
"schema"
] |
train
|
https://github.com/tym-xqo/nerium/blob/b234847d95f37c3a49dff15a189205fe5bbbc05f/nerium/formatter.py#L6-L21
|
vedvyas/doxytag2zealdb
|
doxytag2zealdb/propertylist.py
|
DoxygenPropertyList.set_property
|
def set_property(self, key, value):
'''Set a new (or updating existing) key value pair.
Args:
key: A string containing the key namespace
value: A str, int, or bool value
Raises:
NotImplementedError: an unsupported value-type was provided
'''
value_type = type(value)
if value_type not in [str, int, bool]:
raise NotImplementedError(
'Only string, integer, and boolean properties are implemented')
key_object = self.properties.findChild(name='key', text=key)
# Key (and value, if it's a valid property list) don't exist
if key_object is None:
key_object = self.soup.new_tag('key')
key_object.string = key
self.properties.append(key_object)
value_object = self.soup.new_tag(
{str: 'string', int: 'integer', bool: str(value).lower()}[
value_type])
if value_type is not bool:
value_object.string = str(value)
self.properties.append(value_object)
return
# Key (and value, if it's a valid property list) exist
# Eh, just remove the key+value tags from the tree and re-add them
# (with the new value)
value_object = key_object.find_next_sibling()
key_object.decompose()
value_object.decompose()
self.set_property(key, value)
|
python
|
def set_property(self, key, value):
'''Set a new (or updating existing) key value pair.
Args:
key: A string containing the key namespace
value: A str, int, or bool value
Raises:
NotImplementedError: an unsupported value-type was provided
'''
value_type = type(value)
if value_type not in [str, int, bool]:
raise NotImplementedError(
'Only string, integer, and boolean properties are implemented')
key_object = self.properties.findChild(name='key', text=key)
# Key (and value, if it's a valid property list) don't exist
if key_object is None:
key_object = self.soup.new_tag('key')
key_object.string = key
self.properties.append(key_object)
value_object = self.soup.new_tag(
{str: 'string', int: 'integer', bool: str(value).lower()}[
value_type])
if value_type is not bool:
value_object.string = str(value)
self.properties.append(value_object)
return
# Key (and value, if it's a valid property list) exist
# Eh, just remove the key+value tags from the tree and re-add them
# (with the new value)
value_object = key_object.find_next_sibling()
key_object.decompose()
value_object.decompose()
self.set_property(key, value)
|
[
"def",
"set_property",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"value_type",
"=",
"type",
"(",
"value",
")",
"if",
"value_type",
"not",
"in",
"[",
"str",
",",
"int",
",",
"bool",
"]",
":",
"raise",
"NotImplementedError",
"(",
"'Only string, integer, and boolean properties are implemented'",
")",
"key_object",
"=",
"self",
".",
"properties",
".",
"findChild",
"(",
"name",
"=",
"'key'",
",",
"text",
"=",
"key",
")",
"# Key (and value, if it's a valid property list) don't exist",
"if",
"key_object",
"is",
"None",
":",
"key_object",
"=",
"self",
".",
"soup",
".",
"new_tag",
"(",
"'key'",
")",
"key_object",
".",
"string",
"=",
"key",
"self",
".",
"properties",
".",
"append",
"(",
"key_object",
")",
"value_object",
"=",
"self",
".",
"soup",
".",
"new_tag",
"(",
"{",
"str",
":",
"'string'",
",",
"int",
":",
"'integer'",
",",
"bool",
":",
"str",
"(",
"value",
")",
".",
"lower",
"(",
")",
"}",
"[",
"value_type",
"]",
")",
"if",
"value_type",
"is",
"not",
"bool",
":",
"value_object",
".",
"string",
"=",
"str",
"(",
"value",
")",
"self",
".",
"properties",
".",
"append",
"(",
"value_object",
")",
"return",
"# Key (and value, if it's a valid property list) exist",
"# Eh, just remove the key+value tags from the tree and re-add them",
"# (with the new value)",
"value_object",
"=",
"key_object",
".",
"find_next_sibling",
"(",
")",
"key_object",
".",
"decompose",
"(",
")",
"value_object",
".",
"decompose",
"(",
")",
"self",
".",
"set_property",
"(",
"key",
",",
"value",
")"
] |
Set a new (or updating existing) key value pair.
Args:
key: A string containing the key namespace
value: A str, int, or bool value
Raises:
NotImplementedError: an unsupported value-type was provided
|
[
"Set",
"a",
"new",
"(",
"or",
"updating",
"existing",
")",
"key",
"value",
"pair",
"."
] |
train
|
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/propertylist.py#L68-L110
|
vedvyas/doxytag2zealdb
|
doxytag2zealdb/propertylist.py
|
DoxygenPropertyList.save
|
def save(self):
'''Save current property list representation to the original file.'''
with open(self.filename, 'w') as plist_file:
plist_file.write(str(self.soup))
|
python
|
def save(self):
'''Save current property list representation to the original file.'''
with open(self.filename, 'w') as plist_file:
plist_file.write(str(self.soup))
|
[
"def",
"save",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"filename",
",",
"'w'",
")",
"as",
"plist_file",
":",
"plist_file",
".",
"write",
"(",
"str",
"(",
"self",
".",
"soup",
")",
")"
] |
Save current property list representation to the original file.
|
[
"Save",
"current",
"property",
"list",
"representation",
"to",
"the",
"original",
"file",
"."
] |
train
|
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/propertylist.py#L112-L115
|
mezz64/pyEight
|
pyeight/eight.py
|
EightSleep.fetch_userid
|
def fetch_userid(self, side):
"""Return the userid for the specified bed side."""
for user in self.users:
obj = self.users[user]
if obj.side == side:
return user
|
python
|
def fetch_userid(self, side):
"""Return the userid for the specified bed side."""
for user in self.users:
obj = self.users[user]
if obj.side == side:
return user
|
[
"def",
"fetch_userid",
"(",
"self",
",",
"side",
")",
":",
"for",
"user",
"in",
"self",
".",
"users",
":",
"obj",
"=",
"self",
".",
"users",
"[",
"user",
"]",
"if",
"obj",
".",
"side",
"==",
"side",
":",
"return",
"user"
] |
Return the userid for the specified bed side.
|
[
"Return",
"the",
"userid",
"for",
"the",
"specified",
"bed",
"side",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L84-L89
|
mezz64/pyEight
|
pyeight/eight.py
|
EightSleep.start
|
async def start(self):
"""Start api initialization."""
_LOGGER.debug('Initializing pyEight Version: %s', __version__)
await self.fetch_token()
if self._token is not None:
await self.fetch_device_list()
await self.assign_users()
return True
else:
# We couldn't authenticate
return False
|
python
|
async def start(self):
"""Start api initialization."""
_LOGGER.debug('Initializing pyEight Version: %s', __version__)
await self.fetch_token()
if self._token is not None:
await self.fetch_device_list()
await self.assign_users()
return True
else:
# We couldn't authenticate
return False
|
[
"async",
"def",
"start",
"(",
"self",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"'Initializing pyEight Version: %s'",
",",
"__version__",
")",
"await",
"self",
".",
"fetch_token",
"(",
")",
"if",
"self",
".",
"_token",
"is",
"not",
"None",
":",
"await",
"self",
".",
"fetch_device_list",
"(",
")",
"await",
"self",
".",
"assign_users",
"(",
")",
"return",
"True",
"else",
":",
"# We couldn't authenticate",
"return",
"False"
] |
Start api initialization.
|
[
"Start",
"api",
"initialization",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L96-L106
|
mezz64/pyEight
|
pyeight/eight.py
|
EightSleep.fetch_token
|
async def fetch_token(self):
"""Fetch new session token from api."""
url = '{}/login'.format(API_URL)
payload = 'email={}&password={}'.format(self._email, self._password)
reg = await self.api_post(url, None, payload)
if reg is None:
_LOGGER.error('Unable to authenticate and fetch eight token.')
else:
self._userid = reg['session']['userId']
self._token = reg['session']['token']
self._expdate = reg['session']['expirationDate']
_LOGGER.debug('UserID: %s, Token: %s', self._userid, self.token)
|
python
|
async def fetch_token(self):
"""Fetch new session token from api."""
url = '{}/login'.format(API_URL)
payload = 'email={}&password={}'.format(self._email, self._password)
reg = await self.api_post(url, None, payload)
if reg is None:
_LOGGER.error('Unable to authenticate and fetch eight token.')
else:
self._userid = reg['session']['userId']
self._token = reg['session']['token']
self._expdate = reg['session']['expirationDate']
_LOGGER.debug('UserID: %s, Token: %s', self._userid, self.token)
|
[
"async",
"def",
"fetch_token",
"(",
"self",
")",
":",
"url",
"=",
"'{}/login'",
".",
"format",
"(",
"API_URL",
")",
"payload",
"=",
"'email={}&password={}'",
".",
"format",
"(",
"self",
".",
"_email",
",",
"self",
".",
"_password",
")",
"reg",
"=",
"await",
"self",
".",
"api_post",
"(",
"url",
",",
"None",
",",
"payload",
")",
"if",
"reg",
"is",
"None",
":",
"_LOGGER",
".",
"error",
"(",
"'Unable to authenticate and fetch eight token.'",
")",
"else",
":",
"self",
".",
"_userid",
"=",
"reg",
"[",
"'session'",
"]",
"[",
"'userId'",
"]",
"self",
".",
"_token",
"=",
"reg",
"[",
"'session'",
"]",
"[",
"'token'",
"]",
"self",
".",
"_expdate",
"=",
"reg",
"[",
"'session'",
"]",
"[",
"'expirationDate'",
"]",
"_LOGGER",
".",
"debug",
"(",
"'UserID: %s, Token: %s'",
",",
"self",
".",
"_userid",
",",
"self",
".",
"token",
")"
] |
Fetch new session token from api.
|
[
"Fetch",
"new",
"session",
"token",
"from",
"api",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L113-L125
|
mezz64/pyEight
|
pyeight/eight.py
|
EightSleep.fetch_device_list
|
async def fetch_device_list(self):
"""Fetch list of devices."""
url = '{}/users/me'.format(API_URL)
dlist = await self.api_get(url)
if dlist is None:
_LOGGER.error('Unable to fetch eight devices.')
else:
self._devices = dlist['user']['devices']
_LOGGER.debug('Devices: %s', self._devices)
|
python
|
async def fetch_device_list(self):
"""Fetch list of devices."""
url = '{}/users/me'.format(API_URL)
dlist = await self.api_get(url)
if dlist is None:
_LOGGER.error('Unable to fetch eight devices.')
else:
self._devices = dlist['user']['devices']
_LOGGER.debug('Devices: %s', self._devices)
|
[
"async",
"def",
"fetch_device_list",
"(",
"self",
")",
":",
"url",
"=",
"'{}/users/me'",
".",
"format",
"(",
"API_URL",
")",
"dlist",
"=",
"await",
"self",
".",
"api_get",
"(",
"url",
")",
"if",
"dlist",
"is",
"None",
":",
"_LOGGER",
".",
"error",
"(",
"'Unable to fetch eight devices.'",
")",
"else",
":",
"self",
".",
"_devices",
"=",
"dlist",
"[",
"'user'",
"]",
"[",
"'devices'",
"]",
"_LOGGER",
".",
"debug",
"(",
"'Devices: %s'",
",",
"self",
".",
"_devices",
")"
] |
Fetch list of devices.
|
[
"Fetch",
"list",
"of",
"devices",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L127-L136
|
mezz64/pyEight
|
pyeight/eight.py
|
EightSleep.assign_users
|
async def assign_users(self):
"""Update device properties."""
device = self._devices[0]
url = '{}/devices/{}?filter=ownerId,leftUserId,rightUserId' \
.format(API_URL, device)
data = await self.api_get(url)
if data is None:
_LOGGER.error('Unable to assign eight device users.')
else:
# Find the side to the known userid
if data['result']['rightUserId'] == self._userid:
self.users[data['result']['rightUserId']] = \
EightUser(self, data['result']['rightUserId'], 'right')
user_side = 'right'
elif data['result']['leftUserId'] == self._userid:
self.users[data['result']['leftUserId']] = \
EightUser(self, data['result']['leftUserId'], 'left')
user_side = 'left'
else:
_LOGGER.error('Unable to assign eight device users.')
if self._partner:
if user_side == 'right':
self.users[data['result']['leftUserId']] = \
EightUser(self, data['result']['leftUserId'], 'left')
else:
self.users[data['result']['rightUserId']] = \
EightUser(self, data['result']['rightUserId'], 'right')
|
python
|
async def assign_users(self):
"""Update device properties."""
device = self._devices[0]
url = '{}/devices/{}?filter=ownerId,leftUserId,rightUserId' \
.format(API_URL, device)
data = await self.api_get(url)
if data is None:
_LOGGER.error('Unable to assign eight device users.')
else:
# Find the side to the known userid
if data['result']['rightUserId'] == self._userid:
self.users[data['result']['rightUserId']] = \
EightUser(self, data['result']['rightUserId'], 'right')
user_side = 'right'
elif data['result']['leftUserId'] == self._userid:
self.users[data['result']['leftUserId']] = \
EightUser(self, data['result']['leftUserId'], 'left')
user_side = 'left'
else:
_LOGGER.error('Unable to assign eight device users.')
if self._partner:
if user_side == 'right':
self.users[data['result']['leftUserId']] = \
EightUser(self, data['result']['leftUserId'], 'left')
else:
self.users[data['result']['rightUserId']] = \
EightUser(self, data['result']['rightUserId'], 'right')
|
[
"async",
"def",
"assign_users",
"(",
"self",
")",
":",
"device",
"=",
"self",
".",
"_devices",
"[",
"0",
"]",
"url",
"=",
"'{}/devices/{}?filter=ownerId,leftUserId,rightUserId'",
".",
"format",
"(",
"API_URL",
",",
"device",
")",
"data",
"=",
"await",
"self",
".",
"api_get",
"(",
"url",
")",
"if",
"data",
"is",
"None",
":",
"_LOGGER",
".",
"error",
"(",
"'Unable to assign eight device users.'",
")",
"else",
":",
"# Find the side to the known userid",
"if",
"data",
"[",
"'result'",
"]",
"[",
"'rightUserId'",
"]",
"==",
"self",
".",
"_userid",
":",
"self",
".",
"users",
"[",
"data",
"[",
"'result'",
"]",
"[",
"'rightUserId'",
"]",
"]",
"=",
"EightUser",
"(",
"self",
",",
"data",
"[",
"'result'",
"]",
"[",
"'rightUserId'",
"]",
",",
"'right'",
")",
"user_side",
"=",
"'right'",
"elif",
"data",
"[",
"'result'",
"]",
"[",
"'leftUserId'",
"]",
"==",
"self",
".",
"_userid",
":",
"self",
".",
"users",
"[",
"data",
"[",
"'result'",
"]",
"[",
"'leftUserId'",
"]",
"]",
"=",
"EightUser",
"(",
"self",
",",
"data",
"[",
"'result'",
"]",
"[",
"'leftUserId'",
"]",
",",
"'left'",
")",
"user_side",
"=",
"'left'",
"else",
":",
"_LOGGER",
".",
"error",
"(",
"'Unable to assign eight device users.'",
")",
"if",
"self",
".",
"_partner",
":",
"if",
"user_side",
"==",
"'right'",
":",
"self",
".",
"users",
"[",
"data",
"[",
"'result'",
"]",
"[",
"'leftUserId'",
"]",
"]",
"=",
"EightUser",
"(",
"self",
",",
"data",
"[",
"'result'",
"]",
"[",
"'leftUserId'",
"]",
",",
"'left'",
")",
"else",
":",
"self",
".",
"users",
"[",
"data",
"[",
"'result'",
"]",
"[",
"'rightUserId'",
"]",
"]",
"=",
"EightUser",
"(",
"self",
",",
"data",
"[",
"'result'",
"]",
"[",
"'rightUserId'",
"]",
",",
"'right'",
")"
] |
Update device properties.
|
[
"Update",
"device",
"properties",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L138-L166
|
mezz64/pyEight
|
pyeight/eight.py
|
EightSleep.room_temperature
|
def room_temperature(self):
"""Return room temperature for both sides of bed."""
# Check which side is active, if both are return the average
tmp = None
tmp2 = None
for user in self.users:
obj = self.users[user]
if obj.current_values['processing']:
if tmp is None:
tmp = obj.current_values['room_temp']
else:
tmp = (tmp + obj.current_values['room_temp']) / 2
else:
if tmp2 is None:
tmp2 = obj.current_values['room_temp']
else:
tmp2 = (tmp2 + obj.current_values['room_temp']) / 2
if tmp is not None:
return tmp
elif tmp2 is not None:
return tmp2
|
python
|
def room_temperature(self):
"""Return room temperature for both sides of bed."""
# Check which side is active, if both are return the average
tmp = None
tmp2 = None
for user in self.users:
obj = self.users[user]
if obj.current_values['processing']:
if tmp is None:
tmp = obj.current_values['room_temp']
else:
tmp = (tmp + obj.current_values['room_temp']) / 2
else:
if tmp2 is None:
tmp2 = obj.current_values['room_temp']
else:
tmp2 = (tmp2 + obj.current_values['room_temp']) / 2
if tmp is not None:
return tmp
elif tmp2 is not None:
return tmp2
|
[
"def",
"room_temperature",
"(",
"self",
")",
":",
"# Check which side is active, if both are return the average",
"tmp",
"=",
"None",
"tmp2",
"=",
"None",
"for",
"user",
"in",
"self",
".",
"users",
":",
"obj",
"=",
"self",
".",
"users",
"[",
"user",
"]",
"if",
"obj",
".",
"current_values",
"[",
"'processing'",
"]",
":",
"if",
"tmp",
"is",
"None",
":",
"tmp",
"=",
"obj",
".",
"current_values",
"[",
"'room_temp'",
"]",
"else",
":",
"tmp",
"=",
"(",
"tmp",
"+",
"obj",
".",
"current_values",
"[",
"'room_temp'",
"]",
")",
"/",
"2",
"else",
":",
"if",
"tmp2",
"is",
"None",
":",
"tmp2",
"=",
"obj",
".",
"current_values",
"[",
"'room_temp'",
"]",
"else",
":",
"tmp2",
"=",
"(",
"tmp2",
"+",
"obj",
".",
"current_values",
"[",
"'room_temp'",
"]",
")",
"/",
"2",
"if",
"tmp",
"is",
"not",
"None",
":",
"return",
"tmp",
"elif",
"tmp2",
"is",
"not",
"None",
":",
"return",
"tmp2"
] |
Return room temperature for both sides of bed.
|
[
"Return",
"room",
"temperature",
"for",
"both",
"sides",
"of",
"bed",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L168-L189
|
mezz64/pyEight
|
pyeight/eight.py
|
EightSleep.handle_device_json
|
def handle_device_json(self, data):
"""Manage the device json list."""
self._device_json.insert(0, data)
self._device_json.pop()
|
python
|
def handle_device_json(self, data):
"""Manage the device json list."""
self._device_json.insert(0, data)
self._device_json.pop()
|
[
"def",
"handle_device_json",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_device_json",
".",
"insert",
"(",
"0",
",",
"data",
")",
"self",
".",
"_device_json",
".",
"pop",
"(",
")"
] |
Manage the device json list.
|
[
"Manage",
"the",
"device",
"json",
"list",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L191-L194
|
mezz64/pyEight
|
pyeight/eight.py
|
EightSleep.update_device_data
|
async def update_device_data(self):
"""Update device data json."""
url = '{}/devices/{}?offlineView=true'.format(API_URL, self.deviceid)
# Check for access token expiration (every 15days)
exp_delta = datetime.strptime(self._expdate, '%Y-%m-%dT%H:%M:%S.%fZ') \
- datetime.fromtimestamp(time.time())
# Renew 1hr before expiration
if exp_delta.total_seconds() < 3600:
_LOGGER.debug('Fetching new access token before expiration.')
await self.fetch_token()
device_resp = await self.api_get(url)
if device_resp is None:
_LOGGER.error('Unable to fetch eight device data.')
else:
# Want to keep last 10 readings so purge the last after we add
self.handle_device_json(device_resp['result'])
for user in self.users:
self.users[user].dynamic_presence()
|
python
|
async def update_device_data(self):
"""Update device data json."""
url = '{}/devices/{}?offlineView=true'.format(API_URL, self.deviceid)
# Check for access token expiration (every 15days)
exp_delta = datetime.strptime(self._expdate, '%Y-%m-%dT%H:%M:%S.%fZ') \
- datetime.fromtimestamp(time.time())
# Renew 1hr before expiration
if exp_delta.total_seconds() < 3600:
_LOGGER.debug('Fetching new access token before expiration.')
await self.fetch_token()
device_resp = await self.api_get(url)
if device_resp is None:
_LOGGER.error('Unable to fetch eight device data.')
else:
# Want to keep last 10 readings so purge the last after we add
self.handle_device_json(device_resp['result'])
for user in self.users:
self.users[user].dynamic_presence()
|
[
"async",
"def",
"update_device_data",
"(",
"self",
")",
":",
"url",
"=",
"'{}/devices/{}?offlineView=true'",
".",
"format",
"(",
"API_URL",
",",
"self",
".",
"deviceid",
")",
"# Check for access token expiration (every 15days)",
"exp_delta",
"=",
"datetime",
".",
"strptime",
"(",
"self",
".",
"_expdate",
",",
"'%Y-%m-%dT%H:%M:%S.%fZ'",
")",
"-",
"datetime",
".",
"fromtimestamp",
"(",
"time",
".",
"time",
"(",
")",
")",
"# Renew 1hr before expiration",
"if",
"exp_delta",
".",
"total_seconds",
"(",
")",
"<",
"3600",
":",
"_LOGGER",
".",
"debug",
"(",
"'Fetching new access token before expiration.'",
")",
"await",
"self",
".",
"fetch_token",
"(",
")",
"device_resp",
"=",
"await",
"self",
".",
"api_get",
"(",
"url",
")",
"if",
"device_resp",
"is",
"None",
":",
"_LOGGER",
".",
"error",
"(",
"'Unable to fetch eight device data.'",
")",
"else",
":",
"# Want to keep last 10 readings so purge the last after we add",
"self",
".",
"handle_device_json",
"(",
"device_resp",
"[",
"'result'",
"]",
")",
"for",
"user",
"in",
"self",
".",
"users",
":",
"self",
".",
"users",
"[",
"user",
"]",
".",
"dynamic_presence",
"(",
")"
] |
Update device data json.
|
[
"Update",
"device",
"data",
"json",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L196-L215
|
mezz64/pyEight
|
pyeight/eight.py
|
EightSleep.api_get
|
async def api_get(self, url, params=None):
"""Make api fetch request."""
request = None
headers = DEFAULT_HEADERS.copy()
headers.update({'Session-Token': self._token})
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, headers=headers, params=params)
# _LOGGER.debug('Get URL: %s', request.url)
if request.status != 200:
_LOGGER.error('Error fetching Eight data: %s', request.status)
return None
if 'application/json' in request.headers['content-type']:
request_json = await request.json()
else:
_LOGGER.debug('Response was not JSON, returning text.')
request_json = await request.text()
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Eight data. %s', err)
return None
|
python
|
async def api_get(self, url, params=None):
"""Make api fetch request."""
request = None
headers = DEFAULT_HEADERS.copy()
headers.update({'Session-Token': self._token})
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, headers=headers, params=params)
# _LOGGER.debug('Get URL: %s', request.url)
if request.status != 200:
_LOGGER.error('Error fetching Eight data: %s', request.status)
return None
if 'application/json' in request.headers['content-type']:
request_json = await request.json()
else:
_LOGGER.debug('Response was not JSON, returning text.')
request_json = await request.text()
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Eight data. %s', err)
return None
|
[
"async",
"def",
"api_get",
"(",
"self",
",",
"url",
",",
"params",
"=",
"None",
")",
":",
"request",
"=",
"None",
"headers",
"=",
"DEFAULT_HEADERS",
".",
"copy",
"(",
")",
"headers",
".",
"update",
"(",
"{",
"'Session-Token'",
":",
"self",
".",
"_token",
"}",
")",
"try",
":",
"with",
"async_timeout",
".",
"timeout",
"(",
"DEFAULT_TIMEOUT",
",",
"loop",
"=",
"self",
".",
"_event_loop",
")",
":",
"request",
"=",
"await",
"self",
".",
"_api_session",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"headers",
",",
"params",
"=",
"params",
")",
"# _LOGGER.debug('Get URL: %s', request.url)",
"if",
"request",
".",
"status",
"!=",
"200",
":",
"_LOGGER",
".",
"error",
"(",
"'Error fetching Eight data: %s'",
",",
"request",
".",
"status",
")",
"return",
"None",
"if",
"'application/json'",
"in",
"request",
".",
"headers",
"[",
"'content-type'",
"]",
":",
"request_json",
"=",
"await",
"request",
".",
"json",
"(",
")",
"else",
":",
"_LOGGER",
".",
"debug",
"(",
"'Response was not JSON, returning text.'",
")",
"request_json",
"=",
"await",
"request",
".",
"text",
"(",
")",
"return",
"request_json",
"except",
"(",
"aiohttp",
".",
"ClientError",
",",
"asyncio",
".",
"TimeoutError",
",",
"ConnectionRefusedError",
")",
"as",
"err",
":",
"_LOGGER",
".",
"error",
"(",
"'Error fetching Eight data. %s'",
",",
"err",
")",
"return",
"None"
] |
Make api fetch request.
|
[
"Make",
"api",
"fetch",
"request",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L241-L267
|
mezz64/pyEight
|
pyeight/eight.py
|
EightSleep.api_put
|
async def api_put(self, url, data=None):
"""Make api post request."""
put = None
headers = DEFAULT_HEADERS.copy()
headers.update({'Session-Token': self._token})
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
put = await self._api_session.put(
url, headers=headers, data=data)
if put.status != 200:
_LOGGER.error('Error putting Eight data: %s', put.status)
return None
if 'application/json' in put.headers['content-type']:
put_result = await put.json()
else:
_LOGGER.debug('Response was not JSON, returning text.')
put_result = await put.text()
return put_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error putting Eight data. %s', err)
return None
|
python
|
async def api_put(self, url, data=None):
"""Make api post request."""
put = None
headers = DEFAULT_HEADERS.copy()
headers.update({'Session-Token': self._token})
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
put = await self._api_session.put(
url, headers=headers, data=data)
if put.status != 200:
_LOGGER.error('Error putting Eight data: %s', put.status)
return None
if 'application/json' in put.headers['content-type']:
put_result = await put.json()
else:
_LOGGER.debug('Response was not JSON, returning text.')
put_result = await put.text()
return put_result
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error putting Eight data. %s', err)
return None
|
[
"async",
"def",
"api_put",
"(",
"self",
",",
"url",
",",
"data",
"=",
"None",
")",
":",
"put",
"=",
"None",
"headers",
"=",
"DEFAULT_HEADERS",
".",
"copy",
"(",
")",
"headers",
".",
"update",
"(",
"{",
"'Session-Token'",
":",
"self",
".",
"_token",
"}",
")",
"try",
":",
"with",
"async_timeout",
".",
"timeout",
"(",
"DEFAULT_TIMEOUT",
",",
"loop",
"=",
"self",
".",
"_event_loop",
")",
":",
"put",
"=",
"await",
"self",
".",
"_api_session",
".",
"put",
"(",
"url",
",",
"headers",
"=",
"headers",
",",
"data",
"=",
"data",
")",
"if",
"put",
".",
"status",
"!=",
"200",
":",
"_LOGGER",
".",
"error",
"(",
"'Error putting Eight data: %s'",
",",
"put",
".",
"status",
")",
"return",
"None",
"if",
"'application/json'",
"in",
"put",
".",
"headers",
"[",
"'content-type'",
"]",
":",
"put_result",
"=",
"await",
"put",
".",
"json",
"(",
")",
"else",
":",
"_LOGGER",
".",
"debug",
"(",
"'Response was not JSON, returning text.'",
")",
"put_result",
"=",
"await",
"put",
".",
"text",
"(",
")",
"return",
"put_result",
"except",
"(",
"aiohttp",
".",
"ClientError",
",",
"asyncio",
".",
"TimeoutError",
",",
"ConnectionRefusedError",
")",
"as",
"err",
":",
"_LOGGER",
".",
"error",
"(",
"'Error putting Eight data. %s'",
",",
"err",
")",
"return",
"None"
] |
Make api post request.
|
[
"Make",
"api",
"post",
"request",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L269-L294
|
MonashBI/arcana
|
arcana/environment/static.py
|
StaticEnv.satisfy
|
def satisfy(self, *requirements):
"""
Checks whether the given requirements are satisfiable within the given
execution context
Parameter
---------
requirements : list(Requirement)
List of requirements to check whether they are satisfiable
"""
versions = []
for req_range in requirements:
try:
version = self._detected_versions[req_range.name]
except KeyError:
try:
version = req_range.requirement.detect_version()
except ArcanaRequirementNotFoundError as e:
if self._fail_on_missing:
raise
else:
logger.warning(e)
except ArcanaVersionNotDetectableError as e:
if self._fail_on_undetectable:
raise
else:
logger.warning(e)
else:
self._detected_versions[req_range.name] = version
if not req_range.within(version):
raise ArcanaVersionError(
"Detected {} version {} is not within requested range {}"
.format(req_range.requirement, version, req_range))
versions.append(version)
return versions
|
python
|
def satisfy(self, *requirements):
"""
Checks whether the given requirements are satisfiable within the given
execution context
Parameter
---------
requirements : list(Requirement)
List of requirements to check whether they are satisfiable
"""
versions = []
for req_range in requirements:
try:
version = self._detected_versions[req_range.name]
except KeyError:
try:
version = req_range.requirement.detect_version()
except ArcanaRequirementNotFoundError as e:
if self._fail_on_missing:
raise
else:
logger.warning(e)
except ArcanaVersionNotDetectableError as e:
if self._fail_on_undetectable:
raise
else:
logger.warning(e)
else:
self._detected_versions[req_range.name] = version
if not req_range.within(version):
raise ArcanaVersionError(
"Detected {} version {} is not within requested range {}"
.format(req_range.requirement, version, req_range))
versions.append(version)
return versions
|
[
"def",
"satisfy",
"(",
"self",
",",
"*",
"requirements",
")",
":",
"versions",
"=",
"[",
"]",
"for",
"req_range",
"in",
"requirements",
":",
"try",
":",
"version",
"=",
"self",
".",
"_detected_versions",
"[",
"req_range",
".",
"name",
"]",
"except",
"KeyError",
":",
"try",
":",
"version",
"=",
"req_range",
".",
"requirement",
".",
"detect_version",
"(",
")",
"except",
"ArcanaRequirementNotFoundError",
"as",
"e",
":",
"if",
"self",
".",
"_fail_on_missing",
":",
"raise",
"else",
":",
"logger",
".",
"warning",
"(",
"e",
")",
"except",
"ArcanaVersionNotDetectableError",
"as",
"e",
":",
"if",
"self",
".",
"_fail_on_undetectable",
":",
"raise",
"else",
":",
"logger",
".",
"warning",
"(",
"e",
")",
"else",
":",
"self",
".",
"_detected_versions",
"[",
"req_range",
".",
"name",
"]",
"=",
"version",
"if",
"not",
"req_range",
".",
"within",
"(",
"version",
")",
":",
"raise",
"ArcanaVersionError",
"(",
"\"Detected {} version {} is not within requested range {}\"",
".",
"format",
"(",
"req_range",
".",
"requirement",
",",
"version",
",",
"req_range",
")",
")",
"versions",
".",
"append",
"(",
"version",
")",
"return",
"versions"
] |
Checks whether the given requirements are satisfiable within the given
execution context
Parameter
---------
requirements : list(Requirement)
List of requirements to check whether they are satisfiable
|
[
"Checks",
"whether",
"the",
"given",
"requirements",
"are",
"satisfiable",
"within",
"the",
"given",
"execution",
"context"
] |
train
|
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/environment/static.py#L30-L64
|
gwww/elkm1
|
elkm1_lib/message.py
|
housecode_to_index
|
def housecode_to_index(housecode):
"""Convert a X10 housecode to a zero-based index"""
match = re.search(r'^([A-P])(\d{1,2})$', housecode.upper())
if match:
house_index = int(match.group(2))
if 1 <= house_index <= 16:
return (ord(match.group(1)) - ord('A')) * 16 + house_index - 1
raise ValueError("Invalid X10 housecode: %s" % housecode)
|
python
|
def housecode_to_index(housecode):
"""Convert a X10 housecode to a zero-based index"""
match = re.search(r'^([A-P])(\d{1,2})$', housecode.upper())
if match:
house_index = int(match.group(2))
if 1 <= house_index <= 16:
return (ord(match.group(1)) - ord('A')) * 16 + house_index - 1
raise ValueError("Invalid X10 housecode: %s" % housecode)
|
[
"def",
"housecode_to_index",
"(",
"housecode",
")",
":",
"match",
"=",
"re",
".",
"search",
"(",
"r'^([A-P])(\\d{1,2})$'",
",",
"housecode",
".",
"upper",
"(",
")",
")",
"if",
"match",
":",
"house_index",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
"if",
"1",
"<=",
"house_index",
"<=",
"16",
":",
"return",
"(",
"ord",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"-",
"ord",
"(",
"'A'",
")",
")",
"*",
"16",
"+",
"house_index",
"-",
"1",
"raise",
"ValueError",
"(",
"\"Invalid X10 housecode: %s\"",
"%",
"housecode",
")"
] |
Convert a X10 housecode to a zero-based index
|
[
"Convert",
"a",
"X10",
"housecode",
"to",
"a",
"zero",
"-",
"based",
"index"
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L236-L243
|
gwww/elkm1
|
elkm1_lib/message.py
|
index_to_housecode
|
def index_to_housecode(index):
"""Convert a zero-based index to a X10 housecode."""
if index < 0 or index > 255:
raise ValueError
quotient, remainder = divmod(index, 16)
return chr(quotient+ord('A')) + '{:02d}'.format(remainder+1)
|
python
|
def index_to_housecode(index):
"""Convert a zero-based index to a X10 housecode."""
if index < 0 or index > 255:
raise ValueError
quotient, remainder = divmod(index, 16)
return chr(quotient+ord('A')) + '{:02d}'.format(remainder+1)
|
[
"def",
"index_to_housecode",
"(",
"index",
")",
":",
"if",
"index",
"<",
"0",
"or",
"index",
">",
"255",
":",
"raise",
"ValueError",
"quotient",
",",
"remainder",
"=",
"divmod",
"(",
"index",
",",
"16",
")",
"return",
"chr",
"(",
"quotient",
"+",
"ord",
"(",
"'A'",
")",
")",
"+",
"'{:02d}'",
".",
"format",
"(",
"remainder",
"+",
"1",
")"
] |
Convert a zero-based index to a X10 housecode.
|
[
"Convert",
"a",
"zero",
"-",
"based",
"index",
"to",
"a",
"X10",
"housecode",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L246-L251
|
gwww/elkm1
|
elkm1_lib/message.py
|
_check_checksum
|
def _check_checksum(msg):
"""Ensure checksum in message is good."""
checksum = int(msg[-2:], 16)
for char in msg[:-2]:
checksum += ord(char)
if (checksum % 256) != 0:
raise ValueError("Elk message checksum invalid")
|
python
|
def _check_checksum(msg):
"""Ensure checksum in message is good."""
checksum = int(msg[-2:], 16)
for char in msg[:-2]:
checksum += ord(char)
if (checksum % 256) != 0:
raise ValueError("Elk message checksum invalid")
|
[
"def",
"_check_checksum",
"(",
"msg",
")",
":",
"checksum",
"=",
"int",
"(",
"msg",
"[",
"-",
"2",
":",
"]",
",",
"16",
")",
"for",
"char",
"in",
"msg",
"[",
":",
"-",
"2",
"]",
":",
"checksum",
"+=",
"ord",
"(",
"char",
")",
"if",
"(",
"checksum",
"%",
"256",
")",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Elk message checksum invalid\"",
")"
] |
Ensure checksum in message is good.
|
[
"Ensure",
"checksum",
"in",
"message",
"is",
"good",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L268-L274
|
gwww/elkm1
|
elkm1_lib/message.py
|
_check_message_valid
|
def _check_message_valid(msg):
"""Check packet length valid and that checksum is good."""
try:
if int(msg[:2], 16) != (len(msg) - 2):
raise ValueError("Elk message length incorrect")
_check_checksum(msg)
except IndexError:
raise ValueError("Elk message length incorrect")
|
python
|
def _check_message_valid(msg):
"""Check packet length valid and that checksum is good."""
try:
if int(msg[:2], 16) != (len(msg) - 2):
raise ValueError("Elk message length incorrect")
_check_checksum(msg)
except IndexError:
raise ValueError("Elk message length incorrect")
|
[
"def",
"_check_message_valid",
"(",
"msg",
")",
":",
"try",
":",
"if",
"int",
"(",
"msg",
"[",
":",
"2",
"]",
",",
"16",
")",
"!=",
"(",
"len",
"(",
"msg",
")",
"-",
"2",
")",
":",
"raise",
"ValueError",
"(",
"\"Elk message length incorrect\"",
")",
"_check_checksum",
"(",
"msg",
")",
"except",
"IndexError",
":",
"raise",
"ValueError",
"(",
"\"Elk message length incorrect\"",
")"
] |
Check packet length valid and that checksum is good.
|
[
"Check",
"packet",
"length",
"valid",
"and",
"that",
"checksum",
"is",
"good",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L277-L284
|
gwww/elkm1
|
elkm1_lib/message.py
|
cw_encode
|
def cw_encode(index, value, value_format):
"""cw: Write a custom value."""
if value_format == 2:
value = value[0] << 8 + value[1]
return MessageEncode('0Dcw{:02d}{:05d}00'.format(index+1, value), None)
|
python
|
def cw_encode(index, value, value_format):
"""cw: Write a custom value."""
if value_format == 2:
value = value[0] << 8 + value[1]
return MessageEncode('0Dcw{:02d}{:05d}00'.format(index+1, value), None)
|
[
"def",
"cw_encode",
"(",
"index",
",",
"value",
",",
"value_format",
")",
":",
"if",
"value_format",
"==",
"2",
":",
"value",
"=",
"value",
"[",
"0",
"]",
"<<",
"8",
"+",
"value",
"[",
"1",
"]",
"return",
"MessageEncode",
"(",
"'0Dcw{:02d}{:05d}00'",
".",
"format",
"(",
"index",
"+",
"1",
",",
"value",
")",
",",
"None",
")"
] |
cw: Write a custom value.
|
[
"cw",
":",
"Write",
"a",
"custom",
"value",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L333-L337
|
gwww/elkm1
|
elkm1_lib/message.py
|
dm_encode
|
def dm_encode(keypad_area, clear, beep, timeout, line1, line2):
"""dm: Display message on keypad."""
return MessageEncode(
'2Edm{:1d}{:1d}{:1d}{:05d}{:^<16.16}{:^<16.16}00'
.format(keypad_area+1, clear, beep, timeout, line1, line2), None
)
|
python
|
def dm_encode(keypad_area, clear, beep, timeout, line1, line2):
"""dm: Display message on keypad."""
return MessageEncode(
'2Edm{:1d}{:1d}{:1d}{:05d}{:^<16.16}{:^<16.16}00'
.format(keypad_area+1, clear, beep, timeout, line1, line2), None
)
|
[
"def",
"dm_encode",
"(",
"keypad_area",
",",
"clear",
",",
"beep",
",",
"timeout",
",",
"line1",
",",
"line2",
")",
":",
"return",
"MessageEncode",
"(",
"'2Edm{:1d}{:1d}{:1d}{:05d}{:^<16.16}{:^<16.16}00'",
".",
"format",
"(",
"keypad_area",
"+",
"1",
",",
"clear",
",",
"beep",
",",
"timeout",
",",
"line1",
",",
"line2",
")",
",",
"None",
")"
] |
dm: Display message on keypad.
|
[
"dm",
":",
"Display",
"message",
"on",
"keypad",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L351-L356
|
gwww/elkm1
|
elkm1_lib/message.py
|
pc_encode
|
def pc_encode(index, function_code, extended_code, time):
"""pc: Control any PLC device."""
return MessageEncode('11pc{hc}{fc:02d}{ec:02d}{time:04d}00'.
format(hc=index_to_housecode(index),
fc=function_code, ec=extended_code,
time=time), None)
|
python
|
def pc_encode(index, function_code, extended_code, time):
"""pc: Control any PLC device."""
return MessageEncode('11pc{hc}{fc:02d}{ec:02d}{time:04d}00'.
format(hc=index_to_housecode(index),
fc=function_code, ec=extended_code,
time=time), None)
|
[
"def",
"pc_encode",
"(",
"index",
",",
"function_code",
",",
"extended_code",
",",
"time",
")",
":",
"return",
"MessageEncode",
"(",
"'11pc{hc}{fc:02d}{ec:02d}{time:04d}00'",
".",
"format",
"(",
"hc",
"=",
"index_to_housecode",
"(",
"index",
")",
",",
"fc",
"=",
"function_code",
",",
"ec",
"=",
"extended_code",
",",
"time",
"=",
"time",
")",
",",
"None",
")"
] |
pc: Control any PLC device.
|
[
"pc",
":",
"Control",
"any",
"PLC",
"device",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L369-L374
|
gwww/elkm1
|
elkm1_lib/message.py
|
zb_encode
|
def zb_encode(zone, area, user_code):
"""zb: Zone bypass. Zone < 0 unbypass all; Zone > Max bypass all."""
if zone < 0:
zone = 0
elif zone > Max.ZONES.value:
zone = 999
else:
zone += 1
return MessageEncode('10zb{zone:03d}{area:1d}{code:06d}00'.format(
zone=zone, area=area+1, code=user_code), 'ZB')
|
python
|
def zb_encode(zone, area, user_code):
"""zb: Zone bypass. Zone < 0 unbypass all; Zone > Max bypass all."""
if zone < 0:
zone = 0
elif zone > Max.ZONES.value:
zone = 999
else:
zone += 1
return MessageEncode('10zb{zone:03d}{area:1d}{code:06d}00'.format(
zone=zone, area=area+1, code=user_code), 'ZB')
|
[
"def",
"zb_encode",
"(",
"zone",
",",
"area",
",",
"user_code",
")",
":",
"if",
"zone",
"<",
"0",
":",
"zone",
"=",
"0",
"elif",
"zone",
">",
"Max",
".",
"ZONES",
".",
"value",
":",
"zone",
"=",
"999",
"else",
":",
"zone",
"+=",
"1",
"return",
"MessageEncode",
"(",
"'10zb{zone:03d}{area:1d}{code:06d}00'",
".",
"format",
"(",
"zone",
"=",
"zone",
",",
"area",
"=",
"area",
"+",
"1",
",",
"code",
"=",
"user_code",
")",
",",
"'ZB'",
")"
] |
zb: Zone bypass. Zone < 0 unbypass all; Zone > Max bypass all.
|
[
"zb",
":",
"Zone",
"bypass",
".",
"Zone",
"<",
"0",
"unbypass",
"all",
";",
"Zone",
">",
"Max",
"bypass",
"all",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L438-L447
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode.add_handler
|
def add_handler(self, message_type, handler):
"""Manage callbacks for message handlers."""
if message_type not in self._handlers:
self._handlers[message_type] = []
if handler not in self._handlers[message_type]:
self._handlers[message_type].append(handler)
|
python
|
def add_handler(self, message_type, handler):
"""Manage callbacks for message handlers."""
if message_type not in self._handlers:
self._handlers[message_type] = []
if handler not in self._handlers[message_type]:
self._handlers[message_type].append(handler)
|
[
"def",
"add_handler",
"(",
"self",
",",
"message_type",
",",
"handler",
")",
":",
"if",
"message_type",
"not",
"in",
"self",
".",
"_handlers",
":",
"self",
".",
"_handlers",
"[",
"message_type",
"]",
"=",
"[",
"]",
"if",
"handler",
"not",
"in",
"self",
".",
"_handlers",
"[",
"message_type",
"]",
":",
"self",
".",
"_handlers",
"[",
"message_type",
"]",
".",
"append",
"(",
"handler",
")"
] |
Manage callbacks for message handlers.
|
[
"Manage",
"callbacks",
"for",
"message",
"handlers",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L33-L39
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode.decode
|
def decode(self, msg):
"""Decode an Elk message by passing to appropriate decoder"""
_check_message_valid(msg)
cmd = msg[2:4]
decoder = getattr(self, '_{}_decode'.format(cmd.lower()), None)
if not decoder:
cmd = 'unknown'
decoder = self._unknown_decode
decoded_msg = decoder(msg)
for handler in self._handlers.get(cmd, []):
handler(**decoded_msg)
|
python
|
def decode(self, msg):
"""Decode an Elk message by passing to appropriate decoder"""
_check_message_valid(msg)
cmd = msg[2:4]
decoder = getattr(self, '_{}_decode'.format(cmd.lower()), None)
if not decoder:
cmd = 'unknown'
decoder = self._unknown_decode
decoded_msg = decoder(msg)
for handler in self._handlers.get(cmd, []):
handler(**decoded_msg)
|
[
"def",
"decode",
"(",
"self",
",",
"msg",
")",
":",
"_check_message_valid",
"(",
"msg",
")",
"cmd",
"=",
"msg",
"[",
"2",
":",
"4",
"]",
"decoder",
"=",
"getattr",
"(",
"self",
",",
"'_{}_decode'",
".",
"format",
"(",
"cmd",
".",
"lower",
"(",
")",
")",
",",
"None",
")",
"if",
"not",
"decoder",
":",
"cmd",
"=",
"'unknown'",
"decoder",
"=",
"self",
".",
"_unknown_decode",
"decoded_msg",
"=",
"decoder",
"(",
"msg",
")",
"for",
"handler",
"in",
"self",
".",
"_handlers",
".",
"get",
"(",
"cmd",
",",
"[",
"]",
")",
":",
"handler",
"(",
"*",
"*",
"decoded_msg",
")"
] |
Decode an Elk message by passing to appropriate decoder
|
[
"Decode",
"an",
"Elk",
"message",
"by",
"passing",
"to",
"appropriate",
"decoder"
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L41-L51
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._as_decode
|
def _as_decode(self, msg):
"""AS: Arming status report."""
return {'armed_statuses': [x for x in msg[4:12]],
'arm_up_states': [x for x in msg[12:20]],
'alarm_states': [x for x in msg[20:28]]}
|
python
|
def _as_decode(self, msg):
"""AS: Arming status report."""
return {'armed_statuses': [x for x in msg[4:12]],
'arm_up_states': [x for x in msg[12:20]],
'alarm_states': [x for x in msg[20:28]]}
|
[
"def",
"_as_decode",
"(",
"self",
",",
"msg",
")",
":",
"return",
"{",
"'armed_statuses'",
":",
"[",
"x",
"for",
"x",
"in",
"msg",
"[",
"4",
":",
"12",
"]",
"]",
",",
"'arm_up_states'",
":",
"[",
"x",
"for",
"x",
"in",
"msg",
"[",
"12",
":",
"20",
"]",
"]",
",",
"'alarm_states'",
":",
"[",
"x",
"for",
"x",
"in",
"msg",
"[",
"20",
":",
"28",
"]",
"]",
"}"
] |
AS: Arming status report.
|
[
"AS",
":",
"Arming",
"status",
"report",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L57-L61
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._cr_decode
|
def _cr_decode(self, msg):
"""CR: Custom values"""
if int(msg[4:6]) > 0:
index = int(msg[4:6])-1
return {'values': [self._cr_one_custom_value_decode(index, msg[6:12])]}
else:
part = 6
ret = []
for i in range(Max.SETTINGS.value):
ret.append(self._cr_one_custom_value_decode(i, msg[part:part+6]))
part += 6
return {'values': ret}
|
python
|
def _cr_decode(self, msg):
"""CR: Custom values"""
if int(msg[4:6]) > 0:
index = int(msg[4:6])-1
return {'values': [self._cr_one_custom_value_decode(index, msg[6:12])]}
else:
part = 6
ret = []
for i in range(Max.SETTINGS.value):
ret.append(self._cr_one_custom_value_decode(i, msg[part:part+6]))
part += 6
return {'values': ret}
|
[
"def",
"_cr_decode",
"(",
"self",
",",
"msg",
")",
":",
"if",
"int",
"(",
"msg",
"[",
"4",
":",
"6",
"]",
")",
">",
"0",
":",
"index",
"=",
"int",
"(",
"msg",
"[",
"4",
":",
"6",
"]",
")",
"-",
"1",
"return",
"{",
"'values'",
":",
"[",
"self",
".",
"_cr_one_custom_value_decode",
"(",
"index",
",",
"msg",
"[",
"6",
":",
"12",
"]",
")",
"]",
"}",
"else",
":",
"part",
"=",
"6",
"ret",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"Max",
".",
"SETTINGS",
".",
"value",
")",
":",
"ret",
".",
"append",
"(",
"self",
".",
"_cr_one_custom_value_decode",
"(",
"i",
",",
"msg",
"[",
"part",
":",
"part",
"+",
"6",
"]",
")",
")",
"part",
"+=",
"6",
"return",
"{",
"'values'",
":",
"ret",
"}"
] |
CR: Custom values
|
[
"CR",
":",
"Custom",
"values"
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L74-L85
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._cs_decode
|
def _cs_decode(self, msg):
"""CS: Output status for all outputs."""
output_status = [x == '1' for x in msg[4:4+Max.OUTPUTS.value]]
return {'output_status': output_status}
|
python
|
def _cs_decode(self, msg):
"""CS: Output status for all outputs."""
output_status = [x == '1' for x in msg[4:4+Max.OUTPUTS.value]]
return {'output_status': output_status}
|
[
"def",
"_cs_decode",
"(",
"self",
",",
"msg",
")",
":",
"output_status",
"=",
"[",
"x",
"==",
"'1'",
"for",
"x",
"in",
"msg",
"[",
"4",
":",
"4",
"+",
"Max",
".",
"OUTPUTS",
".",
"value",
"]",
"]",
"return",
"{",
"'output_status'",
":",
"output_status",
"}"
] |
CS: Output status for all outputs.
|
[
"CS",
":",
"Output",
"status",
"for",
"all",
"outputs",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L91-L94
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._ee_decode
|
def _ee_decode(self, msg):
"""EE: Entry/exit timer report."""
return {'area': int(msg[4:5])-1, 'is_exit': msg[5:6] == '0',
'timer1': int(msg[6:9]), 'timer2': int(msg[9:12]),
'armed_status': msg[12:13]}
|
python
|
def _ee_decode(self, msg):
"""EE: Entry/exit timer report."""
return {'area': int(msg[4:5])-1, 'is_exit': msg[5:6] == '0',
'timer1': int(msg[6:9]), 'timer2': int(msg[9:12]),
'armed_status': msg[12:13]}
|
[
"def",
"_ee_decode",
"(",
"self",
",",
"msg",
")",
":",
"return",
"{",
"'area'",
":",
"int",
"(",
"msg",
"[",
"4",
":",
"5",
"]",
")",
"-",
"1",
",",
"'is_exit'",
":",
"msg",
"[",
"5",
":",
"6",
"]",
"==",
"'0'",
",",
"'timer1'",
":",
"int",
"(",
"msg",
"[",
"6",
":",
"9",
"]",
")",
",",
"'timer2'",
":",
"int",
"(",
"msg",
"[",
"9",
":",
"12",
"]",
")",
",",
"'armed_status'",
":",
"msg",
"[",
"12",
":",
"13",
"]",
"}"
] |
EE: Entry/exit timer report.
|
[
"EE",
":",
"Entry",
"/",
"exit",
"timer",
"report",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L100-L104
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._ic_decode
|
def _ic_decode(self, msg):
"""IC: Send Valid Or Invalid User Code Format."""
code = msg[4:16]
if re.match(r'(0\d){6}', code):
code = re.sub(r'0(\d)', r'\1', code)
return {'code': code, 'user': int(msg[16:19])-1,
'keypad': int(msg[19:21])-1}
|
python
|
def _ic_decode(self, msg):
"""IC: Send Valid Or Invalid User Code Format."""
code = msg[4:16]
if re.match(r'(0\d){6}', code):
code = re.sub(r'0(\d)', r'\1', code)
return {'code': code, 'user': int(msg[16:19])-1,
'keypad': int(msg[19:21])-1}
|
[
"def",
"_ic_decode",
"(",
"self",
",",
"msg",
")",
":",
"code",
"=",
"msg",
"[",
"4",
":",
"16",
"]",
"if",
"re",
".",
"match",
"(",
"r'(0\\d){6}'",
",",
"code",
")",
":",
"code",
"=",
"re",
".",
"sub",
"(",
"r'0(\\d)'",
",",
"r'\\1'",
",",
"code",
")",
"return",
"{",
"'code'",
":",
"code",
",",
"'user'",
":",
"int",
"(",
"msg",
"[",
"16",
":",
"19",
"]",
")",
"-",
"1",
",",
"'keypad'",
":",
"int",
"(",
"msg",
"[",
"19",
":",
"21",
"]",
")",
"-",
"1",
"}"
] |
IC: Send Valid Or Invalid User Code Format.
|
[
"IC",
":",
"Send",
"Valid",
"Or",
"Invalid",
"User",
"Code",
"Format",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L106-L112
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._ka_decode
|
def _ka_decode(self, msg):
"""KA: Keypad areas for all keypads."""
return {'keypad_areas': [ord(x)-0x31 for x in msg[4:4+Max.KEYPADS.value]]}
|
python
|
def _ka_decode(self, msg):
"""KA: Keypad areas for all keypads."""
return {'keypad_areas': [ord(x)-0x31 for x in msg[4:4+Max.KEYPADS.value]]}
|
[
"def",
"_ka_decode",
"(",
"self",
",",
"msg",
")",
":",
"return",
"{",
"'keypad_areas'",
":",
"[",
"ord",
"(",
"x",
")",
"-",
"0x31",
"for",
"x",
"in",
"msg",
"[",
"4",
":",
"4",
"+",
"Max",
".",
"KEYPADS",
".",
"value",
"]",
"]",
"}"
] |
KA: Keypad areas for all keypads.
|
[
"KA",
":",
"Keypad",
"areas",
"for",
"all",
"keypads",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L118-L120
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._lw_decode
|
def _lw_decode(self, msg):
"""LW: temperatures from all keypads and zones 1-16."""
keypad_temps = []
zone_temps = []
for i in range(16):
keypad_temps.append(int(msg[4+3*i:7+3*i]) - 40)
zone_temps.append(int(msg[52+3*i:55+3*i]) - 60)
return {'keypad_temps': keypad_temps, 'zone_temps': zone_temps}
|
python
|
def _lw_decode(self, msg):
"""LW: temperatures from all keypads and zones 1-16."""
keypad_temps = []
zone_temps = []
for i in range(16):
keypad_temps.append(int(msg[4+3*i:7+3*i]) - 40)
zone_temps.append(int(msg[52+3*i:55+3*i]) - 60)
return {'keypad_temps': keypad_temps, 'zone_temps': zone_temps}
|
[
"def",
"_lw_decode",
"(",
"self",
",",
"msg",
")",
":",
"keypad_temps",
"=",
"[",
"]",
"zone_temps",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"16",
")",
":",
"keypad_temps",
".",
"append",
"(",
"int",
"(",
"msg",
"[",
"4",
"+",
"3",
"*",
"i",
":",
"7",
"+",
"3",
"*",
"i",
"]",
")",
"-",
"40",
")",
"zone_temps",
".",
"append",
"(",
"int",
"(",
"msg",
"[",
"52",
"+",
"3",
"*",
"i",
":",
"55",
"+",
"3",
"*",
"i",
"]",
")",
"-",
"60",
")",
"return",
"{",
"'keypad_temps'",
":",
"keypad_temps",
",",
"'zone_temps'",
":",
"zone_temps",
"}"
] |
LW: temperatures from all keypads and zones 1-16.
|
[
"LW",
":",
"temperatures",
"from",
"all",
"keypads",
"and",
"zones",
"1",
"-",
"16",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L126-L133
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._pc_decode
|
def _pc_decode(self, msg):
"""PC: PLC (lighting) change."""
housecode = msg[4:7]
return {'housecode': housecode, 'index': housecode_to_index(housecode),
'light_level': int(msg[7:9])}
|
python
|
def _pc_decode(self, msg):
"""PC: PLC (lighting) change."""
housecode = msg[4:7]
return {'housecode': housecode, 'index': housecode_to_index(housecode),
'light_level': int(msg[7:9])}
|
[
"def",
"_pc_decode",
"(",
"self",
",",
"msg",
")",
":",
"housecode",
"=",
"msg",
"[",
"4",
":",
"7",
"]",
"return",
"{",
"'housecode'",
":",
"housecode",
",",
"'index'",
":",
"housecode_to_index",
"(",
"housecode",
")",
",",
"'light_level'",
":",
"int",
"(",
"msg",
"[",
"7",
":",
"9",
"]",
")",
"}"
] |
PC: PLC (lighting) change.
|
[
"PC",
":",
"PLC",
"(",
"lighting",
")",
"change",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L135-L139
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._sd_decode
|
def _sd_decode(self, msg):
"""SD: Description text."""
desc_ch1 = msg[9]
show_on_keypad = ord(desc_ch1) >= 0x80
if show_on_keypad:
desc_ch1 = chr(ord(desc_ch1) & 0x7f)
return {'desc_type': int(msg[4:6]), 'unit': int(msg[6:9])-1,
'desc': (desc_ch1+msg[10:25]).rstrip(),
'show_on_keypad': show_on_keypad}
|
python
|
def _sd_decode(self, msg):
"""SD: Description text."""
desc_ch1 = msg[9]
show_on_keypad = ord(desc_ch1) >= 0x80
if show_on_keypad:
desc_ch1 = chr(ord(desc_ch1) & 0x7f)
return {'desc_type': int(msg[4:6]), 'unit': int(msg[6:9])-1,
'desc': (desc_ch1+msg[10:25]).rstrip(),
'show_on_keypad': show_on_keypad}
|
[
"def",
"_sd_decode",
"(",
"self",
",",
"msg",
")",
":",
"desc_ch1",
"=",
"msg",
"[",
"9",
"]",
"show_on_keypad",
"=",
"ord",
"(",
"desc_ch1",
")",
">=",
"0x80",
"if",
"show_on_keypad",
":",
"desc_ch1",
"=",
"chr",
"(",
"ord",
"(",
"desc_ch1",
")",
"&",
"0x7f",
")",
"return",
"{",
"'desc_type'",
":",
"int",
"(",
"msg",
"[",
"4",
":",
"6",
"]",
")",
",",
"'unit'",
":",
"int",
"(",
"msg",
"[",
"6",
":",
"9",
"]",
")",
"-",
"1",
",",
"'desc'",
":",
"(",
"desc_ch1",
"+",
"msg",
"[",
"10",
":",
"25",
"]",
")",
".",
"rstrip",
"(",
")",
",",
"'show_on_keypad'",
":",
"show_on_keypad",
"}"
] |
SD: Description text.
|
[
"SD",
":",
"Description",
"text",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L150-L158
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._st_decode
|
def _st_decode(self, msg):
"""ST: Temperature update."""
group = int(msg[4:5])
temperature = int(msg[7:10])
if group == 0:
temperature -= 60
elif group == 1:
temperature -= 40
return {'group': group, 'device': int(msg[5:7])-1,
'temperature': temperature}
|
python
|
def _st_decode(self, msg):
"""ST: Temperature update."""
group = int(msg[4:5])
temperature = int(msg[7:10])
if group == 0:
temperature -= 60
elif group == 1:
temperature -= 40
return {'group': group, 'device': int(msg[5:7])-1,
'temperature': temperature}
|
[
"def",
"_st_decode",
"(",
"self",
",",
"msg",
")",
":",
"group",
"=",
"int",
"(",
"msg",
"[",
"4",
":",
"5",
"]",
")",
"temperature",
"=",
"int",
"(",
"msg",
"[",
"7",
":",
"10",
"]",
")",
"if",
"group",
"==",
"0",
":",
"temperature",
"-=",
"60",
"elif",
"group",
"==",
"1",
":",
"temperature",
"-=",
"40",
"return",
"{",
"'group'",
":",
"group",
",",
"'device'",
":",
"int",
"(",
"msg",
"[",
"5",
":",
"7",
"]",
")",
"-",
"1",
",",
"'temperature'",
":",
"temperature",
"}"
] |
ST: Temperature update.
|
[
"ST",
":",
"Temperature",
"update",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L164-L173
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._tr_decode
|
def _tr_decode(self, msg):
"""TR: Thermostat data response."""
return {'thermostat_index': int(msg[4:6])-1, 'mode': int(msg[6]),
'hold': msg[7] == '1', 'fan': int(msg[8]),
'current_temp': int(msg[9:11]), 'heat_setpoint': int(msg[11:13]),
'cool_setpoint': int(msg[13:15]), 'humidity': int(msg[15:17])}
|
python
|
def _tr_decode(self, msg):
"""TR: Thermostat data response."""
return {'thermostat_index': int(msg[4:6])-1, 'mode': int(msg[6]),
'hold': msg[7] == '1', 'fan': int(msg[8]),
'current_temp': int(msg[9:11]), 'heat_setpoint': int(msg[11:13]),
'cool_setpoint': int(msg[13:15]), 'humidity': int(msg[15:17])}
|
[
"def",
"_tr_decode",
"(",
"self",
",",
"msg",
")",
":",
"return",
"{",
"'thermostat_index'",
":",
"int",
"(",
"msg",
"[",
"4",
":",
"6",
"]",
")",
"-",
"1",
",",
"'mode'",
":",
"int",
"(",
"msg",
"[",
"6",
"]",
")",
",",
"'hold'",
":",
"msg",
"[",
"7",
"]",
"==",
"'1'",
",",
"'fan'",
":",
"int",
"(",
"msg",
"[",
"8",
"]",
")",
",",
"'current_temp'",
":",
"int",
"(",
"msg",
"[",
"9",
":",
"11",
"]",
")",
",",
"'heat_setpoint'",
":",
"int",
"(",
"msg",
"[",
"11",
":",
"13",
"]",
")",
",",
"'cool_setpoint'",
":",
"int",
"(",
"msg",
"[",
"13",
":",
"15",
"]",
")",
",",
"'humidity'",
":",
"int",
"(",
"msg",
"[",
"15",
":",
"17",
"]",
")",
"}"
] |
TR: Thermostat data response.
|
[
"TR",
":",
"Thermostat",
"data",
"response",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L179-L184
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._vn_decode
|
def _vn_decode(self, msg):
"""VN: Version information."""
elkm1_version = "{}.{}.{}".format(int(msg[4:6], 16), int(msg[6:8], 16),
int(msg[8:10], 16))
xep_version = "{}.{}.{}".format(int(msg[10:12], 16), int(msg[12:14], 16),
int(msg[14:16], 16))
return {'elkm1_version': elkm1_version, 'xep_version': xep_version}
|
python
|
def _vn_decode(self, msg):
"""VN: Version information."""
elkm1_version = "{}.{}.{}".format(int(msg[4:6], 16), int(msg[6:8], 16),
int(msg[8:10], 16))
xep_version = "{}.{}.{}".format(int(msg[10:12], 16), int(msg[12:14], 16),
int(msg[14:16], 16))
return {'elkm1_version': elkm1_version, 'xep_version': xep_version}
|
[
"def",
"_vn_decode",
"(",
"self",
",",
"msg",
")",
":",
"elkm1_version",
"=",
"\"{}.{}.{}\"",
".",
"format",
"(",
"int",
"(",
"msg",
"[",
"4",
":",
"6",
"]",
",",
"16",
")",
",",
"int",
"(",
"msg",
"[",
"6",
":",
"8",
"]",
",",
"16",
")",
",",
"int",
"(",
"msg",
"[",
"8",
":",
"10",
"]",
",",
"16",
")",
")",
"xep_version",
"=",
"\"{}.{}.{}\"",
".",
"format",
"(",
"int",
"(",
"msg",
"[",
"10",
":",
"12",
"]",
",",
"16",
")",
",",
"int",
"(",
"msg",
"[",
"12",
":",
"14",
"]",
",",
"16",
")",
",",
"int",
"(",
"msg",
"[",
"14",
":",
"16",
"]",
",",
"16",
")",
")",
"return",
"{",
"'elkm1_version'",
":",
"elkm1_version",
",",
"'xep_version'",
":",
"xep_version",
"}"
] |
VN: Version information.
|
[
"VN",
":",
"Version",
"information",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L186-L192
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._zc_decode
|
def _zc_decode(self, msg):
"""ZC: Zone Change."""
status = _status_decode(int(msg[7:8], 16))
return {'zone_number': int(msg[4:7])-1, 'zone_status': status}
|
python
|
def _zc_decode(self, msg):
"""ZC: Zone Change."""
status = _status_decode(int(msg[7:8], 16))
return {'zone_number': int(msg[4:7])-1, 'zone_status': status}
|
[
"def",
"_zc_decode",
"(",
"self",
",",
"msg",
")",
":",
"status",
"=",
"_status_decode",
"(",
"int",
"(",
"msg",
"[",
"7",
":",
"8",
"]",
",",
"16",
")",
")",
"return",
"{",
"'zone_number'",
":",
"int",
"(",
"msg",
"[",
"4",
":",
"7",
"]",
")",
"-",
"1",
",",
"'zone_status'",
":",
"status",
"}"
] |
ZC: Zone Change.
|
[
"ZC",
":",
"Zone",
"Change",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L202-L205
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._zd_decode
|
def _zd_decode(self, msg):
"""ZD: Zone definitions."""
zone_definitions = [ord(x)-0x30 for x in msg[4:4+Max.ZONES.value]]
return {'zone_definitions': zone_definitions}
|
python
|
def _zd_decode(self, msg):
"""ZD: Zone definitions."""
zone_definitions = [ord(x)-0x30 for x in msg[4:4+Max.ZONES.value]]
return {'zone_definitions': zone_definitions}
|
[
"def",
"_zd_decode",
"(",
"self",
",",
"msg",
")",
":",
"zone_definitions",
"=",
"[",
"ord",
"(",
"x",
")",
"-",
"0x30",
"for",
"x",
"in",
"msg",
"[",
"4",
":",
"4",
"+",
"Max",
".",
"ZONES",
".",
"value",
"]",
"]",
"return",
"{",
"'zone_definitions'",
":",
"zone_definitions",
"}"
] |
ZD: Zone definitions.
|
[
"ZD",
":",
"Zone",
"definitions",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L207-L210
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._zp_decode
|
def _zp_decode(self, msg):
"""ZP: Zone partitions."""
zone_partitions = [ord(x)-0x31 for x in msg[4:4+Max.ZONES.value]]
return {'zone_partitions': zone_partitions}
|
python
|
def _zp_decode(self, msg):
"""ZP: Zone partitions."""
zone_partitions = [ord(x)-0x31 for x in msg[4:4+Max.ZONES.value]]
return {'zone_partitions': zone_partitions}
|
[
"def",
"_zp_decode",
"(",
"self",
",",
"msg",
")",
":",
"zone_partitions",
"=",
"[",
"ord",
"(",
"x",
")",
"-",
"0x31",
"for",
"x",
"in",
"msg",
"[",
"4",
":",
"4",
"+",
"Max",
".",
"ZONES",
".",
"value",
"]",
"]",
"return",
"{",
"'zone_partitions'",
":",
"zone_partitions",
"}"
] |
ZP: Zone partitions.
|
[
"ZP",
":",
"Zone",
"partitions",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L212-L215
|
gwww/elkm1
|
elkm1_lib/message.py
|
MessageDecode._zs_decode
|
def _zs_decode(self, msg):
"""ZS: Zone statuses."""
status = [_status_decode(int(x, 16)) for x in msg[4:4+Max.ZONES.value]]
return {'zone_statuses': status}
|
python
|
def _zs_decode(self, msg):
"""ZS: Zone statuses."""
status = [_status_decode(int(x, 16)) for x in msg[4:4+Max.ZONES.value]]
return {'zone_statuses': status}
|
[
"def",
"_zs_decode",
"(",
"self",
",",
"msg",
")",
":",
"status",
"=",
"[",
"_status_decode",
"(",
"int",
"(",
"x",
",",
"16",
")",
")",
"for",
"x",
"in",
"msg",
"[",
"4",
":",
"4",
"+",
"Max",
".",
"ZONES",
".",
"value",
"]",
"]",
"return",
"{",
"'zone_statuses'",
":",
"status",
"}"
] |
ZS: Zone statuses.
|
[
"ZS",
":",
"Zone",
"statuses",
"."
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L217-L220
|
MonashBI/arcana
|
arcana/repository/xnat.py
|
XnatRepo.connect
|
def connect(self):
"""
Parameters
----------
prev_login : xnat.XNATSession
An XNAT login that has been opened in the code that calls
the method that calls login. It is wrapped in a
NoExitWrapper so the returned connection can be used
in a "with" statement in the method.
"""
sess_kwargs = {}
if self._user is not None:
sess_kwargs['user'] = self._user
if self._password is not None:
sess_kwargs['password'] = self._password
self._login = xnat.connect(server=self._server, **sess_kwargs)
|
python
|
def connect(self):
"""
Parameters
----------
prev_login : xnat.XNATSession
An XNAT login that has been opened in the code that calls
the method that calls login. It is wrapped in a
NoExitWrapper so the returned connection can be used
in a "with" statement in the method.
"""
sess_kwargs = {}
if self._user is not None:
sess_kwargs['user'] = self._user
if self._password is not None:
sess_kwargs['password'] = self._password
self._login = xnat.connect(server=self._server, **sess_kwargs)
|
[
"def",
"connect",
"(",
"self",
")",
":",
"sess_kwargs",
"=",
"{",
"}",
"if",
"self",
".",
"_user",
"is",
"not",
"None",
":",
"sess_kwargs",
"[",
"'user'",
"]",
"=",
"self",
".",
"_user",
"if",
"self",
".",
"_password",
"is",
"not",
"None",
":",
"sess_kwargs",
"[",
"'password'",
"]",
"=",
"self",
".",
"_password",
"self",
".",
"_login",
"=",
"xnat",
".",
"connect",
"(",
"server",
"=",
"self",
".",
"_server",
",",
"*",
"*",
"sess_kwargs",
")"
] |
Parameters
----------
prev_login : xnat.XNATSession
An XNAT login that has been opened in the code that calls
the method that calls login. It is wrapped in a
NoExitWrapper so the returned connection can be used
in a "with" statement in the method.
|
[
"Parameters",
"----------",
"prev_login",
":",
"xnat",
".",
"XNATSession",
"An",
"XNAT",
"login",
"that",
"has",
"been",
"opened",
"in",
"the",
"code",
"that",
"calls",
"the",
"method",
"that",
"calls",
"login",
".",
"It",
"is",
"wrapped",
"in",
"a",
"NoExitWrapper",
"so",
"the",
"returned",
"connection",
"can",
"be",
"used",
"in",
"a",
"with",
"statement",
"in",
"the",
"method",
"."
] |
train
|
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/xnat.py#L144-L159
|
MonashBI/arcana
|
arcana/repository/xnat.py
|
XnatRepo.get_fileset
|
def get_fileset(self, fileset):
"""
Caches a single fileset (if the 'path' attribute is accessed
and it has not been previously cached for example
Parameters
----------
fileset : Fileset
The fileset to cache
prev_login : xnat.XNATSession
An XNATSession object to use for the connection. A new
one is created if one isn't provided
Returns
-------
primary_path : str
The path of the primary file once it has been cached
aux_paths : dict[str, str]
A dictionary containing a mapping of auxiliary file names to
paths
"""
if fileset.format is None:
raise ArcanaUsageError(
"Attempting to download {}, which has not been assigned a "
"file format (see Fileset.formatted)".format(fileset))
self._check_repository(fileset)
with self: # Connect to the XNAT repository if haven't already
xsession = self.get_xsession(fileset)
xscan = xsession.scans[fileset.name]
# Set URI so we can retrieve checksums if required
fileset.uri = xscan.uri
fileset.id = xscan.id
cache_path = self._cache_path(fileset)
need_to_download = True
if op.exists(cache_path):
if self._check_md5:
md5_path = cache_path + XnatRepo.MD5_SUFFIX
try:
with open(md5_path, 'r') as f:
cached_checksums = json.load(f)
if cached_checksums == fileset.checksums:
need_to_download = False
except IOError:
pass
else:
need_to_download = False
if need_to_download:
# if fileset._resource_name is not None:
xresource = xscan.resources[fileset._resource_name]
# else:
# xresources = []
# for resource_name in fileset.format.xnat_resource_names:
# try:
# xresources.append(xscan.resources[resource_name])
# except KeyError:
# pass
# if not xresources:
# raise ArcanaError(
# "Could not find matching resource for {} ('{}') "
# "in {}, available resources are '{}'"
# .format(
# self.format,
# "', '".join(
# fileset.format.xnat_resource_names),
# xscan.uri,
# "', '".join(
# r.label
# for r in list(xscan.resources.values()))))
# elif len(xresources) > 1:
# logger.warning(
# "Found multiple acceptable resources for {}: {}"
# .format(fileset,
# ', '.join(str(r) for r in xresources)))
# xresource = xresources[0]
# The path to the directory which the files will be
# downloaded to.
tmp_dir = cache_path + '.download'
try:
# Attempt to make tmp download directory. This will
# fail if another process (or previous attempt) has
# already created it. In that case this process will
# wait to see if that download finishes successfully,
# and if so use the cached version.
os.mkdir(tmp_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# Another process may be concurrently downloading
# the same file to the cache. Wait for
# 'race_cond_delay' seconds and then check that it
# has been completed or assume interrupted and
# redownload.
self._delayed_download(
tmp_dir, xresource, xscan, fileset,
xsession.label, cache_path,
delay=self._race_cond_delay)
else:
raise
else:
self.download_fileset(
tmp_dir, xresource, xscan, fileset,
xsession.label, cache_path)
shutil.rmtree(tmp_dir)
if not fileset.format.directory:
(primary_path, aux_paths) = fileset.format.assort_files(
op.join(cache_path, f) for f in os.listdir(cache_path))
else:
primary_path = cache_path
aux_paths = None
return primary_path, aux_paths
|
python
|
def get_fileset(self, fileset):
"""
Caches a single fileset (if the 'path' attribute is accessed
and it has not been previously cached for example
Parameters
----------
fileset : Fileset
The fileset to cache
prev_login : xnat.XNATSession
An XNATSession object to use for the connection. A new
one is created if one isn't provided
Returns
-------
primary_path : str
The path of the primary file once it has been cached
aux_paths : dict[str, str]
A dictionary containing a mapping of auxiliary file names to
paths
"""
if fileset.format is None:
raise ArcanaUsageError(
"Attempting to download {}, which has not been assigned a "
"file format (see Fileset.formatted)".format(fileset))
self._check_repository(fileset)
with self: # Connect to the XNAT repository if haven't already
xsession = self.get_xsession(fileset)
xscan = xsession.scans[fileset.name]
# Set URI so we can retrieve checksums if required
fileset.uri = xscan.uri
fileset.id = xscan.id
cache_path = self._cache_path(fileset)
need_to_download = True
if op.exists(cache_path):
if self._check_md5:
md5_path = cache_path + XnatRepo.MD5_SUFFIX
try:
with open(md5_path, 'r') as f:
cached_checksums = json.load(f)
if cached_checksums == fileset.checksums:
need_to_download = False
except IOError:
pass
else:
need_to_download = False
if need_to_download:
# if fileset._resource_name is not None:
xresource = xscan.resources[fileset._resource_name]
# else:
# xresources = []
# for resource_name in fileset.format.xnat_resource_names:
# try:
# xresources.append(xscan.resources[resource_name])
# except KeyError:
# pass
# if not xresources:
# raise ArcanaError(
# "Could not find matching resource for {} ('{}') "
# "in {}, available resources are '{}'"
# .format(
# self.format,
# "', '".join(
# fileset.format.xnat_resource_names),
# xscan.uri,
# "', '".join(
# r.label
# for r in list(xscan.resources.values()))))
# elif len(xresources) > 1:
# logger.warning(
# "Found multiple acceptable resources for {}: {}"
# .format(fileset,
# ', '.join(str(r) for r in xresources)))
# xresource = xresources[0]
# The path to the directory which the files will be
# downloaded to.
tmp_dir = cache_path + '.download'
try:
# Attempt to make tmp download directory. This will
# fail if another process (or previous attempt) has
# already created it. In that case this process will
# wait to see if that download finishes successfully,
# and if so use the cached version.
os.mkdir(tmp_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# Another process may be concurrently downloading
# the same file to the cache. Wait for
# 'race_cond_delay' seconds and then check that it
# has been completed or assume interrupted and
# redownload.
self._delayed_download(
tmp_dir, xresource, xscan, fileset,
xsession.label, cache_path,
delay=self._race_cond_delay)
else:
raise
else:
self.download_fileset(
tmp_dir, xresource, xscan, fileset,
xsession.label, cache_path)
shutil.rmtree(tmp_dir)
if not fileset.format.directory:
(primary_path, aux_paths) = fileset.format.assort_files(
op.join(cache_path, f) for f in os.listdir(cache_path))
else:
primary_path = cache_path
aux_paths = None
return primary_path, aux_paths
|
[
"def",
"get_fileset",
"(",
"self",
",",
"fileset",
")",
":",
"if",
"fileset",
".",
"format",
"is",
"None",
":",
"raise",
"ArcanaUsageError",
"(",
"\"Attempting to download {}, which has not been assigned a \"",
"\"file format (see Fileset.formatted)\"",
".",
"format",
"(",
"fileset",
")",
")",
"self",
".",
"_check_repository",
"(",
"fileset",
")",
"with",
"self",
":",
"# Connect to the XNAT repository if haven't already",
"xsession",
"=",
"self",
".",
"get_xsession",
"(",
"fileset",
")",
"xscan",
"=",
"xsession",
".",
"scans",
"[",
"fileset",
".",
"name",
"]",
"# Set URI so we can retrieve checksums if required",
"fileset",
".",
"uri",
"=",
"xscan",
".",
"uri",
"fileset",
".",
"id",
"=",
"xscan",
".",
"id",
"cache_path",
"=",
"self",
".",
"_cache_path",
"(",
"fileset",
")",
"need_to_download",
"=",
"True",
"if",
"op",
".",
"exists",
"(",
"cache_path",
")",
":",
"if",
"self",
".",
"_check_md5",
":",
"md5_path",
"=",
"cache_path",
"+",
"XnatRepo",
".",
"MD5_SUFFIX",
"try",
":",
"with",
"open",
"(",
"md5_path",
",",
"'r'",
")",
"as",
"f",
":",
"cached_checksums",
"=",
"json",
".",
"load",
"(",
"f",
")",
"if",
"cached_checksums",
"==",
"fileset",
".",
"checksums",
":",
"need_to_download",
"=",
"False",
"except",
"IOError",
":",
"pass",
"else",
":",
"need_to_download",
"=",
"False",
"if",
"need_to_download",
":",
"# if fileset._resource_name is not None:",
"xresource",
"=",
"xscan",
".",
"resources",
"[",
"fileset",
".",
"_resource_name",
"]",
"# else:",
"# xresources = []",
"# for resource_name in fileset.format.xnat_resource_names:",
"# try:",
"# xresources.append(xscan.resources[resource_name])",
"# except KeyError:",
"# pass",
"# if not xresources:",
"# raise ArcanaError(",
"# \"Could not find matching resource for {} ('{}') \"",
"# \"in {}, available resources are '{}'\"",
"# .format(",
"# self.format,",
"# \"', '\".join(",
"# fileset.format.xnat_resource_names),",
"# xscan.uri,",
"# \"', '\".join(",
"# r.label",
"# for r in list(xscan.resources.values()))))",
"# elif len(xresources) > 1:",
"# logger.warning(",
"# \"Found multiple acceptable resources for {}: {}\"",
"# .format(fileset,",
"# ', '.join(str(r) for r in xresources)))",
"# xresource = xresources[0]",
"# The path to the directory which the files will be",
"# downloaded to.",
"tmp_dir",
"=",
"cache_path",
"+",
"'.download'",
"try",
":",
"# Attempt to make tmp download directory. This will",
"# fail if another process (or previous attempt) has",
"# already created it. In that case this process will",
"# wait to see if that download finishes successfully,",
"# and if so use the cached version.",
"os",
".",
"mkdir",
"(",
"tmp_dir",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"EEXIST",
":",
"# Another process may be concurrently downloading",
"# the same file to the cache. Wait for",
"# 'race_cond_delay' seconds and then check that it",
"# has been completed or assume interrupted and",
"# redownload.",
"self",
".",
"_delayed_download",
"(",
"tmp_dir",
",",
"xresource",
",",
"xscan",
",",
"fileset",
",",
"xsession",
".",
"label",
",",
"cache_path",
",",
"delay",
"=",
"self",
".",
"_race_cond_delay",
")",
"else",
":",
"raise",
"else",
":",
"self",
".",
"download_fileset",
"(",
"tmp_dir",
",",
"xresource",
",",
"xscan",
",",
"fileset",
",",
"xsession",
".",
"label",
",",
"cache_path",
")",
"shutil",
".",
"rmtree",
"(",
"tmp_dir",
")",
"if",
"not",
"fileset",
".",
"format",
".",
"directory",
":",
"(",
"primary_path",
",",
"aux_paths",
")",
"=",
"fileset",
".",
"format",
".",
"assort_files",
"(",
"op",
".",
"join",
"(",
"cache_path",
",",
"f",
")",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"cache_path",
")",
")",
"else",
":",
"primary_path",
"=",
"cache_path",
"aux_paths",
"=",
"None",
"return",
"primary_path",
",",
"aux_paths"
] |
Caches a single fileset (if the 'path' attribute is accessed
and it has not been previously cached for example
Parameters
----------
fileset : Fileset
The fileset to cache
prev_login : xnat.XNATSession
An XNATSession object to use for the connection. A new
one is created if one isn't provided
Returns
-------
primary_path : str
The path of the primary file once it has been cached
aux_paths : dict[str, str]
A dictionary containing a mapping of auxiliary file names to
paths
|
[
"Caches",
"a",
"single",
"fileset",
"(",
"if",
"the",
"path",
"attribute",
"is",
"accessed",
"and",
"it",
"has",
"not",
"been",
"previously",
"cached",
"for",
"example"
] |
train
|
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/xnat.py#L165-L273
|
MonashBI/arcana
|
arcana/repository/xnat.py
|
XnatRepo.get_checksums
|
def get_checksums(self, fileset):
"""
Downloads the MD5 digests associated with the files in the file-set.
These are saved with the downloaded files in the cache and used to
check if the files have been updated on the server
Parameters
----------
resource : xnat.ResourceCatalog
The xnat resource
file_format : FileFormat
The format of the fileset to get the checksums for. Used to
determine the primary file within the resource and change the
corresponding key in the checksums dictionary to '.' to match
the way it is generated locally by Arcana.
"""
if fileset.uri is None:
raise ArcanaUsageError(
"Can't retrieve checksums as URI has not been set for {}"
.format(fileset))
with self:
checksums = {r['Name']: r['digest']
for r in self._login.get_json(fileset.uri + '/files')[
'ResultSet']['Result']}
if not fileset.format.directory:
# Replace the key corresponding to the primary file with '.' to
# match the way that checksums are created by Arcana
primary = fileset.format.assort_files(checksums.keys())[0]
checksums['.'] = checksums.pop(primary)
return checksums
|
python
|
def get_checksums(self, fileset):
"""
Downloads the MD5 digests associated with the files in the file-set.
These are saved with the downloaded files in the cache and used to
check if the files have been updated on the server
Parameters
----------
resource : xnat.ResourceCatalog
The xnat resource
file_format : FileFormat
The format of the fileset to get the checksums for. Used to
determine the primary file within the resource and change the
corresponding key in the checksums dictionary to '.' to match
the way it is generated locally by Arcana.
"""
if fileset.uri is None:
raise ArcanaUsageError(
"Can't retrieve checksums as URI has not been set for {}"
.format(fileset))
with self:
checksums = {r['Name']: r['digest']
for r in self._login.get_json(fileset.uri + '/files')[
'ResultSet']['Result']}
if not fileset.format.directory:
# Replace the key corresponding to the primary file with '.' to
# match the way that checksums are created by Arcana
primary = fileset.format.assort_files(checksums.keys())[0]
checksums['.'] = checksums.pop(primary)
return checksums
|
[
"def",
"get_checksums",
"(",
"self",
",",
"fileset",
")",
":",
"if",
"fileset",
".",
"uri",
"is",
"None",
":",
"raise",
"ArcanaUsageError",
"(",
"\"Can't retrieve checksums as URI has not been set for {}\"",
".",
"format",
"(",
"fileset",
")",
")",
"with",
"self",
":",
"checksums",
"=",
"{",
"r",
"[",
"'Name'",
"]",
":",
"r",
"[",
"'digest'",
"]",
"for",
"r",
"in",
"self",
".",
"_login",
".",
"get_json",
"(",
"fileset",
".",
"uri",
"+",
"'/files'",
")",
"[",
"'ResultSet'",
"]",
"[",
"'Result'",
"]",
"}",
"if",
"not",
"fileset",
".",
"format",
".",
"directory",
":",
"# Replace the key corresponding to the primary file with '.' to",
"# match the way that checksums are created by Arcana",
"primary",
"=",
"fileset",
".",
"format",
".",
"assort_files",
"(",
"checksums",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"checksums",
"[",
"'.'",
"]",
"=",
"checksums",
".",
"pop",
"(",
"primary",
")",
"return",
"checksums"
] |
Downloads the MD5 digests associated with the files in the file-set.
These are saved with the downloaded files in the cache and used to
check if the files have been updated on the server
Parameters
----------
resource : xnat.ResourceCatalog
The xnat resource
file_format : FileFormat
The format of the fileset to get the checksums for. Used to
determine the primary file within the resource and change the
corresponding key in the checksums dictionary to '.' to match
the way it is generated locally by Arcana.
|
[
"Downloads",
"the",
"MD5",
"digests",
"associated",
"with",
"the",
"files",
"in",
"the",
"file",
"-",
"set",
".",
"These",
"are",
"saved",
"with",
"the",
"downloaded",
"files",
"in",
"the",
"cache",
"and",
"used",
"to",
"check",
"if",
"the",
"files",
"have",
"been",
"updated",
"on",
"the",
"server"
] |
train
|
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/xnat.py#L377-L406
|
MonashBI/arcana
|
arcana/repository/xnat.py
|
XnatRepo.find_data
|
def find_data(self, subject_ids=None, visit_ids=None, **kwargs):
"""
Find all filesets, fields and provenance records within an XNAT project
Parameters
----------
subject_ids : list(str)
List of subject IDs with which to filter the tree with. If
None all are returned
visit_ids : list(str)
List of visit IDs with which to filter the tree with. If
None all are returned
Returns
-------
filesets : list[Fileset]
All the filesets found in the repository
fields : list[Field]
All the fields found in the repository
records : list[Record]
The provenance records found in the repository
"""
subject_ids = self.convert_subject_ids(subject_ids)
# Add derived visit IDs to list of visit ids to filter
all_filesets = []
all_fields = []
all_records = []
# Note we prefer the use of raw REST API calls here for performance
# reasons over using XnatPy's data structures.
with self:
# Get map of internal subject IDs to subject labels in project
subject_xids_to_labels = {
s['ID']: s['label'] for s in self._login.get_json(
'/data/projects/{}/subjects'.format(self.project_id))[
'ResultSet']['Result']}
# Get list of all sessions within project
session_xids = [
s['ID'] for s in self._login.get_json(
'/data/projects/{}/experiments'.format(self.project_id))[
'ResultSet']['Result']
if (self.session_filter is None or
self.session_filter.match(s['label']))]
for session_xid in session_xids:
session_json = self._login.get_json(
'/data/projects/{}/experiments/{}'.format(
self.project_id, session_xid))['items'][0]
subject_xid = session_json['data_fields']['subject_ID']
subject_id = subject_xids_to_labels[subject_xid]
session_label = session_json['data_fields']['label']
session_uri = (
'/data/archive/projects/{}/subjects/{}/experiments/{}'
.format(self.project_id, subject_xid, session_xid))
# Get field values. We do this first so we can check for the
# DERIVED_FROM_FIELD to determine the correct session label and
# study name
field_values = {}
try:
fields_json = next(
c['items'] for c in session_json['children']
if c['field'] == 'fields/field')
except StopIteration:
pass
else:
for js in fields_json:
try:
value = js['data_fields']['field']
except KeyError:
pass
else:
field_values[js['data_fields']['name']] = value
# Extract study name and derived-from session
if self.DERIVED_FROM_FIELD in field_values:
df_sess_label = field_values.pop(self.DERIVED_FROM_FIELD)
from_study = session_label[len(df_sess_label) + 1:]
session_label = df_sess_label
else:
from_study = None
# Strip subject ID from session label if required
if session_label.startswith(subject_id + '_'):
visit_id = session_label[len(subject_id) + 1:]
else:
visit_id = session_label
# Strip project ID from subject ID if required
if subject_id.startswith(self.project_id + '_'):
subject_id = subject_id[len(self.project_id) + 1:]
# Check subject is summary or not and whether it is to be
# filtered
if subject_id == XnatRepo.SUMMARY_NAME:
subject_id = None
elif not (subject_ids is None or subject_id in subject_ids):
continue
# Check visit is summary or not and whether it is to be
# filtered
if visit_id == XnatRepo.SUMMARY_NAME:
visit_id = None
elif not (visit_ids is None or visit_id in visit_ids):
continue
# Determine frequency
if (subject_id, visit_id) == (None, None):
frequency = 'per_study'
elif visit_id is None:
frequency = 'per_subject'
elif subject_id is None:
frequency = 'per_visit'
else:
frequency = 'per_session'
# Append fields
for name, value in field_values.items():
value = value.replace('"', '"')
all_fields.append(Field(
name=name, value=value, repository=self,
frequency=frequency,
subject_id=subject_id,
visit_id=visit_id,
from_study=from_study,
**kwargs))
# Extract part of JSON relating to files
try:
scans_json = next(
c['items'] for c in session_json['children']
if c['field'] == 'scans/scan')
except StopIteration:
scans_json = []
for scan_json in scans_json:
scan_id = scan_json['data_fields']['ID']
scan_type = scan_json['data_fields'].get('type', '')
scan_uri = '{}/scans/{}'.format(session_uri, scan_id)
try:
resources_json = next(
c['items'] for c in scan_json['children']
if c['field'] == 'file')
except StopIteration:
resources = {}
else:
resources = {js['data_fields']['label']:
js['data_fields'].get('format', None)
for js in resources_json}
# Remove auto-generated snapshots directory
resources.pop('SNAPSHOTS', None)
if scan_type == self.PROV_SCAN:
# Download provenance JSON files and parse into
# records
temp_dir = tempfile.mkdtemp()
try:
with tempfile.TemporaryFile() as temp_zip:
self._login.download_stream(
scan_uri + '/files', temp_zip,
format='zip')
with ZipFile(temp_zip) as zip_file:
zip_file.extractall(temp_dir)
for base_dir, _, fnames in os.walk(temp_dir):
for fname in fnames:
if fname.endswith('.json'):
pipeline_name = fname[:-len('.json')]
json_path = op.join(base_dir, fname)
all_records.append(
Record.load(
pipeline_name, frequency,
subject_id, visit_id,
from_study, json_path))
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
else:
for resource in resources:
all_filesets.append(Fileset(
scan_type, id=scan_id, uri=scan_uri,
repository=self, frequency=frequency,
subject_id=subject_id, visit_id=visit_id,
from_study=from_study,
resource_name=resource, **kwargs))
logger.debug("Found node {}:{} on {}:{}".format(
subject_id, visit_id, self.server, self.project_id))
return all_filesets, all_fields, all_records
|
python
|
def find_data(self, subject_ids=None, visit_ids=None, **kwargs):
"""
Find all filesets, fields and provenance records within an XNAT project
Parameters
----------
subject_ids : list(str)
List of subject IDs with which to filter the tree with. If
None all are returned
visit_ids : list(str)
List of visit IDs with which to filter the tree with. If
None all are returned
Returns
-------
filesets : list[Fileset]
All the filesets found in the repository
fields : list[Field]
All the fields found in the repository
records : list[Record]
The provenance records found in the repository
"""
subject_ids = self.convert_subject_ids(subject_ids)
# Add derived visit IDs to list of visit ids to filter
all_filesets = []
all_fields = []
all_records = []
# Note we prefer the use of raw REST API calls here for performance
# reasons over using XnatPy's data structures.
with self:
# Get map of internal subject IDs to subject labels in project
subject_xids_to_labels = {
s['ID']: s['label'] for s in self._login.get_json(
'/data/projects/{}/subjects'.format(self.project_id))[
'ResultSet']['Result']}
# Get list of all sessions within project
session_xids = [
s['ID'] for s in self._login.get_json(
'/data/projects/{}/experiments'.format(self.project_id))[
'ResultSet']['Result']
if (self.session_filter is None or
self.session_filter.match(s['label']))]
for session_xid in session_xids:
session_json = self._login.get_json(
'/data/projects/{}/experiments/{}'.format(
self.project_id, session_xid))['items'][0]
subject_xid = session_json['data_fields']['subject_ID']
subject_id = subject_xids_to_labels[subject_xid]
session_label = session_json['data_fields']['label']
session_uri = (
'/data/archive/projects/{}/subjects/{}/experiments/{}'
.format(self.project_id, subject_xid, session_xid))
# Get field values. We do this first so we can check for the
# DERIVED_FROM_FIELD to determine the correct session label and
# study name
field_values = {}
try:
fields_json = next(
c['items'] for c in session_json['children']
if c['field'] == 'fields/field')
except StopIteration:
pass
else:
for js in fields_json:
try:
value = js['data_fields']['field']
except KeyError:
pass
else:
field_values[js['data_fields']['name']] = value
# Extract study name and derived-from session
if self.DERIVED_FROM_FIELD in field_values:
df_sess_label = field_values.pop(self.DERIVED_FROM_FIELD)
from_study = session_label[len(df_sess_label) + 1:]
session_label = df_sess_label
else:
from_study = None
# Strip subject ID from session label if required
if session_label.startswith(subject_id + '_'):
visit_id = session_label[len(subject_id) + 1:]
else:
visit_id = session_label
# Strip project ID from subject ID if required
if subject_id.startswith(self.project_id + '_'):
subject_id = subject_id[len(self.project_id) + 1:]
# Check subject is summary or not and whether it is to be
# filtered
if subject_id == XnatRepo.SUMMARY_NAME:
subject_id = None
elif not (subject_ids is None or subject_id in subject_ids):
continue
# Check visit is summary or not and whether it is to be
# filtered
if visit_id == XnatRepo.SUMMARY_NAME:
visit_id = None
elif not (visit_ids is None or visit_id in visit_ids):
continue
# Determine frequency
if (subject_id, visit_id) == (None, None):
frequency = 'per_study'
elif visit_id is None:
frequency = 'per_subject'
elif subject_id is None:
frequency = 'per_visit'
else:
frequency = 'per_session'
# Append fields
for name, value in field_values.items():
value = value.replace('"', '"')
all_fields.append(Field(
name=name, value=value, repository=self,
frequency=frequency,
subject_id=subject_id,
visit_id=visit_id,
from_study=from_study,
**kwargs))
# Extract part of JSON relating to files
try:
scans_json = next(
c['items'] for c in session_json['children']
if c['field'] == 'scans/scan')
except StopIteration:
scans_json = []
for scan_json in scans_json:
scan_id = scan_json['data_fields']['ID']
scan_type = scan_json['data_fields'].get('type', '')
scan_uri = '{}/scans/{}'.format(session_uri, scan_id)
try:
resources_json = next(
c['items'] for c in scan_json['children']
if c['field'] == 'file')
except StopIteration:
resources = {}
else:
resources = {js['data_fields']['label']:
js['data_fields'].get('format', None)
for js in resources_json}
# Remove auto-generated snapshots directory
resources.pop('SNAPSHOTS', None)
if scan_type == self.PROV_SCAN:
# Download provenance JSON files and parse into
# records
temp_dir = tempfile.mkdtemp()
try:
with tempfile.TemporaryFile() as temp_zip:
self._login.download_stream(
scan_uri + '/files', temp_zip,
format='zip')
with ZipFile(temp_zip) as zip_file:
zip_file.extractall(temp_dir)
for base_dir, _, fnames in os.walk(temp_dir):
for fname in fnames:
if fname.endswith('.json'):
pipeline_name = fname[:-len('.json')]
json_path = op.join(base_dir, fname)
all_records.append(
Record.load(
pipeline_name, frequency,
subject_id, visit_id,
from_study, json_path))
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
else:
for resource in resources:
all_filesets.append(Fileset(
scan_type, id=scan_id, uri=scan_uri,
repository=self, frequency=frequency,
subject_id=subject_id, visit_id=visit_id,
from_study=from_study,
resource_name=resource, **kwargs))
logger.debug("Found node {}:{} on {}:{}".format(
subject_id, visit_id, self.server, self.project_id))
return all_filesets, all_fields, all_records
|
[
"def",
"find_data",
"(",
"self",
",",
"subject_ids",
"=",
"None",
",",
"visit_ids",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"subject_ids",
"=",
"self",
".",
"convert_subject_ids",
"(",
"subject_ids",
")",
"# Add derived visit IDs to list of visit ids to filter",
"all_filesets",
"=",
"[",
"]",
"all_fields",
"=",
"[",
"]",
"all_records",
"=",
"[",
"]",
"# Note we prefer the use of raw REST API calls here for performance",
"# reasons over using XnatPy's data structures.",
"with",
"self",
":",
"# Get map of internal subject IDs to subject labels in project",
"subject_xids_to_labels",
"=",
"{",
"s",
"[",
"'ID'",
"]",
":",
"s",
"[",
"'label'",
"]",
"for",
"s",
"in",
"self",
".",
"_login",
".",
"get_json",
"(",
"'/data/projects/{}/subjects'",
".",
"format",
"(",
"self",
".",
"project_id",
")",
")",
"[",
"'ResultSet'",
"]",
"[",
"'Result'",
"]",
"}",
"# Get list of all sessions within project",
"session_xids",
"=",
"[",
"s",
"[",
"'ID'",
"]",
"for",
"s",
"in",
"self",
".",
"_login",
".",
"get_json",
"(",
"'/data/projects/{}/experiments'",
".",
"format",
"(",
"self",
".",
"project_id",
")",
")",
"[",
"'ResultSet'",
"]",
"[",
"'Result'",
"]",
"if",
"(",
"self",
".",
"session_filter",
"is",
"None",
"or",
"self",
".",
"session_filter",
".",
"match",
"(",
"s",
"[",
"'label'",
"]",
")",
")",
"]",
"for",
"session_xid",
"in",
"session_xids",
":",
"session_json",
"=",
"self",
".",
"_login",
".",
"get_json",
"(",
"'/data/projects/{}/experiments/{}'",
".",
"format",
"(",
"self",
".",
"project_id",
",",
"session_xid",
")",
")",
"[",
"'items'",
"]",
"[",
"0",
"]",
"subject_xid",
"=",
"session_json",
"[",
"'data_fields'",
"]",
"[",
"'subject_ID'",
"]",
"subject_id",
"=",
"subject_xids_to_labels",
"[",
"subject_xid",
"]",
"session_label",
"=",
"session_json",
"[",
"'data_fields'",
"]",
"[",
"'label'",
"]",
"session_uri",
"=",
"(",
"'/data/archive/projects/{}/subjects/{}/experiments/{}'",
".",
"format",
"(",
"self",
".",
"project_id",
",",
"subject_xid",
",",
"session_xid",
")",
")",
"# Get field values. We do this first so we can check for the",
"# DERIVED_FROM_FIELD to determine the correct session label and",
"# study name",
"field_values",
"=",
"{",
"}",
"try",
":",
"fields_json",
"=",
"next",
"(",
"c",
"[",
"'items'",
"]",
"for",
"c",
"in",
"session_json",
"[",
"'children'",
"]",
"if",
"c",
"[",
"'field'",
"]",
"==",
"'fields/field'",
")",
"except",
"StopIteration",
":",
"pass",
"else",
":",
"for",
"js",
"in",
"fields_json",
":",
"try",
":",
"value",
"=",
"js",
"[",
"'data_fields'",
"]",
"[",
"'field'",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"field_values",
"[",
"js",
"[",
"'data_fields'",
"]",
"[",
"'name'",
"]",
"]",
"=",
"value",
"# Extract study name and derived-from session",
"if",
"self",
".",
"DERIVED_FROM_FIELD",
"in",
"field_values",
":",
"df_sess_label",
"=",
"field_values",
".",
"pop",
"(",
"self",
".",
"DERIVED_FROM_FIELD",
")",
"from_study",
"=",
"session_label",
"[",
"len",
"(",
"df_sess_label",
")",
"+",
"1",
":",
"]",
"session_label",
"=",
"df_sess_label",
"else",
":",
"from_study",
"=",
"None",
"# Strip subject ID from session label if required",
"if",
"session_label",
".",
"startswith",
"(",
"subject_id",
"+",
"'_'",
")",
":",
"visit_id",
"=",
"session_label",
"[",
"len",
"(",
"subject_id",
")",
"+",
"1",
":",
"]",
"else",
":",
"visit_id",
"=",
"session_label",
"# Strip project ID from subject ID if required",
"if",
"subject_id",
".",
"startswith",
"(",
"self",
".",
"project_id",
"+",
"'_'",
")",
":",
"subject_id",
"=",
"subject_id",
"[",
"len",
"(",
"self",
".",
"project_id",
")",
"+",
"1",
":",
"]",
"# Check subject is summary or not and whether it is to be",
"# filtered",
"if",
"subject_id",
"==",
"XnatRepo",
".",
"SUMMARY_NAME",
":",
"subject_id",
"=",
"None",
"elif",
"not",
"(",
"subject_ids",
"is",
"None",
"or",
"subject_id",
"in",
"subject_ids",
")",
":",
"continue",
"# Check visit is summary or not and whether it is to be",
"# filtered",
"if",
"visit_id",
"==",
"XnatRepo",
".",
"SUMMARY_NAME",
":",
"visit_id",
"=",
"None",
"elif",
"not",
"(",
"visit_ids",
"is",
"None",
"or",
"visit_id",
"in",
"visit_ids",
")",
":",
"continue",
"# Determine frequency",
"if",
"(",
"subject_id",
",",
"visit_id",
")",
"==",
"(",
"None",
",",
"None",
")",
":",
"frequency",
"=",
"'per_study'",
"elif",
"visit_id",
"is",
"None",
":",
"frequency",
"=",
"'per_subject'",
"elif",
"subject_id",
"is",
"None",
":",
"frequency",
"=",
"'per_visit'",
"else",
":",
"frequency",
"=",
"'per_session'",
"# Append fields",
"for",
"name",
",",
"value",
"in",
"field_values",
".",
"items",
"(",
")",
":",
"value",
"=",
"value",
".",
"replace",
"(",
"'"'",
",",
"'\"'",
")",
"all_fields",
".",
"append",
"(",
"Field",
"(",
"name",
"=",
"name",
",",
"value",
"=",
"value",
",",
"repository",
"=",
"self",
",",
"frequency",
"=",
"frequency",
",",
"subject_id",
"=",
"subject_id",
",",
"visit_id",
"=",
"visit_id",
",",
"from_study",
"=",
"from_study",
",",
"*",
"*",
"kwargs",
")",
")",
"# Extract part of JSON relating to files",
"try",
":",
"scans_json",
"=",
"next",
"(",
"c",
"[",
"'items'",
"]",
"for",
"c",
"in",
"session_json",
"[",
"'children'",
"]",
"if",
"c",
"[",
"'field'",
"]",
"==",
"'scans/scan'",
")",
"except",
"StopIteration",
":",
"scans_json",
"=",
"[",
"]",
"for",
"scan_json",
"in",
"scans_json",
":",
"scan_id",
"=",
"scan_json",
"[",
"'data_fields'",
"]",
"[",
"'ID'",
"]",
"scan_type",
"=",
"scan_json",
"[",
"'data_fields'",
"]",
".",
"get",
"(",
"'type'",
",",
"''",
")",
"scan_uri",
"=",
"'{}/scans/{}'",
".",
"format",
"(",
"session_uri",
",",
"scan_id",
")",
"try",
":",
"resources_json",
"=",
"next",
"(",
"c",
"[",
"'items'",
"]",
"for",
"c",
"in",
"scan_json",
"[",
"'children'",
"]",
"if",
"c",
"[",
"'field'",
"]",
"==",
"'file'",
")",
"except",
"StopIteration",
":",
"resources",
"=",
"{",
"}",
"else",
":",
"resources",
"=",
"{",
"js",
"[",
"'data_fields'",
"]",
"[",
"'label'",
"]",
":",
"js",
"[",
"'data_fields'",
"]",
".",
"get",
"(",
"'format'",
",",
"None",
")",
"for",
"js",
"in",
"resources_json",
"}",
"# Remove auto-generated snapshots directory",
"resources",
".",
"pop",
"(",
"'SNAPSHOTS'",
",",
"None",
")",
"if",
"scan_type",
"==",
"self",
".",
"PROV_SCAN",
":",
"# Download provenance JSON files and parse into",
"# records",
"temp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"try",
":",
"with",
"tempfile",
".",
"TemporaryFile",
"(",
")",
"as",
"temp_zip",
":",
"self",
".",
"_login",
".",
"download_stream",
"(",
"scan_uri",
"+",
"'/files'",
",",
"temp_zip",
",",
"format",
"=",
"'zip'",
")",
"with",
"ZipFile",
"(",
"temp_zip",
")",
"as",
"zip_file",
":",
"zip_file",
".",
"extractall",
"(",
"temp_dir",
")",
"for",
"base_dir",
",",
"_",
",",
"fnames",
"in",
"os",
".",
"walk",
"(",
"temp_dir",
")",
":",
"for",
"fname",
"in",
"fnames",
":",
"if",
"fname",
".",
"endswith",
"(",
"'.json'",
")",
":",
"pipeline_name",
"=",
"fname",
"[",
":",
"-",
"len",
"(",
"'.json'",
")",
"]",
"json_path",
"=",
"op",
".",
"join",
"(",
"base_dir",
",",
"fname",
")",
"all_records",
".",
"append",
"(",
"Record",
".",
"load",
"(",
"pipeline_name",
",",
"frequency",
",",
"subject_id",
",",
"visit_id",
",",
"from_study",
",",
"json_path",
")",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"temp_dir",
",",
"ignore_errors",
"=",
"True",
")",
"else",
":",
"for",
"resource",
"in",
"resources",
":",
"all_filesets",
".",
"append",
"(",
"Fileset",
"(",
"scan_type",
",",
"id",
"=",
"scan_id",
",",
"uri",
"=",
"scan_uri",
",",
"repository",
"=",
"self",
",",
"frequency",
"=",
"frequency",
",",
"subject_id",
"=",
"subject_id",
",",
"visit_id",
"=",
"visit_id",
",",
"from_study",
"=",
"from_study",
",",
"resource_name",
"=",
"resource",
",",
"*",
"*",
"kwargs",
")",
")",
"logger",
".",
"debug",
"(",
"\"Found node {}:{} on {}:{}\"",
".",
"format",
"(",
"subject_id",
",",
"visit_id",
",",
"self",
".",
"server",
",",
"self",
".",
"project_id",
")",
")",
"return",
"all_filesets",
",",
"all_fields",
",",
"all_records"
] |
Find all filesets, fields and provenance records within an XNAT project
Parameters
----------
subject_ids : list(str)
List of subject IDs with which to filter the tree with. If
None all are returned
visit_ids : list(str)
List of visit IDs with which to filter the tree with. If
None all are returned
Returns
-------
filesets : list[Fileset]
All the filesets found in the repository
fields : list[Field]
All the fields found in the repository
records : list[Record]
The provenance records found in the repository
|
[
"Find",
"all",
"filesets",
"fields",
"and",
"provenance",
"records",
"within",
"an",
"XNAT",
"project"
] |
train
|
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/xnat.py#L408-L580
|
MonashBI/arcana
|
arcana/repository/xnat.py
|
XnatRepo.convert_subject_ids
|
def convert_subject_ids(self, subject_ids):
"""
Convert subject ids to strings if they are integers
"""
# TODO: need to make this generalisable via a
# splitting+mapping function passed to the repository
if subject_ids is not None:
subject_ids = set(
('{:03d}'.format(s)
if isinstance(s, int) else s) for s in subject_ids)
return subject_ids
|
python
|
def convert_subject_ids(self, subject_ids):
"""
Convert subject ids to strings if they are integers
"""
# TODO: need to make this generalisable via a
# splitting+mapping function passed to the repository
if subject_ids is not None:
subject_ids = set(
('{:03d}'.format(s)
if isinstance(s, int) else s) for s in subject_ids)
return subject_ids
|
[
"def",
"convert_subject_ids",
"(",
"self",
",",
"subject_ids",
")",
":",
"# TODO: need to make this generalisable via a",
"# splitting+mapping function passed to the repository",
"if",
"subject_ids",
"is",
"not",
"None",
":",
"subject_ids",
"=",
"set",
"(",
"(",
"'{:03d}'",
".",
"format",
"(",
"s",
")",
"if",
"isinstance",
"(",
"s",
",",
"int",
")",
"else",
"s",
")",
"for",
"s",
"in",
"subject_ids",
")",
"return",
"subject_ids"
] |
Convert subject ids to strings if they are integers
|
[
"Convert",
"subject",
"ids",
"to",
"strings",
"if",
"they",
"are",
"integers"
] |
train
|
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/xnat.py#L582-L592
|
MonashBI/arcana
|
arcana/repository/xnat.py
|
XnatRepo.get_xsession
|
def get_xsession(self, item):
"""
Returns the XNAT session and cache dir corresponding to the
item.
"""
subj_label, sess_label = self._get_item_labels(item)
with self:
xproject = self._login.projects[self.project_id]
try:
xsubject = xproject.subjects[subj_label]
except KeyError:
xsubject = self._login.classes.SubjectData(
label=subj_label, parent=xproject)
try:
xsession = xsubject.experiments[sess_label]
except KeyError:
xsession = self._login.classes.MrSessionData(
label=sess_label, parent=xsubject)
if item.derived:
xsession.fields[
self.DERIVED_FROM_FIELD] = self._get_item_labels(
item, no_from_study=True)[1]
return xsession
|
python
|
def get_xsession(self, item):
"""
Returns the XNAT session and cache dir corresponding to the
item.
"""
subj_label, sess_label = self._get_item_labels(item)
with self:
xproject = self._login.projects[self.project_id]
try:
xsubject = xproject.subjects[subj_label]
except KeyError:
xsubject = self._login.classes.SubjectData(
label=subj_label, parent=xproject)
try:
xsession = xsubject.experiments[sess_label]
except KeyError:
xsession = self._login.classes.MrSessionData(
label=sess_label, parent=xsubject)
if item.derived:
xsession.fields[
self.DERIVED_FROM_FIELD] = self._get_item_labels(
item, no_from_study=True)[1]
return xsession
|
[
"def",
"get_xsession",
"(",
"self",
",",
"item",
")",
":",
"subj_label",
",",
"sess_label",
"=",
"self",
".",
"_get_item_labels",
"(",
"item",
")",
"with",
"self",
":",
"xproject",
"=",
"self",
".",
"_login",
".",
"projects",
"[",
"self",
".",
"project_id",
"]",
"try",
":",
"xsubject",
"=",
"xproject",
".",
"subjects",
"[",
"subj_label",
"]",
"except",
"KeyError",
":",
"xsubject",
"=",
"self",
".",
"_login",
".",
"classes",
".",
"SubjectData",
"(",
"label",
"=",
"subj_label",
",",
"parent",
"=",
"xproject",
")",
"try",
":",
"xsession",
"=",
"xsubject",
".",
"experiments",
"[",
"sess_label",
"]",
"except",
"KeyError",
":",
"xsession",
"=",
"self",
".",
"_login",
".",
"classes",
".",
"MrSessionData",
"(",
"label",
"=",
"sess_label",
",",
"parent",
"=",
"xsubject",
")",
"if",
"item",
".",
"derived",
":",
"xsession",
".",
"fields",
"[",
"self",
".",
"DERIVED_FROM_FIELD",
"]",
"=",
"self",
".",
"_get_item_labels",
"(",
"item",
",",
"no_from_study",
"=",
"True",
")",
"[",
"1",
"]",
"return",
"xsession"
] |
Returns the XNAT session and cache dir corresponding to the
item.
|
[
"Returns",
"the",
"XNAT",
"session",
"and",
"cache",
"dir",
"corresponding",
"to",
"the",
"item",
"."
] |
train
|
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/xnat.py#L690-L712
|
MonashBI/arcana
|
arcana/repository/xnat.py
|
XnatRepo._get_item_labels
|
def _get_item_labels(self, item, no_from_study=False):
"""
Returns the labels for the XNAT subject and sessions given
the frequency and provided IDs.
"""
subject_id = self.inv_map_subject_id(item.subject_id)
visit_id = self.inv_map_visit_id(item.visit_id)
subj_label, sess_label = self._get_labels(
item.frequency, subject_id, visit_id)
if not no_from_study and item.from_study is not None:
sess_label += '_' + item.from_study
return (subj_label, sess_label)
|
python
|
def _get_item_labels(self, item, no_from_study=False):
"""
Returns the labels for the XNAT subject and sessions given
the frequency and provided IDs.
"""
subject_id = self.inv_map_subject_id(item.subject_id)
visit_id = self.inv_map_visit_id(item.visit_id)
subj_label, sess_label = self._get_labels(
item.frequency, subject_id, visit_id)
if not no_from_study and item.from_study is not None:
sess_label += '_' + item.from_study
return (subj_label, sess_label)
|
[
"def",
"_get_item_labels",
"(",
"self",
",",
"item",
",",
"no_from_study",
"=",
"False",
")",
":",
"subject_id",
"=",
"self",
".",
"inv_map_subject_id",
"(",
"item",
".",
"subject_id",
")",
"visit_id",
"=",
"self",
".",
"inv_map_visit_id",
"(",
"item",
".",
"visit_id",
")",
"subj_label",
",",
"sess_label",
"=",
"self",
".",
"_get_labels",
"(",
"item",
".",
"frequency",
",",
"subject_id",
",",
"visit_id",
")",
"if",
"not",
"no_from_study",
"and",
"item",
".",
"from_study",
"is",
"not",
"None",
":",
"sess_label",
"+=",
"'_'",
"+",
"item",
".",
"from_study",
"return",
"(",
"subj_label",
",",
"sess_label",
")"
] |
Returns the labels for the XNAT subject and sessions given
the frequency and provided IDs.
|
[
"Returns",
"the",
"labels",
"for",
"the",
"XNAT",
"subject",
"and",
"sessions",
"given",
"the",
"frequency",
"and",
"provided",
"IDs",
"."
] |
train
|
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/xnat.py#L714-L725
|
MonashBI/arcana
|
arcana/repository/xnat.py
|
XnatRepo._get_labels
|
def _get_labels(self, frequency, subject_id=None, visit_id=None):
"""
Returns the labels for the XNAT subject and sessions given
the frequency and provided IDs.
"""
if frequency == 'per_session':
subj_label = '{}_{}'.format(self.project_id,
subject_id)
sess_label = '{}_{}_{}'.format(self.project_id,
subject_id,
visit_id)
elif frequency == 'per_subject':
subj_label = '{}_{}'.format(self.project_id,
subject_id)
sess_label = '{}_{}_{}'.format(self.project_id,
subject_id,
self.SUMMARY_NAME)
elif frequency == 'per_visit':
subj_label = '{}_{}'.format(self.project_id,
self.SUMMARY_NAME)
sess_label = '{}_{}_{}'.format(self.project_id,
self.SUMMARY_NAME,
visit_id)
elif frequency == 'per_study':
subj_label = '{}_{}'.format(self.project_id,
self.SUMMARY_NAME)
sess_label = '{}_{}_{}'.format(self.project_id,
self.SUMMARY_NAME,
self.SUMMARY_NAME)
else:
assert False
return (subj_label, sess_label)
|
python
|
def _get_labels(self, frequency, subject_id=None, visit_id=None):
"""
Returns the labels for the XNAT subject and sessions given
the frequency and provided IDs.
"""
if frequency == 'per_session':
subj_label = '{}_{}'.format(self.project_id,
subject_id)
sess_label = '{}_{}_{}'.format(self.project_id,
subject_id,
visit_id)
elif frequency == 'per_subject':
subj_label = '{}_{}'.format(self.project_id,
subject_id)
sess_label = '{}_{}_{}'.format(self.project_id,
subject_id,
self.SUMMARY_NAME)
elif frequency == 'per_visit':
subj_label = '{}_{}'.format(self.project_id,
self.SUMMARY_NAME)
sess_label = '{}_{}_{}'.format(self.project_id,
self.SUMMARY_NAME,
visit_id)
elif frequency == 'per_study':
subj_label = '{}_{}'.format(self.project_id,
self.SUMMARY_NAME)
sess_label = '{}_{}_{}'.format(self.project_id,
self.SUMMARY_NAME,
self.SUMMARY_NAME)
else:
assert False
return (subj_label, sess_label)
|
[
"def",
"_get_labels",
"(",
"self",
",",
"frequency",
",",
"subject_id",
"=",
"None",
",",
"visit_id",
"=",
"None",
")",
":",
"if",
"frequency",
"==",
"'per_session'",
":",
"subj_label",
"=",
"'{}_{}'",
".",
"format",
"(",
"self",
".",
"project_id",
",",
"subject_id",
")",
"sess_label",
"=",
"'{}_{}_{}'",
".",
"format",
"(",
"self",
".",
"project_id",
",",
"subject_id",
",",
"visit_id",
")",
"elif",
"frequency",
"==",
"'per_subject'",
":",
"subj_label",
"=",
"'{}_{}'",
".",
"format",
"(",
"self",
".",
"project_id",
",",
"subject_id",
")",
"sess_label",
"=",
"'{}_{}_{}'",
".",
"format",
"(",
"self",
".",
"project_id",
",",
"subject_id",
",",
"self",
".",
"SUMMARY_NAME",
")",
"elif",
"frequency",
"==",
"'per_visit'",
":",
"subj_label",
"=",
"'{}_{}'",
".",
"format",
"(",
"self",
".",
"project_id",
",",
"self",
".",
"SUMMARY_NAME",
")",
"sess_label",
"=",
"'{}_{}_{}'",
".",
"format",
"(",
"self",
".",
"project_id",
",",
"self",
".",
"SUMMARY_NAME",
",",
"visit_id",
")",
"elif",
"frequency",
"==",
"'per_study'",
":",
"subj_label",
"=",
"'{}_{}'",
".",
"format",
"(",
"self",
".",
"project_id",
",",
"self",
".",
"SUMMARY_NAME",
")",
"sess_label",
"=",
"'{}_{}_{}'",
".",
"format",
"(",
"self",
".",
"project_id",
",",
"self",
".",
"SUMMARY_NAME",
",",
"self",
".",
"SUMMARY_NAME",
")",
"else",
":",
"assert",
"False",
"return",
"(",
"subj_label",
",",
"sess_label",
")"
] |
Returns the labels for the XNAT subject and sessions given
the frequency and provided IDs.
|
[
"Returns",
"the",
"labels",
"for",
"the",
"XNAT",
"subject",
"and",
"sessions",
"given",
"the",
"frequency",
"and",
"provided",
"IDs",
"."
] |
train
|
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/xnat.py#L727-L758
|
gwww/elkm1
|
elkm1_lib/outputs.py
|
Output.turn_on
|
def turn_on(self, time):
"""(Helper) Turn on an output"""
self._elk.send(cn_encode(self._index, time))
|
python
|
def turn_on(self, time):
"""(Helper) Turn on an output"""
self._elk.send(cn_encode(self._index, time))
|
[
"def",
"turn_on",
"(",
"self",
",",
"time",
")",
":",
"self",
".",
"_elk",
".",
"send",
"(",
"cn_encode",
"(",
"self",
".",
"_index",
",",
"time",
")",
")"
] |
(Helper) Turn on an output
|
[
"(",
"Helper",
")",
"Turn",
"on",
"an",
"output"
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/outputs.py#L17-L19
|
gwww/elkm1
|
elkm1_lib/outputs.py
|
Outputs.sync
|
def sync(self):
"""Retrieve areas from ElkM1"""
self.elk.send(cs_encode())
self.get_descriptions(TextDescriptions.OUTPUT.value)
|
python
|
def sync(self):
"""Retrieve areas from ElkM1"""
self.elk.send(cs_encode())
self.get_descriptions(TextDescriptions.OUTPUT.value)
|
[
"def",
"sync",
"(",
"self",
")",
":",
"self",
".",
"elk",
".",
"send",
"(",
"cs_encode",
"(",
")",
")",
"self",
".",
"get_descriptions",
"(",
"TextDescriptions",
".",
"OUTPUT",
".",
"value",
")"
] |
Retrieve areas from ElkM1
|
[
"Retrieve",
"areas",
"from",
"ElkM1"
] |
train
|
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/outputs.py#L33-L36
|
MonashBI/arcana
|
arcana/environment/requirement/matlab.py
|
MatlabPackageRequirement.detect_version_str
|
def detect_version_str(self):
"""
Try to detect version of package from command help text. Bit of a long
shot as they are typically included
"""
help_text = run_matlab_cmd("help('{}')".format(self.test_func))
if not help_text:
raise ArcanaRequirementNotFoundError(
"Did not find test function '{}' for {}"
.format(self.test_func, self))
return self.parse_help_text(help_text)
|
python
|
def detect_version_str(self):
"""
Try to detect version of package from command help text. Bit of a long
shot as they are typically included
"""
help_text = run_matlab_cmd("help('{}')".format(self.test_func))
if not help_text:
raise ArcanaRequirementNotFoundError(
"Did not find test function '{}' for {}"
.format(self.test_func, self))
return self.parse_help_text(help_text)
|
[
"def",
"detect_version_str",
"(",
"self",
")",
":",
"help_text",
"=",
"run_matlab_cmd",
"(",
"\"help('{}')\"",
".",
"format",
"(",
"self",
".",
"test_func",
")",
")",
"if",
"not",
"help_text",
":",
"raise",
"ArcanaRequirementNotFoundError",
"(",
"\"Did not find test function '{}' for {}\"",
".",
"format",
"(",
"self",
".",
"test_func",
",",
"self",
")",
")",
"return",
"self",
".",
"parse_help_text",
"(",
"help_text",
")"
] |
Try to detect version of package from command help text. Bit of a long
shot as they are typically included
|
[
"Try",
"to",
"detect",
"version",
"of",
"package",
"from",
"command",
"help",
"text",
".",
"Bit",
"of",
"a",
"long",
"shot",
"as",
"they",
"are",
"typically",
"included"
] |
train
|
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/environment/requirement/matlab.py#L75-L85
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.target_heating_level
|
def target_heating_level(self):
"""Return target heating level."""
try:
if self.side == 'left':
level = self.device.device_data['leftTargetHeatingLevel']
elif self.side == 'right':
level = self.device.device_data['rightTargetHeatingLevel']
return level
except TypeError:
return None
|
python
|
def target_heating_level(self):
"""Return target heating level."""
try:
if self.side == 'left':
level = self.device.device_data['leftTargetHeatingLevel']
elif self.side == 'right':
level = self.device.device_data['rightTargetHeatingLevel']
return level
except TypeError:
return None
|
[
"def",
"target_heating_level",
"(",
"self",
")",
":",
"try",
":",
"if",
"self",
".",
"side",
"==",
"'left'",
":",
"level",
"=",
"self",
".",
"device",
".",
"device_data",
"[",
"'leftTargetHeatingLevel'",
"]",
"elif",
"self",
".",
"side",
"==",
"'right'",
":",
"level",
"=",
"self",
".",
"device",
".",
"device_data",
"[",
"'rightTargetHeatingLevel'",
"]",
"return",
"level",
"except",
"TypeError",
":",
"return",
"None"
] |
Return target heating level.
|
[
"Return",
"target",
"heating",
"level",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L41-L50
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.heating_level
|
def heating_level(self):
"""Return heating level."""
try:
if self.side == 'left':
level = self.device.device_data['leftHeatingLevel']
elif self.side == 'right':
level = self.device.device_data['rightHeatingLevel']
return level
except TypeError:
return None
|
python
|
def heating_level(self):
"""Return heating level."""
try:
if self.side == 'left':
level = self.device.device_data['leftHeatingLevel']
elif self.side == 'right':
level = self.device.device_data['rightHeatingLevel']
return level
except TypeError:
return None
|
[
"def",
"heating_level",
"(",
"self",
")",
":",
"try",
":",
"if",
"self",
".",
"side",
"==",
"'left'",
":",
"level",
"=",
"self",
".",
"device",
".",
"device_data",
"[",
"'leftHeatingLevel'",
"]",
"elif",
"self",
".",
"side",
"==",
"'right'",
":",
"level",
"=",
"self",
".",
"device",
".",
"device_data",
"[",
"'rightHeatingLevel'",
"]",
"return",
"level",
"except",
"TypeError",
":",
"return",
"None"
] |
Return heating level.
|
[
"Return",
"heating",
"level",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L53-L62
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.past_heating_level
|
def past_heating_level(self, num):
"""Return a heating level from the past."""
if num > 9:
return 0
try:
if self.side == 'left':
level = self.device.device_data_history[
num]['leftHeatingLevel']
elif self.side == 'right':
level = self.device.device_data_history[
num]['rightHeatingLevel']
return level
except TypeError:
return 0
|
python
|
def past_heating_level(self, num):
"""Return a heating level from the past."""
if num > 9:
return 0
try:
if self.side == 'left':
level = self.device.device_data_history[
num]['leftHeatingLevel']
elif self.side == 'right':
level = self.device.device_data_history[
num]['rightHeatingLevel']
return level
except TypeError:
return 0
|
[
"def",
"past_heating_level",
"(",
"self",
",",
"num",
")",
":",
"if",
"num",
">",
"9",
":",
"return",
"0",
"try",
":",
"if",
"self",
".",
"side",
"==",
"'left'",
":",
"level",
"=",
"self",
".",
"device",
".",
"device_data_history",
"[",
"num",
"]",
"[",
"'leftHeatingLevel'",
"]",
"elif",
"self",
".",
"side",
"==",
"'right'",
":",
"level",
"=",
"self",
".",
"device",
".",
"device_data_history",
"[",
"num",
"]",
"[",
"'rightHeatingLevel'",
"]",
"return",
"level",
"except",
"TypeError",
":",
"return",
"0"
] |
Return a heating level from the past.
|
[
"Return",
"a",
"heating",
"level",
"from",
"the",
"past",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L64-L78
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.now_heating
|
def now_heating(self):
"""Return current heating state."""
try:
if self.side == 'left':
heat = self.device.device_data['leftNowHeating']
elif self.side == 'right':
heat = self.device.device_data['rightNowHeating']
return heat
except TypeError:
return None
|
python
|
def now_heating(self):
"""Return current heating state."""
try:
if self.side == 'left':
heat = self.device.device_data['leftNowHeating']
elif self.side == 'right':
heat = self.device.device_data['rightNowHeating']
return heat
except TypeError:
return None
|
[
"def",
"now_heating",
"(",
"self",
")",
":",
"try",
":",
"if",
"self",
".",
"side",
"==",
"'left'",
":",
"heat",
"=",
"self",
".",
"device",
".",
"device_data",
"[",
"'leftNowHeating'",
"]",
"elif",
"self",
".",
"side",
"==",
"'right'",
":",
"heat",
"=",
"self",
".",
"device",
".",
"device_data",
"[",
"'rightNowHeating'",
"]",
"return",
"heat",
"except",
"TypeError",
":",
"return",
"None"
] |
Return current heating state.
|
[
"Return",
"current",
"heating",
"state",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L81-L90
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.heating_remaining
|
def heating_remaining(self):
"""Return seconds of heat time remaining."""
try:
if self.side == 'left':
timerem = self.device.device_data['leftHeatingDuration']
elif self.side == 'right':
timerem = self.device.device_data['rightHeatingDuration']
return timerem
except TypeError:
return None
|
python
|
def heating_remaining(self):
"""Return seconds of heat time remaining."""
try:
if self.side == 'left':
timerem = self.device.device_data['leftHeatingDuration']
elif self.side == 'right':
timerem = self.device.device_data['rightHeatingDuration']
return timerem
except TypeError:
return None
|
[
"def",
"heating_remaining",
"(",
"self",
")",
":",
"try",
":",
"if",
"self",
".",
"side",
"==",
"'left'",
":",
"timerem",
"=",
"self",
".",
"device",
".",
"device_data",
"[",
"'leftHeatingDuration'",
"]",
"elif",
"self",
".",
"side",
"==",
"'right'",
":",
"timerem",
"=",
"self",
".",
"device",
".",
"device_data",
"[",
"'rightHeatingDuration'",
"]",
"return",
"timerem",
"except",
"TypeError",
":",
"return",
"None"
] |
Return seconds of heat time remaining.
|
[
"Return",
"seconds",
"of",
"heat",
"time",
"remaining",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L93-L102
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.last_seen
|
def last_seen(self):
"""Return mattress last seen time."""
"""
These values seem to be rarely updated correctly in the API.
Don't expect accurate results from this property.
"""
try:
if self.side == 'left':
lastseen = self.device.device_data['leftPresenceEnd']
elif self.side == 'right':
lastseen = self.device.device_data['rightPresenceEnd']
date = datetime.fromtimestamp(int(lastseen)) \
.strftime('%Y-%m-%dT%H:%M:%S')
return date
except TypeError:
return None
|
python
|
def last_seen(self):
"""Return mattress last seen time."""
"""
These values seem to be rarely updated correctly in the API.
Don't expect accurate results from this property.
"""
try:
if self.side == 'left':
lastseen = self.device.device_data['leftPresenceEnd']
elif self.side == 'right':
lastseen = self.device.device_data['rightPresenceEnd']
date = datetime.fromtimestamp(int(lastseen)) \
.strftime('%Y-%m-%dT%H:%M:%S')
return date
except TypeError:
return None
|
[
"def",
"last_seen",
"(",
"self",
")",
":",
"\"\"\"\n These values seem to be rarely updated correctly in the API.\n Don't expect accurate results from this property.\n \"\"\"",
"try",
":",
"if",
"self",
".",
"side",
"==",
"'left'",
":",
"lastseen",
"=",
"self",
".",
"device",
".",
"device_data",
"[",
"'leftPresenceEnd'",
"]",
"elif",
"self",
".",
"side",
"==",
"'right'",
":",
"lastseen",
"=",
"self",
".",
"device",
".",
"device_data",
"[",
"'rightPresenceEnd'",
"]",
"date",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"int",
"(",
"lastseen",
")",
")",
".",
"strftime",
"(",
"'%Y-%m-%dT%H:%M:%S'",
")",
"return",
"date",
"except",
"TypeError",
":",
"return",
"None"
] |
Return mattress last seen time.
|
[
"Return",
"mattress",
"last",
"seen",
"time",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L105-L121
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.heating_values
|
def heating_values(self):
"""Return a dict of all the current heating values."""
heating_dict = {
'level': self.heating_level,
'target': self.target_heating_level,
'active': self.now_heating,
'remaining': self.heating_remaining,
'last_seen': self.last_seen,
}
return heating_dict
|
python
|
def heating_values(self):
"""Return a dict of all the current heating values."""
heating_dict = {
'level': self.heating_level,
'target': self.target_heating_level,
'active': self.now_heating,
'remaining': self.heating_remaining,
'last_seen': self.last_seen,
}
return heating_dict
|
[
"def",
"heating_values",
"(",
"self",
")",
":",
"heating_dict",
"=",
"{",
"'level'",
":",
"self",
".",
"heating_level",
",",
"'target'",
":",
"self",
".",
"target_heating_level",
",",
"'active'",
":",
"self",
".",
"now_heating",
",",
"'remaining'",
":",
"self",
".",
"heating_remaining",
",",
"'last_seen'",
":",
"self",
".",
"last_seen",
",",
"}",
"return",
"heating_dict"
] |
Return a dict of all the current heating values.
|
[
"Return",
"a",
"dict",
"of",
"all",
"the",
"current",
"heating",
"values",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L124-L133
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.current_sleep_stage
|
def current_sleep_stage(self):
"""Return sleep stage for in-progress session."""
try:
stages = self.intervals[0]['stages']
num_stages = len(stages)
if num_stages == 0:
return None
# API now always has an awake state last in the dict
# so always pull the second to last stage while we are
# in a processing state
if self.current_session_processing:
stage = stages[num_stages-2]['stage']
else:
stage = stages[num_stages-1]['stage']
# UNRELIABLE... Removing for now.
# Check sleep stage against last_seen time to make
# sure we don't get stuck in a non-awake state.
#delta_elap = datetime.fromtimestamp(time.time()) \
# - datetime.strptime(self.last_seen, '%Y-%m-%dT%H:%M:%S')
#_LOGGER.debug('User elap: %s', delta_elap.total_seconds())
#if stage != 'awake' and delta_elap.total_seconds() > 1800:
# Bed hasn't seen us for 30min so set awake.
# stage = 'awake'
except KeyError:
stage = None
return stage
|
python
|
def current_sleep_stage(self):
"""Return sleep stage for in-progress session."""
try:
stages = self.intervals[0]['stages']
num_stages = len(stages)
if num_stages == 0:
return None
# API now always has an awake state last in the dict
# so always pull the second to last stage while we are
# in a processing state
if self.current_session_processing:
stage = stages[num_stages-2]['stage']
else:
stage = stages[num_stages-1]['stage']
# UNRELIABLE... Removing for now.
# Check sleep stage against last_seen time to make
# sure we don't get stuck in a non-awake state.
#delta_elap = datetime.fromtimestamp(time.time()) \
# - datetime.strptime(self.last_seen, '%Y-%m-%dT%H:%M:%S')
#_LOGGER.debug('User elap: %s', delta_elap.total_seconds())
#if stage != 'awake' and delta_elap.total_seconds() > 1800:
# Bed hasn't seen us for 30min so set awake.
# stage = 'awake'
except KeyError:
stage = None
return stage
|
[
"def",
"current_sleep_stage",
"(",
"self",
")",
":",
"try",
":",
"stages",
"=",
"self",
".",
"intervals",
"[",
"0",
"]",
"[",
"'stages'",
"]",
"num_stages",
"=",
"len",
"(",
"stages",
")",
"if",
"num_stages",
"==",
"0",
":",
"return",
"None",
"# API now always has an awake state last in the dict",
"# so always pull the second to last stage while we are",
"# in a processing state",
"if",
"self",
".",
"current_session_processing",
":",
"stage",
"=",
"stages",
"[",
"num_stages",
"-",
"2",
"]",
"[",
"'stage'",
"]",
"else",
":",
"stage",
"=",
"stages",
"[",
"num_stages",
"-",
"1",
"]",
"[",
"'stage'",
"]",
"# UNRELIABLE... Removing for now.",
"# Check sleep stage against last_seen time to make",
"# sure we don't get stuck in a non-awake state.",
"#delta_elap = datetime.fromtimestamp(time.time()) \\",
"# - datetime.strptime(self.last_seen, '%Y-%m-%dT%H:%M:%S')",
"#_LOGGER.debug('User elap: %s', delta_elap.total_seconds())",
"#if stage != 'awake' and delta_elap.total_seconds() > 1800:",
"# Bed hasn't seen us for 30min so set awake.",
"# stage = 'awake'",
"except",
"KeyError",
":",
"stage",
"=",
"None",
"return",
"stage"
] |
Return sleep stage for in-progress session.
|
[
"Return",
"sleep",
"stage",
"for",
"in",
"-",
"progress",
"session",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L160-L188
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.current_sleep_breakdown
|
def current_sleep_breakdown(self):
"""Return durations of sleep stages for in-progress session."""
try:
stages = self.intervals[0]['stages']
breakdown = {'awake': 0, 'light': 0, 'deep': 0, 'rem': 0}
for stage in stages:
if stage['stage'] == 'awake':
breakdown['awake'] += stage['duration']
elif stage['stage'] == 'light':
breakdown['light'] += stage['duration']
elif stage['stage'] == 'deep':
breakdown['deep'] += stage['duration']
elif stage['stage'] == 'rem':
breakdown['rem'] += stage['duration']
except KeyError:
breakdown = None
return breakdown
|
python
|
def current_sleep_breakdown(self):
"""Return durations of sleep stages for in-progress session."""
try:
stages = self.intervals[0]['stages']
breakdown = {'awake': 0, 'light': 0, 'deep': 0, 'rem': 0}
for stage in stages:
if stage['stage'] == 'awake':
breakdown['awake'] += stage['duration']
elif stage['stage'] == 'light':
breakdown['light'] += stage['duration']
elif stage['stage'] == 'deep':
breakdown['deep'] += stage['duration']
elif stage['stage'] == 'rem':
breakdown['rem'] += stage['duration']
except KeyError:
breakdown = None
return breakdown
|
[
"def",
"current_sleep_breakdown",
"(",
"self",
")",
":",
"try",
":",
"stages",
"=",
"self",
".",
"intervals",
"[",
"0",
"]",
"[",
"'stages'",
"]",
"breakdown",
"=",
"{",
"'awake'",
":",
"0",
",",
"'light'",
":",
"0",
",",
"'deep'",
":",
"0",
",",
"'rem'",
":",
"0",
"}",
"for",
"stage",
"in",
"stages",
":",
"if",
"stage",
"[",
"'stage'",
"]",
"==",
"'awake'",
":",
"breakdown",
"[",
"'awake'",
"]",
"+=",
"stage",
"[",
"'duration'",
"]",
"elif",
"stage",
"[",
"'stage'",
"]",
"==",
"'light'",
":",
"breakdown",
"[",
"'light'",
"]",
"+=",
"stage",
"[",
"'duration'",
"]",
"elif",
"stage",
"[",
"'stage'",
"]",
"==",
"'deep'",
":",
"breakdown",
"[",
"'deep'",
"]",
"+=",
"stage",
"[",
"'duration'",
"]",
"elif",
"stage",
"[",
"'stage'",
"]",
"==",
"'rem'",
":",
"breakdown",
"[",
"'rem'",
"]",
"+=",
"stage",
"[",
"'duration'",
"]",
"except",
"KeyError",
":",
"breakdown",
"=",
"None",
"return",
"breakdown"
] |
Return durations of sleep stages for in-progress session.
|
[
"Return",
"durations",
"of",
"sleep",
"stages",
"for",
"in",
"-",
"progress",
"session",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L200-L216
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.current_bed_temp
|
def current_bed_temp(self):
"""Return current bed temperature for in-progress session."""
try:
bedtemps = self.intervals[0]['timeseries']['tempBedC']
num_temps = len(bedtemps)
if num_temps == 0:
return None
bedtemp = bedtemps[num_temps-1][1]
except KeyError:
bedtemp = None
return bedtemp
|
python
|
def current_bed_temp(self):
"""Return current bed temperature for in-progress session."""
try:
bedtemps = self.intervals[0]['timeseries']['tempBedC']
num_temps = len(bedtemps)
if num_temps == 0:
return None
bedtemp = bedtemps[num_temps-1][1]
except KeyError:
bedtemp = None
return bedtemp
|
[
"def",
"current_bed_temp",
"(",
"self",
")",
":",
"try",
":",
"bedtemps",
"=",
"self",
".",
"intervals",
"[",
"0",
"]",
"[",
"'timeseries'",
"]",
"[",
"'tempBedC'",
"]",
"num_temps",
"=",
"len",
"(",
"bedtemps",
")",
"if",
"num_temps",
"==",
"0",
":",
"return",
"None",
"bedtemp",
"=",
"bedtemps",
"[",
"num_temps",
"-",
"1",
"]",
"[",
"1",
"]",
"except",
"KeyError",
":",
"bedtemp",
"=",
"None",
"return",
"bedtemp"
] |
Return current bed temperature for in-progress session.
|
[
"Return",
"current",
"bed",
"temperature",
"for",
"in",
"-",
"progress",
"session",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L219-L231
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.current_room_temp
|
def current_room_temp(self):
"""Return current room temperature for in-progress session."""
try:
rmtemps = self.intervals[0]['timeseries']['tempRoomC']
num_temps = len(rmtemps)
if num_temps == 0:
return None
rmtemp = rmtemps[num_temps-1][1]
except KeyError:
rmtemp = None
return rmtemp
|
python
|
def current_room_temp(self):
"""Return current room temperature for in-progress session."""
try:
rmtemps = self.intervals[0]['timeseries']['tempRoomC']
num_temps = len(rmtemps)
if num_temps == 0:
return None
rmtemp = rmtemps[num_temps-1][1]
except KeyError:
rmtemp = None
return rmtemp
|
[
"def",
"current_room_temp",
"(",
"self",
")",
":",
"try",
":",
"rmtemps",
"=",
"self",
".",
"intervals",
"[",
"0",
"]",
"[",
"'timeseries'",
"]",
"[",
"'tempRoomC'",
"]",
"num_temps",
"=",
"len",
"(",
"rmtemps",
")",
"if",
"num_temps",
"==",
"0",
":",
"return",
"None",
"rmtemp",
"=",
"rmtemps",
"[",
"num_temps",
"-",
"1",
"]",
"[",
"1",
"]",
"except",
"KeyError",
":",
"rmtemp",
"=",
"None",
"return",
"rmtemp"
] |
Return current room temperature for in-progress session.
|
[
"Return",
"current",
"room",
"temperature",
"for",
"in",
"-",
"progress",
"session",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L234-L246
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.current_resp_rate
|
def current_resp_rate(self):
"""Return current respiratory rate for in-progress session."""
try:
rates = self.intervals[0]['timeseries']['respiratoryRate']
num_rates = len(rates)
if num_rates == 0:
return None
rate = rates[num_rates-1][1]
except KeyError:
rate = None
return rate
|
python
|
def current_resp_rate(self):
"""Return current respiratory rate for in-progress session."""
try:
rates = self.intervals[0]['timeseries']['respiratoryRate']
num_rates = len(rates)
if num_rates == 0:
return None
rate = rates[num_rates-1][1]
except KeyError:
rate = None
return rate
|
[
"def",
"current_resp_rate",
"(",
"self",
")",
":",
"try",
":",
"rates",
"=",
"self",
".",
"intervals",
"[",
"0",
"]",
"[",
"'timeseries'",
"]",
"[",
"'respiratoryRate'",
"]",
"num_rates",
"=",
"len",
"(",
"rates",
")",
"if",
"num_rates",
"==",
"0",
":",
"return",
"None",
"rate",
"=",
"rates",
"[",
"num_rates",
"-",
"1",
"]",
"[",
"1",
"]",
"except",
"KeyError",
":",
"rate",
"=",
"None",
"return",
"rate"
] |
Return current respiratory rate for in-progress session.
|
[
"Return",
"current",
"respiratory",
"rate",
"for",
"in",
"-",
"progress",
"session",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L258-L270
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.current_heart_rate
|
def current_heart_rate(self):
"""Return current heart rate for in-progress session."""
try:
rates = self.intervals[0]['timeseries']['heartRate']
num_rates = len(rates)
if num_rates == 0:
return None
rate = rates[num_rates-1][1]
except KeyError:
rate = None
return rate
|
python
|
def current_heart_rate(self):
"""Return current heart rate for in-progress session."""
try:
rates = self.intervals[0]['timeseries']['heartRate']
num_rates = len(rates)
if num_rates == 0:
return None
rate = rates[num_rates-1][1]
except KeyError:
rate = None
return rate
|
[
"def",
"current_heart_rate",
"(",
"self",
")",
":",
"try",
":",
"rates",
"=",
"self",
".",
"intervals",
"[",
"0",
"]",
"[",
"'timeseries'",
"]",
"[",
"'heartRate'",
"]",
"num_rates",
"=",
"len",
"(",
"rates",
")",
"if",
"num_rates",
"==",
"0",
":",
"return",
"None",
"rate",
"=",
"rates",
"[",
"num_rates",
"-",
"1",
"]",
"[",
"1",
"]",
"except",
"KeyError",
":",
"rate",
"=",
"None",
"return",
"rate"
] |
Return current heart rate for in-progress session.
|
[
"Return",
"current",
"heart",
"rate",
"for",
"in",
"-",
"progress",
"session",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L273-L285
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.current_values
|
def current_values(self):
"""Return a dict of all the 'current' parameters."""
current_dict = {
'date': self.current_session_date,
'score': self.current_sleep_score,
'stage': self.current_sleep_stage,
'breakdown': self.current_sleep_breakdown,
'tnt': self.current_tnt,
'bed_temp': self.current_bed_temp,
'room_temp': self.current_room_temp,
'resp_rate': self.current_resp_rate,
'heart_rate': self.current_heart_rate,
'processing': self.current_session_processing,
}
return current_dict
|
python
|
def current_values(self):
"""Return a dict of all the 'current' parameters."""
current_dict = {
'date': self.current_session_date,
'score': self.current_sleep_score,
'stage': self.current_sleep_stage,
'breakdown': self.current_sleep_breakdown,
'tnt': self.current_tnt,
'bed_temp': self.current_bed_temp,
'room_temp': self.current_room_temp,
'resp_rate': self.current_resp_rate,
'heart_rate': self.current_heart_rate,
'processing': self.current_session_processing,
}
return current_dict
|
[
"def",
"current_values",
"(",
"self",
")",
":",
"current_dict",
"=",
"{",
"'date'",
":",
"self",
".",
"current_session_date",
",",
"'score'",
":",
"self",
".",
"current_sleep_score",
",",
"'stage'",
":",
"self",
".",
"current_sleep_stage",
",",
"'breakdown'",
":",
"self",
".",
"current_sleep_breakdown",
",",
"'tnt'",
":",
"self",
".",
"current_tnt",
",",
"'bed_temp'",
":",
"self",
".",
"current_bed_temp",
",",
"'room_temp'",
":",
"self",
".",
"current_room_temp",
",",
"'resp_rate'",
":",
"self",
".",
"current_resp_rate",
",",
"'heart_rate'",
":",
"self",
".",
"current_heart_rate",
",",
"'processing'",
":",
"self",
".",
"current_session_processing",
",",
"}",
"return",
"current_dict"
] |
Return a dict of all the 'current' parameters.
|
[
"Return",
"a",
"dict",
"of",
"all",
"the",
"current",
"parameters",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L288-L302
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.last_session_date
|
def last_session_date(self):
"""Return date/time for start of last session data."""
try:
date = self.intervals[1]['ts']
except KeyError:
return None
date_f = datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ')
now = time.time()
offset = datetime.fromtimestamp(now) - datetime.utcfromtimestamp(now)
return date_f + offset
|
python
|
def last_session_date(self):
"""Return date/time for start of last session data."""
try:
date = self.intervals[1]['ts']
except KeyError:
return None
date_f = datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ')
now = time.time()
offset = datetime.fromtimestamp(now) - datetime.utcfromtimestamp(now)
return date_f + offset
|
[
"def",
"last_session_date",
"(",
"self",
")",
":",
"try",
":",
"date",
"=",
"self",
".",
"intervals",
"[",
"1",
"]",
"[",
"'ts'",
"]",
"except",
"KeyError",
":",
"return",
"None",
"date_f",
"=",
"datetime",
".",
"strptime",
"(",
"date",
",",
"'%Y-%m-%dT%H:%M:%S.%fZ'",
")",
"now",
"=",
"time",
".",
"time",
"(",
")",
"offset",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"now",
")",
"-",
"datetime",
".",
"utcfromtimestamp",
"(",
"now",
")",
"return",
"date_f",
"+",
"offset"
] |
Return date/time for start of last session data.
|
[
"Return",
"date",
"/",
"time",
"for",
"start",
"of",
"last",
"session",
"data",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L305-L314
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.last_sleep_breakdown
|
def last_sleep_breakdown(self):
"""Return durations of sleep stages for last complete session."""
try:
stages = self.intervals[1]['stages']
except KeyError:
return None
breakdown = {'awake': 0, 'light': 0, 'deep': 0, 'rem': 0}
for stage in stages:
if stage['stage'] == 'awake':
breakdown['awake'] += stage['duration']
elif stage['stage'] == 'light':
breakdown['light'] += stage['duration']
elif stage['stage'] == 'deep':
breakdown['deep'] += stage['duration']
elif stage['stage'] == 'rem':
breakdown['rem'] += stage['duration']
return breakdown
|
python
|
def last_sleep_breakdown(self):
"""Return durations of sleep stages for last complete session."""
try:
stages = self.intervals[1]['stages']
except KeyError:
return None
breakdown = {'awake': 0, 'light': 0, 'deep': 0, 'rem': 0}
for stage in stages:
if stage['stage'] == 'awake':
breakdown['awake'] += stage['duration']
elif stage['stage'] == 'light':
breakdown['light'] += stage['duration']
elif stage['stage'] == 'deep':
breakdown['deep'] += stage['duration']
elif stage['stage'] == 'rem':
breakdown['rem'] += stage['duration']
return breakdown
|
[
"def",
"last_sleep_breakdown",
"(",
"self",
")",
":",
"try",
":",
"stages",
"=",
"self",
".",
"intervals",
"[",
"1",
"]",
"[",
"'stages'",
"]",
"except",
"KeyError",
":",
"return",
"None",
"breakdown",
"=",
"{",
"'awake'",
":",
"0",
",",
"'light'",
":",
"0",
",",
"'deep'",
":",
"0",
",",
"'rem'",
":",
"0",
"}",
"for",
"stage",
"in",
"stages",
":",
"if",
"stage",
"[",
"'stage'",
"]",
"==",
"'awake'",
":",
"breakdown",
"[",
"'awake'",
"]",
"+=",
"stage",
"[",
"'duration'",
"]",
"elif",
"stage",
"[",
"'stage'",
"]",
"==",
"'light'",
":",
"breakdown",
"[",
"'light'",
"]",
"+=",
"stage",
"[",
"'duration'",
"]",
"elif",
"stage",
"[",
"'stage'",
"]",
"==",
"'deep'",
":",
"breakdown",
"[",
"'deep'",
"]",
"+=",
"stage",
"[",
"'duration'",
"]",
"elif",
"stage",
"[",
"'stage'",
"]",
"==",
"'rem'",
":",
"breakdown",
"[",
"'rem'",
"]",
"+=",
"stage",
"[",
"'duration'",
"]",
"return",
"breakdown"
] |
Return durations of sleep stages for last complete session.
|
[
"Return",
"durations",
"of",
"sleep",
"stages",
"for",
"last",
"complete",
"session",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L336-L353
|
mezz64/pyEight
|
pyeight/user.py
|
EightUser.last_bed_temp
|
def last_bed_temp(self):
"""Return avg bed temperature for last session."""
try:
bedtemps = self.intervals[1]['timeseries']['tempBedC']
except KeyError:
return None
tmp = 0
num_temps = len(bedtemps)
if num_temps == 0:
return None
for temp in bedtemps:
tmp += temp[1]
bedtemp = tmp/num_temps
return bedtemp
|
python
|
def last_bed_temp(self):
"""Return avg bed temperature for last session."""
try:
bedtemps = self.intervals[1]['timeseries']['tempBedC']
except KeyError:
return None
tmp = 0
num_temps = len(bedtemps)
if num_temps == 0:
return None
for temp in bedtemps:
tmp += temp[1]
bedtemp = tmp/num_temps
return bedtemp
|
[
"def",
"last_bed_temp",
"(",
"self",
")",
":",
"try",
":",
"bedtemps",
"=",
"self",
".",
"intervals",
"[",
"1",
"]",
"[",
"'timeseries'",
"]",
"[",
"'tempBedC'",
"]",
"except",
"KeyError",
":",
"return",
"None",
"tmp",
"=",
"0",
"num_temps",
"=",
"len",
"(",
"bedtemps",
")",
"if",
"num_temps",
"==",
"0",
":",
"return",
"None",
"for",
"temp",
"in",
"bedtemps",
":",
"tmp",
"+=",
"temp",
"[",
"1",
"]",
"bedtemp",
"=",
"tmp",
"/",
"num_temps",
"return",
"bedtemp"
] |
Return avg bed temperature for last session.
|
[
"Return",
"avg",
"bed",
"temperature",
"for",
"last",
"session",
"."
] |
train
|
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L356-L371
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.